author     Daniel Vetter <daniel.vetter@ffwll.ch>    2016-11-17 14:32:57 +0100
committer  Daniel Vetter <daniel.vetter@ffwll.ch>    2016-11-17 14:32:57 +0100
commit     3975797f3e72bd115c6ba80210c5fe65ebd9e14e (patch)
tree       f2fa6fe843d9d0d458f5455b3852960439572913
parent     78424c927cf194e2eb689b7871780e9182bd8c13 (diff)
parent     b7c0e47d98249c2ddf21ea197b651093c6aaee00 (diff)
Merge remote-tracking branch 'airlied/drm-next' into drm-intel-next-queued
Tvrtko needs

    commit b3c11ac267d461d3d597967164ff7278a919a39f
    Author: Eric Engestrom <eric@engestrom.ch>
    Date:   Sat Nov 12 01:12:56 2016 +0000

        drm: move allocation out of drm_get_format_name()

to be able to apply his patches without conflicts.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
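Context for the dependency above: as I understand the referenced commit, drm_get_format_name()
previously returned a freshly allocated string that each caller had to kfree(), and the change
moves the allocation out by having callers pass a small stack buffer (struct drm_format_name_buf).
A minimal sketch of the caller-side pattern after that change; the helper and struct are taken
from the DRM fourcc header as introduced by that commit, while the wrapper function, framebuffer
field access, and debug message are purely illustrative and not part of this diff:

    #include <drm/drmP.h>          /* DRM_DEBUG_KMS() */
    #include <drm/drm_fourcc.h>    /* drm_get_format_name(), struct drm_format_name_buf */

    static void log_fb_format(const struct drm_framebuffer *fb)
    {
            /* Name buffer lives on the caller's stack; no kmalloc()/kfree() pair needed. */
            struct drm_format_name_buf format_name;

            DRM_DEBUG_KMS("framebuffer uses format %s\n",
                          drm_get_format_name(fb->pixel_format, &format_name));
    }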
-rw-r--r--CREDITS5
-rw-r--r--Documentation/device-mapper/dm-raid.txt1
-rw-r--r--Documentation/devicetree/bindings/clock/uniphier-clock.txt16
-rw-r--r--Documentation/devicetree/bindings/display/bridge/dw_hdmi.txt4
-rw-r--r--Documentation/devicetree/bindings/display/panel/display-timing.txt8
-rw-r--r--Documentation/devicetree/bindings/display/renesas,du.txt12
-rw-r--r--Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt10
-rw-r--r--Documentation/devicetree/bindings/display/zte,vou.txt84
-rw-r--r--Documentation/devicetree/bindings/net/marvell-orion-net.txt1
-rw-r--r--Documentation/devicetree/bindings/reset/uniphier-reset.txt62
-rw-r--r--Documentation/devicetree/bindings/serial/cdns,uart.txt4
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,sci-serial.txt8
-rw-r--r--Documentation/devicetree/bindings/usb/dwc2.txt5
-rw-r--r--Documentation/driver-api/infrastructure.rst8
-rw-r--r--Documentation/gpio/board.txt11
-rw-r--r--Documentation/gpu/drm-internals.rst20
-rw-r--r--Documentation/gpu/drm-kms-helpers.rst8
-rw-r--r--Documentation/gpu/drm-kms.rst80
-rw-r--r--Documentation/gpu/i915.rst6
-rw-r--r--Documentation/networking/netdev-FAQ.txt8
-rw-r--r--Documentation/networking/nf_conntrack-sysctl.txt18
-rw-r--r--MAINTAINERS77
-rw-r--r--Makefile2
-rw-r--r--arch/arc/Kconfig27
-rw-r--r--arch/arc/Makefile3
-rw-r--r--arch/arc/boot/Makefile16
-rw-r--r--arch/arc/include/asm/arcregs.h3
-rw-r--r--arch/arc/include/asm/cache.h2
-rw-r--r--arch/arc/include/asm/elf.h2
-rw-r--r--arch/arc/include/asm/mcip.h16
-rw-r--r--arch/arc/include/asm/module.h1
-rw-r--r--arch/arc/include/asm/setup.h6
-rw-r--r--arch/arc/include/asm/syscalls.h1
-rw-r--r--arch/arc/include/uapi/asm/unistd.h9
-rw-r--r--arch/arc/kernel/mcip.c31
-rw-r--r--arch/arc/kernel/module.c53
-rw-r--r--arch/arc/kernel/process.c33
-rw-r--r--arch/arc/kernel/setup.c113
-rw-r--r--arch/arc/kernel/troubleshoot.c110
-rw-r--r--arch/arc/mm/cache.c19
-rw-r--r--arch/arc/mm/dma.c4
-rw-r--r--arch/arc/mm/tlb.c6
-rw-r--r--arch/arc/mm/tlbex.S21
-rw-r--r--arch/arm/boot/dts/ste-snowball.dts15
-rw-r--r--arch/arm/boot/dts/uniphier-pro5.dtsi4
-rw-r--r--arch/arm/boot/dts/uniphier-pxs2.dtsi4
-rw-r--r--arch/arm/boot/dts/vf500.dtsi2
-rw-r--r--arch/arm/configs/multi_v7_defconfig1
-rw-r--r--arch/arm/include/asm/unistd.h2
-rw-r--r--arch/arm/include/uapi/asm/unistd.h3
-rw-r--r--arch/arm/kernel/calls.S3
-rw-r--r--arch/arm/mach-imx/gpc.c15
-rw-r--r--arch/arm/mach-imx/mach-imx6q.c2
-rw-r--r--arch/arm/mach-mvebu/Kconfig4
-rw-r--r--arch/arm/mach-uniphier/Kconfig1
-rw-r--r--arch/arm/mm/abort-lv4t.S34
-rw-r--r--arch/arm64/Kconfig.platforms1
-rw-r--r--arch/arm64/boot/dts/broadcom/ns2-svk.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi1
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts3
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts4
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi12
-rw-r--r--arch/arm64/include/asm/memory.h2
-rw-r--r--arch/arm64/mm/numa.c9
-rw-r--r--arch/cris/arch-v32/drivers/cryptocop.c2
-rw-r--r--arch/h8300/include/asm/thread_info.h4
-rw-r--r--arch/h8300/kernel/signal.c2
-rw-r--r--arch/mips/Makefile2
-rw-r--r--arch/mips/boot/dts/mti/malta.dts3
-rw-r--r--arch/mips/generic/init.c16
-rw-r--r--arch/mips/include/asm/fpu_emulator.h13
-rw-r--r--arch/mips/include/asm/switch_to.h18
-rw-r--r--arch/mips/kernel/mips-cpc.c11
-rw-r--r--arch/mips/kernel/mips-r2-to-r6-emul.c10
-rw-r--r--arch/mips/kernel/ptrace.c8
-rw-r--r--arch/mips/kernel/r2300_fpu.S138
-rw-r--r--arch/mips/kernel/r6000_fpu.S89
-rw-r--r--arch/mips/kernel/relocate.c2
-rw-r--r--arch/mips/kernel/setup.c13
-rw-r--r--arch/mips/kernel/traps.c137
-rw-r--r--arch/mips/lib/dump_tlb.c44
-rw-r--r--arch/mips/lib/r3k_dump_tlb.c18
-rw-r--r--arch/parisc/include/uapi/asm/unistd.h4
-rw-r--r--arch/parisc/kernel/drivers.c6
-rw-r--r--arch/parisc/kernel/syscall.S66
-rw-r--r--arch/powerpc/include/asm/checksum.h12
-rw-r--r--arch/powerpc/include/asm/cpuidle.h2
-rw-r--r--arch/powerpc/include/asm/exception-64s.h16
-rw-r--r--arch/powerpc/include/asm/tlb.h12
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S50
-rw-r--r--arch/powerpc/kernel/hw_breakpoint.c2
-rw-r--r--arch/powerpc/kernel/idle_book3s.S35
-rw-r--r--arch/powerpc/kernel/process.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xics.c1
-rw-r--r--arch/powerpc/mm/tlb-radix.c8
-rw-r--r--arch/s390/include/asm/ftrace.h4
-rw-r--r--arch/s390/include/asm/processor.h2
-rw-r--r--arch/s390/include/asm/unistd.h3
-rw-r--r--arch/s390/kernel/dis.c4
-rw-r--r--arch/s390/kernel/dumpstack.c63
-rw-r--r--arch/s390/kernel/perf_event.c2
-rw-r--r--arch/s390/kernel/stacktrace.c4
-rw-r--r--arch/s390/mm/hugetlbpage.c1
-rw-r--r--arch/s390/mm/init.c38
-rw-r--r--arch/s390/oprofile/init.c2
-rw-r--r--arch/sparc/include/asm/cpudata_64.h5
-rw-r--r--arch/sparc/include/asm/spinlock_32.h2
-rw-r--r--arch/sparc/include/asm/spinlock_64.h12
-rw-r--r--arch/sparc/include/asm/topology_64.h8
-rw-r--r--arch/sparc/include/asm/uaccess_64.h28
-rw-r--r--arch/sparc/kernel/head_64.S37
-rw-r--r--arch/sparc/kernel/jump_label.c23
-rw-r--r--arch/sparc/kernel/mdesc.c46
-rw-r--r--arch/sparc/kernel/smp_64.c8
-rw-r--r--arch/sparc/lib/GENcopy_from_user.S4
-rw-r--r--arch/sparc/lib/GENcopy_to_user.S4
-rw-r--r--arch/sparc/lib/GENmemcpy.S48
-rw-r--r--arch/sparc/lib/Makefile2
-rw-r--r--arch/sparc/lib/NG2copy_from_user.S8
-rw-r--r--arch/sparc/lib/NG2copy_to_user.S8
-rw-r--r--arch/sparc/lib/NG2memcpy.S228
-rw-r--r--arch/sparc/lib/NG4copy_from_user.S8
-rw-r--r--arch/sparc/lib/NG4copy_to_user.S8
-rw-r--r--arch/sparc/lib/NG4memcpy.S294
-rw-r--r--arch/sparc/lib/NGcopy_from_user.S4
-rw-r--r--arch/sparc/lib/NGcopy_to_user.S4
-rw-r--r--arch/sparc/lib/NGmemcpy.S233
-rw-r--r--arch/sparc/lib/U1copy_from_user.S8
-rw-r--r--arch/sparc/lib/U1copy_to_user.S8
-rw-r--r--arch/sparc/lib/U1memcpy.S345
-rw-r--r--arch/sparc/lib/U3copy_from_user.S8
-rw-r--r--arch/sparc/lib/U3copy_to_user.S8
-rw-r--r--arch/sparc/lib/U3memcpy.S227
-rw-r--r--arch/sparc/lib/copy_in_user.S35
-rw-r--r--arch/sparc/lib/user_fixup.c71
-rw-r--r--arch/sparc/mm/tsb.c17
-rw-r--r--arch/sparc/mm/ultra.S374
-rw-r--r--arch/x86/entry/Makefile4
-rw-r--r--arch/x86/events/intel/core.c10
-rw-r--r--arch/x86/events/intel/cstate.c30
-rw-r--r--arch/x86/include/asm/io.h6
-rw-r--r--arch/x86/kernel/acpi/boot.c1
-rw-r--r--arch/x86/kernel/cpu/microcode/amd.c2
-rw-r--r--arch/x86/kernel/mcount_64.S3
-rw-r--r--arch/x86/kernel/quirks.c3
-rw-r--r--arch/x86/kernel/setup.c7
-rw-r--r--arch/x86/kernel/unwind_guess.c9
-rw-r--r--arch/x86/mm/kaslr.c6
-rw-r--r--arch/x86/mm/pat.c14
-rw-r--r--arch/x86/xen/enlighten.c2
-rw-r--r--block/badblocks.c23
-rw-r--r--block/blk-flush.c28
-rw-r--r--block/blk-mq.c6
-rw-r--r--drivers/acpi/acpica/dsinit.c11
-rw-r--r--drivers/acpi/acpica/dsmethod.c50
-rw-r--r--drivers/acpi/acpica/dswload2.c2
-rw-r--r--drivers/acpi/acpica/evrgnini.c3
-rw-r--r--drivers/acpi/acpica/nsload.c2
-rw-r--r--drivers/acpi/apei/ghes.c2
-rw-r--r--drivers/acpi/pci_link.c38
-rw-r--r--drivers/android/binder.c35
-rw-r--r--drivers/ata/ahci.c41
-rw-r--r--drivers/base/Kconfig6
-rw-r--r--drivers/block/DAC960.c4
-rw-r--r--drivers/block/nbd.c2
-rw-r--r--drivers/block/virtio_blk.c10
-rw-r--r--drivers/bluetooth/btwilink.c2
-rw-r--r--drivers/bluetooth/hci_bcm.c8
-rw-r--r--drivers/bus/Kconfig1
-rw-r--r--drivers/char/hw_random/core.c6
-rw-r--r--drivers/char/tpm/tpm-interface.c3
-rw-r--r--drivers/char/virtio_console.c22
-rw-r--r--drivers/clk/at91/clk-programmable.c2
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c11
-rw-r--r--drivers/clk/clk-max77686.c1
-rw-r--r--drivers/clk/hisilicon/clk-hi6220.c4
-rw-r--r--drivers/clk/mediatek/Kconfig2
-rw-r--r--drivers/clk/mvebu/armada-37xx-periph.c11
-rw-r--r--drivers/clk/samsung/clk-exynos-audss.c1
-rw-r--r--drivers/clk/uniphier/clk-uniphier-core.c20
-rw-r--r--drivers/clk/uniphier/clk-uniphier-mio.c2
-rw-r--r--drivers/clk/uniphier/clk-uniphier-mux.c2
-rw-r--r--drivers/clk/uniphier/clk-uniphier.h2
-rw-r--r--drivers/cpufreq/intel_pstate.c38
-rw-r--r--drivers/dax/Kconfig2
-rw-r--r--drivers/dax/pmem.c2
-rw-r--r--drivers/dma-buf/dma-fence.c32
-rw-r--r--drivers/dma-buf/reservation.c5
-rw-r--r--drivers/dma-buf/sw_sync.c4
-rw-r--r--drivers/extcon/extcon-qcom-spmi-misc.c2
-rw-r--r--drivers/firewire/net.c59
-rw-r--r--drivers/gpio/Kconfig2
-rw-r--r--drivers/gpio/gpio-ath79.c1
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c2
-rw-r--r--drivers/gpio/gpio-mvebu.c92
-rw-r--r--drivers/gpio/gpio-mxs.c8
-rw-r--r--drivers/gpio/gpio-stmpe.c2
-rw-r--r--drivers/gpio/gpio-ts4800.c1
-rw-r--r--drivers/gpio/gpiolib-acpi.c7
-rw-r--r--drivers/gpio/gpiolib-of.c14
-rw-r--r--drivers/gpio/gpiolib.c99
-rw-r--r--drivers/gpu/drm/Kconfig17
-rw-r--r--drivers/gpu/drm/Makefile4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c177
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c61
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c9
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c17
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c1
-rw-r--r--drivers/gpu/drm/amd/scheduler/sched_fence.c4
-rw-r--r--drivers/gpu/drm/arc/arcpgu_drv.c2
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c21
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c39
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.h3
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c5
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.h9
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c96
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c8
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c2
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c2
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c6
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c2
-rw-r--r--drivers/gpu/drm/bochs/bochs_drv.c2
-rw-r--r--drivers/gpu/drm/bochs/bochs_fbdev.c6
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c2
-rw-r--r--drivers/gpu/drm/bridge/dw-hdmi.c279
-rw-r--r--drivers/gpu/drm/bridge/dw-hdmi.h19
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c7
-rw-r--r--drivers/gpu/drm/drm_atomic.c458
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c9
-rw-r--r--drivers/gpu/drm/drm_color_mgmt.c12
-rw-r--r--drivers/gpu/drm/drm_connector.c104
-rw-r--r--drivers/gpu/drm/drm_crtc.c723
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h36
-rw-r--r--drivers/gpu/drm/drm_debugfs.c9
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c6
-rw-r--r--drivers/gpu/drm/drm_drv.c24
-rw-r--r--drivers/gpu/drm/drm_dumb_buffers.c128
-rw-r--r--drivers/gpu/drm/drm_edid.c4
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c41
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c74
-rw-r--r--drivers/gpu/drm/drm_fops.c13
-rw-r--r--drivers/gpu/drm/drm_fourcc.c14
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c12
-rw-r--r--drivers/gpu/drm/drm_internal.h12
-rw-r--r--drivers/gpu/drm/drm_irq.c160
-rw-r--r--drivers/gpu/drm/drm_mm.c76
-rw-r--r--drivers/gpu/drm/drm_mode_config.c494
-rw-r--r--drivers/gpu/drm/drm_modes.c51
-rw-r--r--drivers/gpu/drm/drm_modeset_helper.c7
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c13
-rw-r--r--drivers/gpu/drm/drm_plane.c8
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c11
-rw-r--r--drivers/gpu/drm/drm_print.c59
-rw-r--r--drivers/gpu/drm/drm_rect.c11
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c22
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c6
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c2
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c12
-rw-r--r--drivers/gpu/drm/gma500/gtt.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h4
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c7
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c2
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c8
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c2
-rw-r--r--drivers/gpu/drm/i810/i810_drv.c2
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug1
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c10
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h2
-rw-r--r--drivers/gpu/drm/i915/intel_atomic_plane.c8
-rw-r--r--drivers/gpu/drm/i915/intel_display.c51
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c5
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c11
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c36
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c12
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c187
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c7
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c10
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c11
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h12
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c18
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c3
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c6
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl5070.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/object.h29
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c650
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h98
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_crtc.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c288
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c97
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.h1
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c4178
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.h7
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c10
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c20
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c22
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvif/client.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvif/notify.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c54
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c1
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c47
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-dvi.c50
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-hdmi.c49
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-opa362.c20
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c31
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c20
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dpi.c30
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c25
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c59
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c52
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c58
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c53
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c57
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c54
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c228
-rw-r--r--drivers/gpu/drm/omapdrm/dss/display.c78
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c40
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c160
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.h5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi.h8
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c31
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.c8
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c31
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.c85
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_wp.c73
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h98
-rw-r--r--drivers/gpu/drm/omapdrm/dss/output.c5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/rfbi.c49
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c33
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c97
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c87
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c30
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h7
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c10
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c5
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c6
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c28
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c8
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c14
-rw-r--r--drivers/gpu/drm/radeon/ni.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_auxch.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c5
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c53
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c85
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.c22
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c29
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c19
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c6
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c2
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c2
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c2
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c24
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c23
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c7
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_layer.c6
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c45
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h11
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_drc.c4
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c2
-rw-r--r--drivers/gpu/drm/tegra/drm.c2
-rw-r--r--drivers/gpu/drm/tegra/fb.c6
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c9
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c2
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c8
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c13
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h2
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c15
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c24
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate_shaders.c82
-rw-r--r--drivers/gpu/drm/via/via_drv.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fb.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c3
-rw-r--r--drivers/gpu/drm/zte/Kconfig8
-rw-r--r--drivers/gpu/drm/zte/Makefile7
-rw-r--r--drivers/gpu/drm/zte/zx_drm_drv.c267
-rw-r--r--drivers/gpu/drm/zte/zx_drm_drv.h36
-rw-r--r--drivers/gpu/drm/zte/zx_hdmi.c624
-rw-r--r--drivers/gpu/drm/zte/zx_hdmi_regs.h56
-rw-r--r--drivers/gpu/drm/zte/zx_plane.c299
-rw-r--r--drivers/gpu/drm/zte/zx_plane.h26
-rw-r--r--drivers/gpu/drm/zte/zx_plane_regs.h91
-rw-r--r--drivers/gpu/drm/zte/zx_vou.c661
-rw-r--r--drivers/gpu/drm/zte/zx_vou.h46
-rw-r--r--drivers/gpu/drm/zte/zx_vou_regs.h157
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c7
-rw-r--r--drivers/gpu/ipu-v3/ipu-cpmem.c43
-rw-r--r--drivers/gpu/ipu-v3/ipu-csi.c16
-rw-r--r--drivers/gpu/ipu-v3/ipu-di.c2
-rw-r--r--drivers/gpu/ipu-v3/ipu-image-convert.c2
-rw-r--r--drivers/hv/hv_util.c10
-rw-r--r--drivers/i2c/busses/Kconfig12
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c17
-rw-r--r--drivers/i2c/busses/i2c-digicolor.c1
-rw-r--r--drivers/i2c/busses/i2c-i801.c16
-rw-r--r--drivers/i2c/busses/i2c-imx.c11
-rw-r--r--drivers/i2c/busses/i2c-jz4780.c1
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c2
-rw-r--r--drivers/i2c/busses/i2c-xgene-slimpro.c2
-rw-r--r--drivers/i2c/busses/i2c-xlp9xx.c1
-rw-r--r--drivers/i2c/busses/i2c-xlr.c1
-rw-r--r--drivers/i2c/i2c-core.c13
-rw-r--r--drivers/iio/adc/Kconfig2
-rw-r--r--drivers/iio/chemical/atlas-ph-sensor.c7
-rw-r--r--drivers/iio/temperature/maxim_thermocouple.c16
-rw-r--r--drivers/infiniband/hw/mlx5/main.c2
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c1
-rw-r--r--drivers/infiniband/hw/qedr/Kconfig1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h20
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c15
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c12
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c54
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c6
-rw-r--r--drivers/input/mouse/focaltech.c6
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h7
-rw-r--r--drivers/ipack/ipack.c2
-rw-r--r--drivers/md/dm-raid.c15
-rw-r--r--drivers/md/dm-raid1.c22
-rw-r--r--drivers/md/dm-rq.c7
-rw-r--r--drivers/md/dm-table.c24
-rw-r--r--drivers/md/dm.c4
-rw-r--r--drivers/md/md.c10
-rw-r--r--drivers/md/raid1.c26
-rw-r--r--drivers/md/raid10.c7
-rw-r--r--drivers/md/raid5-cache.c6
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c105
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.h4
-rw-r--r--drivers/media/usb/cpia2/cpia2_usb.c34
-rw-r--r--drivers/media/usb/dvb-usb/af9005.c319
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2-core.c90
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2-fe.c100
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c62
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.h6
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_core.c31
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c25
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-common.c113
-rw-r--r--drivers/media/usb/dvb-usb/dibusb.h3
-rw-r--r--drivers/media/usb/dvb-usb/digitv.c26
-rw-r--r--drivers/media/usb/dvb-usb/digitv.h5
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u-fe.c128
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u.c120
-rw-r--r--drivers/media/usb/dvb-usb/dtv5100.c10
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c2
-rw-r--r--drivers/media/usb/dvb-usb/gp8psk.c25
-rw-r--r--drivers/media/usb/dvb-usb/nova-t-usb2.c25
-rw-r--r--drivers/media/usb/dvb-usb/pctv452e.c136
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c16
-rw-r--r--drivers/media/usb/s2255/s2255drv.c15
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c16
-rw-r--r--drivers/misc/cxl/api.c2
-rw-r--r--drivers/misc/cxl/file.c22
-rw-r--r--drivers/misc/genwqe/card_utils.c12
-rw-r--r--drivers/misc/mei/hw-txe.c6
-rw-r--r--drivers/misc/sgi-gru/grumain.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_doorbell.c8
-rw-r--r--drivers/misc/vmw_vmci/vmci_driver.c2
-rw-r--r--drivers/mmc/host/dw_mmc-pltfm.c5
-rw-r--r--drivers/mmc/host/sdhci-msm.c1
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c6
-rw-r--r--drivers/mtd/nand/mtk_ecc.c19
-rw-r--r--drivers/mtd/nand/nand_base.c60
-rw-r--r--drivers/mtd/ubi/fastmap.c10
-rw-r--r--drivers/net/dsa/b53/b53_mmap.c1
-rw-r--r--drivers/net/dsa/bcm_sf2.c16
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c1
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c3
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c17
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c40
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c4
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.c32
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c1
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c1
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c45
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c34
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c12
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c1
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig3
-rw-r--r--drivers/net/ethernet/qlogic/qed/Makefile2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c53
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c19
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c27
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.h20
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c31
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c216
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.h95
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c4
-rw-r--r--drivers/net/ethernet/qlogic/qede/Makefile2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h3
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c99
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c15
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c8
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c1
-rw-r--r--drivers/net/ethernet/realtek/r8169.c3
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c2
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c13
-rw-r--r--drivers/net/ethernet/synopsys/dwc_eth_qos.c4
-rw-r--r--drivers/net/geneve.c47
-rw-r--r--drivers/net/hyperv/netvsc_drv.c25
-rw-r--r--drivers/net/macsec.c26
-rw-r--r--drivers/net/phy/at803x.c65
-rw-r--r--drivers/net/phy/dp83848.c3
-rw-r--r--drivers/net/usb/asix_common.c8
-rw-r--r--drivers/net/usb/kalmia.c2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c17
-rw-r--r--drivers/net/vrf.c2
-rw-r--r--drivers/net/vxlan.c82
-rw-r--r--drivers/net/wan/Kconfig2
-rw-r--r--drivers/net/wan/slic_ds26522.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c75
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h4
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c8
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c4
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c18
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c1
-rw-r--r--drivers/nvdimm/Kconfig2
-rw-r--r--drivers/nvdimm/namespace_devs.c14
-rw-r--r--drivers/nvdimm/pmem.c8
-rw-r--r--drivers/pci/host/pcie-designware.c7
-rw-r--r--drivers/pci/host/pcie-qcom.c2
-rw-r--r--drivers/pci/msi.c2
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/reset/reset-uniphier.c16
-rw-r--r--drivers/s390/block/dasd_eckd.c4
-rw-r--r--drivers/s390/cio/chp.c6
-rw-r--r--drivers/scsi/NCR5380.c6
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c9
-rw-r--r--drivers/scsi/be2iscsi/be_main.c37
-rw-r--r--drivers/scsi/libiscsi.c4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c13
-rw-r--r--drivers/scsi/scsi_debug.c1
-rw-r--r--drivers/spi/spi-fsl-dspi.c7
-rw-r--r--drivers/spi/spi-fsl-espi.c2
-rw-r--r--drivers/spi/spi.c5
-rw-r--r--drivers/staging/android/ion/ion.c6
-rw-r--r--drivers/staging/android/ion/ion_of.c2
-rw-r--r--drivers/staging/greybus/arche-platform.c1
-rw-r--r--drivers/staging/greybus/es2.c3
-rw-r--r--drivers/staging/greybus/gpio.c6
-rw-r--r--drivers/staging/greybus/module.c2
-rw-r--r--drivers/staging/greybus/uart.c2
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c34
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c2
-rw-r--r--drivers/staging/wilc1000/host_interface.c1
-rw-r--r--drivers/thermal/intel_pch_thermal.c60
-rw-r--r--drivers/thermal/intel_powerclamp.c14
-rw-r--r--drivers/tty/serial/8250/8250_lpss.c2
-rw-r--r--drivers/tty/serial/8250/8250_port.c3
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c4
-rw-r--r--drivers/tty/serial/Kconfig1
-rw-r--r--drivers/tty/serial/atmel_serial.c26
-rw-r--r--drivers/tty/serial/fsl_lpuart.c3
-rw-r--r--drivers/tty/serial/pch_uart.c1
-rw-r--r--drivers/tty/serial/sc16is7xx.c8
-rw-r--r--drivers/tty/serial/serial_core.c8
-rw-r--r--drivers/tty/serial/stm32-usart.h2
-rw-r--r--drivers/tty/serial/xilinx_uartps.c2
-rw-r--r--drivers/tty/vt/vt.c7
-rw-r--r--drivers/usb/chipidea/host.c2
-rw-r--r--drivers/usb/dwc2/core.c11
-rw-r--r--drivers/usb/dwc2/core.h7
-rw-r--r--drivers/usb/dwc2/gadget.c53
-rw-r--r--drivers/usb/dwc3/gadget.c26
-rw-r--r--drivers/usb/gadget/function/f_fs.c107
-rw-r--r--drivers/usb/gadget/function/u_ether.c5
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c2
-rw-r--r--drivers/usb/host/ehci-platform.c2
-rw-r--r--drivers/usb/host/ohci-at91.c9
-rw-r--r--drivers/usb/host/ohci-hcd.c2
-rw-r--r--drivers/usb/host/xhci-hub.c41
-rw-r--r--drivers/usb/host/xhci-pci.c10
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/musb/musb_gadget.c4
-rw-r--r--drivers/usb/musb/omap2430.c7
-rw-r--r--drivers/usb/renesas_usbhs/rcar3.c8
-rw-r--r--drivers/usb/serial/cp210x.c4
-rw-r--r--drivers/usb/serial/ftdi_sio.c3
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h5
-rw-r--r--drivers/usb/serial/usb-serial.c3
-rw-r--r--drivers/usb/wusbcore/crypto.c61
-rw-r--r--drivers/vfio/pci/vfio_pci.c33
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c2
-rw-r--r--drivers/video/of_display_timing.c15
-rw-r--r--drivers/virtio/config.c12
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--drivers/virtio/virtio_pci_legacy.c16
-rw-r--r--drivers/virtio/virtio_ring.c16
-rw-r--r--drivers/vme/vme.c4
-rw-r--r--drivers/xen/manage.c45
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c4
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c4
-rw-r--r--fs/afs/cmservice.c6
-rw-r--r--fs/afs/fsclient.c4
-rw-r--r--fs/afs/internal.h2
-rw-r--r--fs/afs/rxrpc.c3
-rw-r--r--fs/btrfs/extent-tree.c3
-rw-r--r--fs/btrfs/extent_io.c8
-rw-r--r--fs/btrfs/inode.c13
-rw-r--r--fs/btrfs/ioctl.c5
-rw-r--r--fs/btrfs/relocation.c9
-rw-r--r--fs/btrfs/send.c58
-rw-r--r--fs/btrfs/tree-log.c20
-rw-r--r--fs/exofs/dir.c2
-rw-r--r--fs/iomap.c5
-rw-r--r--fs/kernfs/file.c1
-rw-r--r--fs/nfsd/netns.h5
-rw-r--r--fs/nfsd/nfs4state.c38
-rw-r--r--fs/orangefs/dcache.c5
-rw-r--r--fs/orangefs/file.c14
-rw-r--r--fs/orangefs/namei.c8
-rw-r--r--fs/orangefs/orangefs-kernel.h7
-rw-r--r--fs/overlayfs/copy_up.c2
-rw-r--r--fs/overlayfs/inode.c3
-rw-r--r--fs/overlayfs/super.c15
-rw-r--r--fs/proc/base.c21
-rw-r--r--fs/ubifs/dir.c8
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c418
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h8
-rw-r--r--fs/xfs/libxfs/xfs_btree.c2
-rw-r--r--fs/xfs/libxfs/xfs_dquot_buf.c3
-rw-r--r--fs/xfs/libxfs/xfs_format.h1
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c13
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.h2
-rw-r--r--fs/xfs/xfs_file.c232
-rw-r--r--fs/xfs/xfs_icache.c8
-rw-r--r--fs/xfs/xfs_iomap.c57
-rw-r--r--fs/xfs/xfs_mount.c1
-rw-r--r--fs/xfs/xfs_reflink.c499
-rw-r--r--fs/xfs/xfs_reflink.h11
-rw-r--r--fs/xfs/xfs_sysfs.c4
-rw-r--r--fs/xfs/xfs_trace.h4
-rw-r--r--include/asm-generic/export.h2
-rw-r--r--include/drm/drmP.h330
-rw-r--r--include/drm/drm_atomic.h10
-rw-r--r--include/drm/drm_connector.h37
-rw-r--r--include/drm/drm_crtc.h687
-rw-r--r--include/drm/drm_drv.h430
-rw-r--r--include/drm/drm_fb_cma_helper.h5
-rw-r--r--include/drm/drm_fb_helper.h4
-rw-r--r--include/drm/drm_fourcc.h10
-rw-r--r--include/drm/drm_irq.h63
-rw-r--r--include/drm/drm_mm.h6
-rw-r--r--include/drm/drm_mode_config.h663
-rw-r--r--include/drm/drm_modeset_helper_vtables.h16
-rw-r--r--include/drm/drm_modeset_lock.h12
-rw-r--r--include/drm/drm_plane.h101
-rw-r--r--include/drm/drm_print.h112
-rw-r--r--include/linux/acpi.h1
-rw-r--r--include/linux/clk-provider.h2
-rw-r--r--include/linux/dma-fence.h5
-rw-r--r--include/linux/hyperv.h7
-rw-r--r--include/linux/io.h22
-rw-r--r--include/linux/iomap.h17
-rw-r--r--include/linux/ipv6.h17
-rw-r--r--include/linux/kconfig.h5
-rw-r--r--include/linux/mlx4/device.h3
-rw-r--r--include/linux/mlx5/driver.h16
-rw-r--r--include/linux/mm.h4
-rw-r--r--include/linux/mmzone.h30
-rw-r--r--include/linux/mtd/nand.h2
-rw-r--r--include/linux/netdevice.h41
-rw-r--r--include/linux/perf_event.h1
-rw-r--r--include/linux/qed/qed_if.h1
-rw-r--r--include/linux/qed/qede_roce.h2
-rw-r--r--include/linux/regmap.h11
-rw-r--r--include/linux/reservation.h15
-rw-r--r--include/linux/skbuff.h1
-rw-r--r--include/net/addrconf.h1
-rw-r--r--include/net/cfg80211.h32
-rw-r--r--include/net/if_inet6.h2
-rw-r--r--include/net/ip.h12
-rw-r--r--include/net/ip6_fib.h2
-rw-r--r--include/net/ip6_route.h1
-rw-r--r--include/net/mac80211.h21
-rw-r--r--include/net/sock.h4
-rw-r--r--include/net/tcp.h13
-rw-r--r--include/net/udp.h1
-rw-r--r--include/net/vxlan.h4
-rw-r--r--include/uapi/drm/amdgpu_drm.h28
-rw-r--r--include/uapi/drm/drm_mode.h16
-rw-r--r--include/uapi/drm/vc4_drm.h2
-rw-r--r--include/uapi/linux/ethtool.h3
-rw-r--r--include/uapi/linux/rtnetlink.h2
-rw-r--r--include/video/display_timing.h4
-rw-r--r--include/video/imx-ipu-v3.h3
-rw-r--r--include/video/of_display_timing.h15
-rw-r--r--ipc/msgutil.c4
-rw-r--r--kernel/events/core.c23
-rw-r--r--kernel/fork.c4
-rw-r--r--kernel/kcov.c9
-rw-r--r--kernel/power/suspend.c4
-rw-r--r--kernel/sched/core.c28
-rw-r--r--kernel/sched/fair.c3
-rw-r--r--kernel/sched/wait.c10
-rw-r--r--kernel/softirq.c2
-rw-r--r--kernel/time/timer.c74
-rw-r--r--lib/Kconfig.debug1
-rw-r--r--lib/genalloc.c3
-rw-r--r--lib/stackdepot.c2
-rw-r--r--lib/test_bpf.c2
-rw-r--r--mm/Kconfig2
-rw-r--r--mm/filemap.c4
-rw-r--r--mm/gup.c3
-rw-r--r--mm/kmemleak.c7
-rw-r--r--mm/list_lru.c2
-rw-r--r--mm/memcontrol.c9
-rw-r--r--mm/memory_hotplug.c29
-rw-r--r--mm/nommu.c2
-rw-r--r--mm/page_alloc.c133
-rw-r--r--mm/slab.c45
-rw-r--r--mm/slab.h1
-rw-r--r--mm/vmscan.c2
-rw-r--r--net/8021q/vlan.c2
-rw-r--r--net/batman-adv/hard-interface.c1
-rw-r--r--net/batman-adv/log.h2
-rw-r--r--net/batman-adv/originator.c2
-rw-r--r--net/bluetooth/hci_request.c49
-rw-r--r--net/bluetooth/hci_request.h2
-rw-r--r--net/bluetooth/mgmt.c26
-rw-r--r--net/bridge/br_multicast.c23
-rw-r--r--net/core/dev.c12
-rw-r--r--net/core/flow_dissector.c12
-rw-r--r--net/core/net_namespace.c35
-rw-r--r--net/core/pktgen.c17
-rw-r--r--net/core/sock_reuseport.c1
-rw-r--r--net/ethernet/eth.c2
-rw-r--r--net/hsr/hsr_forward.c4
-rw-r--r--net/ipv4/af_inet.c2
-rw-r--r--net/ipv4/fou.c4
-rw-r--r--net/ipv4/gre_offload.c2
-rw-r--r--net/ipv4/inet_hashtables.c8
-rw-r--r--net/ipv4/ip_output.c3
-rw-r--r--net/ipv4/ip_sockglue.c11
-rw-r--r--net/ipv4/ping.c2
-rw-r--r--net/ipv4/raw.c2
-rw-r--r--net/ipv4/sysctl_net_ipv4.c8
-rw-r--r--net/ipv4/tcp_ipv4.c3
-rw-r--r--net/ipv4/udp.c15
-rw-r--r--net/ipv4/udp_offload.c2
-rw-r--r--net/ipv6/addrconf.c101
-rw-r--r--net/ipv6/inet6_hashtables.c13
-rw-r--r--net/ipv6/ip6_offload.c2
-rw-r--r--net/ipv6/ip6_tunnel.c3
-rw-r--r--net/ipv6/ipv6_sockglue.c3
-rw-r--r--net/ipv6/mcast.c17
-rw-r--r--net/ipv6/ping.c2
-rw-r--r--net/ipv6/raw.c2
-rw-r--r--net/ipv6/reassembly.c3
-rw-r--r--net/ipv6/route.c74
-rw-r--r--net/ipv6/udp.c3
-rw-r--r--net/l2tp/l2tp_ip.c2
-rw-r--r--net/l2tp/l2tp_ip6.c2
-rw-r--r--net/mac80211/aes_ccm.c46
-rw-r--r--net/mac80211/aes_ccm.h8
-rw-r--r--net/mac80211/aes_gcm.c43
-rw-r--r--net/mac80211/aes_gcm.h6
-rw-r--r--net/mac80211/aes_gmac.c26
-rw-r--r--net/mac80211/aes_gmac.h4
-rw-r--r--net/mac80211/offchannel.c2
-rw-r--r--net/mac80211/rx.c51
-rw-r--r--net/mac80211/wpa.c22
-rw-r--r--net/ncsi/internal.h2
-rw-r--r--net/ncsi/ncsi-aen.c18
-rw-r--r--net/ncsi/ncsi-manage.c126
-rw-r--r--net/netfilter/core.c13
-rw-r--r--net/netfilter/nf_conntrack_core.c2
-rw-r--r--net/netfilter/nf_internals.h2
-rw-r--r--net/netfilter/nf_queue.c48
-rw-r--r--net/netfilter/nf_tables_api.c2
-rw-r--r--net/netfilter/nft_dynset.c6
-rw-r--r--net/netfilter/nft_exthdr.c3
-rw-r--r--net/netfilter/nft_hash.c1
-rw-r--r--net/netfilter/nft_range.c26
-rw-r--r--net/netfilter/x_tables.c2
-rw-r--r--net/netfilter/xt_NFLOG.c1
-rw-r--r--net/netfilter/xt_hashlimit.c4
-rw-r--r--net/netfilter/xt_ipcomp.c2
-rw-r--r--net/packet/af_packet.c9
-rw-r--r--net/rds/Makefile2
-rw-r--r--net/rds/rds.h2
-rw-r--r--net/rxrpc/call_object.c2
-rw-r--r--net/rxrpc/peer_object.c4
-rw-r--r--net/sched/act_api.c3
-rw-r--r--net/sched/act_mirred.c5
-rw-r--r--net/sched/cls_api.c3
-rw-r--r--net/sctp/output.c8
-rw-r--r--net/sctp/sm_statefuns.c12
-rw-r--r--net/sctp/socket.c5
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c13
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c82
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c21
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_backchannel.c12
-rw-r--r--net/sunrpc/xprtsock.c1
-rw-r--r--net/switchdev/switchdev.c9
-rw-r--r--net/tipc/bcast.c14
-rw-r--r--net/tipc/bcast.h3
-rw-r--r--net/tipc/link.c2
-rw-r--r--net/tipc/msg.h17
-rw-r--r--net/tipc/name_distr.c1
-rw-r--r--net/tipc/node.c2
-rw-r--r--net/wireless/sysfs.c5
-rw-r--r--net/wireless/util.c34
-rw-r--r--samples/bpf/parse_ldabs.c1
-rw-r--r--samples/bpf/parse_simple.c1
-rw-r--r--samples/bpf/parse_varlen.c1
-rw-r--r--samples/bpf/tcbpf1_kern.c1
-rw-r--r--samples/bpf/tcbpf2_kern.c1
-rw-r--r--samples/bpf/test_cgrp2_tc_kern.c1
-rw-r--r--scripts/gcc-plugins/cyc_complexity_plugin.c4
-rw-r--r--scripts/gcc-plugins/gcc-common.h1
-rw-r--r--scripts/gcc-plugins/latent_entropy_plugin.c25
-rw-r--r--scripts/gcc-plugins/sancov_plugin.c4
-rw-r--r--security/keys/Kconfig2
-rw-r--r--security/keys/big_key.c59
-rw-r--r--security/keys/proc.c2
-rw-r--r--sound/core/seq/seq_timer.c4
-rw-r--r--sound/pci/asihpi/hpioctl.c2
-rw-r--r--sound/pci/hda/hda_intel.c7
-rw-r--r--sound/pci/hda/patch_realtek.c30
-rw-r--r--sound/usb/quirks-table.h17
-rw-r--r--tools/objtool/builtin-check.c2
-rw-r--r--tools/virtio/ringtest/Makefile4
-rw-r--r--tools/virtio/ringtest/main.c20
-rw-r--r--tools/virtio/ringtest/main.h4
-rw-r--r--tools/virtio/ringtest/noring.c6
-rw-r--r--tools/virtio/ringtest/ptr_ring.c22
-rw-r--r--tools/virtio/ringtest/ring.c18
-rw-r--r--tools/virtio/ringtest/virtio_ring_0_9.c64
-rw-r--r--virt/kvm/kvm_main.c10
1017 files changed, 19706 insertions, 10931 deletions
diff --git a/CREDITS b/CREDITS
index 513aaa3546bf..837367624e45 100644
--- a/CREDITS
+++ b/CREDITS
@@ -1864,10 +1864,11 @@ S: The Netherlands
N: Martin Kepplinger
E: martink@posteo.de
-E: martin.kepplinger@theobroma-systems.com
+E: martin.kepplinger@ginzinger.com
W: http://www.martinkepplinger.com
D: mma8452 accelerators iio driver
-D: Kernel cleanups
+D: pegasus_notetaker input driver
+D: Kernel fixes and cleanups
S: Garnisonstraße 26
S: 4020 Linz
S: Austria
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
index e5b6497116f4..c75b64a85859 100644
--- a/Documentation/device-mapper/dm-raid.txt
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -309,3 +309,4 @@ Version History
with a reshape in progress.
1.9.0 Add support for RAID level takeover/reshape/region size
and set size reduction.
+1.9.1 Fix activation of existing RAID 4/10 mapped devices
diff --git a/Documentation/devicetree/bindings/clock/uniphier-clock.txt b/Documentation/devicetree/bindings/clock/uniphier-clock.txt
index c7179d3b5c33..812163060fa3 100644
--- a/Documentation/devicetree/bindings/clock/uniphier-clock.txt
+++ b/Documentation/devicetree/bindings/clock/uniphier-clock.txt
@@ -24,7 +24,7 @@ Example:
reg = <0x61840000 0x4000>;
clock {
- compatible = "socionext,uniphier-ld20-clock";
+ compatible = "socionext,uniphier-ld11-clock";
#clock-cells = <1>;
};
@@ -43,8 +43,8 @@ Provided clocks:
21: USB3 ch1 PHY1
-Media I/O (MIO) clock
----------------------
+Media I/O (MIO) clock, SD clock
+-------------------------------
Required properties:
- compatible: should be one of the following:
@@ -52,10 +52,10 @@ Required properties:
"socionext,uniphier-ld4-mio-clock" - for LD4 SoC.
"socionext,uniphier-pro4-mio-clock" - for Pro4 SoC.
"socionext,uniphier-sld8-mio-clock" - for sLD8 SoC.
- "socionext,uniphier-pro5-mio-clock" - for Pro5 SoC.
- "socionext,uniphier-pxs2-mio-clock" - for PXs2/LD6b SoC.
+ "socionext,uniphier-pro5-sd-clock" - for Pro5 SoC.
+ "socionext,uniphier-pxs2-sd-clock" - for PXs2/LD6b SoC.
"socionext,uniphier-ld11-mio-clock" - for LD11 SoC.
- "socionext,uniphier-ld20-mio-clock" - for LD20 SoC.
+ "socionext,uniphier-ld20-sd-clock" - for LD20 SoC.
- #clock-cells: should be 1.
Example:
@@ -66,7 +66,7 @@ Example:
reg = <0x59810000 0x800>;
clock {
- compatible = "socionext,uniphier-ld20-mio-clock";
+ compatible = "socionext,uniphier-ld11-mio-clock";
#clock-cells = <1>;
};
@@ -112,7 +112,7 @@ Example:
reg = <0x59820000 0x200>;
clock {
- compatible = "socionext,uniphier-ld20-peri-clock";
+ compatible = "socionext,uniphier-ld11-peri-clock";
#clock-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/display/bridge/dw_hdmi.txt b/Documentation/devicetree/bindings/display/bridge/dw_hdmi.txt
index dc1452f0d5d8..5e9a84d6e5f1 100644
--- a/Documentation/devicetree/bindings/display/bridge/dw_hdmi.txt
+++ b/Documentation/devicetree/bindings/display/bridge/dw_hdmi.txt
@@ -19,7 +19,9 @@ Required properties:
Optional properties
- reg-io-width: the width of the reg:1,4, default set to 1 if not present
-- ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
+- ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing,
+ if the property is omitted, a functionally reduced I2C bus
+ controller on DW HDMI is probed
- clocks, clock-names: phandle to the HDMI CEC clock, name should be "cec"
Example:
diff --git a/Documentation/devicetree/bindings/display/panel/display-timing.txt b/Documentation/devicetree/bindings/display/panel/display-timing.txt
index e1d4a0b59612..81a75893d1b8 100644
--- a/Documentation/devicetree/bindings/display/panel/display-timing.txt
+++ b/Documentation/devicetree/bindings/display/panel/display-timing.txt
@@ -32,6 +32,14 @@ optional properties:
- active low = drive pixel data on falling edge/
sample data on rising edge
- ignored = ignored
+ - syncclk-active: with
+ - active high = drive sync on rising edge/
+ sample sync on falling edge of pixel
+ clock
+ - active low = drive sync on falling edge/
+ sample sync on rising edge of pixel
+ clock
+ - omitted = same configuration as pixelclk-active
- interlaced (bool): boolean to enable interlaced mode
- doublescan (bool): boolean to enable doublescan mode
- doubleclk (bool): boolean to enable doubleclock mode
diff --git a/Documentation/devicetree/bindings/display/renesas,du.txt b/Documentation/devicetree/bindings/display/renesas,du.txt
index 0d30e42e40be..1a02f099a0ff 100644
--- a/Documentation/devicetree/bindings/display/renesas,du.txt
+++ b/Documentation/devicetree/bindings/display/renesas,du.txt
@@ -6,9 +6,11 @@ Required Properties:
- "renesas,du-r8a7779" for R8A7779 (R-Car H1) compatible DU
- "renesas,du-r8a7790" for R8A7790 (R-Car H2) compatible DU
- "renesas,du-r8a7791" for R8A7791 (R-Car M2-W) compatible DU
+ - "renesas,du-r8a7792" for R8A7792 (R-Car V2H) compatible DU
- "renesas,du-r8a7793" for R8A7793 (R-Car M2-N) compatible DU
- "renesas,du-r8a7794" for R8A7794 (R-Car E2) compatible DU
- "renesas,du-r8a7795" for R8A7795 (R-Car H3) compatible DU
+ - "renesas,du-r8a7796" for R8A7796 (R-Car M3-W) compatible DU
- reg: A list of base address and length of each memory resource, one for
each entry in the reg-names property.
@@ -25,10 +27,10 @@ Required Properties:
- clock-names: Name of the clocks. This property is model-dependent.
- R8A7779 uses a single functional clock. The clock doesn't need to be
named.
- - R8A779[01345] use one functional clock per channel and one clock per LVDS
- encoder (if available). The functional clocks must be named "du.x" with
- "x" being the channel numerical index. The LVDS clocks must be named
- "lvds.x" with "x" being the LVDS encoder numerical index.
+ - R8A779[0123456] use one functional clock per channel and one clock per
+ LVDS encoder (if available). The functional clocks must be named "du.x"
+ with "x" being the channel numerical index. The LVDS clocks must be
+ named "lvds.x" with "x" being the LVDS encoder numerical index.
- In addition to the functional and encoder clocks, all DU versions also
support externally supplied pixel clocks. Those clocks are optional.
When supplied they must be named "dclkin.x" with "x" being the input
@@ -47,9 +49,11 @@ corresponding to each DU output.
R8A7779 (H1) DPAD 0 DPAD 1 - -
R8A7790 (H2) DPAD LVDS 0 LVDS 1 -
R8A7791 (M2-W) DPAD LVDS 0 - -
+ R8A7792 (V2H) DPAD 0 DPAD 1 - -
R8A7793 (M2-N) DPAD LVDS 0 - -
R8A7794 (E2) DPAD 0 DPAD 1 - -
R8A7795 (H3) DPAD HDMI 0 HDMI 1 LVDS
+ R8A7796 (M3-W) DPAD HDMI LVDS -
Example: R8A7790 (R-Car H2) DU
diff --git a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
index b95696d748c7..b82c00449468 100644
--- a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
+++ b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
@@ -28,6 +28,8 @@ The TCON acts as a timing controller for RGB, LVDS and TV interfaces.
Required properties:
- compatible: value must be either:
* allwinner,sun5i-a13-tcon
+ * allwinner,sun6i-a31-tcon
+ * allwinner,sun6i-a31s-tcon
* allwinner,sun8i-a33-tcon
- reg: base address and size of memory-mapped region
- interrupts: interrupt associated to this IP
@@ -50,7 +52,7 @@ Required properties:
second the block connected to the TCON channel 1 (usually the TV
encoder)
-On the A13, there is one more clock required:
+On SoCs other than the A33, there is one more clock required:
- 'tcon-ch1': The clock driving the TCON channel 1
DRC
@@ -64,6 +66,8 @@ adaptive backlight control.
Required properties:
- compatible: value must be one of:
+ * allwinner,sun6i-a31-drc
+ * allwinner,sun6i-a31s-drc
* allwinner,sun8i-a33-drc
- reg: base address and size of the memory-mapped region.
- interrupts: interrupt associated to this IP
@@ -87,6 +91,7 @@ system.
Required properties:
- compatible: value must be one of:
* allwinner,sun5i-a13-display-backend
+ * allwinner,sun6i-a31-display-backend
* allwinner,sun8i-a33-display-backend
- reg: base address and size of the memory-mapped region.
- clocks: phandles to the clocks feeding the frontend and backend
@@ -117,6 +122,7 @@ deinterlacing and color space conversion.
Required properties:
- compatible: value must be one of:
* allwinner,sun5i-a13-display-frontend
+ * allwinner,sun6i-a31-display-frontend
* allwinner,sun8i-a33-display-frontend
- reg: base address and size of the memory-mapped region.
- interrupts: interrupt associated to this IP
@@ -142,6 +148,8 @@ extra node.
Required properties:
- compatible: value must be one of:
* allwinner,sun5i-a13-display-engine
+ * allwinner,sun6i-a31-display-engine
+ * allwinner,sun6i-a31s-display-engine
* allwinner,sun8i-a33-display-engine
- allwinner,pipelines: list of phandle to the display engine
diff --git a/Documentation/devicetree/bindings/display/zte,vou.txt b/Documentation/devicetree/bindings/display/zte,vou.txt
new file mode 100644
index 000000000000..740e5bd2e4f7
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/zte,vou.txt
@@ -0,0 +1,84 @@
+ZTE VOU Display Controller
+
+This is a display controller found on ZTE ZX296718 SoC. It includes multiple
+Graphic Layer (GL) and Video Layer (VL), two Mixers/Channels, and a few blocks
+handling scaling, color space conversion etc. VOU also integrates the support
+for typical output devices, like HDMI, TV Encoder, VGA, and RGB LCD.
+
+* Master VOU node
+
+It must be the parent node of all the sub-device nodes.
+
+Required properties:
+ - compatible: should be "zte,zx296718-vou"
+ - #address-cells: should be <1>
+ - #size-cells: should be <1>
+ - ranges: list of address translations between VOU and sub-devices
+
+* VOU DPC device
+
+Required properties:
+ - compatible: should be "zte,zx296718-dpc"
+ - reg: Physical base address and length of DPC register regions, one for each
+ entry in 'reg-names'
+ - reg-names: The names of register regions. The following regions are required:
+ "osd"
+ "timing_ctrl"
+ "dtrc"
+ "vou_ctrl"
+ "otfppu"
+ - interrupts: VOU DPC interrupt number to CPU
+ - clocks: A list of phandle + clock-specifier pairs, one for each entry
+ in 'clock-names'
+ - clock-names: A list of clock names. The following clocks are required:
+ "aclk"
+ "ppu_wclk"
+ "main_wclk"
+ "aux_wclk"
+
+* HDMI output device
+
+Required properties:
+ - compatible: should be "zte,zx296718-hdmi"
+ - reg: Physical base address and length of the HDMI device IO region
+ - interrupts : HDMI interrupt number to CPU
+ - clocks: A list of phandle + clock-specifier pairs, one for each entry
+ in 'clock-names'
+ - clock-names: A list of clock names. The following clocks are required:
+ "osc_cec"
+ "osc_clk"
+ "xclk"
+
+Example:
+
+vou: vou@1440000 {
+ compatible = "zte,zx296718-vou";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x1440000 0x10000>;
+
+ dpc: dpc@0 {
+ compatible = "zte,zx296718-dpc";
+ reg = <0x0000 0x1000>, <0x1000 0x1000>,
+ <0x5000 0x1000>, <0x6000 0x1000>,
+ <0xa000 0x1000>;
+ reg-names = "osd", "timing_ctrl",
+ "dtrc", "vou_ctrl",
+ "otfppu";
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&topcrm VOU_ACLK>, <&topcrm VOU_PPU_WCLK>,
+ <&topcrm VOU_MAIN_WCLK>, <&topcrm VOU_AUX_WCLK>;
+ clock-names = "aclk", "ppu_wclk",
+ "main_wclk", "aux_wclk";
+ };
+
+ hdmi: hdmi@c000 {
+ compatible = "zte,zx296718-hdmi";
+ reg = <0xc000 0x4000>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&topcrm HDMI_OSC_CEC>,
+ <&topcrm HDMI_OSC_CLK>,
+ <&topcrm HDMI_XCLK>;
+ clock-names = "osc_cec", "osc_clk", "xclk";
+ };
+};
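[Editor's note: the binding above lists the DPC register regions by name. Purely as an illustration (this is not the actual zx driver, and the function name is made up), a driver would typically map one of those named regions like this:]

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Illustrative only: map one of the DPC regions named in the binding,
 * e.g. "osd", "timing_ctrl", "dtrc", "vou_ctrl" or "otfppu". */
static void __iomem *dpc_map_region(struct platform_device *pdev,
				    const char *name)
{
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	return devm_ioremap_resource(&pdev->dev, res);
}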
diff --git a/Documentation/devicetree/bindings/net/marvell-orion-net.txt b/Documentation/devicetree/bindings/net/marvell-orion-net.txt
index bce52b2ec55e..6fd988c84c4f 100644
--- a/Documentation/devicetree/bindings/net/marvell-orion-net.txt
+++ b/Documentation/devicetree/bindings/net/marvell-orion-net.txt
@@ -49,6 +49,7 @@ Optional port properties:
and
- phy-handle: See ethernet.txt file in the same directory.
+ - phy-mode: See ethernet.txt file in the same directory.
or
diff --git a/Documentation/devicetree/bindings/reset/uniphier-reset.txt b/Documentation/devicetree/bindings/reset/uniphier-reset.txt
index e6bbfccd56c3..5020524cddeb 100644
--- a/Documentation/devicetree/bindings/reset/uniphier-reset.txt
+++ b/Documentation/devicetree/bindings/reset/uniphier-reset.txt
@@ -6,25 +6,25 @@ System reset
Required properties:
- compatible: should be one of the following:
- "socionext,uniphier-sld3-reset" - for PH1-sLD3 SoC.
- "socionext,uniphier-ld4-reset" - for PH1-LD4 SoC.
- "socionext,uniphier-pro4-reset" - for PH1-Pro4 SoC.
- "socionext,uniphier-sld8-reset" - for PH1-sLD8 SoC.
- "socionext,uniphier-pro5-reset" - for PH1-Pro5 SoC.
- "socionext,uniphier-pxs2-reset" - for ProXstream2/PH1-LD6b SoC.
- "socionext,uniphier-ld11-reset" - for PH1-LD11 SoC.
- "socionext,uniphier-ld20-reset" - for PH1-LD20 SoC.
+ "socionext,uniphier-sld3-reset" - for sLD3 SoC.
+ "socionext,uniphier-ld4-reset" - for LD4 SoC.
+ "socionext,uniphier-pro4-reset" - for Pro4 SoC.
+ "socionext,uniphier-sld8-reset" - for sLD8 SoC.
+ "socionext,uniphier-pro5-reset" - for Pro5 SoC.
+ "socionext,uniphier-pxs2-reset" - for PXs2/LD6b SoC.
+ "socionext,uniphier-ld11-reset" - for LD11 SoC.
+ "socionext,uniphier-ld20-reset" - for LD20 SoC.
- #reset-cells: should be 1.
Example:
sysctrl@61840000 {
- compatible = "socionext,uniphier-ld20-sysctrl",
+ compatible = "socionext,uniphier-ld11-sysctrl",
"simple-mfd", "syscon";
reg = <0x61840000 0x4000>;
reset {
- compatible = "socionext,uniphier-ld20-reset";
+ compatible = "socionext,uniphier-ld11-reset";
#reset-cells = <1>;
};
@@ -32,30 +32,30 @@ Example:
};
-Media I/O (MIO) reset
----------------------
+Media I/O (MIO) reset, SD reset
+-------------------------------
Required properties:
- compatible: should be one of the following:
- "socionext,uniphier-sld3-mio-reset" - for PH1-sLD3 SoC.
- "socionext,uniphier-ld4-mio-reset" - for PH1-LD4 SoC.
- "socionext,uniphier-pro4-mio-reset" - for PH1-Pro4 SoC.
- "socionext,uniphier-sld8-mio-reset" - for PH1-sLD8 SoC.
- "socionext,uniphier-pro5-mio-reset" - for PH1-Pro5 SoC.
- "socionext,uniphier-pxs2-mio-reset" - for ProXstream2/PH1-LD6b SoC.
- "socionext,uniphier-ld11-mio-reset" - for PH1-LD11 SoC.
- "socionext,uniphier-ld20-mio-reset" - for PH1-LD20 SoC.
+ "socionext,uniphier-sld3-mio-reset" - for sLD3 SoC.
+ "socionext,uniphier-ld4-mio-reset" - for LD4 SoC.
+ "socionext,uniphier-pro4-mio-reset" - for Pro4 SoC.
+ "socionext,uniphier-sld8-mio-reset" - for sLD8 SoC.
+ "socionext,uniphier-pro5-sd-reset" - for Pro5 SoC.
+ "socionext,uniphier-pxs2-sd-reset" - for PXs2/LD6b SoC.
+ "socionext,uniphier-ld11-mio-reset" - for LD11 SoC.
+ "socionext,uniphier-ld20-sd-reset" - for LD20 SoC.
- #reset-cells: should be 1.
Example:
mioctrl@59810000 {
- compatible = "socionext,uniphier-ld20-mioctrl",
+ compatible = "socionext,uniphier-ld11-mioctrl",
"simple-mfd", "syscon";
reg = <0x59810000 0x800>;
reset {
- compatible = "socionext,uniphier-ld20-mio-reset";
+ compatible = "socionext,uniphier-ld11-mio-reset";
#reset-cells = <1>;
};
@@ -68,24 +68,24 @@ Peripheral reset
Required properties:
- compatible: should be one of the following:
- "socionext,uniphier-ld4-peri-reset" - for PH1-LD4 SoC.
- "socionext,uniphier-pro4-peri-reset" - for PH1-Pro4 SoC.
- "socionext,uniphier-sld8-peri-reset" - for PH1-sLD8 SoC.
- "socionext,uniphier-pro5-peri-reset" - for PH1-Pro5 SoC.
- "socionext,uniphier-pxs2-peri-reset" - for ProXstream2/PH1-LD6b SoC.
- "socionext,uniphier-ld11-peri-reset" - for PH1-LD11 SoC.
- "socionext,uniphier-ld20-peri-reset" - for PH1-LD20 SoC.
+ "socionext,uniphier-ld4-peri-reset" - for LD4 SoC.
+ "socionext,uniphier-pro4-peri-reset" - for Pro4 SoC.
+ "socionext,uniphier-sld8-peri-reset" - for sLD8 SoC.
+ "socionext,uniphier-pro5-peri-reset" - for Pro5 SoC.
+ "socionext,uniphier-pxs2-peri-reset" - for PXs2/LD6b SoC.
+ "socionext,uniphier-ld11-peri-reset" - for LD11 SoC.
+ "socionext,uniphier-ld20-peri-reset" - for LD20 SoC.
- #reset-cells: should be 1.
Example:
perictrl@59820000 {
- compatible = "socionext,uniphier-ld20-perictrl",
+ compatible = "socionext,uniphier-ld11-perictrl",
"simple-mfd", "syscon";
reg = <0x59820000 0x200>;
reset {
- compatible = "socionext,uniphier-ld20-peri-reset";
+ compatible = "socionext,uniphier-ld11-peri-reset";
#reset-cells = <1>;
};
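[Editor's note: for context, a peripheral driver consumes one of these reset lines through the common reset framework. The sketch below is hypothetical (driver and function names are made up) and only assumes the generic reset API:]

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

/* Hypothetical consumer: take the block out of reset during probe. */
static int foo_deassert_reset(struct device *dev)
{
	struct reset_control *rst = devm_reset_control_get(dev, NULL);

	if (IS_ERR(rst))
		return PTR_ERR(rst);
	return reset_control_deassert(rst);
}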
diff --git a/Documentation/devicetree/bindings/serial/cdns,uart.txt b/Documentation/devicetree/bindings/serial/cdns,uart.txt
index a3eb154c32ca..227bb770b027 100644
--- a/Documentation/devicetree/bindings/serial/cdns,uart.txt
+++ b/Documentation/devicetree/bindings/serial/cdns,uart.txt
@@ -1,7 +1,9 @@
Binding for Cadence UART Controller
Required properties:
-- compatible : should be "cdns,uart-r1p8", or "xlnx,xuartps"
+- compatible :
+ Use "xlnx,xuartps","cdns,uart-r1p8" for Zynq-7xxx SoC.
+ Use "xlnx,zynqmp-uart","cdns,uart-r1p12" for Zynq Ultrascale+ MPSoC.
- reg: Should contain UART controller registers location and length.
- interrupts: Should contain UART controller interrupts.
- clocks: Must contain phandles to the UART clocks
diff --git a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
index 1e4000d83aee..8d27d1a603e7 100644
--- a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
+++ b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
@@ -9,6 +9,14 @@ Required properties:
- "renesas,scifb-r8a73a4" for R8A73A4 (R-Mobile APE6) SCIFB compatible UART.
- "renesas,scifa-r8a7740" for R8A7740 (R-Mobile A1) SCIFA compatible UART.
- "renesas,scifb-r8a7740" for R8A7740 (R-Mobile A1) SCIFB compatible UART.
+ - "renesas,scif-r8a7743" for R8A7743 (RZ/G1M) SCIF compatible UART.
+ - "renesas,scifa-r8a7743" for R8A7743 (RZ/G1M) SCIFA compatible UART.
+ - "renesas,scifb-r8a7743" for R8A7743 (RZ/G1M) SCIFB compatible UART.
+ - "renesas,hscif-r8a7743" for R8A7743 (RZ/G1M) HSCIF compatible UART.
+ - "renesas,scif-r8a7745" for R8A7745 (RZ/G1E) SCIF compatible UART.
+ - "renesas,scifa-r8a7745" for R8A7745 (RZ/G1E) SCIFA compatible UART.
+ - "renesas,scifb-r8a7745" for R8A7745 (RZ/G1E) SCIFB compatible UART.
+ - "renesas,hscif-r8a7745" for R8A7745 (RZ/G1E) HSCIF compatible UART.
- "renesas,scif-r8a7778" for R8A7778 (R-Car M1) SCIF compatible UART.
- "renesas,scif-r8a7779" for R8A7779 (R-Car H1) SCIF compatible UART.
- "renesas,scif-r8a7790" for R8A7790 (R-Car H2) SCIF compatible UART.
diff --git a/Documentation/devicetree/bindings/usb/dwc2.txt b/Documentation/devicetree/bindings/usb/dwc2.txt
index 455f2c310a1b..2c30a5479069 100644
--- a/Documentation/devicetree/bindings/usb/dwc2.txt
+++ b/Documentation/devicetree/bindings/usb/dwc2.txt
@@ -28,10 +28,7 @@ Refer to phy/phy-bindings.txt for generic phy consumer properties
- g-use-dma: enable dma usage in gadget driver.
- g-rx-fifo-size: size of rx fifo size in gadget mode.
- g-np-tx-fifo-size: size of non-periodic tx fifo size in gadget mode.
-
-Deprecated properties:
-- g-tx-fifo-size: size of periodic tx fifo per endpoint (except ep0)
- in gadget mode.
+- g-tx-fifo-size: size of periodic tx fifo per endpoint (except ep0) in gadget mode.
Example:
diff --git a/Documentation/driver-api/infrastructure.rst b/Documentation/driver-api/infrastructure.rst
index 5d50d6733db3..a0d65eb49055 100644
--- a/Documentation/driver-api/infrastructure.rst
+++ b/Documentation/driver-api/infrastructure.rst
@@ -86,10 +86,10 @@ reservation
fence
~~~~~
-.. kernel-doc:: drivers/dma-buf/fence.c
+.. kernel-doc:: drivers/dma-buf/dma-fence.c
:export:
-.. kernel-doc:: include/linux/fence.h
+.. kernel-doc:: include/linux/dma-fence.h
:internal:
.. kernel-doc:: drivers/dma-buf/seqno-fence.c
@@ -98,10 +98,10 @@ fence
.. kernel-doc:: include/linux/seqno-fence.h
:internal:
-.. kernel-doc:: drivers/dma-buf/fence-array.c
+.. kernel-doc:: drivers/dma-buf/dma-fence-array.c
:export:
-.. kernel-doc:: include/linux/fence-array.h
+.. kernel-doc:: include/linux/dma-fence-array.h
:internal:
.. kernel-doc:: drivers/dma-buf/reservation.c
diff --git a/Documentation/gpio/board.txt b/Documentation/gpio/board.txt
index 40884c4fe40c..a0f61898d493 100644
--- a/Documentation/gpio/board.txt
+++ b/Documentation/gpio/board.txt
@@ -6,7 +6,7 @@ Note that it only applies to the new descriptor-based interface. For a
description of the deprecated integer-based GPIO interface please refer to
gpio-legacy.txt (actually, there is no real mapping possible with the old
interface; you just fetch an integer from somewhere and request the
-corresponding GPIO.
+corresponding GPIO).
All platforms can enable the GPIO library, but if the platform strictly
requires GPIO functionality to be present, it needs to select GPIOLIB from its
@@ -162,6 +162,9 @@ The driver controlling "foo.0" will then be able to obtain its GPIOs as follows:
Since the "led" GPIOs are mapped as active-high, this example will switch their
signals to 1, i.e. enabling the LEDs. And for the "power" GPIO, which is mapped
-as active-low, its actual signal will be 0 after this code. Contrary to the legacy
-integer GPIO interface, the active-low property is handled during mapping and is
-thus transparent to GPIO consumers.
+as active-low, its actual signal will be 0 after this code. Contrary to the
+legacy integer GPIO interface, the active-low property is handled during
+mapping and is thus transparent to GPIO consumers.
+
+A set of functions such as gpiod_set_value() is available to work with
+the new descriptor-oriented interface.
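[Editor's note: to illustrate the descriptor-oriented interface mentioned in the added sentence, a consumer of the "power" mapping from the example above might look roughly like this. It is a minimal sketch, not taken from an in-tree driver:]

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int foo_power_on(struct device *dev)
{
	struct gpio_desc *power;

	/* Request the "power" GPIO; it comes back configured as output-low. */
	power = devm_gpiod_get(dev, "power", GPIOD_OUT_LOW);
	if (IS_ERR(power))
		return PTR_ERR(power);

	/* Logical 1 = "on"; the active-low inversion is handled by gpiolib. */
	gpiod_set_value(power, 1);
	return 0;
}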
diff --git a/Documentation/gpu/drm-internals.rst b/Documentation/gpu/drm-internals.rst
index 37284bcc7764..e35920db1f4c 100644
--- a/Documentation/gpu/drm-internals.rst
+++ b/Documentation/gpu/drm-internals.rst
@@ -143,6 +143,9 @@ Device Instance and Driver Handling
.. kernel-doc:: drivers/gpu/drm/drm_drv.c
:export:
+.. kernel-doc:: include/drm/drm_drv.h
+ :internal:
+
Driver Load
-----------
@@ -350,6 +353,23 @@ how the ioctl is allowed to be called.
.. kernel-doc:: drivers/gpu/drm/drm_ioctl.c
:export:
+
+Misc Utilities
+==============
+
+Printer
+-------
+
+.. kernel-doc:: include/drm/drm_print.h
+ :doc: print
+
+.. kernel-doc:: include/drm/drm_print.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_print.c
+ :export:
+
+
Legacy Support Code
===================
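[Editor's note: the newly documented drm_print.h/drm_print.c printer abstraction lets the same dump code target either dmesg or a debugfs seq_file. A rough sketch of a user follows; names are illustrative and not from this patch:]

#include <linux/seq_file.h>
#include <drm/drmP.h>
#include <drm/drm_print.h>

static void foo_dump(struct drm_device *drm, struct seq_file *m)
{
	/* Pick a sink: seq_file when called from debugfs, dmesg otherwise. */
	struct drm_printer p = m ? drm_seq_file_printer(m)
				 : drm_info_printer(drm->dev);

	drm_printf(&p, "hypothetical state dump for %s\n", drm->driver->name);
}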
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index bb4254d19cbb..4ca77f675967 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -261,14 +261,6 @@ Plane Helper Reference
.. kernel-doc:: drivers/gpu/drm/drm_plane_helper.c
:export:
-Tile group
-==========
-
-# FIXME: This should probably be moved into a property documentation section
-
-.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
- :doc: Tile group
-
Auxiliary Modeset Helpers
=========================
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index cb0d3537b705..568f3c2b6e46 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -15,25 +15,24 @@ be setup by initializing the following fields.
- struct drm_mode_config_funcs \*funcs;
Mode setting functions.
-Modeset Base Object Abstraction
-===============================
+Mode Configuration
-.. kernel-doc:: include/drm/drm_mode_object.h
- :internal:
+KMS Core Structures and Functions
+=================================
-.. kernel-doc:: drivers/gpu/drm/drm_mode_object.c
+.. kernel-doc:: drivers/gpu/drm/drm_mode_config.c
:export:
-KMS Data Structures
-===================
-
-.. kernel-doc:: include/drm/drm_crtc.h
+.. kernel-doc:: include/drm/drm_mode_config.h
:internal:
-KMS API Functions
-=================
+Modeset Base Object Abstraction
+===============================
-.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
+.. kernel-doc:: include/drm/drm_mode_object.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_mode_object.c
:export:
Atomic Mode Setting Function Reference
@@ -45,6 +44,15 @@ Atomic Mode Setting Function Reference
.. kernel-doc:: include/drm/drm_atomic.h
:internal:
+CRTC Abstraction
+================
+
+.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
+ :export:
+
+.. kernel-doc:: include/drm/drm_crtc.h
+ :internal:
+
Frame Buffer Abstraction
========================
@@ -72,46 +80,8 @@ DRM Format Handling
Dumb Buffer Objects
===================
-The KMS API doesn't standardize backing storage object creation and
-leaves it to driver-specific ioctls. Furthermore actually creating a
-buffer object even for GEM-based drivers is done through a
-driver-specific ioctl - GEM only has a common userspace interface for
-sharing and destroying objects. While not an issue for full-fledged
-graphics stacks that include device-specific userspace components (in
-libdrm for instance), this limit makes DRM-based early boot graphics
-unnecessarily complex.
-
-Dumb objects partly alleviate the problem by providing a standard API to
-create dumb buffers suitable for scanout, which can then be used to
-create KMS frame buffers.
-
-To support dumb objects drivers must implement the dumb_create,
-dumb_destroy and dumb_map_offset operations.
-
-- int (\*dumb_create)(struct drm_file \*file_priv, struct
- drm_device \*dev, struct drm_mode_create_dumb \*args);
- The dumb_create operation creates a driver object (GEM or TTM
- handle) suitable for scanout based on the width, height and depth
- from the struct :c:type:`struct drm_mode_create_dumb
- <drm_mode_create_dumb>` argument. It fills the argument's
- handle, pitch and size fields with a handle for the newly created
- object and its line pitch and size in bytes.
-
-- int (\*dumb_destroy)(struct drm_file \*file_priv, struct
- drm_device \*dev, uint32_t handle);
- The dumb_destroy operation destroys a dumb object created by
- dumb_create.
-
-- int (\*dumb_map_offset)(struct drm_file \*file_priv, struct
- drm_device \*dev, uint32_t handle, uint64_t \*offset);
- The dumb_map_offset operation associates an mmap fake offset with
- the object given by the handle and returns it. Drivers must use the
- :c:func:`drm_gem_create_mmap_offset()` function to associate
- the fake offset as described in ?.
-
-Note that dumb objects may not be used for gpu acceleration, as has been
-attempted on some ARM embedded platforms. Such drivers really must have
-a hardware-specific ioctl to allocate suitable buffer objects.
+.. kernel-doc:: drivers/gpu/drm/drm_dumb_buffers.c
+ :doc: overview
Plane Abstraction
=================
@@ -311,6 +281,12 @@ Color Management Properties
.. kernel-doc:: drivers/gpu/drm/drm_color_mgmt.c
:export:
+Tile Group Property
+-------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_connector.c
+ :doc: Tile group
+
Existing KMS Properties
-----------------------
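[Editor's note: the dumb-buffer overview that replaces the removed prose above covers the kernel side; from userspace the standard ioctl is all that is needed to allocate a scanout buffer. A hedged userspace sketch using libdrm (helper name is made up):]

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>

/* Illustrative only: allocate a 32bpp dumb buffer suitable for scanout. */
static int create_dumb(int fd, uint32_t width, uint32_t height,
		       struct drm_mode_create_dumb *creq)
{
	memset(creq, 0, sizeof(*creq));
	creq->width = width;
	creq->height = height;
	creq->bpp = 32;

	/* On success the kernel fills in handle, pitch and size. */
	return drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, creq);
}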
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index ba83b7d88f1f..117d2ab7a5f7 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -258,19 +258,19 @@ Global GTT views
GTT Fences and Swizzling
------------------------
-.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence.c
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence_reg.c
:internal:
Global GTT Fence Handling
~~~~~~~~~~~~~~~~~~~~~~~~~
-.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence.c
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence_reg.c
:doc: fence register handling
Hardware Tiling and Swizzling Details
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence.c
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence_reg.c
:doc: tiling swizzling details
Object Tiling IOCTLs
diff --git a/Documentation/networking/netdev-FAQ.txt b/Documentation/networking/netdev-FAQ.txt
index 0fe1c6e0dbcd..a20b2fae942b 100644
--- a/Documentation/networking/netdev-FAQ.txt
+++ b/Documentation/networking/netdev-FAQ.txt
@@ -29,8 +29,8 @@ A: There are always two trees (git repositories) in play. Both are driven
Linus, and net-next is where the new code goes for the future release.
You can find the trees here:
- http://git.kernel.org/?p=linux/kernel/git/davem/net.git
- http://git.kernel.org/?p=linux/kernel/git/davem/net-next.git
+ https://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+ https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
Q: How often do changes from these trees make it to the mainline Linus tree?
@@ -76,7 +76,7 @@ Q: So where are we now in this cycle?
A: Load the mainline (Linus) page here:
- http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git
+ https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
and note the top of the "tags" section. If it is rc1, it is early
in the dev cycle. If it was tagged rc7 a week ago, then a release
@@ -123,7 +123,7 @@ A: Normally Greg Kroah-Hartman collects stable commits himself, but
It contains the patches which Dave has selected, but not yet handed
off to Greg. If Greg already has the patch, then it will be here:
- http://git.kernel.org/cgit/linux/kernel/git/stable/stable-queue.git
+ https://git.kernel.org/pub/scm/linux/kernel/git/stable/stable-queue.git
A quick way to find whether the patch is in this stable-queue is
to simply clone the repo, and then git grep the mainline commit ID, e.g.
diff --git a/Documentation/networking/nf_conntrack-sysctl.txt b/Documentation/networking/nf_conntrack-sysctl.txt
index 4fb51d32fccc..399e4e866a9c 100644
--- a/Documentation/networking/nf_conntrack-sysctl.txt
+++ b/Documentation/networking/nf_conntrack-sysctl.txt
@@ -33,24 +33,6 @@ nf_conntrack_events - BOOLEAN
If this option is enabled, the connection tracking code will
provide userspace with connection tracking events via ctnetlink.
-nf_conntrack_events_retry_timeout - INTEGER (seconds)
- default 15
-
- This option is only relevant when "reliable connection tracking
- events" are used. Normally, ctnetlink is "lossy", that is,
- events are normally dropped when userspace listeners can't keep up.
-
- Userspace can request "reliable event mode". When this mode is
- active, the conntrack will only be destroyed after the event was
- delivered. If event delivery fails, the kernel periodically
- re-tries to send the event to userspace.
-
- This is the maximum interval the kernel should use when re-trying
- to deliver the destroy event.
-
- A higher number means there will be fewer delivery retries and it
- will take longer for a backlog to be processed.
-
nf_conntrack_expect_max - INTEGER
Maximum size of expectation table. Default value is
nf_conntrack_buckets / 256. Minimum is 1.
diff --git a/MAINTAINERS b/MAINTAINERS
index f3547144e743..02f20dba7055 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1442,6 +1442,7 @@ F: drivers/cpufreq/mvebu-cpufreq.c
F: arch/arm/configs/mvebu_*_defconfig
ARM/Marvell Berlin SoC support
+M: Jisheng Zhang <jszhang@marvell.com>
M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@@ -2551,15 +2552,18 @@ S: Supported
F: drivers/net/ethernet/broadcom/genet/
BROADCOM BNX2 GIGABIT ETHERNET DRIVER
-M: Sony Chacko <sony.chacko@qlogic.com>
-M: Dept-HSGLinuxNICDev@qlogic.com
+M: Rasesh Mody <rasesh.mody@cavium.com>
+M: Harish Patil <harish.patil@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/bnx2.*
F: drivers/net/ethernet/broadcom/bnx2_*
BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
-M: Ariel Elior <ariel.elior@qlogic.com>
+M: Yuval Mintz <Yuval.Mintz@cavium.com>
+M: Ariel Elior <ariel.elior@cavium.com>
+M: everest-linux-l2@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/bnx2x/
@@ -2766,7 +2770,9 @@ S: Supported
F: drivers/scsi/bfa/
BROCADE BNA 10 GIGABIT ETHERNET DRIVER
-M: Rasesh Mody <rasesh.mody@qlogic.com>
+M: Rasesh Mody <rasesh.mody@cavium.com>
+M: Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/brocade/bna/
@@ -3913,8 +3919,10 @@ R: Gustavo Padovan <gustavo@padovan.org>
S: Maintained
L: linux-media@vger.kernel.org
L: dri-devel@lists.freedesktop.org
-F: drivers/dma-buf/sync_file.c
+F: drivers/dma-buf/sync_*
+F: drivers/dma-buf/sw_sync.c
F: include/linux/sync_file.h
+F: include/uapi/linux/sync_file.h
F: Documentation/sync_file.txt
T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
@@ -4290,6 +4298,13 @@ S: Maintained
F: drivers/gpu/drm/tilcdc/
F: Documentation/devicetree/bindings/display/tilcdc/
+DRM DRIVERS FOR ZTE ZX
+M: Shawn Guo <shawnguo@kernel.org>
+L: dri-devel@lists.freedesktop.org
+S: Maintained
+F: drivers/gpu/drm/zte/
+F: Documentation/devicetree/bindings/display/zte,vou.txt
+
DSBR100 USB FM RADIO DRIVER
M: Alexey Klimov <klimov.linux@gmail.com>
L: linux-media@vger.kernel.org
@@ -5296,6 +5311,12 @@ M: Joe Perches <joe@perches.com>
S: Maintained
F: scripts/get_maintainer.pl
+GENWQE (IBM Generic Workqueue Card)
+M: Frank Haverkamp <haver@linux.vnet.ibm.com>
+M: Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com>
+S: Supported
+F: drivers/misc/genwqe/
+
GFS2 FILE SYSTEM
M: Steven Whitehouse <swhiteho@redhat.com>
M: Bob Peterson <rpeterso@redhat.com>
@@ -7922,6 +7943,10 @@ F: mm/
MEMORY TECHNOLOGY DEVICES (MTD)
M: David Woodhouse <dwmw2@infradead.org>
M: Brian Norris <computersforpeace@gmail.com>
+M: Boris Brezillon <boris.brezillon@free-electrons.com>
+M: Marek Vasut <marek.vasut@gmail.com>
+M: Richard Weinberger <richard@nod.at>
+M: Cyrille Pitchen <cyrille.pitchen@atmel.com>
L: linux-mtd@lists.infradead.org
W: http://www.linux-mtd.infradead.org/
Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
@@ -8109,6 +8134,7 @@ S: Maintained
F: drivers/media/dvb-frontends/mn88473*
MODULE SUPPORT
+M: Jessica Yu <jeyu@redhat.com>
M: Rusty Russell <rusty@rustcorp.com.au>
S: Maintained
F: include/linux/module.h
@@ -8518,11 +8544,10 @@ F: Documentation/devicetree/bindings/net/wireless/
F: drivers/net/wireless/
NETXEN (1/10) GbE SUPPORT
-M: Manish Chopra <manish.chopra@qlogic.com>
-M: Sony Chacko <sony.chacko@qlogic.com>
-M: Rajesh Borundia <rajesh.borundia@qlogic.com>
+M: Manish Chopra <manish.chopra@cavium.com>
+M: Rahul Verma <rahul.verma@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
-W: http://www.qlogic.com
S: Supported
F: drivers/net/ethernet/qlogic/netxen/
@@ -9898,33 +9923,32 @@ F: Documentation/scsi/LICENSE.qla4xxx
F: drivers/scsi/qla4xxx/
QLOGIC QLA3XXX NETWORK DRIVER
-M: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
-M: Ron Mercer <ron.mercer@qlogic.com>
-M: linux-driver@qlogic.com
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: Documentation/networking/LICENSE.qla3xxx
F: drivers/net/ethernet/qlogic/qla3xxx.*
QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M: Dept-GELinuxNICDev@qlogic.com
+M: Harish Patil <harish.patil@cavium.com>
+M: Manish Chopra <manish.chopra@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qlcnic/
QLOGIC QLGE 10Gb ETHERNET DRIVER
-M: Harish Patil <harish.patil@qlogic.com>
-M: Sudarsana Kalluru <sudarsana.kalluru@qlogic.com>
-M: Dept-GELinuxNICDev@qlogic.com
-M: linux-driver@qlogic.com
+M: Harish Patil <harish.patil@cavium.com>
+M: Manish Chopra <manish.chopra@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qlge/
QLOGIC QL4xxx ETHERNET DRIVER
-M: Yuval Mintz <Yuval.Mintz@qlogic.com>
-M: Ariel Elior <Ariel.Elior@qlogic.com>
-M: everest-linux-l2@qlogic.com
+M: Yuval Mintz <Yuval.Mintz@cavium.com>
+M: Ariel Elior <Ariel.Elior@cavium.com>
+M: everest-linux-l2@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qed/
@@ -11402,6 +11426,17 @@ W: http://www.st.com/spear
S: Maintained
F: drivers/clk/spear/
+SPI NOR SUBSYSTEM
+M: Cyrille Pitchen <cyrille.pitchen@atmel.com>
+M: Marek Vasut <marek.vasut@gmail.com>
+L: linux-mtd@lists.infradead.org
+W: http://www.linux-mtd.infradead.org/
+Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
+T: git git://github.com/spi-nor/linux.git
+S: Maintained
+F: drivers/mtd/spi-nor/
+F: include/linux/mtd/spi-nor.h
+
SPI SUBSYSTEM
M: Mark Brown <broonie@kernel.org>
L: linux-spi@vger.kernel.org
@@ -12781,6 +12816,7 @@ F: include/uapi/linux/virtio_console.h
VIRTIO CORE, NET AND BLOCK DRIVERS
M: "Michael S. Tsirkin" <mst@redhat.com>
+M: Jason Wang <jasowang@redhat.com>
L: virtualization@lists.linux-foundation.org
S: Maintained
F: Documentation/devicetree/bindings/virtio/
@@ -12811,6 +12847,7 @@ F: include/uapi/linux/virtio_gpu.h
VIRTIO HOST (VHOST)
M: "Michael S. Tsirkin" <mst@redhat.com>
+M: Jason Wang <jasowang@redhat.com>
L: kvm@vger.kernel.org
L: virtualization@lists.linux-foundation.org
L: netdev@vger.kernel.org
diff --git a/Makefile b/Makefile
index 93beca4312c4..f97f786de58d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 9
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
NAME = Psychotic Stoned Sheep
# *DOCUMENTATION*
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index ecd12379e2cd..bd204bfa29ed 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -41,6 +41,8 @@ config ARC
select PERF_USE_VMALLOC
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_GENERIC_DMA_COHERENT
+ select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_LZMA
config MIGHT_HAVE_PCI
bool
@@ -186,14 +188,6 @@ if SMP
config ARC_HAS_COH_CACHES
def_bool n
-config ARC_MCIP
- bool "ARConnect Multicore IP (MCIP) Support "
- depends on ISA_ARCV2
- help
- This IP block enables SMP in ARC-HS38 cores.
- It provides for cross-core interrupts, multi-core debug
- hardware semaphores, shared memory,....
-
config NR_CPUS
int "Maximum number of CPUs (2-4096)"
range 2 4096
@@ -211,6 +205,15 @@ config ARC_SMP_HALT_ON_RESET
endif #SMP
+config ARC_MCIP
+ bool "ARConnect Multicore IP (MCIP) Support "
+ depends on ISA_ARCV2
+ default y if SMP
+ help
+ This IP block enables SMP in ARC-HS38 cores.
+ It provides for cross-core interrupts, multi-core debug
+ hardware semaphores, shared memory,....
+
menuconfig ARC_CACHE
bool "Enable Cache Support"
default y
@@ -537,14 +540,6 @@ config ARC_DBG_TLB_PARANOIA
bool "Paranoia Checks in Low Level TLB Handlers"
default n
-config ARC_DBG_TLB_MISS_COUNT
- bool "Profile TLB Misses"
- default n
- select DEBUG_FS
- help
- Counts number of I and D TLB Misses and exports them via Debugfs
- The counters can be cleared via Debugfs as well
-
endif
config ARC_UBOOT_SUPPORT
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index aa82d13d4213..864adad52280 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -50,9 +50,6 @@ atleast_gcc44 := $(call cc-ifversion, -ge, 0404, y)
cflags-$(atleast_gcc44) += -fsection-anchors
-cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock
-cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
-
ifdef CONFIG_ISA_ARCV2
ifndef CONFIG_ARC_HAS_LL64
diff --git a/arch/arc/boot/Makefile b/arch/arc/boot/Makefile
index e597cb34c16a..f94cf151e06a 100644
--- a/arch/arc/boot/Makefile
+++ b/arch/arc/boot/Makefile
@@ -14,9 +14,15 @@ UIMAGE_ENTRYADDR = $(LINUX_START_TEXT)
suffix-y := bin
suffix-$(CONFIG_KERNEL_GZIP) := gz
+suffix-$(CONFIG_KERNEL_LZMA) := lzma
-targets += uImage uImage.bin uImage.gz
-extra-y += vmlinux.bin vmlinux.bin.gz
+targets += uImage
+targets += uImage.bin
+targets += uImage.gz
+targets += uImage.lzma
+extra-y += vmlinux.bin
+extra-y += vmlinux.bin.gz
+extra-y += vmlinux.bin.lzma
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
@@ -24,12 +30,18 @@ $(obj)/vmlinux.bin: vmlinux FORCE
$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
$(call if_changed,gzip)
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,lzma)
+
$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
$(call if_changed,uimage,none)
$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
$(call if_changed,uimage,gzip)
+$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
+ $(call if_changed,uimage,lzma)
+
$(obj)/uImage: $(obj)/uImage.$(suffix-y)
@ln -sf $(notdir $<) $@
@echo ' Image $@ is ready'
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index db25c65155cb..7f3f9f63708c 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -349,10 +349,11 @@ struct cpuinfo_arc {
struct cpuinfo_arc_bpu bpu;
struct bcr_identity core;
struct bcr_isa isa;
+ const char *details, *name;
unsigned int vec_base;
struct cpuinfo_arc_ccm iccm, dccm;
struct {
- unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, pad1:3,
+ unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
fpu_sp:1, fpu_dp:1, pad2:6,
debug:1, ap:1, smart:1, rtt:1, pad3:4,
timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index fb781e34f322..b3410ff6a62d 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -53,7 +53,7 @@ extern void arc_cache_init(void);
extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
extern void read_decode_cache_bcr(void);
-extern int ioc_exists;
+extern int ioc_enable;
extern unsigned long perip_base, perip_end;
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h
index 7096f97a1434..aa2d6da9d187 100644
--- a/arch/arc/include/asm/elf.h
+++ b/arch/arc/include/asm/elf.h
@@ -54,7 +54,7 @@ extern int elf_check_arch(const struct elf32_hdr *);
* the loader. We need to make sure that it is out of the way of the program
* that it will "exec", and that there is sufficient room for the brk.
*/
-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE (2UL * TASK_SIZE / 3)
/*
* When the program starts, a1 contains a pointer to a function to be
diff --git a/arch/arc/include/asm/mcip.h b/arch/arc/include/asm/mcip.h
index 847e3bbe387f..c8fbe4114bad 100644
--- a/arch/arc/include/asm/mcip.h
+++ b/arch/arc/include/asm/mcip.h
@@ -55,6 +55,22 @@ struct mcip_cmd {
#define IDU_M_DISTRI_DEST 0x2
};
+struct mcip_bcr {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad3:8,
+ idu:1, llm:1, num_cores:6,
+ iocoh:1, gfrc:1, dbg:1, pad2:1,
+ msg:1, sem:1, ipi:1, pad:1,
+ ver:8;
+#else
+ unsigned int ver:8,
+ pad:1, ipi:1, sem:1, msg:1,
+ pad2:1, dbg:1, gfrc:1, iocoh:1,
+ num_cores:6, llm:1, idu:1,
+ pad3:8;
+#endif
+};
+
/*
* MCIP programming model
*
diff --git a/arch/arc/include/asm/module.h b/arch/arc/include/asm/module.h
index 518222bb3f8e..6e91d8b339c3 100644
--- a/arch/arc/include/asm/module.h
+++ b/arch/arc/include/asm/module.h
@@ -18,6 +18,7 @@
struct mod_arch_specific {
void *unw_info;
int unw_sec_idx;
+ const char *secstr;
};
#endif
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
index 48b37c693db3..cb954cdab070 100644
--- a/arch/arc/include/asm/setup.h
+++ b/arch/arc/include/asm/setup.h
@@ -27,11 +27,6 @@ struct id_to_str {
const char *str;
};
-struct cpuinfo_data {
- struct id_to_str info;
- int up_range;
-};
-
extern int root_mountflags, end_mem;
void setup_processor(void);
@@ -43,5 +38,6 @@ void __init setup_arch_memory(void);
#define IS_USED_RUN(v) ((v) ? "" : "(not used) ")
#define IS_USED_CFG(cfg) IS_USED_RUN(IS_ENABLED(cfg))
#define IS_AVAIL2(v, s, cfg) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))
+#define IS_AVAIL3(v, v2, s) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_DISABLED_RUN(v2))
#endif /* __ASMARC_SETUP_H */
diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h
index e56f9fcc5581..772b67ca56e7 100644
--- a/arch/arc/include/asm/syscalls.h
+++ b/arch/arc/include/asm/syscalls.h
@@ -17,6 +17,7 @@ int sys_clone_wrapper(int, int, int, int, int);
int sys_cacheflush(uint32_t, uint32_t uint32_t);
int sys_arc_settls(void *);
int sys_arc_gettls(void);
+int sys_arc_usr_cmpxchg(int *, int, int);
#include <asm-generic/syscalls.h>
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
index 41fa2ec9e02c..9a34136d84b2 100644
--- a/arch/arc/include/uapi/asm/unistd.h
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -27,18 +27,19 @@
#define NR_syscalls __NR_syscalls
+/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */
+#define __NR_sysfs (__NR_arch_specific_syscall + 3)
+
/* ARC specific syscall */
#define __NR_cacheflush (__NR_arch_specific_syscall + 0)
#define __NR_arc_settls (__NR_arch_specific_syscall + 1)
#define __NR_arc_gettls (__NR_arch_specific_syscall + 2)
+#define __NR_arc_usr_cmpxchg (__NR_arch_specific_syscall + 4)
__SYSCALL(__NR_cacheflush, sys_cacheflush)
__SYSCALL(__NR_arc_settls, sys_arc_settls)
__SYSCALL(__NR_arc_gettls, sys_arc_gettls)
-
-
-/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */
-#define __NR_sysfs (__NR_arch_specific_syscall + 3)
+__SYSCALL(__NR_arc_usr_cmpxchg, sys_arc_usr_cmpxchg)
__SYSCALL(__NR_sysfs, sys_sysfs)
#undef __SYSCALL
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 72f9179b1a24..c424d5abc318 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -15,11 +15,12 @@
#include <asm/mcip.h>
#include <asm/setup.h>
-static char smp_cpuinfo_buf[128];
-static int idu_detected;
-
static DEFINE_RAW_SPINLOCK(mcip_lock);
+#ifdef CONFIG_SMP
+
+static char smp_cpuinfo_buf[128];
+
static void mcip_setup_per_cpu(int cpu)
{
smp_ipi_irq_setup(cpu, IPI_IRQ);
@@ -86,21 +87,7 @@ static void mcip_ipi_clear(int irq)
static void mcip_probe_n_setup(void)
{
- struct mcip_bcr {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int pad3:8,
- idu:1, llm:1, num_cores:6,
- iocoh:1, gfrc:1, dbg:1, pad2:1,
- msg:1, sem:1, ipi:1, pad:1,
- ver:8;
-#else
- unsigned int ver:8,
- pad:1, ipi:1, sem:1, msg:1,
- pad2:1, dbg:1, gfrc:1, iocoh:1,
- num_cores:6, llm:1, idu:1,
- pad3:8;
-#endif
- } mp;
+ struct mcip_bcr mp;
READ_BCR(ARC_REG_MCIP_BCR, mp);
@@ -114,7 +101,6 @@ static void mcip_probe_n_setup(void)
IS_AVAIL1(mp.gfrc, "GFRC"));
cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
- idu_detected = mp.idu;
if (mp.dbg) {
__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf);
@@ -130,6 +116,8 @@ struct plat_smp_ops plat_smp_ops = {
.ipi_clear = mcip_ipi_clear,
};
+#endif
+
/***************************************************************************
* ARCv2 Interrupt Distribution Unit (IDU)
*
@@ -295,8 +283,11 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
/* Read IDU BCR to confirm nr_irqs */
int nr_irqs = of_irq_count(intc);
int i, irq;
+ struct mcip_bcr mp;
+
+ READ_BCR(ARC_REG_MCIP_BCR, mp);
- if (!idu_detected)
+ if (!mp.idu)
panic("IDU not detected, but DeviceTree using it");
pr_info("MCIP: IDU referenced from Devicetree %d irqs\n", nr_irqs);
diff --git a/arch/arc/kernel/module.c b/arch/arc/kernel/module.c
index 9a2849756022..42e964db2967 100644
--- a/arch/arc/kernel/module.c
+++ b/arch/arc/kernel/module.c
@@ -30,17 +30,9 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
char *secstr, struct module *mod)
{
#ifdef CONFIG_ARC_DW2_UNWIND
- int i;
-
mod->arch.unw_sec_idx = 0;
mod->arch.unw_info = NULL;
-
- for (i = 1; i < hdr->e_shnum; i++) {
- if (strcmp(secstr+sechdrs[i].sh_name, ".eh_frame") == 0) {
- mod->arch.unw_sec_idx = i;
- break;
- }
- }
+ mod->arch.secstr = secstr;
#endif
return 0;
}
@@ -59,29 +51,33 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
unsigned int relsec, /* sec index for relo sec */
struct module *module)
{
- int i, n;
+ int i, n, relo_type;
Elf32_Rela *rel_entry = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym_entry, *sym_sec;
- Elf32_Addr relocation;
- Elf32_Addr location;
- Elf32_Addr sec_to_patch;
- int relo_type;
-
- sec_to_patch = sechdrs[sechdrs[relsec].sh_info].sh_addr;
+ Elf32_Addr relocation, location, tgt_addr;
+ unsigned int tgtsec;
+
+ /*
+ * @relsec has relocations e.g. .rela.init.text
+ * @tgtsec is section to patch e.g. .init.text
+ */
+ tgtsec = sechdrs[relsec].sh_info;
+ tgt_addr = sechdrs[tgtsec].sh_addr;
sym_sec = (Elf32_Sym *) sechdrs[symindex].sh_addr;
n = sechdrs[relsec].sh_size / sizeof(*rel_entry);
- pr_debug("\n========== Module Sym reloc ===========================\n");
- pr_debug("Section to fixup %x\n", sec_to_patch);
+ pr_debug("\nSection to fixup %s @%x\n",
+ module->arch.secstr + sechdrs[tgtsec].sh_name, tgt_addr);
pr_debug("=========================================================\n");
- pr_debug("rela->r_off | rela->addend | sym->st_value | ADDR | VALUE\n");
+ pr_debug("r_off\tr_add\tst_value ADDRESS VALUE\n");
pr_debug("=========================================================\n");
/* Loop thru entries in relocation section */
for (i = 0; i < n; i++) {
+ const char *s;
/* This is where to make the change */
- location = sec_to_patch + rel_entry[i].r_offset;
+ location = tgt_addr + rel_entry[i].r_offset;
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
@@ -89,10 +85,15 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
relocation = sym_entry->st_value + rel_entry[i].r_addend;
- pr_debug("\t%x\t\t%x\t\t%x %x %x [%s]\n",
- rel_entry[i].r_offset, rel_entry[i].r_addend,
- sym_entry->st_value, location, relocation,
- strtab + sym_entry->st_name);
+ if (sym_entry->st_name == 0 && ELF_ST_TYPE (sym_entry->st_info) == STT_SECTION) {
+ s = module->arch.secstr + sechdrs[sym_entry->st_shndx].sh_name;
+ } else {
+ s = strtab + sym_entry->st_name;
+ }
+
+ pr_debug(" %x\t%x\t%x %x %x [%s]\n",
+ rel_entry[i].r_offset, rel_entry[i].r_addend,
+ sym_entry->st_value, location, relocation, s);
/* This assumes modules are built with -mlong-calls
* so any branches/jumps are absolute 32 bit jmps
@@ -111,6 +112,10 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
goto relo_err;
}
+
+ if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0)
+ module->arch.unw_sec_idx = tgtsec;
+
return 0;
relo_err:
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index be1972bd2729..59aa43cb146e 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -41,6 +41,39 @@ SYSCALL_DEFINE0(arc_gettls)
return task_thread_info(current)->thr_ptr;
}
+SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
+{
+ int uval;
+ int ret;
+
+ /*
+	 * This is only for old cores lacking LLOCK/SCOND, which by definition
+ * can't possibly be SMP. Thus doesn't need to be SMP safe.
+ * And this also helps reduce the overhead for serializing in
+ * the UP case
+ */
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ preempt_disable();
+
+ ret = __get_user(uval, uaddr);
+ if (ret)
+ goto done;
+
+ if (uval != expected)
+ ret = -EAGAIN;
+ else
+ ret = __put_user(new, uaddr);
+
+done:
+ preempt_enable();
+
+ return ret;
+}
+
void arch_cpu_idle(void)
{
/* sleep, but enable all interrupts before committing */
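[Editor's note: for reference, the syscall added above would be invoked from userspace roughly as follows. This is an assumption-laden sketch: the wrapper name is made up, and __NR_arc_usr_cmpxchg comes from the uapi header change earlier in this patch; it only exists on ARC.]

#include <unistd.h>
#include <sys/syscall.h>

/* Hypothetical wrapper: returns 0 on success, or -1 with errno set
 * (e.g. EAGAIN when *uaddr != expected). Only meaningful on ARC cores
 * without LLOCK/SCOND, per the kernel-side comment above. */
static long arc_usr_cmpxchg(int *uaddr, int expected, int new_val)
{
	return syscall(__NR_arc_usr_cmpxchg, uaddr, expected, new_val);
}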
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 3df7f9c72f42..0385df77a697 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -40,6 +40,29 @@ struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
+static const struct id_to_str arc_cpu_rel[] = {
+#ifdef CONFIG_ISA_ARCOMPACT
+ { 0x34, "R4.10"},
+ { 0x35, "R4.11"},
+#else
+ { 0x51, "R2.0" },
+ { 0x52, "R2.1" },
+ { 0x53, "R3.0" },
+#endif
+ { 0x00, NULL }
+};
+
+static const struct id_to_str arc_cpu_nm[] = {
+#ifdef CONFIG_ISA_ARCOMPACT
+ { 0x20, "ARC 600" },
+	{ 0x30, "ARC 770" },	/* 750 identified separately */
+#else
+ { 0x40, "ARC EM" },
+ { 0x50, "ARC HS38" },
+#endif
+ { 0x00, "Unknown" }
+};
+
static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
{
if (is_isa_arcompact()) {
@@ -92,11 +115,26 @@ static void read_arc_build_cfg_regs(void)
struct bcr_timer timer;
struct bcr_generic bcr;
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+ const struct id_to_str *tbl;
+
FIX_PTR(cpu);
READ_BCR(AUX_IDENTITY, cpu->core);
READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa);
+ for (tbl = &arc_cpu_rel[0]; tbl->id != 0; tbl++) {
+ if (cpu->core.family == tbl->id) {
+ cpu->details = tbl->str;
+ break;
+ }
+ }
+
+ for (tbl = &arc_cpu_nm[0]; tbl->id != 0; tbl++) {
+ if ((cpu->core.family & 0xF0) == tbl->id)
+ break;
+ }
+ cpu->name = tbl->str;
+
READ_BCR(ARC_REG_TIMERS_BCR, timer);
cpu->extn.timer0 = timer.t0;
cpu->extn.timer1 = timer.t1;
@@ -111,6 +149,9 @@ static void read_arc_build_cfg_regs(void)
cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0; /* 1,3 */
cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0;
cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */
+ cpu->extn.swape = (cpu->core.family >= 0x34) ? 1 :
+ IS_ENABLED(CONFIG_ARC_HAS_SWAPE);
+
READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);
/* Read CCM BCRs for boot reporting even if not enabled in Kconfig */
@@ -160,64 +201,38 @@ static void read_arc_build_cfg_regs(void)
cpu->extn.rtt = bcr.ver ? 1 : 0;
cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt;
-}
-static const struct cpuinfo_data arc_cpu_tbl[] = {
-#ifdef CONFIG_ISA_ARCOMPACT
- { {0x20, "ARC 600" }, 0x2F},
- { {0x30, "ARC 700" }, 0x33},
- { {0x34, "ARC 700 R4.10"}, 0x34},
- { {0x35, "ARC 700 R4.11"}, 0x35},
-#else
- { {0x50, "ARC HS38 R2.0"}, 0x51},
- { {0x52, "ARC HS38 R2.1"}, 0x52},
- { {0x53, "ARC HS38 R3.0"}, 0x53},
-#endif
- { {0x00, NULL } }
-};
+ /* some hacks for lack of feature BCR info in old ARC700 cores */
+ if (is_isa_arcompact()) {
+ if (!cpu->isa.ver) /* ISA BCR absent, use Kconfig info */
+ cpu->isa.atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
+ else
+ cpu->isa.atomic = cpu->isa.atomic1;
+ cpu->isa.be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
+
+ /* there's no direct way to distinguish 750 vs. 770 */
+ if (unlikely(cpu->core.family < 0x34 || cpu->mmu.ver < 3))
+ cpu->name = "ARC750";
+ }
+}
static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
{
struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
struct bcr_identity *core = &cpu->core;
- const struct cpuinfo_data *tbl;
- char *isa_nm;
- int i, be, atomic;
- int n = 0;
+ int i, n = 0;
FIX_PTR(cpu);
- if (is_isa_arcompact()) {
- isa_nm = "ARCompact";
- be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
-
- atomic = cpu->isa.atomic1;
- if (!cpu->isa.ver) /* ISA BCR absent, use Kconfig info */
- atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
- } else {
- isa_nm = "ARCv2";
- be = cpu->isa.be;
- atomic = cpu->isa.atomic;
- }
-
n += scnprintf(buf + n, len - n,
"\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
core->family, core->cpu_id, core->chip_id);
- for (tbl = &arc_cpu_tbl[0]; tbl->info.id != 0; tbl++) {
- if ((core->family >= tbl->info.id) &&
- (core->family <= tbl->up_range)) {
- n += scnprintf(buf + n, len - n,
- "processor [%d]\t: %s (%s ISA) %s\n",
- cpu_id, tbl->info.str, isa_nm,
- IS_AVAIL1(be, "[Big-Endian]"));
- break;
- }
- }
-
- if (tbl->info.id == 0)
- n += scnprintf(buf + n, len - n, "UNKNOWN ARC Processor\n");
+ n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s\n",
+ cpu_id, cpu->name, cpu->details,
+ is_isa_arcompact() ? "ARCompact" : "ARCv2",
+ IS_AVAIL1(cpu->isa.be, "[Big-Endian]"));
n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ",
IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
@@ -226,7 +241,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
CONFIG_ARC_HAS_RTC));
n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s",
- IS_AVAIL2(atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
+ IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
IS_AVAIL1(cpu->isa.unalign, "unalign (not used)"));
@@ -253,7 +268,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
IS_AVAIL1(cpu->extn.swap, "swap "),
IS_AVAIL1(cpu->extn.minmax, "minmax "),
IS_AVAIL1(cpu->extn.crc, "crc "),
- IS_AVAIL2(1, "swape", CONFIG_ARC_HAS_SWAPE));
+ IS_AVAIL2(cpu->extn.swape, "swape", CONFIG_ARC_HAS_SWAPE));
if (cpu->bpu.ver)
n += scnprintf(buf + n, len - n,
@@ -272,9 +287,7 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
FIX_PTR(cpu);
- n += scnprintf(buf + n, len - n,
- "Vector Table\t: %#x\nPeripherals\t: %#lx:%#lx\n",
- cpu->vec_base, perip_base, perip_end);
+ n += scnprintf(buf + n, len - n, "Vector Table\t: %#x\n", cpu->vec_base);
if (cpu->extn.fpu_sp || cpu->extn.fpu_dp)
n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n",
@@ -507,7 +520,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
* way to pass it w/o having to kmalloc/free a 2 byte string.
* Encode cpu-id as 0xFFcccc, which is decoded by show routine.
*/
- return *pos < num_possible_cpus() ? cpu_to_ptr(*pos) : NULL;
+ return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 934150e7ac48..82f9bc819f4a 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -237,113 +237,3 @@ void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
if (!user_mode(regs))
show_stacktrace(current, regs);
}
-
-#ifdef CONFIG_DEBUG_FS
-
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/mount.h>
-#include <linux/pagemap.h>
-#include <linux/init.h>
-#include <linux/namei.h>
-#include <linux/debugfs.h>
-
-static struct dentry *test_dentry;
-static struct dentry *test_dir;
-static struct dentry *test_u32_dentry;
-
-static u32 clr_on_read = 1;
-
-#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
-u32 numitlb, numdtlb, num_pte_not_present;
-
-static int fill_display_data(char *kbuf)
-{
- size_t num = 0;
- num += sprintf(kbuf + num, "I-TLB Miss %x\n", numitlb);
- num += sprintf(kbuf + num, "D-TLB Miss %x\n", numdtlb);
- num += sprintf(kbuf + num, "PTE not present %x\n", num_pte_not_present);
-
- if (clr_on_read)
- numitlb = numdtlb = num_pte_not_present = 0;
-
- return num;
-}
-
-static int tlb_stats_open(struct inode *inode, struct file *file)
-{
- file->private_data = (void *)__get_free_page(GFP_KERNEL);
- return 0;
-}
-
-/* called on user read(): display the counters */
-static ssize_t tlb_stats_output(struct file *file, /* file descriptor */
- char __user *user_buf, /* user buffer */
- size_t len, /* length of buffer */
- loff_t *offset) /* offset in the file */
-{
- size_t num;
- char *kbuf = (char *)file->private_data;
-
- /* All of the data can he shoved in one iteration */
- if (*offset != 0)
- return 0;
-
- num = fill_display_data(kbuf);
-
- /* simple_read_from_buffer() is helper for copy to user space
- It copies up to @2 (num) bytes from kernel buffer @4 (kbuf) at offset
- @3 (offset) into the user space address starting at @1 (user_buf).
- @5 (len) is max size of user buffer
- */
- return simple_read_from_buffer(user_buf, num, offset, kbuf, len);
-}
-
-/* called on user write : clears the counters */
-static ssize_t tlb_stats_clear(struct file *file, const char __user *user_buf,
- size_t length, loff_t *offset)
-{
- numitlb = numdtlb = num_pte_not_present = 0;
- return length;
-}
-
-static int tlb_stats_close(struct inode *inode, struct file *file)
-{
- free_page((unsigned long)(file->private_data));
- return 0;
-}
-
-static const struct file_operations tlb_stats_file_ops = {
- .read = tlb_stats_output,
- .write = tlb_stats_clear,
- .open = tlb_stats_open,
- .release = tlb_stats_close
-};
-#endif
-
-static int __init arc_debugfs_init(void)
-{
- test_dir = debugfs_create_dir("arc", NULL);
-
-#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
- test_dentry = debugfs_create_file("tlb_stats", 0444, test_dir, NULL,
- &tlb_stats_file_ops);
-#endif
-
- test_u32_dentry =
- debugfs_create_u32("clr_on_read", 0444, test_dir, &clr_on_read);
-
- return 0;
-}
-
-module_init(arc_debugfs_init);
-
-static void __exit arc_debugfs_exit(void)
-{
- debugfs_remove(test_u32_dentry);
- debugfs_remove(test_dentry);
- debugfs_remove(test_dir);
-}
-module_exit(arc_debugfs_exit);
-
-#endif
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 97dddbefb86a..2b96cfc3be75 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -22,8 +22,8 @@
#include <asm/setup.h>
static int l2_line_sz;
-int ioc_exists;
-volatile int slc_enable = 1, ioc_enable = 1;
+static int ioc_exists;
+int slc_enable = 1, ioc_enable = 1;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
@@ -53,18 +53,15 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
- if (!is_isa_arcv2())
- return buf;
-
p = &cpuinfo_arc700[c].slc;
if (p->ver)
n += scnprintf(buf + n, len - n,
"SLC\t\t: %uK, %uB Line%s\n",
p->sz_k, p->line_len, IS_USED_RUN(slc_enable));
- if (ioc_exists)
- n += scnprintf(buf + n, len - n, "IOC\t\t:%s\n",
- IS_DISABLED_RUN(ioc_enable));
+ n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
+ perip_base,
+ IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency "));
return buf;
}
@@ -113,8 +110,10 @@ static void read_decode_cache_bcr_arcv2(int cpu)
}
READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
- if (cbcr.c && ioc_enable)
+ if (cbcr.c)
ioc_exists = 1;
+ else
+ ioc_enable = 0;
/* HS 2.0 didn't have AUX_VOL */
if (cpuinfo_arc700[cpu].core.family > 0x51) {
@@ -1002,7 +1001,7 @@ void arc_cache_init(void)
read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
}
- if (is_isa_arcv2() && ioc_exists) {
+ if (is_isa_arcv2() && ioc_enable) {
/* IO coherency base - 0x8z */
write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
/* IO coherency aperture size - 512Mb: 0x8z-0xAz */
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 20afc65e22dc..60aab5a7522b 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -45,7 +45,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
* -For coherent data, Read/Write to buffers terminate early in cache
* (vs. always going to memory - thus are faster)
*/
- if ((is_isa_arcv2() && ioc_exists) ||
+ if ((is_isa_arcv2() && ioc_enable) ||
(attrs & DMA_ATTR_NON_CONSISTENT))
need_coh = 0;
@@ -97,7 +97,7 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
int is_non_coh = 1;
is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
- (is_isa_arcv2() && ioc_exists);
+ (is_isa_arcv2() && ioc_enable);
if (PageHighMem(page) || !is_non_coh)
iounmap((void __force __iomem *)vaddr);
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index ec868a9081a1..bdb295e09160 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -793,16 +793,16 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
char super_pg[64] = "";
if (p_mmu->s_pg_sz_m)
- scnprintf(super_pg, 64, "%dM Super Page%s, ",
+ scnprintf(super_pg, 64, "%dM Super Page %s",
p_mmu->s_pg_sz_m,
IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));
n += scnprintf(buf + n, len - n,
- "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d %s%s\n",
+ "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d%s%s\n",
p_mmu->ver, p_mmu->pg_sz_k, super_pg,
p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
p_mmu->u_dtlb, p_mmu->u_itlb,
- IS_AVAIL2(p_mmu->pae, "PAE40 ", CONFIG_ARC_HAS_PAE40));
+ IS_AVAIL2(p_mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));
return buf;
}
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index f1967eeb32e7..b30e4e36bb00 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -237,15 +237,6 @@ ex_saved_reg1:
2:
-#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
- and.f 0, r0, _PAGE_PRESENT
- bz 1f
- ld r3, [num_pte_not_present]
- add r3, r3, 1
- st r3, [num_pte_not_present]
-1:
-#endif
-
.endm
;-----------------------------------------------------------------
@@ -309,12 +300,6 @@ ENTRY(EV_TLBMissI)
TLBMISS_FREEUP_REGS
-#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
- ld r0, [@numitlb]
- add r0, r0, 1
- st r0, [@numitlb]
-#endif
-
;----------------------------------------------------------------
; Get the PTE corresponding to V-addr accessed, r2 is setup with EFA
LOAD_FAULT_PTE
@@ -349,12 +334,6 @@ ENTRY(EV_TLBMissD)
TLBMISS_FREEUP_REGS
-#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
- ld r0, [@numdtlb]
- add r0, r0, 1
- st r0, [@numdtlb]
-#endif
-
;----------------------------------------------------------------
; Get the PTE corresponding to V-addr accessed
; If PTE exists, it will setup, r0 = PTE, r1 = Ptr to PTE, r2 = EFA
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
index b3df1c60d465..386eee6de232 100644
--- a/arch/arm/boot/dts/ste-snowball.dts
+++ b/arch/arm/boot/dts/ste-snowball.dts
@@ -239,14 +239,25 @@
arm,primecell-periphid = <0x10480180>;
max-frequency = <100000000>;
bus-width = <4>;
+ cap-sd-highspeed;
cap-mmc-highspeed;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ /* All direction control is used */
+ st,sig-dir-cmd;
+ st,sig-dir-dat0;
+ st,sig-dir-dat2;
+ st,sig-dir-dat31;
+ st,sig-pin-fbclk;
+ full-pwr-cycle;
vmmc-supply = <&ab8500_ldo_aux3_reg>;
vqmmc-supply = <&vmmci>;
pinctrl-names = "default", "sleep";
pinctrl-0 = <&sdi0_default_mode>;
pinctrl-1 = <&sdi0_sleep_mode>;
- cd-gpios = <&gpio6 26 GPIO_ACTIVE_LOW>; // 218
+ /* GPIO218 MMC_CD */
+ cd-gpios = <&gpio6 26 GPIO_ACTIVE_LOW>;
status = "okay";
};
@@ -549,7 +560,7 @@
/* VMMCI level-shifter enable */
snowball_cfg3 {
pins = "GPIO217_AH12";
- ste,config = <&gpio_out_lo>;
+ ste,config = <&gpio_out_hi>;
};
/* VMMCI level-shifter voltage select */
snowball_cfg4 {
diff --git a/arch/arm/boot/dts/uniphier-pro5.dtsi b/arch/arm/boot/dts/uniphier-pro5.dtsi
index 2c49c3614bda..5357ea9c14b1 100644
--- a/arch/arm/boot/dts/uniphier-pro5.dtsi
+++ b/arch/arm/boot/dts/uniphier-pro5.dtsi
@@ -184,11 +184,11 @@
};
&mio_clk {
- compatible = "socionext,uniphier-pro5-mio-clock";
+ compatible = "socionext,uniphier-pro5-sd-clock";
};
&mio_rst {
- compatible = "socionext,uniphier-pro5-mio-reset";
+ compatible = "socionext,uniphier-pro5-sd-reset";
};
&peri_clk {
diff --git a/arch/arm/boot/dts/uniphier-pxs2.dtsi b/arch/arm/boot/dts/uniphier-pxs2.dtsi
index 8789cd518933..950f07ba0337 100644
--- a/arch/arm/boot/dts/uniphier-pxs2.dtsi
+++ b/arch/arm/boot/dts/uniphier-pxs2.dtsi
@@ -197,11 +197,11 @@
};
&mio_clk {
- compatible = "socionext,uniphier-pxs2-mio-clock";
+ compatible = "socionext,uniphier-pxs2-sd-clock";
};
&mio_rst {
- compatible = "socionext,uniphier-pxs2-mio-reset";
+ compatible = "socionext,uniphier-pxs2-sd-reset";
};
&peri_clk {
diff --git a/arch/arm/boot/dts/vf500.dtsi b/arch/arm/boot/dts/vf500.dtsi
index a3824e61bd72..d7fdb2a7d97b 100644
--- a/arch/arm/boot/dts/vf500.dtsi
+++ b/arch/arm/boot/dts/vf500.dtsi
@@ -70,7 +70,7 @@
global_timer: timer@40002200 {
compatible = "arm,cortex-a9-global-timer";
reg = <0x40002200 0x20>;
- interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
interrupt-parent = <&intc>;
clocks = <&clks VF610_CLK_PLATFORM_BUS>;
};
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 437d0740dec6..11f37ed1dbff 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -850,6 +850,7 @@ CONFIG_PWM_SUN4I=y
CONFIG_PWM_TEGRA=y
CONFIG_PWM_VT8500=y
CONFIG_PHY_HIX5HD2_SATA=y
+CONFIG_E1000E=y
CONFIG_PWM_STI=y
CONFIG_PWM_BCM2835=y
CONFIG_PWM_BRCMSTB=m
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 194b69923389..ada0d29a660f 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -19,7 +19,7 @@
* This may need to be greater than __NR_last_syscall+1 in order to
* account for the padding in the syscall table
*/
-#define __NR_syscalls (396)
+#define __NR_syscalls (400)
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_GETHOSTNAME
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 2cb9dc770e1d..314100a06ccb 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -420,6 +420,9 @@
#define __NR_copy_file_range (__NR_SYSCALL_BASE+391)
#define __NR_preadv2 (__NR_SYSCALL_BASE+392)
#define __NR_pwritev2 (__NR_SYSCALL_BASE+393)
+#define __NR_pkey_mprotect (__NR_SYSCALL_BASE+394)
+#define __NR_pkey_alloc (__NR_SYSCALL_BASE+395)
+#define __NR_pkey_free (__NR_SYSCALL_BASE+396)
/*
* The following SWIs are ARM private.
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 703fa0f3cd8f..08030b18f10a 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -403,6 +403,9 @@
CALL(sys_copy_file_range)
CALL(sys_preadv2)
CALL(sys_pwritev2)
+ CALL(sys_pkey_mprotect)
+/* 395 */ CALL(sys_pkey_alloc)
+ CALL(sys_pkey_free)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
index 0df062d8b2c9..b54db47f6f32 100644
--- a/arch/arm/mach-imx/gpc.c
+++ b/arch/arm/mach-imx/gpc.c
@@ -408,7 +408,7 @@ static struct genpd_onecell_data imx_gpc_onecell_data = {
static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
{
struct clk *clk;
- int i;
+ int i, ret;
imx6q_pu_domain.reg = pu_reg;
@@ -430,13 +430,22 @@ static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
return 0;
- pm_genpd_init(&imx6q_pu_domain.base, NULL, false);
- return of_genpd_add_provider_onecell(dev->of_node,
+ for (i = 0; i < ARRAY_SIZE(imx_gpc_domains); i++)
+ pm_genpd_init(imx_gpc_domains[i], NULL, false);
+
+ ret = of_genpd_add_provider_onecell(dev->of_node,
&imx_gpc_onecell_data);
+ if (ret)
+ goto power_off;
+
+ return 0;
+power_off:
+ imx6q_pm_pu_power_off(&imx6q_pu_domain.base);
clk_err:
while (i--)
clk_put(imx6q_pu_domain.clk[i]);
+ imx6q_pu_domain.reg = NULL;
return -EINVAL;
}
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 97fd25105e2c..45801b27ee5c 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -173,7 +173,7 @@ static void __init imx6q_enet_phy_init(void)
ksz9021rn_phy_fixup);
phy_register_fixup_for_uid(PHY_ID_KSZ9031, MICREL_PHY_ID_MASK,
ksz9031rn_phy_fixup);
- phy_register_fixup_for_uid(PHY_ID_AR8031, 0xffffffff,
+ phy_register_fixup_for_uid(PHY_ID_AR8031, 0xffffffef,
ar8031_phy_fixup);
phy_register_fixup_for_uid(PHY_ID_AR8035, 0xffffffef,
ar8035_phy_fixup);
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index f9b6bd306cfe..541647f57192 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -23,6 +23,7 @@ config MACH_MVEBU_V7
select CACHE_L2X0
select ARM_CPU_SUSPEND
select MACH_MVEBU_ANY
+ select MVEBU_CLK_COREDIV
config MACH_ARMADA_370
bool "Marvell Armada 370 boards"
@@ -32,7 +33,6 @@ config MACH_ARMADA_370
select CPU_PJ4B
select MACH_MVEBU_V7
select PINCTRL_ARMADA_370
- select MVEBU_CLK_COREDIV
help
Say 'Y' here if you want your kernel to support boards based
on the Marvell Armada 370 SoC with device tree.
@@ -50,7 +50,6 @@ config MACH_ARMADA_375
select HAVE_SMP
select MACH_MVEBU_V7
select PINCTRL_ARMADA_375
- select MVEBU_CLK_COREDIV
help
Say 'Y' here if you want your kernel to support boards based
on the Marvell Armada 375 SoC with device tree.
@@ -68,7 +67,6 @@ config MACH_ARMADA_38X
select HAVE_SMP
select MACH_MVEBU_V7
select PINCTRL_ARMADA_38X
- select MVEBU_CLK_COREDIV
help
Say 'Y' here if you want your kernel to support boards based
on the Marvell Armada 380/385 SoC with device tree.
diff --git a/arch/arm/mach-uniphier/Kconfig b/arch/arm/mach-uniphier/Kconfig
index 82dddee3a469..3930fbba30b4 100644
--- a/arch/arm/mach-uniphier/Kconfig
+++ b/arch/arm/mach-uniphier/Kconfig
@@ -1,6 +1,7 @@
config ARCH_UNIPHIER
bool "Socionext UniPhier SoCs"
depends on ARCH_MULTI_V7
+ select ARCH_HAS_RESET_CONTROLLER
select ARM_AMBA
select ARM_GLOBAL_TIMER
select ARM_GIC
diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S
index 6d8e8e3365d1..4cdfab31a0b6 100644
--- a/arch/arm/mm/abort-lv4t.S
+++ b/arch/arm/mm/abort-lv4t.S
@@ -7,7 +7,7 @@
* : r4 = aborted context pc
* : r5 = aborted context psr
*
- * Returns : r4-r5, r10-r11, r13 preserved
+ * Returns : r4-r5, r9-r11, r13 preserved
*
* Purpose : obtain information about current aborted instruction.
* Note: we read user space. This means we might cause a data
@@ -48,7 +48,10 @@ ENTRY(v4t_late_abort)
/* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m
/* d */ b do_DataAbort @ ldc rd, [rn, #m]
/* e */ b .data_unknown
-/* f */
+/* f */ b .data_unknown
+
+.data_unknown_r9:
+ ldr r9, [sp], #4
.data_unknown: @ Part of jumptable
mov r0, r4
mov r1, r8
@@ -57,6 +60,7 @@ ENTRY(v4t_late_abort)
.data_arm_ldmstm:
tst r8, #1 << 21 @ check writeback bit
beq do_DataAbort @ no writeback -> no fixup
+ str r9, [sp, #-4]!
mov r7, #0x11
orr r7, r7, #0x1100
and r6, r8, r7
@@ -75,12 +79,14 @@ ENTRY(v4t_late_abort)
subne r7, r7, r6, lsl #2 @ Undo increment
addeq r7, r7, r6, lsl #2 @ Undo decrement
str r7, [r2, r9, lsr #14] @ Put register 'Rn'
+ ldr r9, [sp], #4
b do_DataAbort
.data_arm_lateldrhpre:
tst r8, #1 << 21 @ Check writeback bit
beq do_DataAbort @ No writeback -> no fixup
.data_arm_lateldrhpost:
+ str r9, [sp, #-4]!
and r9, r8, #0x00f @ get Rm / low nibble of immediate value
tst r8, #1 << 22 @ if (immediate offset)
andne r6, r8, #0xf00 @ { immediate high nibble
@@ -93,6 +99,7 @@ ENTRY(v4t_late_abort)
subne r7, r7, r6 @ Undo increment
addeq r7, r7, r6 @ Undo decrement
str r7, [r2, r9, lsr #14] @ Put register 'Rn'
+ ldr r9, [sp], #4
b do_DataAbort
.data_arm_lateldrpreconst:
@@ -101,12 +108,14 @@ ENTRY(v4t_late_abort)
.data_arm_lateldrpostconst:
movs r6, r8, lsl #20 @ Get offset
beq do_DataAbort @ zero -> no fixup
+ str r9, [sp, #-4]!
and r9, r8, #15 << 16 @ Extract 'n' from instruction
ldr r7, [r2, r9, lsr #14] @ Get register 'Rn'
tst r8, #1 << 23 @ Check U bit
subne r7, r7, r6, lsr #20 @ Undo increment
addeq r7, r7, r6, lsr #20 @ Undo decrement
str r7, [r2, r9, lsr #14] @ Put register 'Rn'
+ ldr r9, [sp], #4
b do_DataAbort
.data_arm_lateldrprereg:
@@ -115,6 +124,7 @@ ENTRY(v4t_late_abort)
.data_arm_lateldrpostreg:
and r7, r8, #15 @ Extract 'm' from instruction
ldr r6, [r2, r7, lsl #2] @ Get register 'Rm'
+ str r9, [sp, #-4]!
mov r9, r8, lsr #7 @ get shift count
ands r9, r9, #31
and r7, r8, #0x70 @ get shift type
@@ -126,33 +136,33 @@ ENTRY(v4t_late_abort)
b .data_arm_apply_r6_and_rn
b .data_arm_apply_r6_and_rn @ 1: LSL #0
nop
- b .data_unknown @ 2: MUL?
+ b .data_unknown_r9 @ 2: MUL?
nop
- b .data_unknown @ 3: MUL?
+ b .data_unknown_r9 @ 3: MUL?
nop
mov r6, r6, lsr r9 @ 4: LSR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, lsr #32 @ 5: LSR #32
b .data_arm_apply_r6_and_rn
- b .data_unknown @ 6: MUL?
+ b .data_unknown_r9 @ 6: MUL?
nop
- b .data_unknown @ 7: MUL?
+ b .data_unknown_r9 @ 7: MUL?
nop
mov r6, r6, asr r9 @ 8: ASR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, asr #32 @ 9: ASR #32
b .data_arm_apply_r6_and_rn
- b .data_unknown @ A: MUL?
+ b .data_unknown_r9 @ A: MUL?
nop
- b .data_unknown @ B: MUL?
+ b .data_unknown_r9 @ B: MUL?
nop
mov r6, r6, ror r9 @ C: ROR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, rrx @ D: RRX
b .data_arm_apply_r6_and_rn
- b .data_unknown @ E: MUL?
+ b .data_unknown_r9 @ E: MUL?
nop
- b .data_unknown @ F: MUL?
+ b .data_unknown_r9 @ F: MUL?
.data_thumb_abort:
ldrh r8, [r4] @ read instruction
@@ -190,6 +200,7 @@ ENTRY(v4t_late_abort)
.data_thumb_pushpop:
tst r8, #1 << 10
beq .data_unknown
+ str r9, [sp, #-4]!
and r6, r8, #0x55 @ hweight8(r8) + R bit
and r9, r8, #0xaa
add r6, r6, r9, lsr #1
@@ -204,9 +215,11 @@ ENTRY(v4t_late_abort)
addeq r7, r7, r6, lsl #2 @ increment SP if PUSH
subne r7, r7, r6, lsl #2 @ decrement SP if POP
str r7, [r2, #13 << 2]
+ ldr r9, [sp], #4
b do_DataAbort
.data_thumb_ldmstm:
+ str r9, [sp, #-4]!
and r6, r8, #0x55 @ hweight8(r8)
and r9, r8, #0xaa
add r6, r6, r9, lsr #1
@@ -219,4 +232,5 @@ ENTRY(v4t_late_abort)
and r6, r6, #15 @ number of regs to transfer
sub r7, r7, r6, lsl #2 @ always decrement
str r7, [r2, r9, lsr #6]
+ ldr r9, [sp], #4
b do_DataAbort
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index cfbdf02ef566..101794f5ce10 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -190,6 +190,7 @@ config ARCH_THUNDER
config ARCH_UNIPHIER
bool "Socionext UniPhier SoC Family"
+ select ARCH_HAS_RESET_CONTROLLER
select PINCTRL
help
This enables support for Socionext UniPhier SoC family.
diff --git a/arch/arm64/boot/dts/broadcom/ns2-svk.dts b/arch/arm64/boot/dts/broadcom/ns2-svk.dts
index 2d7872a36b91..b09f3bc5c6c1 100644
--- a/arch/arm64/boot/dts/broadcom/ns2-svk.dts
+++ b/arch/arm64/boot/dts/broadcom/ns2-svk.dts
@@ -164,6 +164,8 @@
nand-ecc-mode = "hw";
nand-ecc-strength = <8>;
nand-ecc-step-size = <512>;
+ nand-bus-width = <16>;
+ brcm,nand-oob-sector-size = <16>;
#address-cells = <1>;
#size-cells = <1>;
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index 220ac7057d12..97d331ec2500 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -123,6 +123,7 @@
<1 14 0xf08>, /* Physical Non-Secure PPI */
<1 11 0xf08>, /* Virtual PPI */
<1 10 0xf08>; /* Hypervisor PPI */
+ fsl,erratum-a008585;
};
pmu {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
index 337da90bd7da..7f0dc13b4087 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
@@ -195,6 +195,7 @@
<1 14 4>, /* Physical Non-Secure PPI, active-low */
<1 11 4>, /* Virtual PPI, active-low */
<1 10 4>; /* Hypervisor PPI, active-low */
+ fsl,erratum-a008585;
};
pmu {
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
index e5e3ed678b6f..602e2c2e9a4d 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
@@ -131,7 +131,7 @@
#address-cells = <0x1>;
#size-cells = <0x0>;
cell-index = <1>;
- clocks = <&cpm_syscon0 0 3>;
+ clocks = <&cpm_syscon0 1 21>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts b/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts
index 46cdddfcea6c..e5eeca2c2456 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts
@@ -116,7 +116,6 @@
cap-mmc-highspeed;
clock-frequency = <150000000>;
disable-wp;
- keep-power-in-suspend;
non-removable;
num-slots = <1>;
vmmc-supply = <&vcc_io>;
@@ -258,8 +257,6 @@
};
vcc_sd: SWITCH_REG1 {
- regulator-always-on;
- regulator-boot-on;
regulator-name = "vcc_sd";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
index 5797933ef80e..ea0a8eceefd4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
@@ -152,8 +152,6 @@
gpio = <&gpio3 11 GPIO_ACTIVE_LOW>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
- regulator-always-on;
- regulator-boot-on;
vin-supply = <&vcc_io>;
};
@@ -201,7 +199,6 @@
bus-width = <8>;
cap-mmc-highspeed;
disable-wp;
- keep-power-in-suspend;
mmc-pwrseq = <&emmc_pwrseq>;
mmc-hs200-1_2v;
mmc-hs200-1_8v;
@@ -350,7 +347,6 @@
clock-freq-min-max = <400000 50000000>;
cap-sd-highspeed;
card-detect-delay = <200>;
- keep-power-in-suspend;
num-slots = <1>;
pinctrl-names = "default";
pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
index 08fd7cf7769c..56a1b2e92cf3 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
@@ -257,18 +257,18 @@
reg = <0x59801000 0x400>;
};
- mioctrl@59810000 {
- compatible = "socionext,uniphier-mioctrl",
+ sdctrl@59810000 {
+ compatible = "socionext,uniphier-ld20-sdctrl",
"simple-mfd", "syscon";
reg = <0x59810000 0x800>;
- mio_clk: clock {
- compatible = "socionext,uniphier-ld20-mio-clock";
+ sd_clk: clock {
+ compatible = "socionext,uniphier-ld20-sd-clock";
#clock-cells = <1>;
};
- mio_rst: reset {
- compatible = "socionext,uniphier-ld20-mio-reset";
+ sd_rst: reset {
+ compatible = "socionext,uniphier-ld20-sd-reset";
#reset-cells = <1>;
};
};
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index ba62df8c6e35..b71086d25195 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -217,7 +217,7 @@ static inline void *phys_to_virt(phys_addr_t x)
#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#else
#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
-#define __page_to_voff(kaddr) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
+#define __page_to_voff(page) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
#define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET))
#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index 778a985c8a70..4b32168cf91a 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -147,7 +147,7 @@ static int __init early_cpu_to_node(int cpu)
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
- return node_distance(from, to);
+ return node_distance(early_cpu_to_node(from), early_cpu_to_node(to));
}
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
@@ -223,8 +223,11 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
void *nd;
int tnid;
- pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
- nid, start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
+ if (start_pfn < end_pfn)
+ pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n", nid,
+ start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
+ else
+ pr_info("Initmem setup node %d [<memory-less node>]\n", nid);
nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
nd = __va(nd_pa);
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index 099e170a93ee..0068fd411a84 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -3149,7 +3149,7 @@ static void print_dma_descriptors(struct cryptocop_int_operation *iop)
printk("print_dma_descriptors start\n");
printk("iop:\n");
- printk("\tsid: 0x%lld\n", iop->sid);
+ printk("\tsid: 0x%llx\n", iop->sid);
printk("\tcdesc_out: 0x%p\n", iop->cdesc_out);
printk("\tcdesc_in: 0x%p\n", iop->cdesc_in);
diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h
index b408fe660cf8..3cef06875f5c 100644
--- a/arch/h8300/include/asm/thread_info.h
+++ b/arch/h8300/include/asm/thread_info.h
@@ -31,7 +31,6 @@ struct thread_info {
int cpu; /* cpu we're on */
int preempt_count; /* 0 => preemptable, <0 => BUG */
mm_segment_t addr_limit;
- struct restart_block restart_block;
};
/*
@@ -44,9 +43,6 @@ struct thread_info {
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index ad1f81f574e5..7138303cbbf2 100644
--- a/arch/h8300/kernel/signal.c
+++ b/arch/h8300/kernel/signal.c
@@ -79,7 +79,7 @@ restore_sigcontext(struct sigcontext *usc, int *pd0)
unsigned int er0;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
/* restore passed registers */
#define COPY(r) do { err |= get_user(regs->r, &usc->sc_##r); } while (0)
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index fbf40d3c8123..1a6bac7b076f 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -263,7 +263,7 @@ KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)
bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \
VMLINUX_ENTRY_ADDRESS=$(entry-y) \
- PLATFORM=$(platform-y)
+ PLATFORM="$(platform-y)"
ifdef CONFIG_32BIT
bootvars-y += ADDR_BITS=32
endif
diff --git a/arch/mips/boot/dts/mti/malta.dts b/arch/mips/boot/dts/mti/malta.dts
index f604a272d91d..ffe3a1508e72 100644
--- a/arch/mips/boot/dts/mti/malta.dts
+++ b/arch/mips/boot/dts/mti/malta.dts
@@ -84,12 +84,13 @@
fpga_regs: system-controller@1f000000 {
compatible = "mti,malta-fpga", "syscon", "simple-mfd";
reg = <0x1f000000 0x1000>;
+ native-endian;
reboot {
compatible = "syscon-reboot";
regmap = <&fpga_regs>;
offset = <0x500>;
- mask = <0x4d>;
+ mask = <0x42>;
};
};
diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
index 0ea73e845440..d493ccbf274a 100644
--- a/arch/mips/generic/init.c
+++ b/arch/mips/generic/init.c
@@ -30,9 +30,19 @@ static __initdata const void *mach_match_data;
void __init prom_init(void)
{
+ plat_get_fdt();
+ BUG_ON(!fdt);
+}
+
+void __init *plat_get_fdt(void)
+{
const struct mips_machine *check_mach;
const struct of_device_id *match;
+ if (fdt)
+ /* Already set up */
+ return (void *)fdt;
+
if ((fw_arg0 == -2) && !fdt_check_header((void *)fw_arg1)) {
/*
* We booted using the UHI boot protocol, so we have been
@@ -75,12 +85,6 @@ void __init prom_init(void)
/* Retrieve the machine's FDT */
fdt = mach->fdt;
}
-
- BUG_ON(!fdt);
-}
-
-void __init *plat_get_fdt(void)
-{
return (void *)fdt;
}
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index 355dc25172e7..c05369e0b8d6 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -63,6 +63,8 @@ do { \
extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
struct mips_fpu_struct *ctx, int has_fpu,
void *__user *fault_addr);
+void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
+ struct task_struct *tsk);
int process_fpemu_return(int sig, void __user *fault_addr,
unsigned long fcr31);
int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
@@ -81,4 +83,15 @@ static inline void fpu_emulator_init_fpu(void)
set_fpr64(&t->thread.fpu.fpr[i], 0, SIGNALLING_NAN);
}
+/*
+ * Mask the FCSR Cause bits according to the Enable bits, observing
+ * that Unimplemented is always enabled.
+ */
+static inline unsigned long mask_fcr31_x(unsigned long fcr31)
+{
+ return fcr31 & (FPU_CSR_UNI_X |
+ ((fcr31 & FPU_CSR_ALL_E) <<
+ (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E))));
+}
+
#endif /* _ASM_FPU_EMULATOR_H */
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index ebb5c0f2f90d..c0ae27971e31 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -76,6 +76,22 @@ do { if (cpu_has_rw_llb) { \
} while (0)
/*
+ * Check FCSR for any unmasked exceptions pending set with `ptrace',
+ * clear them and send a signal.
+ */
+#define __sanitize_fcr31(next) \
+do { \
+ unsigned long fcr31 = mask_fcr31_x(next->thread.fpu.fcr31); \
+ void __user *pc; \
+ \
+ if (unlikely(fcr31)) { \
+ pc = (void __user *)task_pt_regs(next)->cp0_epc; \
+ next->thread.fpu.fcr31 &= ~fcr31; \
+ force_fcr31_sig(fcr31, pc, next); \
+ } \
+} while (0)
+
+/*
* For newly created kernel threads switch_to() will return to
* ret_from_kernel_thread, newly created user threads to ret_from_fork.
* That is, everything following resume() will be skipped for new threads.
@@ -85,6 +101,8 @@ do { if (cpu_has_rw_llb) { \
do { \
__mips_mt_fpaff_switch_to(prev); \
lose_fpu_inatomic(1, prev); \
+ if (tsk_used_math(next)) \
+ __sanitize_fcr31(next); \
if (cpu_has_dsp) { \
__save_dsp(prev); \
__restore_dsp(next); \
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index 2a45867d3b4f..a4964c334cab 100644
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -21,6 +21,11 @@ static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
+phys_addr_t __weak mips_cpc_default_phys_base(void)
+{
+ return 0;
+}
+
/**
* mips_cpc_phys_base - retrieve the physical base address of the CPC
*
@@ -43,8 +48,12 @@ static phys_addr_t mips_cpc_phys_base(void)
if (cpc_base & CM_GCR_CPC_BASE_CPCEN_MSK)
return cpc_base & CM_GCR_CPC_BASE_CPCBASE_MSK;
- /* Otherwise, give it the default address & enable it */
+ /* Otherwise, use the default address */
cpc_base = mips_cpc_default_phys_base();
+ if (!cpc_base)
+ return cpc_base;
+
+ /* Enable the CPC, mapped at the default address */
write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN_MSK);
return cpc_base;
}
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index 22dedd62818a..bd09853aecdf 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -899,7 +899,7 @@ static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
* mipsr2_decoder: Decode and emulate a MIPS R2 instruction
* @regs: Process register set
* @inst: Instruction to decode and emulate
- * @fcr31: Floating Point Control and Status Register returned
+ * @fcr31: Floating Point Control and Status Register Cause bits returned
*/
int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
{
@@ -1172,13 +1172,13 @@ fpu_emul:
err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
&fault_addr);
- *fcr31 = current->thread.fpu.fcr31;
/*
- * We can't allow the emulated instruction to leave any of
- * the cause bits set in $fcr31.
+ * We can't allow the emulated instruction to leave any
+ * enabled Cause bits set in $fcr31.
*/
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ *fcr31 = res = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~res;
/*
* this is a tricky issue - lose_fpu() uses LL/SC atomics
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 6103b24d1bfc..a92994d60e91 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -79,16 +79,15 @@ void ptrace_disable(struct task_struct *child)
}
/*
- * Poke at FCSR according to its mask. Don't set the cause bits as
- * this is currently not handled correctly in FP context restoration
- * and will cause an oops if a corresponding enable bit is set.
+ * Poke at FCSR according to its mask. Set the Cause bits even
+ * if a corresponding Enable bit is set. This will be noticed at
+ * the time the thread is switched to and SIGFPE thrown accordingly.
*/
static void ptrace_setfcr31(struct task_struct *child, u32 value)
{
u32 fcr31;
u32 mask;
- value &= ~FPU_CSR_ALL_X;
fcr31 = child->thread.fpu.fcr31;
mask = boot_cpu_data.fpu_msk31;
child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
@@ -817,6 +816,7 @@ long arch_ptrace(struct task_struct *child, long request,
break;
#endif
case FPC_CSR:
+ init_fp_ctx(child);
ptrace_setfcr31(child, data);
break;
case DSP_BASE ... DSP_BASE + 5: {
diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S
index b4ac6374a38f..918f2f6d3861 100644
--- a/arch/mips/kernel/r2300_fpu.S
+++ b/arch/mips/kernel/r2300_fpu.S
@@ -21,106 +21,84 @@
#define EX(a,b) \
9: a,##b; \
.section __ex_table,"a"; \
+ PTR 9b,fault; \
+ .previous
+
+#define EX2(a,b) \
+9: a,##b; \
+ .section __ex_table,"a"; \
PTR 9b,bad_stack; \
+ PTR 9b+4,bad_stack; \
.previous
.set noreorder
.set mips1
- /* Save floating point context */
+
+/**
+ * _save_fp_context() - save FP context from the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Save FP context, including the 32 FP data registers and the FP
+ * control & status register, from the FPU to signal context.
+ */
LEAF(_save_fp_context)
.set push
SET_HARDFLOAT
li v0, 0 # assume success
- cfc1 t1,fcr31
- EX(swc1 $f0,(SC_FPREGS+0)(a0))
- EX(swc1 $f1,(SC_FPREGS+8)(a0))
- EX(swc1 $f2,(SC_FPREGS+16)(a0))
- EX(swc1 $f3,(SC_FPREGS+24)(a0))
- EX(swc1 $f4,(SC_FPREGS+32)(a0))
- EX(swc1 $f5,(SC_FPREGS+40)(a0))
- EX(swc1 $f6,(SC_FPREGS+48)(a0))
- EX(swc1 $f7,(SC_FPREGS+56)(a0))
- EX(swc1 $f8,(SC_FPREGS+64)(a0))
- EX(swc1 $f9,(SC_FPREGS+72)(a0))
- EX(swc1 $f10,(SC_FPREGS+80)(a0))
- EX(swc1 $f11,(SC_FPREGS+88)(a0))
- EX(swc1 $f12,(SC_FPREGS+96)(a0))
- EX(swc1 $f13,(SC_FPREGS+104)(a0))
- EX(swc1 $f14,(SC_FPREGS+112)(a0))
- EX(swc1 $f15,(SC_FPREGS+120)(a0))
- EX(swc1 $f16,(SC_FPREGS+128)(a0))
- EX(swc1 $f17,(SC_FPREGS+136)(a0))
- EX(swc1 $f18,(SC_FPREGS+144)(a0))
- EX(swc1 $f19,(SC_FPREGS+152)(a0))
- EX(swc1 $f20,(SC_FPREGS+160)(a0))
- EX(swc1 $f21,(SC_FPREGS+168)(a0))
- EX(swc1 $f22,(SC_FPREGS+176)(a0))
- EX(swc1 $f23,(SC_FPREGS+184)(a0))
- EX(swc1 $f24,(SC_FPREGS+192)(a0))
- EX(swc1 $f25,(SC_FPREGS+200)(a0))
- EX(swc1 $f26,(SC_FPREGS+208)(a0))
- EX(swc1 $f27,(SC_FPREGS+216)(a0))
- EX(swc1 $f28,(SC_FPREGS+224)(a0))
- EX(swc1 $f29,(SC_FPREGS+232)(a0))
- EX(swc1 $f30,(SC_FPREGS+240)(a0))
- EX(swc1 $f31,(SC_FPREGS+248)(a0))
- EX(sw t1,(SC_FPC_CSR)(a0))
- cfc1 t0,$0 # implementation/version
+ cfc1 t1, fcr31
+ EX2(s.d $f0, 0(a0))
+ EX2(s.d $f2, 16(a0))
+ EX2(s.d $f4, 32(a0))
+ EX2(s.d $f6, 48(a0))
+ EX2(s.d $f8, 64(a0))
+ EX2(s.d $f10, 80(a0))
+ EX2(s.d $f12, 96(a0))
+ EX2(s.d $f14, 112(a0))
+ EX2(s.d $f16, 128(a0))
+ EX2(s.d $f18, 144(a0))
+ EX2(s.d $f20, 160(a0))
+ EX2(s.d $f22, 176(a0))
+ EX2(s.d $f24, 192(a0))
+ EX2(s.d $f26, 208(a0))
+ EX2(s.d $f28, 224(a0))
+ EX2(s.d $f30, 240(a0))
jr ra
+ EX(sw t1, (a1))
.set pop
- .set nomacro
- EX(sw t0,(SC_FPC_EIR)(a0))
- .set macro
END(_save_fp_context)
-/*
- * Restore FPU state:
- * - fp gp registers
- * - cp1 status/control register
+/**
+ * _restore_fp_context() - restore FP context to the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
*
- * We base the decision which registers to restore from the signal stack
- * frame on the current content of c0_status, not on the content of the
- * stack frame which might have been changed by the user.
+ * Restore FP context, including the 32 FP data registers and the FP
+ * control & status register, from signal context to the FPU.
*/
LEAF(_restore_fp_context)
.set push
SET_HARDFLOAT
li v0, 0 # assume success
- EX(lw t0,(SC_FPC_CSR)(a0))
- EX(lwc1 $f0,(SC_FPREGS+0)(a0))
- EX(lwc1 $f1,(SC_FPREGS+8)(a0))
- EX(lwc1 $f2,(SC_FPREGS+16)(a0))
- EX(lwc1 $f3,(SC_FPREGS+24)(a0))
- EX(lwc1 $f4,(SC_FPREGS+32)(a0))
- EX(lwc1 $f5,(SC_FPREGS+40)(a0))
- EX(lwc1 $f6,(SC_FPREGS+48)(a0))
- EX(lwc1 $f7,(SC_FPREGS+56)(a0))
- EX(lwc1 $f8,(SC_FPREGS+64)(a0))
- EX(lwc1 $f9,(SC_FPREGS+72)(a0))
- EX(lwc1 $f10,(SC_FPREGS+80)(a0))
- EX(lwc1 $f11,(SC_FPREGS+88)(a0))
- EX(lwc1 $f12,(SC_FPREGS+96)(a0))
- EX(lwc1 $f13,(SC_FPREGS+104)(a0))
- EX(lwc1 $f14,(SC_FPREGS+112)(a0))
- EX(lwc1 $f15,(SC_FPREGS+120)(a0))
- EX(lwc1 $f16,(SC_FPREGS+128)(a0))
- EX(lwc1 $f17,(SC_FPREGS+136)(a0))
- EX(lwc1 $f18,(SC_FPREGS+144)(a0))
- EX(lwc1 $f19,(SC_FPREGS+152)(a0))
- EX(lwc1 $f20,(SC_FPREGS+160)(a0))
- EX(lwc1 $f21,(SC_FPREGS+168)(a0))
- EX(lwc1 $f22,(SC_FPREGS+176)(a0))
- EX(lwc1 $f23,(SC_FPREGS+184)(a0))
- EX(lwc1 $f24,(SC_FPREGS+192)(a0))
- EX(lwc1 $f25,(SC_FPREGS+200)(a0))
- EX(lwc1 $f26,(SC_FPREGS+208)(a0))
- EX(lwc1 $f27,(SC_FPREGS+216)(a0))
- EX(lwc1 $f28,(SC_FPREGS+224)(a0))
- EX(lwc1 $f29,(SC_FPREGS+232)(a0))
- EX(lwc1 $f30,(SC_FPREGS+240)(a0))
- EX(lwc1 $f31,(SC_FPREGS+248)(a0))
+ EX(lw t0, (a1))
+ EX2(l.d $f0, 0(a0))
+ EX2(l.d $f2, 16(a0))
+ EX2(l.d $f4, 32(a0))
+ EX2(l.d $f6, 48(a0))
+ EX2(l.d $f8, 64(a0))
+ EX2(l.d $f10, 80(a0))
+ EX2(l.d $f12, 96(a0))
+ EX2(l.d $f14, 112(a0))
+ EX2(l.d $f16, 128(a0))
+ EX2(l.d $f18, 144(a0))
+ EX2(l.d $f20, 160(a0))
+ EX2(l.d $f22, 176(a0))
+ EX2(l.d $f24, 192(a0))
+ EX2(l.d $f26, 208(a0))
+ EX2(l.d $f28, 224(a0))
+ EX2(l.d $f30, 240(a0))
jr ra
- ctc1 t0,fcr31
+ ctc1 t0, fcr31
.set pop
END(_restore_fp_context)
.set reorder
diff --git a/arch/mips/kernel/r6000_fpu.S b/arch/mips/kernel/r6000_fpu.S
index 47077380c15c..9cc7bfab3419 100644
--- a/arch/mips/kernel/r6000_fpu.S
+++ b/arch/mips/kernel/r6000_fpu.S
@@ -21,7 +21,14 @@
.set push
SET_HARDFLOAT
- /* Save floating point context */
+/**
+ * _save_fp_context() - save FP context from the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Save FP context, including the 32 FP data registers and the FP
+ * control & status register, from the FPU to signal context.
+ */
LEAF(_save_fp_context)
mfc0 t0,CP0_STATUS
sll t0,t0,2
@@ -30,59 +37,59 @@
cfc1 t1,fcr31
/* Store the 16 double precision registers */
- sdc1 $f0,(SC_FPREGS+0)(a0)
- sdc1 $f2,(SC_FPREGS+16)(a0)
- sdc1 $f4,(SC_FPREGS+32)(a0)
- sdc1 $f6,(SC_FPREGS+48)(a0)
- sdc1 $f8,(SC_FPREGS+64)(a0)
- sdc1 $f10,(SC_FPREGS+80)(a0)
- sdc1 $f12,(SC_FPREGS+96)(a0)
- sdc1 $f14,(SC_FPREGS+112)(a0)
- sdc1 $f16,(SC_FPREGS+128)(a0)
- sdc1 $f18,(SC_FPREGS+144)(a0)
- sdc1 $f20,(SC_FPREGS+160)(a0)
- sdc1 $f22,(SC_FPREGS+176)(a0)
- sdc1 $f24,(SC_FPREGS+192)(a0)
- sdc1 $f26,(SC_FPREGS+208)(a0)
- sdc1 $f28,(SC_FPREGS+224)(a0)
- sdc1 $f30,(SC_FPREGS+240)(a0)
+ sdc1 $f0,0(a0)
+ sdc1 $f2,16(a0)
+ sdc1 $f4,32(a0)
+ sdc1 $f6,48(a0)
+ sdc1 $f8,64(a0)
+ sdc1 $f10,80(a0)
+ sdc1 $f12,96(a0)
+ sdc1 $f14,112(a0)
+ sdc1 $f16,128(a0)
+ sdc1 $f18,144(a0)
+ sdc1 $f20,160(a0)
+ sdc1 $f22,176(a0)
+ sdc1 $f24,192(a0)
+ sdc1 $f26,208(a0)
+ sdc1 $f28,224(a0)
+ sdc1 $f30,240(a0)
jr ra
- sw t0,SC_FPC_CSR(a0)
+ sw t0,(a1)
1: jr ra
nop
END(_save_fp_context)
-/* Restore FPU state:
- * - fp gp registers
- * - cp1 status/control register
+/**
+ * _restore_fp_context() - restore FP context to the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
*
- * We base the decision which registers to restore from the signal stack
- * frame on the current content of c0_status, not on the content of the
- * stack frame which might have been changed by the user.
+ * Restore FP context, including the 32 FP data registers and the FP
+ * control & status register, from signal context to the FPU.
*/
LEAF(_restore_fp_context)
mfc0 t0,CP0_STATUS
sll t0,t0,2
bgez t0,1f
- lw t0,SC_FPC_CSR(a0)
+ lw t0,(a1)
/* Restore the 16 double precision registers */
- ldc1 $f0,(SC_FPREGS+0)(a0)
- ldc1 $f2,(SC_FPREGS+16)(a0)
- ldc1 $f4,(SC_FPREGS+32)(a0)
- ldc1 $f6,(SC_FPREGS+48)(a0)
- ldc1 $f8,(SC_FPREGS+64)(a0)
- ldc1 $f10,(SC_FPREGS+80)(a0)
- ldc1 $f12,(SC_FPREGS+96)(a0)
- ldc1 $f14,(SC_FPREGS+112)(a0)
- ldc1 $f16,(SC_FPREGS+128)(a0)
- ldc1 $f18,(SC_FPREGS+144)(a0)
- ldc1 $f20,(SC_FPREGS+160)(a0)
- ldc1 $f22,(SC_FPREGS+176)(a0)
- ldc1 $f24,(SC_FPREGS+192)(a0)
- ldc1 $f26,(SC_FPREGS+208)(a0)
- ldc1 $f28,(SC_FPREGS+224)(a0)
- ldc1 $f30,(SC_FPREGS+240)(a0)
+ ldc1 $f0,0(a0)
+ ldc1 $f2,16(a0)
+ ldc1 $f4,32(a0)
+ ldc1 $f6,48(a0)
+ ldc1 $f8,64(a0)
+ ldc1 $f10,80(a0)
+ ldc1 $f12,96(a0)
+ ldc1 $f14,112(a0)
+ ldc1 $f16,128(a0)
+ ldc1 $f18,144(a0)
+ ldc1 $f20,160(a0)
+ ldc1 $f22,176(a0)
+ ldc1 $f24,192(a0)
+ ldc1 $f26,208(a0)
+ ldc1 $f28,224(a0)
+ ldc1 $f30,240(a0)
jr ra
ctc1 t0,fcr31
1: jr ra
diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
index ca1cc30c0891..1958910b75c0 100644
--- a/arch/mips/kernel/relocate.c
+++ b/arch/mips/kernel/relocate.c
@@ -200,7 +200,7 @@ static inline __init unsigned long get_random_boot(void)
#if defined(CONFIG_USE_OF)
/* Get any additional entropy passed in device tree */
- {
+ if (initial_boot_params) {
int node, len;
u64 *prop;
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 0d57909d9026..f66e5ce505b2 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -368,6 +368,19 @@ static void __init bootmem_init(void)
end = PFN_DOWN(boot_mem_map.map[i].addr
+ boot_mem_map.map[i].size);
+#ifndef CONFIG_HIGHMEM
+ /*
+ * Skip highmem here so we get an accurate max_low_pfn if low
+ * memory stops short of high memory.
+ * If the region overlaps HIGHMEM_START, end is clipped so
+ * max_pfn excludes the highmem portion.
+ */
+ if (start >= PFN_DOWN(HIGHMEM_START))
+ continue;
+ if (end > PFN_DOWN(HIGHMEM_START))
+ end = PFN_DOWN(HIGHMEM_START);
+#endif
+
if (end > max_low_pfn)
max_low_pfn = end;
if (start < min_low_pfn)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 1f5fdee1dfc3..3905003dfe2b 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -156,7 +156,7 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
print_ip_sym(pc);
pc = unwind_stack(task, &sp, pc, &ra);
} while (pc);
- printk("\n");
+ pr_cont("\n");
}
/*
@@ -174,22 +174,24 @@ static void show_stacktrace(struct task_struct *task,
printk("Stack :");
i = 0;
while ((unsigned long) sp & (PAGE_SIZE - 1)) {
- if (i && ((i % (64 / field)) == 0))
- printk("\n ");
+ if (i && ((i % (64 / field)) == 0)) {
+ pr_cont("\n");
+ printk(" ");
+ }
if (i > 39) {
- printk(" ...");
+ pr_cont(" ...");
break;
}
if (__get_user(stackdata, sp++)) {
- printk(" (Bad stack address)");
+ pr_cont(" (Bad stack address)");
break;
}
- printk(" %0*lx", field, stackdata);
+ pr_cont(" %0*lx", field, stackdata);
i++;
}
- printk("\n");
+ pr_cont("\n");
show_backtrace(task, regs);
}
@@ -229,18 +231,19 @@ static void show_code(unsigned int __user *pc)
long i;
unsigned short __user *pc16 = NULL;
- printk("\nCode:");
+ printk("Code:");
if ((unsigned long)pc & 1)
pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
for(i = -3 ; i < 6 ; i++) {
unsigned int insn;
if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
- printk(" (Bad address in epc)\n");
+ pr_cont(" (Bad address in epc)\n");
break;
}
- printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
+ pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
}
+ pr_cont("\n");
}
static void __show_regs(const struct pt_regs *regs)
@@ -259,15 +262,15 @@ static void __show_regs(const struct pt_regs *regs)
if ((i % 4) == 0)
printk("$%2d :", i);
if (i == 0)
- printk(" %0*lx", field, 0UL);
+ pr_cont(" %0*lx", field, 0UL);
else if (i == 26 || i == 27)
- printk(" %*s", field, "");
+ pr_cont(" %*s", field, "");
else
- printk(" %0*lx", field, regs->regs[i]);
+ pr_cont(" %0*lx", field, regs->regs[i]);
i++;
if ((i % 4) == 0)
- printk("\n");
+ pr_cont("\n");
}
#ifdef CONFIG_CPU_HAS_SMARTMIPS
@@ -288,46 +291,46 @@ static void __show_regs(const struct pt_regs *regs)
if (cpu_has_3kex) {
if (regs->cp0_status & ST0_KUO)
- printk("KUo ");
+ pr_cont("KUo ");
if (regs->cp0_status & ST0_IEO)
- printk("IEo ");
+ pr_cont("IEo ");
if (regs->cp0_status & ST0_KUP)
- printk("KUp ");
+ pr_cont("KUp ");
if (regs->cp0_status & ST0_IEP)
- printk("IEp ");
+ pr_cont("IEp ");
if (regs->cp0_status & ST0_KUC)
- printk("KUc ");
+ pr_cont("KUc ");
if (regs->cp0_status & ST0_IEC)
- printk("IEc ");
+ pr_cont("IEc ");
} else if (cpu_has_4kex) {
if (regs->cp0_status & ST0_KX)
- printk("KX ");
+ pr_cont("KX ");
if (regs->cp0_status & ST0_SX)
- printk("SX ");
+ pr_cont("SX ");
if (regs->cp0_status & ST0_UX)
- printk("UX ");
+ pr_cont("UX ");
switch (regs->cp0_status & ST0_KSU) {
case KSU_USER:
- printk("USER ");
+ pr_cont("USER ");
break;
case KSU_SUPERVISOR:
- printk("SUPERVISOR ");
+ pr_cont("SUPERVISOR ");
break;
case KSU_KERNEL:
- printk("KERNEL ");
+ pr_cont("KERNEL ");
break;
default:
- printk("BAD_MODE ");
+ pr_cont("BAD_MODE ");
break;
}
if (regs->cp0_status & ST0_ERL)
- printk("ERL ");
+ pr_cont("ERL ");
if (regs->cp0_status & ST0_EXL)
- printk("EXL ");
+ pr_cont("EXL ");
if (regs->cp0_status & ST0_IE)
- printk("IE ");
+ pr_cont("IE ");
}
- printk("\n");
+ pr_cont("\n");
exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);
@@ -705,6 +708,32 @@ asmlinkage void do_ov(struct pt_regs *regs)
exception_exit(prev_state);
}
+/*
+ * Send SIGFPE according to FCSR Cause bits, which must have already
+ * been masked against Enable bits. This is important as Inexact can
+ * happen together with Overflow or Underflow, and `ptrace' can set
+ * any bits.
+ */
+void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
+ struct task_struct *tsk)
+{
+ struct siginfo si = { .si_addr = fault_addr, .si_signo = SIGFPE };
+
+ if (fcr31 & FPU_CSR_INV_X)
+ si.si_code = FPE_FLTINV;
+ else if (fcr31 & FPU_CSR_DIV_X)
+ si.si_code = FPE_FLTDIV;
+ else if (fcr31 & FPU_CSR_OVF_X)
+ si.si_code = FPE_FLTOVF;
+ else if (fcr31 & FPU_CSR_UDF_X)
+ si.si_code = FPE_FLTUND;
+ else if (fcr31 & FPU_CSR_INE_X)
+ si.si_code = FPE_FLTRES;
+ else
+ si.si_code = __SI_FAULT;
+ force_sig_info(SIGFPE, &si, tsk);
+}
+
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
struct siginfo si = { 0 };
@@ -715,27 +744,7 @@ int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
return 0;
case SIGFPE:
- si.si_addr = fault_addr;
- si.si_signo = sig;
- /*
- * Inexact can happen together with Overflow or Underflow.
- * Respect the mask to deliver the correct exception.
- */
- fcr31 &= (fcr31 & FPU_CSR_ALL_E) <<
- (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E));
- if (fcr31 & FPU_CSR_INV_X)
- si.si_code = FPE_FLTINV;
- else if (fcr31 & FPU_CSR_DIV_X)
- si.si_code = FPE_FLTDIV;
- else if (fcr31 & FPU_CSR_OVF_X)
- si.si_code = FPE_FLTOVF;
- else if (fcr31 & FPU_CSR_UDF_X)
- si.si_code = FPE_FLTUND;
- else if (fcr31 & FPU_CSR_INE_X)
- si.si_code = FPE_FLTRES;
- else
- si.si_code = __SI_FAULT;
- force_sig_info(sig, &si, current);
+ force_fcr31_sig(fcr31, fault_addr, current);
return 1;
case SIGBUS:
@@ -799,13 +808,13 @@ static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
/* Run the emulator */
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
- fcr31 = current->thread.fpu.fcr31;
/*
- * We can't allow the emulated instruction to leave any of
- * the cause bits set in $fcr31.
+ * We can't allow the emulated instruction to leave any
+ * enabled Cause bits set in $fcr31.
*/
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~fcr31;
/* Restore the hardware register state */
own_fpu(1);
@@ -831,7 +840,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
goto out;
/* Clear FCSR.Cause before enabling interrupts */
- write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X);
+ write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
local_irq_enable();
die_if_kernel("FP exception in kernel code", regs);
@@ -853,13 +862,13 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
/* Run the emulator */
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
- fcr31 = current->thread.fpu.fcr31;
/*
- * We can't allow the emulated instruction to leave any of
- * the cause bits set in $fcr31.
+ * We can't allow the emulated instruction to leave any
+ * enabled Cause bits set in $fcr31.
*/
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~fcr31;
/* Restore the hardware register state */
own_fpu(1); /* Using the FPU again. */
@@ -1424,13 +1433,13 @@ asmlinkage void do_cpu(struct pt_regs *regs)
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
&fault_addr);
- fcr31 = current->thread.fpu.fcr31;
/*
* We can't allow the emulated instruction to leave
- * any of the cause bits set in $fcr31.
+ * any enabled Cause bits set in $fcr31.
*/
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~fcr31;
/* Send a signal if required. */
if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 0f80b936e75e..6eb50a7137db 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -135,42 +135,42 @@ static void dump_tlb(int first, int last)
c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
- printk("va=%0*lx asid=%0*lx",
- vwidth, (entryhi & ~0x1fffUL),
- asidwidth, entryhi & asidmask);
+ pr_cont("va=%0*lx asid=%0*lx",
+ vwidth, (entryhi & ~0x1fffUL),
+ asidwidth, entryhi & asidmask);
if (cpu_has_guestid)
- printk(" gid=%02lx",
- (guestctl1 & MIPS_GCTL1_RID)
+ pr_cont(" gid=%02lx",
+ (guestctl1 & MIPS_GCTL1_RID)
>> MIPS_GCTL1_RID_SHIFT);
/* RI/XI are in awkward places, so mask them off separately */
pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
if (xpa)
pa |= (unsigned long long)readx_c0_entrylo0() << 30;
pa = (pa << 6) & PAGE_MASK;
- printk("\n\t[");
+ pr_cont("\n\t[");
if (cpu_has_rixi)
- printk("ri=%d xi=%d ",
- (entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
- (entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
- printk("pa=%0*llx c=%d d=%d v=%d g=%d] [",
- pwidth, pa, c0,
- (entrylo0 & ENTRYLO_D) ? 1 : 0,
- (entrylo0 & ENTRYLO_V) ? 1 : 0,
- (entrylo0 & ENTRYLO_G) ? 1 : 0);
+ pr_cont("ri=%d xi=%d ",
+ (entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
+ (entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
+ pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d] [",
+ pwidth, pa, c0,
+ (entrylo0 & ENTRYLO_D) ? 1 : 0,
+ (entrylo0 & ENTRYLO_V) ? 1 : 0,
+ (entrylo0 & ENTRYLO_G) ? 1 : 0);
/* RI/XI are in awkward places, so mask them off separately */
pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
if (xpa)
pa |= (unsigned long long)readx_c0_entrylo1() << 30;
pa = (pa << 6) & PAGE_MASK;
if (cpu_has_rixi)
- printk("ri=%d xi=%d ",
- (entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
- (entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
- printk("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
- pwidth, pa, c1,
- (entrylo1 & ENTRYLO_D) ? 1 : 0,
- (entrylo1 & ENTRYLO_V) ? 1 : 0,
- (entrylo1 & ENTRYLO_G) ? 1 : 0);
+ pr_cont("ri=%d xi=%d ",
+ (entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
+ (entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
+ pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
+ pwidth, pa, c1,
+ (entrylo1 & ENTRYLO_D) ? 1 : 0,
+ (entrylo1 & ENTRYLO_V) ? 1 : 0,
+ (entrylo1 & ENTRYLO_G) ? 1 : 0);
}
printk("\n");
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index 744f4a7bc49d..85b4086e553e 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -53,15 +53,15 @@ static void dump_tlb(int first, int last)
*/
printk("Index: %2d ", i);
- printk("va=%08lx asid=%08lx"
- " [pa=%06lx n=%d d=%d v=%d g=%d]",
- entryhi & PAGE_MASK,
- entryhi & asid_mask,
- entrylo0 & PAGE_MASK,
- (entrylo0 & R3K_ENTRYLO_N) ? 1 : 0,
- (entrylo0 & R3K_ENTRYLO_D) ? 1 : 0,
- (entrylo0 & R3K_ENTRYLO_V) ? 1 : 0,
- (entrylo0 & R3K_ENTRYLO_G) ? 1 : 0);
+ pr_cont("va=%08lx asid=%08lx"
+ " [pa=%06lx n=%d d=%d v=%d g=%d]",
+ entryhi & PAGE_MASK,
+ entryhi & asid_mask,
+ entrylo0 & PAGE_MASK,
+ (entrylo0 & R3K_ENTRYLO_N) ? 1 : 0,
+ (entrylo0 & R3K_ENTRYLO_D) ? 1 : 0,
+ (entrylo0 & R3K_ENTRYLO_V) ? 1 : 0,
+ (entrylo0 & R3K_ENTRYLO_G) ? 1 : 0);
}
}
printk("\n");
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index a9b9407f38f7..6b0741e7a7ed 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -368,7 +368,9 @@
#define __IGNORE_select /* newselect */
#define __IGNORE_fadvise64 /* fadvise64_64 */
-
+#define __IGNORE_pkey_mprotect
+#define __IGNORE_pkey_alloc
+#define __IGNORE_pkey_free
#define LINUX_GATEWAY_ADDR 0x100
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index f8150669b8c6..700e2d2da096 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -873,11 +873,11 @@ static void print_parisc_device(struct parisc_device *dev)
if (dev->num_addrs) {
int k;
- printk(", additional addresses: ");
+ pr_cont(", additional addresses: ");
for (k = 0; k < dev->num_addrs; k++)
- printk("0x%lx ", dev->addr[k]);
+ pr_cont("0x%lx ", dev->addr[k]);
}
- printk("\n");
+ pr_cont("\n");
}
/**
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index d03422e5f188..23de307c3052 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -100,14 +100,12 @@ set_thread_pointer:
.endr
/* This address must remain fixed at 0x100 for glibc's syscalls to work */
- .align 256
+ .align LINUX_GATEWAY_ADDR
linux_gateway_entry:
gate .+8, %r0 /* become privileged */
mtsp %r0,%sr4 /* get kernel space into sr4 */
mtsp %r0,%sr5 /* get kernel space into sr5 */
mtsp %r0,%sr6 /* get kernel space into sr6 */
- mfsp %sr7,%r1 /* save user sr7 */
- mtsp %r1,%sr3 /* and store it in sr3 */
#ifdef CONFIG_64BIT
/* for now we can *always* set the W bit on entry to the syscall
@@ -133,6 +131,14 @@ linux_gateway_entry:
depdi 0, 31, 32, %r21
1:
#endif
+
+ /* We use a rsm/ssm pair to prevent sr3 from being clobbered
+ * by external interrupts.
+ */
+ mfsp %sr7,%r1 /* save user sr7 */
+ rsm PSW_SM_I, %r0 /* disable interrupts */
+ mtsp %r1,%sr3 /* and store it in sr3 */
+
mfctl %cr30,%r1
xor %r1,%r30,%r30 /* ye olde xor trick */
xor %r1,%r30,%r1
@@ -147,6 +153,7 @@ linux_gateway_entry:
*/
mtsp %r0,%sr7 /* get kernel space into sr7 */
+ ssm PSW_SM_I, %r0 /* enable interrupts */
STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */
mfctl %cr30,%r1 /* get task ptr in %r1 */
LDREG TI_TASK(%r1),%r1
@@ -474,11 +481,6 @@ lws_start:
comiclr,>> __NR_lws_entries, %r20, %r0
b,n lws_exit_nosys
- /* WARNING: Trashing sr2 and sr3 */
- mfsp %sr7,%r1 /* get userspace into sr3 */
- mtsp %r1,%sr3
- mtsp %r0,%sr2 /* get kernel space into sr2 */
-
/* Load table start */
ldil L%lws_table, %r1
ldo R%lws_table(%r1), %r28 /* Scratch use of r28 */
@@ -627,9 +629,9 @@ cas_action:
stw %r1, 4(%sr2,%r20)
#endif
/* The load and store could fail */
-1: ldw,ma 0(%sr3,%r26), %r28
+1: ldw,ma 0(%r26), %r28
sub,<> %r28, %r25, %r0
-2: stw,ma %r24, 0(%sr3,%r26)
+2: stw,ma %r24, 0(%r26)
/* Free lock */
stw,ma %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
@@ -706,9 +708,9 @@ lws_compare_and_swap_2:
nop
/* 8bit load */
-4: ldb 0(%sr3,%r25), %r25
+4: ldb 0(%r25), %r25
b cas2_lock_start
-5: ldb 0(%sr3,%r24), %r24
+5: ldb 0(%r24), %r24
nop
nop
nop
@@ -716,9 +718,9 @@ lws_compare_and_swap_2:
nop
/* 16bit load */
-6: ldh 0(%sr3,%r25), %r25
+6: ldh 0(%r25), %r25
b cas2_lock_start
-7: ldh 0(%sr3,%r24), %r24
+7: ldh 0(%r24), %r24
nop
nop
nop
@@ -726,9 +728,9 @@ lws_compare_and_swap_2:
nop
/* 32bit load */
-8: ldw 0(%sr3,%r25), %r25
+8: ldw 0(%r25), %r25
b cas2_lock_start
-9: ldw 0(%sr3,%r24), %r24
+9: ldw 0(%r24), %r24
nop
nop
nop
@@ -737,14 +739,14 @@ lws_compare_and_swap_2:
/* 64bit load */
#ifdef CONFIG_64BIT
-10: ldd 0(%sr3,%r25), %r25
-11: ldd 0(%sr3,%r24), %r24
+10: ldd 0(%r25), %r25
+11: ldd 0(%r24), %r24
#else
/* Load new value into r22/r23 - high/low */
-10: ldw 0(%sr3,%r25), %r22
-11: ldw 4(%sr3,%r25), %r23
+10: ldw 0(%r25), %r22
+11: ldw 4(%r25), %r23
/* Load new value into fr4 for atomic store later */
-12: flddx 0(%sr3,%r24), %fr4
+12: flddx 0(%r24), %fr4
#endif
cas2_lock_start:
@@ -794,30 +796,30 @@ cas2_action:
ldo 1(%r0),%r28
/* 8bit CAS */
-13: ldb,ma 0(%sr3,%r26), %r29
+13: ldb,ma 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
-14: stb,ma %r24, 0(%sr3,%r26)
+14: stb,ma %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 16bit CAS */
-15: ldh,ma 0(%sr3,%r26), %r29
+15: ldh,ma 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
-16: sth,ma %r24, 0(%sr3,%r26)
+16: sth,ma %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 32bit CAS */
-17: ldw,ma 0(%sr3,%r26), %r29
+17: ldw,ma 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
-18: stw,ma %r24, 0(%sr3,%r26)
+18: stw,ma %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
@@ -825,22 +827,22 @@ cas2_action:
/* 64bit CAS */
#ifdef CONFIG_64BIT
-19: ldd,ma 0(%sr3,%r26), %r29
+19: ldd,ma 0(%r26), %r29
sub,*= %r29, %r25, %r0
b,n cas2_end
-20: std,ma %r24, 0(%sr3,%r26)
+20: std,ma %r24, 0(%r26)
copy %r0, %r28
#else
/* Compare first word */
-19: ldw,ma 0(%sr3,%r26), %r29
+19: ldw,ma 0(%r26), %r29
sub,= %r29, %r22, %r0
b,n cas2_end
/* Compare second word */
-20: ldw,ma 4(%sr3,%r26), %r29
+20: ldw,ma 4(%r26), %r29
sub,= %r29, %r23, %r0
b,n cas2_end
/* Perform the store */
-21: fstdx %fr4, 0(%sr3,%r26)
+21: fstdx %fr4, 0(%r26)
copy %r0, %r28
#endif
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index ee655ed1ff1b..1e8fceb308a5 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -53,10 +53,8 @@ static inline __sum16 csum_fold(__wsum sum)
return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
}
-static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
- unsigned short len,
- unsigned short proto,
- __wsum sum)
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
+ __u8 proto, __wsum sum)
{
#ifdef __powerpc64__
unsigned long s = (__force u32)sum;
@@ -83,10 +81,8 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
-static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
- unsigned short len,
- unsigned short proto,
- __wsum sum)
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
+ __u8 proto, __wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
index 01b8a13f0224..3919332965af 100644
--- a/arch/powerpc/include/asm/cpuidle.h
+++ b/arch/powerpc/include/asm/cpuidle.h
@@ -26,7 +26,7 @@ extern u64 pnv_first_deep_stop_state;
std r0,0(r1); \
ptesync; \
ld r0,0(r1); \
-1: cmp cr0,r0,r0; \
+1: cmpd cr0,r0,r0; \
bne 1b; \
IDLE_INST; \
b .
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 2e4e7d878c8e..84d49b197c32 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -93,6 +93,10 @@
ld reg,PACAKBASE(r13); /* get high part of &label */ \
ori reg,reg,(FIXED_SYMBOL_ABS_ADDR(label))@l;
+#define __LOAD_HANDLER(reg, label) \
+ ld reg,PACAKBASE(r13); \
+ ori reg,reg,(ABS_ADDR(label))@l;
+
/* Exception register prefixes */
#define EXC_HV H
#define EXC_STD
@@ -208,6 +212,18 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
#define kvmppc_interrupt kvmppc_interrupt_pr
#endif
+#ifdef CONFIG_RELOCATABLE
+#define BRANCH_TO_COMMON(reg, label) \
+ __LOAD_HANDLER(reg, label); \
+ mtctr reg; \
+ bctr
+
+#else
+#define BRANCH_TO_COMMON(reg, label) \
+ b label
+
+#endif
+
#define __KVM_HANDLER_PROLOG(area, n) \
BEGIN_FTR_SECTION_NESTED(947) \
ld r10,area+EX_CFAR(r13); \
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index f6f68f73e858..99e1397b71da 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -52,11 +52,23 @@ static inline int mm_is_core_local(struct mm_struct *mm)
return cpumask_subset(mm_cpumask(mm),
topology_sibling_cpumask(smp_processor_id()));
}
+
+static inline int mm_is_thread_local(struct mm_struct *mm)
+{
+ return cpumask_equal(mm_cpumask(mm),
+ cpumask_of(smp_processor_id()));
+}
+
#else
static inline int mm_is_core_local(struct mm_struct *mm)
{
return 1;
}
+
+static inline int mm_is_thread_local(struct mm_struct *mm)
+{
+ return 1;
+}
#endif
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index f129408c6022..08ba447a4b3d 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -95,19 +95,35 @@ __start_interrupts:
/* No virt vectors corresponding with 0x0..0x100 */
EXC_VIRT_NONE(0x4000, 0x4100)
-EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
- SET_SCRATCH0(r13)
+
#ifdef CONFIG_PPC_P7_NAP
-BEGIN_FTR_SECTION
- /* Running native on arch 2.06 or later, check if we are
- * waking up from nap/sleep/winkle.
+ /*
+ * If running native on arch 2.06 or later, check if we are waking up
+ * from nap/sleep/winkle, and branch to idle handler.
*/
- mfspr r13,SPRN_SRR1
- rlwinm. r13,r13,47-31,30,31
- beq 9f
+#define IDLETEST(n) \
+ BEGIN_FTR_SECTION ; \
+ mfspr r10,SPRN_SRR1 ; \
+ rlwinm. r10,r10,47-31,30,31 ; \
+ beq- 1f ; \
+ cmpwi cr3,r10,2 ; \
+ BRANCH_TO_COMMON(r10, system_reset_idle_common) ; \
+1: \
+ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+#else
+#define IDLETEST NOTEST
+#endif
- cmpwi cr3,r13,2
- GET_PACA(r13)
+EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
+ IDLETEST, 0x100)
+
+EXC_REAL_END(system_reset, 0x100, 0x200)
+EXC_VIRT_NONE(0x4100, 0x4200)
+
+#ifdef CONFIG_PPC_P7_NAP
+EXC_COMMON_BEGIN(system_reset_idle_common)
bl pnv_restore_hyp_resource
li r0,PNV_THREAD_RUNNING
@@ -130,14 +146,8 @@ BEGIN_FTR_SECTION
blt cr3,2f
b pnv_wakeup_loss
2: b pnv_wakeup_noloss
+#endif
-9:
-END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
-#endif /* CONFIG_PPC_P7_NAP */
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
- NOTEST, 0x100)
-EXC_REAL_END(system_reset, 0x100, 0x200)
-EXC_VIRT_NONE(0x4100, 0x4200)
EXC_COMMON(system_reset_common, 0x100, system_reset_exception)
#ifdef CONFIG_PPC_PSERIES
@@ -817,10 +827,8 @@ EXC_VIRT(trap_0b, 0x4b00, 0x4c00, 0xb00)
TRAMP_KVM(PACA_EXGEN, 0xb00)
EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
-
-#define LOAD_SYSCALL_HANDLER(reg) \
- ld reg,PACAKBASE(r13); \
- ori reg,reg,(ABS_ADDR(system_call_common))@l;
+#define LOAD_SYSCALL_HANDLER(reg) \
+ __LOAD_HANDLER(reg, system_call_common)
/* Syscall routine is used twice, in reloc-off and reloc-on paths */
#define SYSCALL_PSERIES_1 \
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 9781c69eae57..03d089b3ed72 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -275,7 +275,7 @@ int hw_breakpoint_handler(struct die_args *args)
if (!stepped) {
WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
"0x%lx will be disabled.", info->address);
- perf_event_disable(bp);
+ perf_event_disable_inatomic(bp);
goto out;
}
/*
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index bd739fed26e3..72dac0b58061 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -90,6 +90,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
* Threads will spin in HMT_LOW until the lock bit is cleared.
* r14 - pointer to core_idle_state
* r15 - used to load contents of core_idle_state
+ * r9 - used as a temporary variable
*/
core_idle_lock_held:
@@ -99,6 +100,8 @@ core_idle_lock_held:
bne 3b
HMT_MEDIUM
lwarx r15,0,r14
+ andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
+ bne core_idle_lock_held
blr
/*
@@ -163,12 +166,6 @@ _GLOBAL(pnv_powersave_common)
std r9,_MSR(r1)
std r1,PACAR1(r13)
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- /* Tell KVM we're entering idle */
- li r4,KVM_HWTHREAD_IN_IDLE
- stb r4,HSTATE_HWTHREAD_STATE(r13)
-#endif
-
/*
* Go to real mode to do the nap, as required by the architecture.
* Also, we need to be in real mode before setting hwthread_state,
@@ -185,6 +182,26 @@ _GLOBAL(pnv_powersave_common)
.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ /* Tell KVM we're entering idle */
+ li r4,KVM_HWTHREAD_IN_IDLE
+ /******************************************************/
+ /* N O T E W E L L ! ! ! N O T E W E L L */
+ /* The following store to HSTATE_HWTHREAD_STATE(r13) */
+ /* MUST occur in real mode, i.e. with the MMU off, */
+ /* and the MMU must stay off until we clear this flag */
+ /* and test HSTATE_HWTHREAD_REQ(r13) in the system */
+ /* reset interrupt vector in exceptions-64s.S. */
+ /* The reason is that another thread can switch the */
+ /* MMU to a guest context whenever this flag is set */
+ /* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on, */
+ /* that would potentially cause this thread to start */
+ /* executing instructions from guest memory in */
+ /* hypervisor mode, leading to a host crash or data */
+ /* corruption, or worse. */
+ /******************************************************/
+ stb r4,HSTATE_HWTHREAD_STATE(r13)
+#endif
stb r3,PACA_THREAD_IDLE_STATE(r13)
cmpwi cr3,r3,PNV_THREAD_SLEEP
bge cr3,2f
@@ -250,6 +267,12 @@ enter_winkle:
* r3 - requested stop state
*/
power_enter_stop:
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ /* Tell KVM we're entering idle */
+ li r4,KVM_HWTHREAD_IN_IDLE
+ /* DO THIS IN REAL MODE! See comment above. */
+ stb r4,HSTATE_HWTHREAD_STATE(r13)
+#endif
/*
* Check if the requested state is a deep idle state.
*/
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 9e7c10fe205f..ce6dc61b15b2 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1012,7 +1012,7 @@ void restore_tm_state(struct pt_regs *regs)
/* Ensure that restore_math() will restore */
if (msr_diff & MSR_FP)
current->thread.load_fp = 1;
-#ifdef CONFIG_ALIVEC
+#ifdef CONFIG_ALTIVEC
if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
current->thread.load_vec = 1;
#endif
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 82ff5de8b1e7..a0ea63ac2b52 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -23,6 +23,7 @@
#include <asm/ppc-opcode.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
+#include <asm/smp.h>
#include "book3s_xics.h"
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 0e49ec541ab5..bda8c43be78a 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -175,7 +175,7 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
if (unlikely(pid == MMU_NO_CONTEXT))
goto no_context;
- if (!mm_is_core_local(mm)) {
+ if (!mm_is_thread_local(mm)) {
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
@@ -201,7 +201,7 @@ void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
if (unlikely(pid == MMU_NO_CONTEXT))
goto no_context;
- if (!mm_is_core_local(mm)) {
+ if (!mm_is_thread_local(mm)) {
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
@@ -226,7 +226,7 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
pid = mm ? mm->context.id : 0;
if (unlikely(pid == MMU_NO_CONTEXT))
goto bail;
- if (!mm_is_core_local(mm)) {
+ if (!mm_is_thread_local(mm)) {
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
@@ -321,7 +321,7 @@ void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
{
unsigned long pid;
unsigned long addr;
- int local = mm_is_core_local(mm);
+ int local = mm_is_thread_local(mm);
unsigned long ap = mmu_get_ap(psize);
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 64053d9ac3f2..836c56290499 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -12,9 +12,7 @@
#ifndef __ASSEMBLY__
-unsigned long return_address(int depth);
-
-#define ftrace_return_address(n) return_address(n)
+#define ftrace_return_address(n) __builtin_return_address(n)
void _mcount(void);
void ftrace_caller(void);
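Editor's note: with the custom return_address() stack walker removed, s390 maps ftrace_return_address(n) directly to the GCC builtin __builtin_return_address(n), as most architectures already do. For reference, the builtin yields the return address n frames up the call chain (0 is the caller of the current function); a minimal, plain-C illustration:

#include <stdio.h>

static void report_caller(void)
{
	/* 0 = return address of this function, i.e. a PC inside main().
	 * Non-zero depths are only reliable with frame pointers. */
	printf("called from %p\n", __builtin_return_address(0));
}

int main(void)
{
	report_caller();
	return 0;
}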
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 03323175de30..602af692efdc 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -192,7 +192,7 @@ struct task_struct;
struct mm_struct;
struct seq_file;
-typedef int (*dump_trace_func_t)(void *data, unsigned long address);
+typedef int (*dump_trace_func_t)(void *data, unsigned long address, int reliable);
void dump_trace(dump_trace_func_t func, void *data,
struct task_struct *task, unsigned long sp);
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 02613bad8bbb..3066031a73fe 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -9,6 +9,9 @@
#include <uapi/asm/unistd.h>
#define __IGNORE_time
+#define __IGNORE_pkey_mprotect
+#define __IGNORE_pkey_alloc
+#define __IGNORE_pkey_free
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_ALARM
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 43446fa2a4e5..c74c59236f44 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -2014,12 +2014,12 @@ void show_code(struct pt_regs *regs)
*ptr++ = '\t';
ptr += print_insn(ptr, code + start, addr);
start += opsize;
- printk("%s", buffer);
+ pr_cont("%s", buffer);
ptr = buffer;
ptr += sprintf(ptr, "\n ");
hops++;
}
- printk("\n");
+ pr_cont("\n");
}
void print_fn_code(unsigned char *code, unsigned long len)
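Editor's note: the show_code() hunk switches continuation output from printk() to pr_cont(). A printk() without an explicit KERN_CONT now starts a new message, so pieces meant to extend the current line need the KERN_CONT marker that pr_cont() supplies. A minimal sketch of the pattern (illustrative function, not part of the patch):

#include <linux/printk.h>

static void print_vector(const int *v, int n)
{
	int i;

	printk(KERN_INFO "vector:");	/* starts a new message */
	for (i = 0; i < n; i++)
		pr_cont(" %d", v[i]);	/* KERN_CONT: append to it */
	pr_cont("\n");			/* terminate the line */
}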
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 6693383bc01b..55d4fe174fd9 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -38,10 +38,10 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
if (sp < low || sp > high - sizeof(*sf))
return sp;
sf = (struct stack_frame *) sp;
+ if (func(data, sf->gprs[8], 0))
+ return sp;
/* Follow the backchain. */
while (1) {
- if (func(data, sf->gprs[8]))
- return sp;
low = sp;
sp = sf->back_chain;
if (!sp)
@@ -49,6 +49,8 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
if (sp <= low || sp > high - sizeof(*sf))
return sp;
sf = (struct stack_frame *) sp;
+ if (func(data, sf->gprs[8], 1))
+ return sp;
}
/* Zero backchain detected, check for interrupt frame. */
sp = (unsigned long) (sf + 1);
@@ -56,7 +58,7 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
return sp;
regs = (struct pt_regs *) sp;
if (!user_mode(regs)) {
- if (func(data, regs->psw.addr))
+ if (func(data, regs->psw.addr, 1))
return sp;
}
low = sp;
@@ -85,33 +87,12 @@ void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
}
EXPORT_SYMBOL_GPL(dump_trace);
-struct return_address_data {
- unsigned long address;
- int depth;
-};
-
-static int __return_address(void *data, unsigned long address)
-{
- struct return_address_data *rd = data;
-
- if (rd->depth--)
- return 0;
- rd->address = address;
- return 1;
-}
-
-unsigned long return_address(int depth)
-{
- struct return_address_data rd = { .depth = depth + 2 };
-
- dump_trace(__return_address, &rd, NULL, current_stack_pointer());
- return rd.address;
-}
-EXPORT_SYMBOL_GPL(return_address);
-
-static int show_address(void *data, unsigned long address)
+static int show_address(void *data, unsigned long address, int reliable)
{
- printk("([<%016lx>] %pSR)\n", address, (void *)address);
+ if (reliable)
+ printk(" [<%016lx>] %pSR \n", address, (void *)address);
+ else
+ printk("([<%016lx>] %pSR)\n", address, (void *)address);
return 0;
}
@@ -138,14 +119,14 @@ void show_stack(struct task_struct *task, unsigned long *sp)
else
stack = (unsigned long *)task->thread.ksp;
}
+ printk(KERN_DEFAULT "Stack:\n");
for (i = 0; i < 20; i++) {
if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
break;
- if ((i * sizeof(long) % 32) == 0)
- printk("%s ", i == 0 ? "" : "\n");
- printk("%016lx ", *stack++);
+ if (i % 4 == 0)
+ printk(KERN_DEFAULT " ");
+ pr_cont("%016lx%c", *stack++, i % 4 == 3 ? '\n' : ' ');
}
- printk("\n");
show_trace(task, (unsigned long)sp);
}
@@ -163,13 +144,13 @@ void show_registers(struct pt_regs *regs)
mode = user_mode(regs) ? "User" : "Krnl";
printk("%s PSW : %p %p", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
if (!user_mode(regs))
- printk(" (%pSR)", (void *)regs->psw.addr);
- printk("\n");
+ pr_cont(" (%pSR)", (void *)regs->psw.addr);
+ pr_cont("\n");
printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
"P:%x AS:%x CC:%x PM:%x", psw->r, psw->t, psw->i, psw->e,
psw->key, psw->m, psw->w, psw->p, psw->as, psw->cc, psw->pm);
- printk(" RI:%x EA:%x", psw->ri, psw->eaba);
- printk("\n%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
+ pr_cont(" RI:%x EA:%x\n", psw->ri, psw->eaba);
+ printk("%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
printk(" %016lx %016lx %016lx %016lx\n",
regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
@@ -205,14 +186,14 @@ void die(struct pt_regs *regs, const char *str)
printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff,
regs->int_code >> 17, ++die_counter);
#ifdef CONFIG_PREEMPT
- printk("PREEMPT ");
+ pr_cont("PREEMPT ");
#endif
#ifdef CONFIG_SMP
- printk("SMP ");
+ pr_cont("SMP ");
#endif
if (debug_pagealloc_enabled())
- printk("DEBUG_PAGEALLOC");
- printk("\n");
+ pr_cont("DEBUG_PAGEALLOC");
+ pr_cont("\n");
notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
print_modules();
show_regs(regs);
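Editor's note: dump_trace() callbacks on s390 now take a third argument, reliable, so printers such as show_address() can distinguish frames found by following the backchain (reliable) from the initial guessed entry, which stays in parentheses. A minimal callback against the new dump_trace_func_t signature, counting only reliable frames (hypothetical helper, not part of the patch):

#include <asm/processor.h>	/* dump_trace(), dump_trace_func_t */

static int count_reliable(void *data, unsigned long address, int reliable)
{
	unsigned int *count = data;

	if (reliable)
		(*count)++;
	return 0;	/* 0 = keep walking, non-zero = stop */
}

static unsigned int reliable_frames(void)
{
	unsigned int count = 0;

	dump_trace(count_reliable, &count, NULL, current_stack_pointer());
	return count;
}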
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index 17431f63de00..955a7b6fa0a4 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -222,7 +222,7 @@ static int __init service_level_perf_register(void)
}
arch_initcall(service_level_perf_register);
-static int __perf_callchain_kernel(void *data, unsigned long address)
+static int __perf_callchain_kernel(void *data, unsigned long address, int reliable)
{
struct perf_callchain_entry_ctx *entry = data;
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 44f84b23d4e5..355db9db8210 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -27,12 +27,12 @@ static int __save_address(void *data, unsigned long address, int nosched)
return 1;
}
-static int save_address(void *data, unsigned long address)
+static int save_address(void *data, unsigned long address, int reliable)
{
return __save_address(data, address, 0);
}
-static int save_address_nosched(void *data, unsigned long address)
+static int save_address_nosched(void *data, unsigned long address, int reliable)
{
return __save_address(data, address, 1);
}
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index cd404aa3931c..4a0c5bce3552 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -217,6 +217,7 @@ static __init int setup_hugepagesz(char *opt)
} else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE) {
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
} else {
+ hugetlb_bad_size();
pr_err("hugepagesz= specifies an unsupported page size %s\n",
string);
return 0;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index f56a39bd8ba6..b3e9d18f2ec6 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -151,36 +151,40 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
- unsigned long normal_end_pfn = PFN_DOWN(memblock_end_of_DRAM());
- unsigned long dma_end_pfn = PFN_DOWN(MAX_DMA_ADDRESS);
+ unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
unsigned long start_pfn = PFN_DOWN(start);
unsigned long size_pages = PFN_DOWN(size);
- unsigned long nr_pages;
- int rc, zone_enum;
+ pg_data_t *pgdat = NODE_DATA(nid);
+ struct zone *zone;
+ int rc, i;
rc = vmem_add_mapping(start, size);
if (rc)
return rc;
- while (size_pages > 0) {
- if (start_pfn < dma_end_pfn) {
- nr_pages = (start_pfn + size_pages > dma_end_pfn) ?
- dma_end_pfn - start_pfn : size_pages;
- zone_enum = ZONE_DMA;
- } else if (start_pfn < normal_end_pfn) {
- nr_pages = (start_pfn + size_pages > normal_end_pfn) ?
- normal_end_pfn - start_pfn : size_pages;
- zone_enum = ZONE_NORMAL;
+ for (i = 0; i < MAX_NR_ZONES; i++) {
+ zone = pgdat->node_zones + i;
+ if (zone_idx(zone) != ZONE_MOVABLE) {
+ /* Add range within existing zone limits, if possible */
+ zone_start_pfn = zone->zone_start_pfn;
+ zone_end_pfn = zone->zone_start_pfn +
+ zone->spanned_pages;
} else {
- nr_pages = size_pages;
- zone_enum = ZONE_MOVABLE;
+ /* Add remaining range to ZONE_MOVABLE */
+ zone_start_pfn = start_pfn;
+ zone_end_pfn = start_pfn + size_pages;
}
- rc = __add_pages(nid, NODE_DATA(nid)->node_zones + zone_enum,
- start_pfn, size_pages);
+ if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
+ continue;
+ nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
+ zone_end_pfn - start_pfn : size_pages;
+ rc = __add_pages(nid, zone, start_pfn, nr_pages);
if (rc)
break;
start_pfn += nr_pages;
size_pages -= nr_pages;
+ if (!size_pages)
+ break;
}
if (rc)
vmem_remove_mapping(start, size);
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 16f4c3960b87..9a4de4599c7b 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -13,7 +13,7 @@
#include <linux/init.h>
#include <asm/processor.h>
-static int __s390_backtrace(void *data, unsigned long address)
+static int __s390_backtrace(void *data, unsigned long address, int reliable)
{
unsigned int *depth = data;
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index a6cfdabb6054..5b0ed48e5b0c 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -24,9 +24,10 @@ typedef struct {
unsigned int icache_line_size;
unsigned int ecache_size;
unsigned int ecache_line_size;
- unsigned short sock_id;
+ unsigned short sock_id; /* physical package */
unsigned short core_id;
- int proc_id;
+ unsigned short max_cache_id; /* groupings of highest shared cache */
+ unsigned short proc_id; /* strand (aka HW thread) id */
} cpuinfo_sparc;
DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index d9c5876c6121..8011e79f59c9 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -134,7 +134,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
*(volatile __u32 *)&lp->lock = ~0U;
}
-static void inline arch_write_unlock(arch_rwlock_t *lock)
+static inline void arch_write_unlock(arch_rwlock_t *lock)
{
__asm__ __volatile__(
" st %%g0, [%0]"
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 87990b7c6b0d..07c9f2e9bf57 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -96,7 +96,7 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
-static void inline arch_read_lock(arch_rwlock_t *lock)
+static inline void arch_read_lock(arch_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
@@ -119,7 +119,7 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
: "memory");
}
-static int inline arch_read_trylock(arch_rwlock_t *lock)
+static inline int arch_read_trylock(arch_rwlock_t *lock)
{
int tmp1, tmp2;
@@ -140,7 +140,7 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
return tmp1;
}
-static void inline arch_read_unlock(arch_rwlock_t *lock)
+static inline void arch_read_unlock(arch_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
@@ -156,7 +156,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
: "memory");
}
-static void inline arch_write_lock(arch_rwlock_t *lock)
+static inline void arch_write_lock(arch_rwlock_t *lock)
{
unsigned long mask, tmp1, tmp2;
@@ -181,7 +181,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
: "memory");
}
-static void inline arch_write_unlock(arch_rwlock_t *lock)
+static inline void arch_write_unlock(arch_rwlock_t *lock)
{
__asm__ __volatile__(
" stw %%g0, [%0]"
@@ -190,7 +190,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
: "memory");
}
-static int inline arch_write_trylock(arch_rwlock_t *lock)
+static inline int arch_write_trylock(arch_rwlock_t *lock)
{
unsigned long mask, tmp1, tmp2, result;
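Editor's note: the spinlock_32.h/spinlock_64.h hunks only reorder declaration specifiers. C accepts "static void inline f(void)", but gcc -Wextra flags 'inline' that is not at the beginning of the declaration (-Wold-style-declaration), and kernel style puts the storage class and inline before the return type. A before/after sketch with a hypothetical function name:

/* old style: valid C, but gcc -Wextra warns about 'inline' placement */
static void inline toggle_old(int *flag) { *flag = !*flag; }

/* preferred kernel style: storage class, then inline, then type */
static inline void toggle_new(int *flag) { *flag = !*flag; }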
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h
index bec481aaca16..7b4898a36eee 100644
--- a/arch/sparc/include/asm/topology_64.h
+++ b/arch/sparc/include/asm/topology_64.h
@@ -44,14 +44,20 @@ int __node_distance(int, int);
#define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id)
#define topology_core_id(cpu) (cpu_data(cpu).core_id)
#define topology_core_cpumask(cpu) (&cpu_core_sib_map[cpu])
+#define topology_core_cache_cpumask(cpu) (&cpu_core_sib_cache_map[cpu])
#define topology_sibling_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
#endif /* CONFIG_SMP */
extern cpumask_t cpu_core_map[NR_CPUS];
extern cpumask_t cpu_core_sib_map[NR_CPUS];
+extern cpumask_t cpu_core_sib_cache_map[NR_CPUS];
+
+/**
+ * Return cores that shares the last level cache.
+ */
static inline const struct cpumask *cpu_coregroup_mask(int cpu)
{
- return &cpu_core_map[cpu];
+ return &cpu_core_sib_cache_map[cpu];
}
#endif /* _ASM_SPARC64_TOPOLOGY_H */
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index b68acc563235..5373136c412b 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -82,7 +82,6 @@ static inline int access_ok(int type, const void __user * addr, unsigned long si
return 1;
}
-void __ret_efault(void);
void __retl_efault(void);
/* Uh, these should become the main single-value transfer routines..
@@ -189,55 +188,34 @@ int __get_user_bad(void);
unsigned long __must_check ___copy_from_user(void *to,
const void __user *from,
unsigned long size);
-unsigned long copy_from_user_fixup(void *to, const void __user *from,
- unsigned long size);
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
- unsigned long ret;
-
check_object_size(to, size, false);
- ret = ___copy_from_user(to, from, size);
- if (unlikely(ret))
- ret = copy_from_user_fixup(to, from, size);
-
- return ret;
+ return ___copy_from_user(to, from, size);
}
#define __copy_from_user copy_from_user
unsigned long __must_check ___copy_to_user(void __user *to,
const void *from,
unsigned long size);
-unsigned long copy_to_user_fixup(void __user *to, const void *from,
- unsigned long size);
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
- unsigned long ret;
-
check_object_size(from, size, true);
- ret = ___copy_to_user(to, from, size);
- if (unlikely(ret))
- ret = copy_to_user_fixup(to, from, size);
- return ret;
+ return ___copy_to_user(to, from, size);
}
#define __copy_to_user copy_to_user
unsigned long __must_check ___copy_in_user(void __user *to,
const void __user *from,
unsigned long size);
-unsigned long copy_in_user_fixup(void __user *to, void __user *from,
- unsigned long size);
static inline unsigned long __must_check
copy_in_user(void __user *to, void __user *from, unsigned long size)
{
- unsigned long ret = ___copy_in_user(to, from, size);
-
- if (unlikely(ret))
- ret = copy_in_user_fixup(to, from, size);
- return ret;
+ return ___copy_in_user(to, from, size);
}
#define __copy_in_user copy_in_user
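Editor's note: this uaccess_64.h change is the headline of the sparc series that follows. ___copy_{from,to,in}_user now return the exact number of bytes left uncopied, so the copy_*_user_fixup() helpers that recomputed that count after a generic "__retl_one" fault return are gone; every EX_LD/EX_ST site in the memcpy variants below instead names a dedicated stub (e.g. NG4_retl_o2_plus_o4) that rebuilds the residual from the loop registers. Callers keep the usual contract; a minimal sketch of consuming it (hypothetical wrapper and buffer names):

#include <linux/uaccess.h>

static long read_sample(void *dst, const void __user *src, unsigned long len)
{
	unsigned long left;

	/* returns 0 on success, otherwise the number of bytes NOT copied */
	left = copy_from_user(dst, src, len);
	if (left)
		return -EFAULT;
	return 0;
}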
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index beba6c11554c..6aa3da152c20 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -926,48 +926,11 @@ tlb_type: .word 0 /* Must NOT end up in BSS */
EXPORT_SYMBOL(tlb_type)
.section ".fixup",#alloc,#execinstr
- .globl __ret_efault, __retl_efault, __ret_one, __retl_one
-ENTRY(__ret_efault)
- ret
- restore %g0, -EFAULT, %o0
-ENDPROC(__ret_efault)
-EXPORT_SYMBOL(__ret_efault)
-
ENTRY(__retl_efault)
retl
mov -EFAULT, %o0
ENDPROC(__retl_efault)
-ENTRY(__retl_one)
- retl
- mov 1, %o0
-ENDPROC(__retl_one)
-
-ENTRY(__retl_one_fp)
- VISExitHalf
- retl
- mov 1, %o0
-ENDPROC(__retl_one_fp)
-
-ENTRY(__ret_one_asi)
- wr %g0, ASI_AIUS, %asi
- ret
- restore %g0, 1, %o0
-ENDPROC(__ret_one_asi)
-
-ENTRY(__retl_one_asi)
- wr %g0, ASI_AIUS, %asi
- retl
- mov 1, %o0
-ENDPROC(__retl_one_asi)
-
-ENTRY(__retl_one_asi_fp)
- wr %g0, ASI_AIUS, %asi
- VISExitHalf
- retl
- mov 1, %o0
-ENDPROC(__retl_one_asi_fp)
-
ENTRY(__retl_o1)
retl
mov %o1, %o0
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
index 59bbeff55024..07933b9e9ce0 100644
--- a/arch/sparc/kernel/jump_label.c
+++ b/arch/sparc/kernel/jump_label.c
@@ -13,19 +13,30 @@
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
- u32 val;
u32 *insn = (u32 *) (unsigned long) entry->code;
+ u32 val;
if (type == JUMP_LABEL_JMP) {
s32 off = (s32)entry->target - (s32)entry->code;
+ bool use_v9_branch = false;
+
+ BUG_ON(off & 3);
#ifdef CONFIG_SPARC64
- /* ba,pt %xcc, . + (off << 2) */
- val = 0x10680000 | ((u32) off >> 2);
-#else
- /* ba . + (off << 2) */
- val = 0x10800000 | ((u32) off >> 2);
+ if (off <= 0xfffff && off >= -0x100000)
+ use_v9_branch = true;
#endif
+ if (use_v9_branch) {
+ /* WDISP19 - target is . + immed << 2 */
+ /* ba,pt %xcc, . + off */
+ val = 0x10680000 | (((u32) off >> 2) & 0x7ffff);
+ } else {
+ /* WDISP22 - target is . + immed << 2 */
+ BUG_ON(off > 0x7fffff);
+ BUG_ON(off < -0x800000);
+ /* ba . + off */
+ val = 0x10800000 | (((u32) off >> 2) & 0x3fffff);
+ }
} else {
val = 0x01000000;
}
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 11228861d9b4..8a6982dfd733 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -645,13 +645,20 @@ static void __mark_core_id(struct mdesc_handle *hp, u64 node,
cpu_data(*id).core_id = core_id;
}
-static void __mark_sock_id(struct mdesc_handle *hp, u64 node,
- int sock_id)
+static void __mark_max_cache_id(struct mdesc_handle *hp, u64 node,
+ int max_cache_id)
{
const u64 *id = mdesc_get_property(hp, node, "id", NULL);
- if (*id < num_possible_cpus())
- cpu_data(*id).sock_id = sock_id;
+ if (*id < num_possible_cpus()) {
+ cpu_data(*id).max_cache_id = max_cache_id;
+
+ /**
+ * On systems without explicit socket descriptions socket
+ * is max_cache_id
+ */
+ cpu_data(*id).sock_id = max_cache_id;
+ }
}
static void mark_core_ids(struct mdesc_handle *hp, u64 mp,
@@ -660,10 +667,11 @@ static void mark_core_ids(struct mdesc_handle *hp, u64 mp,
find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10);
}
-static void mark_sock_ids(struct mdesc_handle *hp, u64 mp,
- int sock_id)
+static void mark_max_cache_ids(struct mdesc_handle *hp, u64 mp,
+ int max_cache_id)
{
- find_back_node_value(hp, mp, "cpu", __mark_sock_id, sock_id, 10);
+ find_back_node_value(hp, mp, "cpu", __mark_max_cache_id,
+ max_cache_id, 10);
}
static void set_core_ids(struct mdesc_handle *hp)
@@ -694,14 +702,15 @@ static void set_core_ids(struct mdesc_handle *hp)
}
}
-static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level)
+static int set_max_cache_ids_by_cache(struct mdesc_handle *hp, int level)
{
u64 mp;
int idx = 1;
int fnd = 0;
- /* Identify unique sockets by looking for cpus backpointed to by
- * shared level n caches.
+ /**
+ * Identify unique highest level of shared cache by looking for cpus
+ * backpointed to by shared level N caches.
*/
mdesc_for_each_node_by_name(hp, mp, "cache") {
const u64 *cur_lvl;
@@ -709,8 +718,7 @@ static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level)
cur_lvl = mdesc_get_property(hp, mp, "level", NULL);
if (*cur_lvl != level)
continue;
-
- mark_sock_ids(hp, mp, idx);
+ mark_max_cache_ids(hp, mp, idx);
idx++;
fnd = 1;
}
@@ -745,15 +753,17 @@ static void set_sock_ids(struct mdesc_handle *hp)
{
u64 mp;
- /* If machine description exposes sockets data use it.
- * Otherwise fallback to use shared L3 or L2 caches.
+ /**
+ * Find the highest level of shared cache which pre-T7 is also
+ * the socket.
*/
+ if (!set_max_cache_ids_by_cache(hp, 3))
+ set_max_cache_ids_by_cache(hp, 2);
+
+ /* If machine description exposes sockets data use it.*/
mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets");
if (mp != MDESC_NODE_NULL)
- return set_sock_ids_by_socket(hp, mp);
-
- if (!set_sock_ids_by_cache(hp, 3))
- set_sock_ids_by_cache(hp, 2);
+ set_sock_ids_by_socket(hp, mp);
}
static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index d3035ba6cd31..8182f7caf5b1 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -63,9 +63,13 @@ cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
[0 ... NR_CPUS-1] = CPU_MASK_NONE };
+cpumask_t cpu_core_sib_cache_map[NR_CPUS] __read_mostly = {
+ [0 ... NR_CPUS - 1] = CPU_MASK_NONE };
+
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);
EXPORT_SYMBOL(cpu_core_sib_map);
+EXPORT_SYMBOL(cpu_core_sib_cache_map);
static cpumask_t smp_commenced_mask;
@@ -1265,6 +1269,10 @@ void smp_fill_in_sib_core_maps(void)
unsigned int j;
for_each_present_cpu(j) {
+ if (cpu_data(i).max_cache_id ==
+ cpu_data(j).max_cache_id)
+ cpumask_set_cpu(j, &cpu_core_sib_cache_map[i]);
+
if (cpu_data(i).sock_id == cpu_data(j).sock_id)
cpumask_set_cpu(j, &cpu_core_sib_map[i]);
}
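Editor's note: on sparc64 the new cpu_core_sib_cache_map[] groups CPUs by max_cache_id (the highest shared cache level found in the machine description), and cpu_coregroup_mask() now returns that mask instead of the socket-wide cpu_core_map, so coregroup decisions follow last-level-cache sharing. A small debug-style sketch of inspecting the mask (illustrative only, not part of the patch):

#include <linux/cpumask.h>
#include <linux/printk.h>
#include <linux/topology.h>

static void dump_llc_siblings(int cpu)
{
	const struct cpumask *mask = cpu_coregroup_mask(cpu);

	/* %*pbl prints a cpumask as a range list, e.g. "0-7,16-23" */
	pr_info("cpu%d shares its last-level cache with cpus %*pbl\n",
		cpu, cpumask_pr_args(mask));
}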
diff --git a/arch/sparc/lib/GENcopy_from_user.S b/arch/sparc/lib/GENcopy_from_user.S
index b7d0bd6b1406..69a439fa2fc1 100644
--- a/arch/sparc/lib/GENcopy_from_user.S
+++ b/arch/sparc/lib/GENcopy_from_user.S
@@ -3,11 +3,11 @@
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
-#define EX_LD(x) \
+#define EX_LD(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one; \
+ .word 98b, y; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/GENcopy_to_user.S b/arch/sparc/lib/GENcopy_to_user.S
index 780550e1afc7..9947427ce354 100644
--- a/arch/sparc/lib/GENcopy_to_user.S
+++ b/arch/sparc/lib/GENcopy_to_user.S
@@ -3,11 +3,11 @@
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
-#define EX_ST(x) \
+#define EX_ST(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one; \
+ .word 98b, y; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/GENmemcpy.S b/arch/sparc/lib/GENmemcpy.S
index 89358ee94851..059ea24ad73d 100644
--- a/arch/sparc/lib/GENmemcpy.S
+++ b/arch/sparc/lib/GENmemcpy.S
@@ -4,21 +4,18 @@
*/
#ifdef __KERNEL__
+#include <linux/linkage.h>
#define GLOBAL_SPARE %g7
#else
#define GLOBAL_SPARE %g5
#endif
#ifndef EX_LD
-#define EX_LD(x) x
+#define EX_LD(x,y) x
#endif
#ifndef EX_ST
-#define EX_ST(x) x
-#endif
-
-#ifndef EX_RETVAL
-#define EX_RETVAL(x) x
+#define EX_ST(x,y) x
#endif
#ifndef LOAD
@@ -45,6 +42,29 @@
.register %g3,#scratch
.text
+
+#ifndef EX_RETVAL
+#define EX_RETVAL(x) x
+ENTRY(GEN_retl_o4_1)
+ add %o4, %o2, %o4
+ retl
+ add %o4, 1, %o0
+ENDPROC(GEN_retl_o4_1)
+ENTRY(GEN_retl_g1_8)
+ add %g1, %o2, %g1
+ retl
+ add %g1, 8, %o0
+ENDPROC(GEN_retl_g1_8)
+ENTRY(GEN_retl_o2_4)
+ retl
+ add %o2, 4, %o0
+ENDPROC(GEN_retl_o2_4)
+ENTRY(GEN_retl_o2_1)
+ retl
+ add %o2, 1, %o0
+ENDPROC(GEN_retl_o2_1)
+#endif
+
.align 64
.globl FUNC_NAME
@@ -73,8 +93,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %g0, %o4, %o4
sub %o2, %o4, %o2
1: subcc %o4, 1, %o4
- EX_LD(LOAD(ldub, %o1, %g1))
- EX_ST(STORE(stb, %g1, %o0))
+ EX_LD(LOAD(ldub, %o1, %g1),GEN_retl_o4_1)
+ EX_ST(STORE(stb, %g1, %o0),GEN_retl_o4_1)
add %o1, 1, %o1
bne,pt %XCC, 1b
add %o0, 1, %o0
@@ -82,8 +102,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
andn %o2, 0x7, %g1
sub %o2, %g1, %o2
1: subcc %g1, 0x8, %g1
- EX_LD(LOAD(ldx, %o1, %g2))
- EX_ST(STORE(stx, %g2, %o0))
+ EX_LD(LOAD(ldx, %o1, %g2),GEN_retl_g1_8)
+ EX_ST(STORE(stx, %g2, %o0),GEN_retl_g1_8)
add %o1, 0x8, %o1
bne,pt %XCC, 1b
add %o0, 0x8, %o0
@@ -100,8 +120,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
1:
subcc %o2, 4, %o2
- EX_LD(LOAD(lduw, %o1, %g1))
- EX_ST(STORE(stw, %g1, %o1 + %o3))
+ EX_LD(LOAD(lduw, %o1, %g1),GEN_retl_o2_4)
+ EX_ST(STORE(stw, %g1, %o1 + %o3),GEN_retl_o2_4)
bgu,pt %XCC, 1b
add %o1, 4, %o1
@@ -111,8 +131,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
.align 32
90:
subcc %o2, 1, %o2
- EX_LD(LOAD(ldub, %o1, %g1))
- EX_ST(STORE(stb, %g1, %o1 + %o3))
+ EX_LD(LOAD(ldub, %o1, %g1),GEN_retl_o2_1)
+ EX_ST(STORE(stb, %g1, %o1 + %o3),GEN_retl_o2_1)
bgu,pt %XCC, 90b
add %o1, 1, %o1
retl
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 885f00e81d1a..69912d2f8b54 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -38,7 +38,7 @@ lib-$(CONFIG_SPARC64) += NG4patch.o NG4copy_page.o NG4clear_page.o NG4memset.o
lib-$(CONFIG_SPARC64) += GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o
lib-$(CONFIG_SPARC64) += GENpatch.o GENpage.o GENbzero.o
-lib-$(CONFIG_SPARC64) += copy_in_user.o user_fixup.o memmove.o
+lib-$(CONFIG_SPARC64) += copy_in_user.o memmove.o
lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o
obj-$(CONFIG_SPARC64) += iomap.o
diff --git a/arch/sparc/lib/NG2copy_from_user.S b/arch/sparc/lib/NG2copy_from_user.S
index d5242b8c4f94..b79a6998d87c 100644
--- a/arch/sparc/lib/NG2copy_from_user.S
+++ b/arch/sparc/lib/NG2copy_from_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
-#define EX_LD(x) \
+#define EX_LD(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi;\
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_LD_FP(x) \
+#define EX_LD_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi_fp;\
+ .word 98b, y##_fp; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/NG2copy_to_user.S b/arch/sparc/lib/NG2copy_to_user.S
index 4e962d993b10..dcec55f254ab 100644
--- a/arch/sparc/lib/NG2copy_to_user.S
+++ b/arch/sparc/lib/NG2copy_to_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
-#define EX_ST(x) \
+#define EX_ST(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi;\
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_ST_FP(x) \
+#define EX_ST_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi_fp;\
+ .word 98b, y##_fp; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S
index d5f585df2f3f..c629dbd121b6 100644
--- a/arch/sparc/lib/NG2memcpy.S
+++ b/arch/sparc/lib/NG2memcpy.S
@@ -4,6 +4,7 @@
*/
#ifdef __KERNEL__
+#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#define GLOBAL_SPARE %g7
@@ -32,21 +33,17 @@
#endif
#ifndef EX_LD
-#define EX_LD(x) x
+#define EX_LD(x,y) x
#endif
#ifndef EX_LD_FP
-#define EX_LD_FP(x) x
+#define EX_LD_FP(x,y) x
#endif
#ifndef EX_ST
-#define EX_ST(x) x
+#define EX_ST(x,y) x
#endif
#ifndef EX_ST_FP
-#define EX_ST_FP(x) x
-#endif
-
-#ifndef EX_RETVAL
-#define EX_RETVAL(x) x
+#define EX_ST_FP(x,y) x
#endif
#ifndef LOAD
@@ -140,45 +137,110 @@
fsrc2 %x6, %f12; \
fsrc2 %x7, %f14;
#define FREG_LOAD_1(base, x0) \
- EX_LD_FP(LOAD(ldd, base + 0x00, %x0))
+ EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1)
#define FREG_LOAD_2(base, x0, x1) \
- EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
- EX_LD_FP(LOAD(ldd, base + 0x08, %x1));
+ EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1);
#define FREG_LOAD_3(base, x0, x1, x2) \
- EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
- EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
- EX_LD_FP(LOAD(ldd, base + 0x10, %x2));
+ EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1);
#define FREG_LOAD_4(base, x0, x1, x2, x3) \
- EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
- EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
- EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
- EX_LD_FP(LOAD(ldd, base + 0x18, %x3));
+ EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1);
#define FREG_LOAD_5(base, x0, x1, x2, x3, x4) \
- EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
- EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
- EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
- EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \
- EX_LD_FP(LOAD(ldd, base + 0x20, %x4));
+ EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1);
#define FREG_LOAD_6(base, x0, x1, x2, x3, x4, x5) \
- EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
- EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
- EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
- EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \
- EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); \
- EX_LD_FP(LOAD(ldd, base + 0x28, %x5));
+ EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x28, %x5), NG2_retl_o2_plus_g1);
#define FREG_LOAD_7(base, x0, x1, x2, x3, x4, x5, x6) \
- EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
- EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
- EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
- EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \
- EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); \
- EX_LD_FP(LOAD(ldd, base + 0x28, %x5)); \
- EX_LD_FP(LOAD(ldd, base + 0x30, %x6));
+ EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x28, %x5), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x30, %x6), NG2_retl_o2_plus_g1);
.register %g2,#scratch
.register %g3,#scratch
.text
+#ifndef EX_RETVAL
+#define EX_RETVAL(x) x
+__restore_fp:
+ VISExitHalf
+__restore_asi:
+ retl
+ wr %g0, ASI_AIUS, %asi
+ENTRY(NG2_retl_o2)
+ ba,pt %xcc, __restore_asi
+ mov %o2, %o0
+ENDPROC(NG2_retl_o2)
+ENTRY(NG2_retl_o2_plus_1)
+ ba,pt %xcc, __restore_asi
+ add %o2, 1, %o0
+ENDPROC(NG2_retl_o2_plus_1)
+ENTRY(NG2_retl_o2_plus_4)
+ ba,pt %xcc, __restore_asi
+ add %o2, 4, %o0
+ENDPROC(NG2_retl_o2_plus_4)
+ENTRY(NG2_retl_o2_plus_8)
+ ba,pt %xcc, __restore_asi
+ add %o2, 8, %o0
+ENDPROC(NG2_retl_o2_plus_8)
+ENTRY(NG2_retl_o2_plus_o4_plus_1)
+ add %o4, 1, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG2_retl_o2_plus_o4_plus_1)
+ENTRY(NG2_retl_o2_plus_o4_plus_8)
+ add %o4, 8, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG2_retl_o2_plus_o4_plus_8)
+ENTRY(NG2_retl_o2_plus_o4_plus_16)
+ add %o4, 16, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG2_retl_o2_plus_o4_plus_16)
+ENTRY(NG2_retl_o2_plus_g1_fp)
+ ba,pt %xcc, __restore_fp
+ add %o2, %g1, %o0
+ENDPROC(NG2_retl_o2_plus_g1_fp)
+ENTRY(NG2_retl_o2_plus_g1_plus_64_fp)
+ add %g1, 64, %g1
+ ba,pt %xcc, __restore_fp
+ add %o2, %g1, %o0
+ENDPROC(NG2_retl_o2_plus_g1_plus_64_fp)
+ENTRY(NG2_retl_o2_plus_g1_plus_1)
+ add %g1, 1, %g1
+ ba,pt %xcc, __restore_asi
+ add %o2, %g1, %o0
+ENDPROC(NG2_retl_o2_plus_g1_plus_1)
+ENTRY(NG2_retl_o2_and_7_plus_o4)
+ and %o2, 7, %o2
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG2_retl_o2_and_7_plus_o4)
+ENTRY(NG2_retl_o2_and_7_plus_o4_plus_8)
+ and %o2, 7, %o2
+ add %o4, 8, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG2_retl_o2_and_7_plus_o4_plus_8)
+#endif
+
.align 64
.globl FUNC_NAME
@@ -230,8 +292,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %g0, %o4, %o4 ! bytes to align dst
sub %o2, %o4, %o2
1: subcc %o4, 1, %o4
- EX_LD(LOAD(ldub, %o1, %g1))
- EX_ST(STORE(stb, %g1, %o0))
+ EX_LD(LOAD(ldub, %o1, %g1), NG2_retl_o2_plus_o4_plus_1)
+ EX_ST(STORE(stb, %g1, %o0), NG2_retl_o2_plus_o4_plus_1)
add %o1, 1, %o1
bne,pt %XCC, 1b
add %o0, 1, %o0
@@ -281,11 +343,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
nop
/* fall through for 0 < low bits < 8 */
110: sub %o4, 64, %g2
- EX_LD_FP(LOAD_BLK(%g2, %f0))
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+ EX_LD_FP(LOAD_BLK(%g2, %f0), NG2_retl_o2_plus_g1)
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f14, f16)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_8(f16, f18, f20, f22, f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -296,10 +358,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
120: sub %o4, 56, %g2
FREG_LOAD_7(%g2, f0, f2, f4, f6, f8, f10, f12)
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f16, f18)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_7(f18, f20, f22, f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -310,10 +372,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
130: sub %o4, 48, %g2
FREG_LOAD_6(%g2, f0, f2, f4, f6, f8, f10)
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f6, f8, f10, f16, f18, f20)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_6(f20, f22, f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -324,10 +386,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
140: sub %o4, 40, %g2
FREG_LOAD_5(%g2, f0, f2, f4, f6, f8)
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f6, f8, f16, f18, f20, f22)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_5(f22, f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -338,10 +400,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
150: sub %o4, 32, %g2
FREG_LOAD_4(%g2, f0, f2, f4, f6)
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f6, f16, f18, f20, f22, f24)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_4(f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -352,10 +414,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
160: sub %o4, 24, %g2
FREG_LOAD_3(%g2, f0, f2, f4)
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f16, f18, f20, f22, f24, f26)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_3(f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -366,10 +428,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
170: sub %o4, 16, %g2
FREG_LOAD_2(%g2, f0, f2)
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f16, f18, f20, f22, f24, f26, f28)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_2(f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -380,10 +442,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
180: sub %o4, 8, %g2
FREG_LOAD_1(%g2, f0)
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f16, f18, f20, f22, f24, f26, f28, f30)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_1(f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -393,10 +455,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
nop
190:
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
subcc %g1, 64, %g1
- EX_LD_FP(LOAD_BLK(%o4, %f0))
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_LD_FP(LOAD_BLK(%o4, %f0), NG2_retl_o2_plus_g1_plus_64)
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1_plus_64)
add %o4, 64, %o4
bne,pt %xcc, 1b
LOAD(prefetch, %o4 + 64, #one_read)
@@ -423,28 +485,28 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
andn %o2, 0xf, %o4
and %o2, 0xf, %o2
1: subcc %o4, 0x10, %o4
- EX_LD(LOAD(ldx, %o1, %o5))
+ EX_LD(LOAD(ldx, %o1, %o5), NG2_retl_o2_plus_o4_plus_16)
add %o1, 0x08, %o1
- EX_LD(LOAD(ldx, %o1, %g1))
+ EX_LD(LOAD(ldx, %o1, %g1), NG2_retl_o2_plus_o4_plus_16)
sub %o1, 0x08, %o1
- EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE))
+ EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_o4_plus_16)
add %o1, 0x8, %o1
- EX_ST(STORE(stx, %g1, %o1 + GLOBAL_SPARE))
+ EX_ST(STORE(stx, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_o4_plus_8)
bgu,pt %XCC, 1b
add %o1, 0x8, %o1
73: andcc %o2, 0x8, %g0
be,pt %XCC, 1f
nop
sub %o2, 0x8, %o2
- EX_LD(LOAD(ldx, %o1, %o5))
- EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE))
+ EX_LD(LOAD(ldx, %o1, %o5), NG2_retl_o2_plus_8)
+ EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_8)
add %o1, 0x8, %o1
1: andcc %o2, 0x4, %g0
be,pt %XCC, 1f
nop
sub %o2, 0x4, %o2
- EX_LD(LOAD(lduw, %o1, %o5))
- EX_ST(STORE(stw, %o5, %o1 + GLOBAL_SPARE))
+ EX_LD(LOAD(lduw, %o1, %o5), NG2_retl_o2_plus_4)
+ EX_ST(STORE(stw, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_4)
add %o1, 0x4, %o1
1: cmp %o2, 0
be,pt %XCC, 85f
@@ -460,8 +522,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %o2, %g1, %o2
1: subcc %g1, 1, %g1
- EX_LD(LOAD(ldub, %o1, %o5))
- EX_ST(STORE(stb, %o5, %o1 + GLOBAL_SPARE))
+ EX_LD(LOAD(ldub, %o1, %o5), NG2_retl_o2_plus_g1_plus_1)
+ EX_ST(STORE(stb, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_g1_plus_1)
bgu,pt %icc, 1b
add %o1, 1, %o1
@@ -477,16 +539,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
8: mov 64, GLOBAL_SPARE
andn %o1, 0x7, %o1
- EX_LD(LOAD(ldx, %o1, %g2))
+ EX_LD(LOAD(ldx, %o1, %g2), NG2_retl_o2)
sub GLOBAL_SPARE, %g1, GLOBAL_SPARE
andn %o2, 0x7, %o4
sllx %g2, %g1, %g2
1: add %o1, 0x8, %o1
- EX_LD(LOAD(ldx, %o1, %g3))
+ EX_LD(LOAD(ldx, %o1, %g3), NG2_retl_o2_and_7_plus_o4)
subcc %o4, 0x8, %o4
srlx %g3, GLOBAL_SPARE, %o5
or %o5, %g2, %o5
- EX_ST(STORE(stx, %o5, %o0))
+ EX_ST(STORE(stx, %o5, %o0), NG2_retl_o2_and_7_plus_o4_plus_8)
add %o0, 0x8, %o0
bgu,pt %icc, 1b
sllx %g3, %g1, %g2
@@ -506,8 +568,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
1:
subcc %o2, 4, %o2
- EX_LD(LOAD(lduw, %o1, %g1))
- EX_ST(STORE(stw, %g1, %o1 + GLOBAL_SPARE))
+ EX_LD(LOAD(lduw, %o1, %g1), NG2_retl_o2_plus_4)
+ EX_ST(STORE(stw, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_4)
bgu,pt %XCC, 1b
add %o1, 4, %o1
@@ -517,8 +579,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
.align 32
90:
subcc %o2, 1, %o2
- EX_LD(LOAD(ldub, %o1, %g1))
- EX_ST(STORE(stb, %g1, %o1 + GLOBAL_SPARE))
+ EX_LD(LOAD(ldub, %o1, %g1), NG2_retl_o2_plus_1)
+ EX_ST(STORE(stb, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_1)
bgu,pt %XCC, 90b
add %o1, 1, %o1
retl
diff --git a/arch/sparc/lib/NG4copy_from_user.S b/arch/sparc/lib/NG4copy_from_user.S
index 2e8ee7ad07a9..16a286c1a528 100644
--- a/arch/sparc/lib/NG4copy_from_user.S
+++ b/arch/sparc/lib/NG4copy_from_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 2012 David S. Miller (davem@davemloft.net)
*/
-#define EX_LD(x) \
+#define EX_LD(x, y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi;\
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_LD_FP(x) \
+#define EX_LD_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi_fp;\
+ .word 98b, y##_fp; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/NG4copy_to_user.S b/arch/sparc/lib/NG4copy_to_user.S
index be0bf4590df8..6b0276ffc858 100644
--- a/arch/sparc/lib/NG4copy_to_user.S
+++ b/arch/sparc/lib/NG4copy_to_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 2012 David S. Miller (davem@davemloft.net)
*/
-#define EX_ST(x) \
+#define EX_ST(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi;\
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_ST_FP(x) \
+#define EX_ST_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi_fp;\
+ .word 98b, y##_fp; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
index 8e13ee1f4454..75bb93b1437f 100644
--- a/arch/sparc/lib/NG4memcpy.S
+++ b/arch/sparc/lib/NG4memcpy.S
@@ -4,6 +4,7 @@
*/
#ifdef __KERNEL__
+#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#define GLOBAL_SPARE %g7
@@ -46,22 +47,19 @@
#endif
#ifndef EX_LD
-#define EX_LD(x) x
+#define EX_LD(x,y) x
#endif
#ifndef EX_LD_FP
-#define EX_LD_FP(x) x
+#define EX_LD_FP(x,y) x
#endif
#ifndef EX_ST
-#define EX_ST(x) x
+#define EX_ST(x,y) x
#endif
#ifndef EX_ST_FP
-#define EX_ST_FP(x) x
+#define EX_ST_FP(x,y) x
#endif
-#ifndef EX_RETVAL
-#define EX_RETVAL(x) x
-#endif
#ifndef LOAD
#define LOAD(type,addr,dest) type [addr], dest
@@ -94,6 +92,158 @@
.register %g3,#scratch
.text
+#ifndef EX_RETVAL
+#define EX_RETVAL(x) x
+__restore_asi_fp:
+ VISExitHalf
+__restore_asi:
+ retl
+ wr %g0, ASI_AIUS, %asi
+
+ENTRY(NG4_retl_o2)
+ ba,pt %xcc, __restore_asi
+ mov %o2, %o0
+ENDPROC(NG4_retl_o2)
+ENTRY(NG4_retl_o2_plus_1)
+ ba,pt %xcc, __restore_asi
+ add %o2, 1, %o0
+ENDPROC(NG4_retl_o2_plus_1)
+ENTRY(NG4_retl_o2_plus_4)
+ ba,pt %xcc, __restore_asi
+ add %o2, 4, %o0
+ENDPROC(NG4_retl_o2_plus_4)
+ENTRY(NG4_retl_o2_plus_o5)
+ ba,pt %xcc, __restore_asi
+ add %o2, %o5, %o0
+ENDPROC(NG4_retl_o2_plus_o5)
+ENTRY(NG4_retl_o2_plus_o5_plus_4)
+ add %o5, 4, %o5
+ ba,pt %xcc, __restore_asi
+ add %o2, %o5, %o0
+ENDPROC(NG4_retl_o2_plus_o5_plus_4)
+ENTRY(NG4_retl_o2_plus_o5_plus_8)
+ add %o5, 8, %o5
+ ba,pt %xcc, __restore_asi
+ add %o2, %o5, %o0
+ENDPROC(NG4_retl_o2_plus_o5_plus_8)
+ENTRY(NG4_retl_o2_plus_o5_plus_16)
+ add %o5, 16, %o5
+ ba,pt %xcc, __restore_asi
+ add %o2, %o5, %o0
+ENDPROC(NG4_retl_o2_plus_o5_plus_16)
+ENTRY(NG4_retl_o2_plus_o5_plus_24)
+ add %o5, 24, %o5
+ ba,pt %xcc, __restore_asi
+ add %o2, %o5, %o0
+ENDPROC(NG4_retl_o2_plus_o5_plus_24)
+ENTRY(NG4_retl_o2_plus_o5_plus_32)
+ add %o5, 32, %o5
+ ba,pt %xcc, __restore_asi
+ add %o2, %o5, %o0
+ENDPROC(NG4_retl_o2_plus_o5_plus_32)
+ENTRY(NG4_retl_o2_plus_g1)
+ ba,pt %xcc, __restore_asi
+ add %o2, %g1, %o0
+ENDPROC(NG4_retl_o2_plus_g1)
+ENTRY(NG4_retl_o2_plus_g1_plus_1)
+ add %g1, 1, %g1
+ ba,pt %xcc, __restore_asi
+ add %o2, %g1, %o0
+ENDPROC(NG4_retl_o2_plus_g1_plus_1)
+ENTRY(NG4_retl_o2_plus_g1_plus_8)
+ add %g1, 8, %g1
+ ba,pt %xcc, __restore_asi
+ add %o2, %g1, %o0
+ENDPROC(NG4_retl_o2_plus_g1_plus_8)
+ENTRY(NG4_retl_o2_plus_o4)
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4)
+ENTRY(NG4_retl_o2_plus_o4_plus_8)
+ add %o4, 8, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_8)
+ENTRY(NG4_retl_o2_plus_o4_plus_16)
+ add %o4, 16, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_16)
+ENTRY(NG4_retl_o2_plus_o4_plus_24)
+ add %o4, 24, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_24)
+ENTRY(NG4_retl_o2_plus_o4_plus_32)
+ add %o4, 32, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_32)
+ENTRY(NG4_retl_o2_plus_o4_plus_40)
+ add %o4, 40, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_40)
+ENTRY(NG4_retl_o2_plus_o4_plus_48)
+ add %o4, 48, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_48)
+ENTRY(NG4_retl_o2_plus_o4_plus_56)
+ add %o4, 56, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_56)
+ENTRY(NG4_retl_o2_plus_o4_plus_64)
+ add %o4, 64, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_64)
+ENTRY(NG4_retl_o2_plus_o4_fp)
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_8_fp)
+ add %o4, 8, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_8_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_16_fp)
+ add %o4, 16, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_16_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_24_fp)
+ add %o4, 24, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_24_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_32_fp)
+ add %o4, 32, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_32_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_40_fp)
+ add %o4, 40, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_40_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_48_fp)
+ add %o4, 48, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_48_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_56_fp)
+ add %o4, 56, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_56_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_64_fp)
+ add %o4, 64, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_64_fp)
+#endif
.align 64
.globl FUNC_NAME
@@ -124,12 +274,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
brz,pt %g1, 51f
sub %o2, %g1, %o2
-1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2))
+
+1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1)
add %o1, 1, %o1
subcc %g1, 1, %g1
add %o0, 1, %o0
bne,pt %icc, 1b
- EX_ST(STORE(stb, %g2, %o0 - 0x01))
+ EX_ST(STORE(stb, %g2, %o0 - 0x01), NG4_retl_o2_plus_g1_plus_1)
51: LOAD(prefetch, %o1 + 0x040, #n_reads_strong)
LOAD(prefetch, %o1 + 0x080, #n_reads_strong)
@@ -154,43 +305,43 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
brz,pt %g1, .Llarge_aligned
sub %o2, %g1, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x00, %g2))
+1: EX_LD(LOAD(ldx, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1)
add %o1, 8, %o1
subcc %g1, 8, %g1
add %o0, 8, %o0
bne,pt %icc, 1b
- EX_ST(STORE(stx, %g2, %o0 - 0x08))
+ EX_ST(STORE(stx, %g2, %o0 - 0x08), NG4_retl_o2_plus_g1_plus_8)
.Llarge_aligned:
/* len >= 0x80 && src 8-byte aligned && dest 8-byte aligned */
andn %o2, 0x3f, %o4
sub %o2, %o4, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1))
+1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o4)
add %o1, 0x40, %o1
- EX_LD(LOAD(ldx, %o1 - 0x38, %g2))
+ EX_LD(LOAD(ldx, %o1 - 0x38, %g2), NG4_retl_o2_plus_o4)
subcc %o4, 0x40, %o4
- EX_LD(LOAD(ldx, %o1 - 0x30, %g3))
- EX_LD(LOAD(ldx, %o1 - 0x28, GLOBAL_SPARE))
- EX_LD(LOAD(ldx, %o1 - 0x20, %o5))
- EX_ST(STORE_INIT(%g1, %o0))
+ EX_LD(LOAD(ldx, %o1 - 0x30, %g3), NG4_retl_o2_plus_o4_plus_64)
+ EX_LD(LOAD(ldx, %o1 - 0x28, GLOBAL_SPARE), NG4_retl_o2_plus_o4_plus_64)
+ EX_LD(LOAD(ldx, %o1 - 0x20, %o5), NG4_retl_o2_plus_o4_plus_64)
+ EX_ST(STORE_INIT(%g1, %o0), NG4_retl_o2_plus_o4_plus_64)
add %o0, 0x08, %o0
- EX_ST(STORE_INIT(%g2, %o0))
+ EX_ST(STORE_INIT(%g2, %o0), NG4_retl_o2_plus_o4_plus_56)
add %o0, 0x08, %o0
- EX_LD(LOAD(ldx, %o1 - 0x18, %g2))
- EX_ST(STORE_INIT(%g3, %o0))
+ EX_LD(LOAD(ldx, %o1 - 0x18, %g2), NG4_retl_o2_plus_o4_plus_48)
+ EX_ST(STORE_INIT(%g3, %o0), NG4_retl_o2_plus_o4_plus_48)
add %o0, 0x08, %o0
- EX_LD(LOAD(ldx, %o1 - 0x10, %g3))
- EX_ST(STORE_INIT(GLOBAL_SPARE, %o0))
+ EX_LD(LOAD(ldx, %o1 - 0x10, %g3), NG4_retl_o2_plus_o4_plus_40)
+ EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), NG4_retl_o2_plus_o4_plus_40)
add %o0, 0x08, %o0
- EX_LD(LOAD(ldx, %o1 - 0x08, GLOBAL_SPARE))
- EX_ST(STORE_INIT(%o5, %o0))
+ EX_LD(LOAD(ldx, %o1 - 0x08, GLOBAL_SPARE), NG4_retl_o2_plus_o4_plus_32)
+ EX_ST(STORE_INIT(%o5, %o0), NG4_retl_o2_plus_o4_plus_32)
add %o0, 0x08, %o0
- EX_ST(STORE_INIT(%g2, %o0))
+ EX_ST(STORE_INIT(%g2, %o0), NG4_retl_o2_plus_o4_plus_24)
add %o0, 0x08, %o0
- EX_ST(STORE_INIT(%g3, %o0))
+ EX_ST(STORE_INIT(%g3, %o0), NG4_retl_o2_plus_o4_plus_16)
add %o0, 0x08, %o0
- EX_ST(STORE_INIT(GLOBAL_SPARE, %o0))
+ EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), NG4_retl_o2_plus_o4_plus_8)
add %o0, 0x08, %o0
bne,pt %icc, 1b
LOAD(prefetch, %o1 + 0x200, #n_reads_strong)
@@ -216,17 +367,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %o2, %o4, %o2
alignaddr %o1, %g0, %g1
add %o1, %o4, %o1
- EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0))
-1: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2))
+ EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0), NG4_retl_o2_plus_o4)
+1: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2), NG4_retl_o2_plus_o4)
subcc %o4, 0x40, %o4
- EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4))
- EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6))
- EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8))
- EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10))
- EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12))
- EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14))
+ EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4), NG4_retl_o2_plus_o4_plus_64)
+ EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6), NG4_retl_o2_plus_o4_plus_64)
+ EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8), NG4_retl_o2_plus_o4_plus_64)
+ EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10), NG4_retl_o2_plus_o4_plus_64)
+ EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12), NG4_retl_o2_plus_o4_plus_64)
+ EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14), NG4_retl_o2_plus_o4_plus_64)
faligndata %f0, %f2, %f16
- EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0))
+ EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0), NG4_retl_o2_plus_o4_plus_64)
faligndata %f2, %f4, %f18
add %g1, 0x40, %g1
faligndata %f4, %f6, %f20
@@ -235,14 +386,14 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
faligndata %f10, %f12, %f26
faligndata %f12, %f14, %f28
faligndata %f14, %f0, %f30
- EX_ST_FP(STORE(std, %f16, %o0 + 0x00))
- EX_ST_FP(STORE(std, %f18, %o0 + 0x08))
- EX_ST_FP(STORE(std, %f20, %o0 + 0x10))
- EX_ST_FP(STORE(std, %f22, %o0 + 0x18))
- EX_ST_FP(STORE(std, %f24, %o0 + 0x20))
- EX_ST_FP(STORE(std, %f26, %o0 + 0x28))
- EX_ST_FP(STORE(std, %f28, %o0 + 0x30))
- EX_ST_FP(STORE(std, %f30, %o0 + 0x38))
+ EX_ST_FP(STORE(std, %f16, %o0 + 0x00), NG4_retl_o2_plus_o4_plus_64)
+ EX_ST_FP(STORE(std, %f18, %o0 + 0x08), NG4_retl_o2_plus_o4_plus_56)
+ EX_ST_FP(STORE(std, %f20, %o0 + 0x10), NG4_retl_o2_plus_o4_plus_48)
+ EX_ST_FP(STORE(std, %f22, %o0 + 0x18), NG4_retl_o2_plus_o4_plus_40)
+ EX_ST_FP(STORE(std, %f24, %o0 + 0x20), NG4_retl_o2_plus_o4_plus_32)
+ EX_ST_FP(STORE(std, %f26, %o0 + 0x28), NG4_retl_o2_plus_o4_plus_24)
+ EX_ST_FP(STORE(std, %f28, %o0 + 0x30), NG4_retl_o2_plus_o4_plus_16)
+ EX_ST_FP(STORE(std, %f30, %o0 + 0x38), NG4_retl_o2_plus_o4_plus_8)
add %o0, 0x40, %o0
bne,pt %icc, 1b
LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
@@ -270,37 +421,38 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
andncc %o2, 0x20 - 1, %o5
be,pn %icc, 2f
sub %o2, %o5, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1))
- EX_LD(LOAD(ldx, %o1 + 0x08, %g2))
- EX_LD(LOAD(ldx, %o1 + 0x10, GLOBAL_SPARE))
- EX_LD(LOAD(ldx, %o1 + 0x18, %o4))
+1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5)
+ EX_LD(LOAD(ldx, %o1 + 0x08, %g2), NG4_retl_o2_plus_o5)
+ EX_LD(LOAD(ldx, %o1 + 0x10, GLOBAL_SPARE), NG4_retl_o2_plus_o5)
+ EX_LD(LOAD(ldx, %o1 + 0x18, %o4), NG4_retl_o2_plus_o5)
add %o1, 0x20, %o1
subcc %o5, 0x20, %o5
- EX_ST(STORE(stx, %g1, %o0 + 0x00))
- EX_ST(STORE(stx, %g2, %o0 + 0x08))
- EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10))
- EX_ST(STORE(stx, %o4, %o0 + 0x18))
+ EX_ST(STORE(stx, %g1, %o0 + 0x00), NG4_retl_o2_plus_o5_plus_32)
+ EX_ST(STORE(stx, %g2, %o0 + 0x08), NG4_retl_o2_plus_o5_plus_24)
+ EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10), NG4_retl_o2_plus_o5_plus_24)
+ EX_ST(STORE(stx, %o4, %o0 + 0x18), NG4_retl_o2_plus_o5_plus_8)
bne,pt %icc, 1b
add %o0, 0x20, %o0
2: andcc %o2, 0x18, %o5
be,pt %icc, 3f
sub %o2, %o5, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1))
+
+1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5)
add %o1, 0x08, %o1
add %o0, 0x08, %o0
subcc %o5, 0x08, %o5
bne,pt %icc, 1b
- EX_ST(STORE(stx, %g1, %o0 - 0x08))
+ EX_ST(STORE(stx, %g1, %o0 - 0x08), NG4_retl_o2_plus_o5_plus_8)
3: brz,pt %o2, .Lexit
cmp %o2, 0x04
bl,pn %icc, .Ltiny
nop
- EX_LD(LOAD(lduw, %o1 + 0x00, %g1))
+ EX_LD(LOAD(lduw, %o1 + 0x00, %g1), NG4_retl_o2)
add %o1, 0x04, %o1
add %o0, 0x04, %o0
subcc %o2, 0x04, %o2
bne,pn %icc, .Ltiny
- EX_ST(STORE(stw, %g1, %o0 - 0x04))
+ EX_ST(STORE(stw, %g1, %o0 - 0x04), NG4_retl_o2_plus_4)
ba,a,pt %icc, .Lexit
.Lmedium_unaligned:
/* First get dest 8 byte aligned. */
@@ -309,12 +461,12 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
brz,pt %g1, 2f
sub %o2, %g1, %o2
-1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2))
+1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1)
add %o1, 1, %o1
subcc %g1, 1, %g1
add %o0, 1, %o0
bne,pt %icc, 1b
- EX_ST(STORE(stb, %g2, %o0 - 0x01))
+ EX_ST(STORE(stb, %g2, %o0 - 0x01), NG4_retl_o2_plus_g1_plus_1)
2:
and %o1, 0x7, %g1
brz,pn %g1, .Lmedium_noprefetch
@@ -322,16 +474,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
mov 64, %g2
sub %g2, %g1, %g2
andn %o1, 0x7, %o1
- EX_LD(LOAD(ldx, %o1 + 0x00, %o4))
+ EX_LD(LOAD(ldx, %o1 + 0x00, %o4), NG4_retl_o2)
sllx %o4, %g1, %o4
andn %o2, 0x08 - 1, %o5
sub %o2, %o5, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3))
+1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3), NG4_retl_o2_plus_o5)
add %o1, 0x08, %o1
subcc %o5, 0x08, %o5
srlx %g3, %g2, GLOBAL_SPARE
or GLOBAL_SPARE, %o4, GLOBAL_SPARE
- EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x00))
+ EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x00), NG4_retl_o2_plus_o5_plus_8)
add %o0, 0x08, %o0
bne,pt %icc, 1b
sllx %g3, %g1, %o4
@@ -342,17 +494,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
ba,pt %icc, .Lsmall_unaligned
.Ltiny:
- EX_LD(LOAD(ldub, %o1 + 0x00, %g1))
+ EX_LD(LOAD(ldub, %o1 + 0x00, %g1), NG4_retl_o2)
subcc %o2, 1, %o2
be,pn %icc, .Lexit
- EX_ST(STORE(stb, %g1, %o0 + 0x00))
- EX_LD(LOAD(ldub, %o1 + 0x01, %g1))
+ EX_ST(STORE(stb, %g1, %o0 + 0x00), NG4_retl_o2_plus_1)
+ EX_LD(LOAD(ldub, %o1 + 0x01, %g1), NG4_retl_o2)
subcc %o2, 1, %o2
be,pn %icc, .Lexit
- EX_ST(STORE(stb, %g1, %o0 + 0x01))
- EX_LD(LOAD(ldub, %o1 + 0x02, %g1))
+ EX_ST(STORE(stb, %g1, %o0 + 0x01), NG4_retl_o2_plus_1)
+ EX_LD(LOAD(ldub, %o1 + 0x02, %g1), NG4_retl_o2)
ba,pt %icc, .Lexit
- EX_ST(STORE(stb, %g1, %o0 + 0x02))
+ EX_ST(STORE(stb, %g1, %o0 + 0x02), NG4_retl_o2)
.Lsmall:
andcc %g2, 0x3, %g0
@@ -360,22 +512,22 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
andn %o2, 0x4 - 1, %o5
sub %o2, %o5, %o2
1:
- EX_LD(LOAD(lduw, %o1 + 0x00, %g1))
+ EX_LD(LOAD(lduw, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5)
add %o1, 0x04, %o1
subcc %o5, 0x04, %o5
add %o0, 0x04, %o0
bne,pt %icc, 1b
- EX_ST(STORE(stw, %g1, %o0 - 0x04))
+ EX_ST(STORE(stw, %g1, %o0 - 0x04), NG4_retl_o2_plus_o5_plus_4)
brz,pt %o2, .Lexit
nop
ba,a,pt %icc, .Ltiny
.Lsmall_unaligned:
-1: EX_LD(LOAD(ldub, %o1 + 0x00, %g1))
+1: EX_LD(LOAD(ldub, %o1 + 0x00, %g1), NG4_retl_o2)
add %o1, 1, %o1
add %o0, 1, %o0
subcc %o2, 1, %o2
bne,pt %icc, 1b
- EX_ST(STORE(stb, %g1, %o0 - 0x01))
+ EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1)
ba,a,pt %icc, .Lexit
.size FUNC_NAME, .-FUNC_NAME
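(Illustrative note, not part of the patch: the NG4memcpy hunks above change EX_LD()/EX_ST() from one argument to two, where the new second argument names a fixup routine whose name encodes exactly how many bytes were still uncopied at the faulting access, e.g. NG4_retl_o2_plus_o5_plus_32 returns %o2 + %o5 + 32. A minimal C model of that arithmetic; the function name below is invented for this sketch only.)

/* Sketch only: what a fixup such as NG4_retl_o2_plus_o5_plus_32
 * reports back to the caller -- the unconsumed tail length (%o2),
 * the remainder of the current chunk counter (%o5), plus a constant
 * bias for stores in the unrolled block that had not yet retired.
 */
unsigned long ng4_residue(unsigned long o2, unsigned long o5, unsigned long bias)
{
	return o2 + o5 + bias;
}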
diff --git a/arch/sparc/lib/NGcopy_from_user.S b/arch/sparc/lib/NGcopy_from_user.S
index 5d1e4d1ac21e..9cd42fcbc781 100644
--- a/arch/sparc/lib/NGcopy_from_user.S
+++ b/arch/sparc/lib/NGcopy_from_user.S
@@ -3,11 +3,11 @@
* Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
*/
-#define EX_LD(x) \
+#define EX_LD(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __ret_one_asi;\
+ .word 98b, y; \
.text; \
.align 4;
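(Illustrative note, not part of the patch: the EX_LD()/EX_ST() wrappers now emit the caller-supplied label into the __ex_table pair instead of the shared __ret_one_asi stub, so each faulting access resolves to its own precise fixup. A simplified C model of the lookup idea follows; the struct and function names are invented for the sketch and are not the kernel's exception-table format.)

#include <stddef.h>

/* One entry per "98:" site: the faulting instruction and its fixup. */
struct ex_pair {
	unsigned long insn;	/* address recorded by ".word 98b, ..." */
	unsigned long fixup;	/* per-site fixup routine, the new "y" argument */
};

unsigned long lookup_fixup(const struct ex_pair *tab, size_t n, unsigned long pc)
{
	for (size_t i = 0; i < n; i++)
		if (tab[i].insn == pc)
			return tab[i].fixup;	/* trap handler continues here */
	return 0;	/* no match: not a whitelisted user access */
}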
diff --git a/arch/sparc/lib/NGcopy_to_user.S b/arch/sparc/lib/NGcopy_to_user.S
index ff630dcb273c..5c358afd464e 100644
--- a/arch/sparc/lib/NGcopy_to_user.S
+++ b/arch/sparc/lib/NGcopy_to_user.S
@@ -3,11 +3,11 @@
* Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
*/
-#define EX_ST(x) \
+#define EX_ST(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __ret_one_asi;\
+ .word 98b, y; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/NGmemcpy.S b/arch/sparc/lib/NGmemcpy.S
index 96a14caf6966..d88c4ed50a00 100644
--- a/arch/sparc/lib/NGmemcpy.S
+++ b/arch/sparc/lib/NGmemcpy.S
@@ -4,6 +4,7 @@
*/
#ifdef __KERNEL__
+#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/thread_info.h>
#define GLOBAL_SPARE %g7
@@ -27,15 +28,11 @@
#endif
#ifndef EX_LD
-#define EX_LD(x) x
+#define EX_LD(x,y) x
#endif
#ifndef EX_ST
-#define EX_ST(x) x
-#endif
-
-#ifndef EX_RETVAL
-#define EX_RETVAL(x) x
+#define EX_ST(x,y) x
#endif
#ifndef LOAD
@@ -79,6 +76,92 @@
.register %g3,#scratch
.text
+#ifndef EX_RETVAL
+#define EX_RETVAL(x) x
+__restore_asi:
+ ret
+ wr %g0, ASI_AIUS, %asi
+ restore
+ENTRY(NG_ret_i2_plus_i4_plus_1)
+ ba,pt %xcc, __restore_asi
+ add %i2, %i5, %i0
+ENDPROC(NG_ret_i2_plus_i4_plus_1)
+ENTRY(NG_ret_i2_plus_g1)
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1)
+ENTRY(NG_ret_i2_plus_g1_minus_8)
+ sub %g1, 8, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_minus_8)
+ENTRY(NG_ret_i2_plus_g1_minus_16)
+ sub %g1, 16, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_minus_16)
+ENTRY(NG_ret_i2_plus_g1_minus_24)
+ sub %g1, 24, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_minus_24)
+ENTRY(NG_ret_i2_plus_g1_minus_32)
+ sub %g1, 32, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_minus_32)
+ENTRY(NG_ret_i2_plus_g1_minus_40)
+ sub %g1, 40, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_minus_40)
+ENTRY(NG_ret_i2_plus_g1_minus_48)
+ sub %g1, 48, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_minus_48)
+ENTRY(NG_ret_i2_plus_g1_minus_56)
+ sub %g1, 56, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_minus_56)
+ENTRY(NG_ret_i2_plus_i4)
+ ba,pt %xcc, __restore_asi
+ add %i2, %i4, %i0
+ENDPROC(NG_ret_i2_plus_i4)
+ENTRY(NG_ret_i2_plus_i4_minus_8)
+ sub %i4, 8, %i4
+ ba,pt %xcc, __restore_asi
+ add %i2, %i4, %i0
+ENDPROC(NG_ret_i2_plus_i4_minus_8)
+ENTRY(NG_ret_i2_plus_8)
+ ba,pt %xcc, __restore_asi
+ add %i2, 8, %i0
+ENDPROC(NG_ret_i2_plus_8)
+ENTRY(NG_ret_i2_plus_4)
+ ba,pt %xcc, __restore_asi
+ add %i2, 4, %i0
+ENDPROC(NG_ret_i2_plus_4)
+ENTRY(NG_ret_i2_plus_1)
+ ba,pt %xcc, __restore_asi
+ add %i2, 1, %i0
+ENDPROC(NG_ret_i2_plus_1)
+ENTRY(NG_ret_i2_plus_g1_plus_1)
+ add %g1, 1, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_plus_1)
+ENTRY(NG_ret_i2)
+ ba,pt %xcc, __restore_asi
+ mov %i2, %i0
+ENDPROC(NG_ret_i2)
+ENTRY(NG_ret_i2_and_7_plus_i4)
+ and %i2, 7, %i2
+ ba,pt %xcc, __restore_asi
+ add %i2, %i4, %i0
+ENDPROC(NG_ret_i2_and_7_plus_i4)
+#endif
+
.align 64
.globl FUNC_NAME
@@ -126,8 +209,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
sub %g0, %i4, %i4 ! bytes to align dst
sub %i2, %i4, %i2
1: subcc %i4, 1, %i4
- EX_LD(LOAD(ldub, %i1, %g1))
- EX_ST(STORE(stb, %g1, %o0))
+ EX_LD(LOAD(ldub, %i1, %g1), NG_ret_i2_plus_i4_plus_1)
+ EX_ST(STORE(stb, %g1, %o0), NG_ret_i2_plus_i4_plus_1)
add %i1, 1, %i1
bne,pt %XCC, 1b
add %o0, 1, %o0
@@ -160,7 +243,7 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
and %i4, 0x7, GLOBAL_SPARE
sll GLOBAL_SPARE, 3, GLOBAL_SPARE
mov 64, %i5
- EX_LD(LOAD_TWIN(%i1, %g2, %g3))
+ EX_LD(LOAD_TWIN(%i1, %g2, %g3), NG_ret_i2_plus_g1)
sub %i5, GLOBAL_SPARE, %i5
mov 16, %o4
mov 32, %o5
@@ -178,31 +261,31 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
srlx WORD3, PRE_SHIFT, TMP; \
or WORD2, TMP, WORD2;
-8: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3))
+8: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3), NG_ret_i2_plus_g1)
MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1)
LOAD(prefetch, %i1 + %i3, #one_read)
- EX_ST(STORE_INIT(%g2, %o0 + 0x00))
- EX_ST(STORE_INIT(%g3, %o0 + 0x08))
+ EX_ST(STORE_INIT(%g2, %o0 + 0x00), NG_ret_i2_plus_g1)
+ EX_ST(STORE_INIT(%g3, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8)
- EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3))
+ EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3), NG_ret_i2_plus_g1_minus_16)
MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1)
- EX_ST(STORE_INIT(%o2, %o0 + 0x10))
- EX_ST(STORE_INIT(%o3, %o0 + 0x18))
+ EX_ST(STORE_INIT(%o2, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16)
+ EX_ST(STORE_INIT(%o3, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24)
- EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+ EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1_minus_32)
MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1)
- EX_ST(STORE_INIT(%g2, %o0 + 0x20))
- EX_ST(STORE_INIT(%g3, %o0 + 0x28))
+ EX_ST(STORE_INIT(%g2, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32)
+ EX_ST(STORE_INIT(%g3, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40)
- EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3))
+ EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3), NG_ret_i2_plus_g1_minus_48)
add %i1, 64, %i1
MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1)
- EX_ST(STORE_INIT(%o2, %o0 + 0x30))
- EX_ST(STORE_INIT(%o3, %o0 + 0x38))
+ EX_ST(STORE_INIT(%o2, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48)
+ EX_ST(STORE_INIT(%o3, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56)
subcc %g1, 64, %g1
bne,pt %XCC, 8b
@@ -211,31 +294,31 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
ba,pt %XCC, 60f
add %i1, %i4, %i1
-9: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3))
+9: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3), NG_ret_i2_plus_g1)
MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1)
LOAD(prefetch, %i1 + %i3, #one_read)
- EX_ST(STORE_INIT(%g3, %o0 + 0x00))
- EX_ST(STORE_INIT(%o2, %o0 + 0x08))
+ EX_ST(STORE_INIT(%g3, %o0 + 0x00), NG_ret_i2_plus_g1)
+ EX_ST(STORE_INIT(%o2, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8)
- EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3))
+ EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3), NG_ret_i2_plus_g1_minus_16)
MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1)
- EX_ST(STORE_INIT(%o3, %o0 + 0x10))
- EX_ST(STORE_INIT(%g2, %o0 + 0x18))
+ EX_ST(STORE_INIT(%o3, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16)
+ EX_ST(STORE_INIT(%g2, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24)
- EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+ EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1_minus_32)
MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1)
- EX_ST(STORE_INIT(%g3, %o0 + 0x20))
- EX_ST(STORE_INIT(%o2, %o0 + 0x28))
+ EX_ST(STORE_INIT(%g3, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32)
+ EX_ST(STORE_INIT(%o2, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40)
- EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3))
+ EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3), NG_ret_i2_plus_g1_minus_48)
add %i1, 64, %i1
MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1)
- EX_ST(STORE_INIT(%o3, %o0 + 0x30))
- EX_ST(STORE_INIT(%g2, %o0 + 0x38))
+ EX_ST(STORE_INIT(%o3, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48)
+ EX_ST(STORE_INIT(%g2, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56)
subcc %g1, 64, %g1
bne,pt %XCC, 9b
@@ -249,25 +332,25 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
* one twin load ahead, then add 8 back into source when
* we finish the loop.
*/
- EX_LD(LOAD_TWIN(%i1, %o4, %o5))
+ EX_LD(LOAD_TWIN(%i1, %o4, %o5), NG_ret_i2_plus_g1)
mov 16, %o7
mov 32, %g2
mov 48, %g3
mov 64, %o1
-1: EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+1: EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1)
LOAD(prefetch, %i1 + %o1, #one_read)
- EX_ST(STORE_INIT(%o5, %o0 + 0x00)) ! initializes cache line
- EX_ST(STORE_INIT(%o2, %o0 + 0x08))
- EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5))
- EX_ST(STORE_INIT(%o3, %o0 + 0x10))
- EX_ST(STORE_INIT(%o4, %o0 + 0x18))
- EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3))
- EX_ST(STORE_INIT(%o5, %o0 + 0x20))
- EX_ST(STORE_INIT(%o2, %o0 + 0x28))
- EX_LD(LOAD_TWIN(%i1 + %o1, %o4, %o5))
+ EX_ST(STORE_INIT(%o5, %o0 + 0x00), NG_ret_i2_plus_g1) ! initializes cache line
+ EX_ST(STORE_INIT(%o2, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8)
+ EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5), NG_ret_i2_plus_g1_minus_16)
+ EX_ST(STORE_INIT(%o3, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16)
+ EX_ST(STORE_INIT(%o4, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24)
+ EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3), NG_ret_i2_plus_g1_minus_32)
+ EX_ST(STORE_INIT(%o5, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32)
+ EX_ST(STORE_INIT(%o2, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40)
+ EX_LD(LOAD_TWIN(%i1 + %o1, %o4, %o5), NG_ret_i2_plus_g1_minus_48)
add %i1, 64, %i1
- EX_ST(STORE_INIT(%o3, %o0 + 0x30))
- EX_ST(STORE_INIT(%o4, %o0 + 0x38))
+ EX_ST(STORE_INIT(%o3, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48)
+ EX_ST(STORE_INIT(%o4, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56)
subcc %g1, 64, %g1
bne,pt %XCC, 1b
add %o0, 64, %o0
@@ -282,20 +365,20 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
mov 32, %g2
mov 48, %g3
mov 64, %o1
-1: EX_LD(LOAD_TWIN(%i1 + %g0, %o4, %o5))
- EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+1: EX_LD(LOAD_TWIN(%i1 + %g0, %o4, %o5), NG_ret_i2_plus_g1)
+ EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1)
LOAD(prefetch, %i1 + %o1, #one_read)
- EX_ST(STORE_INIT(%o4, %o0 + 0x00)) ! initializes cache line
- EX_ST(STORE_INIT(%o5, %o0 + 0x08))
- EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5))
- EX_ST(STORE_INIT(%o2, %o0 + 0x10))
- EX_ST(STORE_INIT(%o3, %o0 + 0x18))
- EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3))
+ EX_ST(STORE_INIT(%o4, %o0 + 0x00), NG_ret_i2_plus_g1) ! initializes cache line
+ EX_ST(STORE_INIT(%o5, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8)
+ EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5), NG_ret_i2_plus_g1_minus_16)
+ EX_ST(STORE_INIT(%o2, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16)
+ EX_ST(STORE_INIT(%o3, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24)
+ EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3), NG_ret_i2_plus_g1_minus_32)
add %i1, 64, %i1
- EX_ST(STORE_INIT(%o4, %o0 + 0x20))
- EX_ST(STORE_INIT(%o5, %o0 + 0x28))
- EX_ST(STORE_INIT(%o2, %o0 + 0x30))
- EX_ST(STORE_INIT(%o3, %o0 + 0x38))
+ EX_ST(STORE_INIT(%o4, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32)
+ EX_ST(STORE_INIT(%o5, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40)
+ EX_ST(STORE_INIT(%o2, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48)
+ EX_ST(STORE_INIT(%o3, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56)
subcc %g1, 64, %g1
bne,pt %XCC, 1b
add %o0, 64, %o0
@@ -321,28 +404,28 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
andn %i2, 0xf, %i4
and %i2, 0xf, %i2
1: subcc %i4, 0x10, %i4
- EX_LD(LOAD(ldx, %i1, %o4))
+ EX_LD(LOAD(ldx, %i1, %o4), NG_ret_i2_plus_i4)
add %i1, 0x08, %i1
- EX_LD(LOAD(ldx, %i1, %g1))
+ EX_LD(LOAD(ldx, %i1, %g1), NG_ret_i2_plus_i4)
sub %i1, 0x08, %i1
- EX_ST(STORE(stx, %o4, %i1 + %i3))
+ EX_ST(STORE(stx, %o4, %i1 + %i3), NG_ret_i2_plus_i4)
add %i1, 0x8, %i1
- EX_ST(STORE(stx, %g1, %i1 + %i3))
+ EX_ST(STORE(stx, %g1, %i1 + %i3), NG_ret_i2_plus_i4_minus_8)
bgu,pt %XCC, 1b
add %i1, 0x8, %i1
73: andcc %i2, 0x8, %g0
be,pt %XCC, 1f
nop
sub %i2, 0x8, %i2
- EX_LD(LOAD(ldx, %i1, %o4))
- EX_ST(STORE(stx, %o4, %i1 + %i3))
+ EX_LD(LOAD(ldx, %i1, %o4), NG_ret_i2_plus_8)
+ EX_ST(STORE(stx, %o4, %i1 + %i3), NG_ret_i2_plus_8)
add %i1, 0x8, %i1
1: andcc %i2, 0x4, %g0
be,pt %XCC, 1f
nop
sub %i2, 0x4, %i2
- EX_LD(LOAD(lduw, %i1, %i5))
- EX_ST(STORE(stw, %i5, %i1 + %i3))
+ EX_LD(LOAD(lduw, %i1, %i5), NG_ret_i2_plus_4)
+ EX_ST(STORE(stw, %i5, %i1 + %i3), NG_ret_i2_plus_4)
add %i1, 0x4, %i1
1: cmp %i2, 0
be,pt %XCC, 85f
@@ -358,8 +441,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
sub %i2, %g1, %i2
1: subcc %g1, 1, %g1
- EX_LD(LOAD(ldub, %i1, %i5))
- EX_ST(STORE(stb, %i5, %i1 + %i3))
+ EX_LD(LOAD(ldub, %i1, %i5), NG_ret_i2_plus_g1_plus_1)
+ EX_ST(STORE(stb, %i5, %i1 + %i3), NG_ret_i2_plus_g1_plus_1)
bgu,pt %icc, 1b
add %i1, 1, %i1
@@ -375,16 +458,16 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
8: mov 64, %i3
andn %i1, 0x7, %i1
- EX_LD(LOAD(ldx, %i1, %g2))
+ EX_LD(LOAD(ldx, %i1, %g2), NG_ret_i2)
sub %i3, %g1, %i3
andn %i2, 0x7, %i4
sllx %g2, %g1, %g2
1: add %i1, 0x8, %i1
- EX_LD(LOAD(ldx, %i1, %g3))
+ EX_LD(LOAD(ldx, %i1, %g3), NG_ret_i2_and_7_plus_i4)
subcc %i4, 0x8, %i4
srlx %g3, %i3, %i5
or %i5, %g2, %i5
- EX_ST(STORE(stx, %i5, %o0))
+ EX_ST(STORE(stx, %i5, %o0), NG_ret_i2_and_7_plus_i4)
add %o0, 0x8, %o0
bgu,pt %icc, 1b
sllx %g3, %g1, %g2
@@ -404,8 +487,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
1:
subcc %i2, 4, %i2
- EX_LD(LOAD(lduw, %i1, %g1))
- EX_ST(STORE(stw, %g1, %i1 + %i3))
+ EX_LD(LOAD(lduw, %i1, %g1), NG_ret_i2_plus_4)
+ EX_ST(STORE(stw, %g1, %i1 + %i3), NG_ret_i2_plus_4)
bgu,pt %XCC, 1b
add %i1, 4, %i1
@@ -415,8 +498,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
.align 32
90:
subcc %i2, 1, %i2
- EX_LD(LOAD(ldub, %i1, %g1))
- EX_ST(STORE(stb, %g1, %i1 + %i3))
+ EX_LD(LOAD(ldub, %i1, %g1), NG_ret_i2_plus_1)
+ EX_ST(STORE(stb, %g1, %i1 + %i3), NG_ret_i2_plus_1)
bgu,pt %XCC, 90b
add %i1, 1, %i1
ret
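(Illustrative note, not part of the patch: inside NGmemcpy's 64-byte unrolled blocks the stores at offsets 0x00, 0x08, ..., 0x38 are tagged with fixups whose adjustment shrinks by 8 per slot -- NG_ret_i2_plus_g1, ..._minus_8, through ..._minus_56 -- because each retired 8-byte store is no longer outstanding. That relationship as a sketch, with an invented helper name.)

/* Sketch only: residual bytes reported when the store at byte offset
 * "off" (0x00..0x38) of a 64-byte block faults; the earlier stores in
 * the block have already landed, so they are subtracted out.
 */
unsigned long ng_block_residue(unsigned long i2, unsigned long g1,
			       unsigned long off)
{
	return i2 + g1 - off;	/* off == 0x08 -> NG_ret_i2_plus_g1_minus_8 */
}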
diff --git a/arch/sparc/lib/U1copy_from_user.S b/arch/sparc/lib/U1copy_from_user.S
index ecc5692fa2b4..bb6ff73229e3 100644
--- a/arch/sparc/lib/U1copy_from_user.S
+++ b/arch/sparc/lib/U1copy_from_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/
-#define EX_LD(x) \
+#define EX_LD(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one; \
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_LD_FP(x) \
+#define EX_LD_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_fp;\
+ .word 98b, y; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/U1copy_to_user.S b/arch/sparc/lib/U1copy_to_user.S
index 9eea392e44d4..ed92ce739558 100644
--- a/arch/sparc/lib/U1copy_to_user.S
+++ b/arch/sparc/lib/U1copy_to_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/
-#define EX_ST(x) \
+#define EX_ST(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one; \
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_ST_FP(x) \
+#define EX_ST_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_fp;\
+ .word 98b, y; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/U1memcpy.S b/arch/sparc/lib/U1memcpy.S
index 97e1b211090c..4f0d50b33a72 100644
--- a/arch/sparc/lib/U1memcpy.S
+++ b/arch/sparc/lib/U1memcpy.S
@@ -5,6 +5,7 @@
*/
#ifdef __KERNEL__
+#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#include <asm/export.h>
@@ -24,21 +25,17 @@
#endif
#ifndef EX_LD
-#define EX_LD(x) x
+#define EX_LD(x,y) x
#endif
#ifndef EX_LD_FP
-#define EX_LD_FP(x) x
+#define EX_LD_FP(x,y) x
#endif
#ifndef EX_ST
-#define EX_ST(x) x
+#define EX_ST(x,y) x
#endif
#ifndef EX_ST_FP
-#define EX_ST_FP(x) x
-#endif
-
-#ifndef EX_RETVAL
-#define EX_RETVAL(x) x
+#define EX_ST_FP(x,y) x
#endif
#ifndef LOAD
@@ -79,53 +76,169 @@
faligndata %f7, %f8, %f60; \
faligndata %f8, %f9, %f62;
-#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt) \
- EX_LD_FP(LOAD_BLK(%src, %fdest)); \
- EX_ST_FP(STORE_BLK(%fsrc, %dest)); \
- add %src, 0x40, %src; \
- subcc %len, 0x40, %len; \
- be,pn %xcc, jmptgt; \
- add %dest, 0x40, %dest; \
-
-#define LOOP_CHUNK1(src, dest, len, branch_dest) \
- MAIN_LOOP_CHUNK(src, dest, f0, f48, len, branch_dest)
-#define LOOP_CHUNK2(src, dest, len, branch_dest) \
- MAIN_LOOP_CHUNK(src, dest, f16, f48, len, branch_dest)
-#define LOOP_CHUNK3(src, dest, len, branch_dest) \
- MAIN_LOOP_CHUNK(src, dest, f32, f48, len, branch_dest)
+#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, jmptgt) \
+ EX_LD_FP(LOAD_BLK(%src, %fdest), U1_gs_80_fp); \
+ EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_80_fp); \
+ add %src, 0x40, %src; \
+ subcc %GLOBAL_SPARE, 0x40, %GLOBAL_SPARE; \
+ be,pn %xcc, jmptgt; \
+ add %dest, 0x40, %dest; \
+
+#define LOOP_CHUNK1(src, dest, branch_dest) \
+ MAIN_LOOP_CHUNK(src, dest, f0, f48, branch_dest)
+#define LOOP_CHUNK2(src, dest, branch_dest) \
+ MAIN_LOOP_CHUNK(src, dest, f16, f48, branch_dest)
+#define LOOP_CHUNK3(src, dest, branch_dest) \
+ MAIN_LOOP_CHUNK(src, dest, f32, f48, branch_dest)
#define DO_SYNC membar #Sync;
#define STORE_SYNC(dest, fsrc) \
- EX_ST_FP(STORE_BLK(%fsrc, %dest)); \
+ EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_80_fp); \
add %dest, 0x40, %dest; \
DO_SYNC
#define STORE_JUMP(dest, fsrc, target) \
- EX_ST_FP(STORE_BLK(%fsrc, %dest)); \
+ EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_40_fp); \
add %dest, 0x40, %dest; \
ba,pt %xcc, target; \
nop;
-#define FINISH_VISCHUNK(dest, f0, f1, left) \
- subcc %left, 8, %left;\
- bl,pn %xcc, 95f; \
- faligndata %f0, %f1, %f48; \
- EX_ST_FP(STORE(std, %f48, %dest)); \
+#define FINISH_VISCHUNK(dest, f0, f1) \
+ subcc %g3, 8, %g3; \
+ bl,pn %xcc, 95f; \
+ faligndata %f0, %f1, %f48; \
+ EX_ST_FP(STORE(std, %f48, %dest), U1_g3_8_fp); \
add %dest, 8, %dest;
-#define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \
- subcc %left, 8, %left; \
- bl,pn %xcc, 95f; \
+#define UNEVEN_VISCHUNK_LAST(dest, f0, f1) \
+ subcc %g3, 8, %g3; \
+ bl,pn %xcc, 95f; \
fsrc2 %f0, %f1;
-#define UNEVEN_VISCHUNK(dest, f0, f1, left) \
- UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \
+#define UNEVEN_VISCHUNK(dest, f0, f1) \
+ UNEVEN_VISCHUNK_LAST(dest, f0, f1) \
ba,a,pt %xcc, 93f;
.register %g2,#scratch
.register %g3,#scratch
.text
+#ifndef EX_RETVAL
+#define EX_RETVAL(x) x
+ENTRY(U1_g1_1_fp)
+ VISExitHalf
+ add %g1, 1, %g1
+ add %g1, %g2, %g1
+ retl
+ add %g1, %o2, %o0
+ENDPROC(U1_g1_1_fp)
+ENTRY(U1_g2_0_fp)
+ VISExitHalf
+ retl
+ add %g2, %o2, %o0
+ENDPROC(U1_g2_0_fp)
+ENTRY(U1_g2_8_fp)
+ VISExitHalf
+ add %g2, 8, %g2
+ retl
+ add %g2, %o2, %o0
+ENDPROC(U1_g2_8_fp)
+ENTRY(U1_gs_0_fp)
+ VISExitHalf
+ add %GLOBAL_SPARE, %g3, %o0
+ retl
+ add %o0, %o2, %o0
+ENDPROC(U1_gs_0_fp)
+ENTRY(U1_gs_80_fp)
+ VISExitHalf
+ add %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE
+ add %GLOBAL_SPARE, %g3, %o0
+ retl
+ add %o0, %o2, %o0
+ENDPROC(U1_gs_80_fp)
+ENTRY(U1_gs_40_fp)
+ VISExitHalf
+ add %GLOBAL_SPARE, 0x40, %GLOBAL_SPARE
+ add %GLOBAL_SPARE, %g3, %o0
+ retl
+ add %o0, %o2, %o0
+ENDPROC(U1_gs_40_fp)
+ENTRY(U1_g3_0_fp)
+ VISExitHalf
+ retl
+ add %g3, %o2, %o0
+ENDPROC(U1_g3_0_fp)
+ENTRY(U1_g3_8_fp)
+ VISExitHalf
+ add %g3, 8, %g3
+ retl
+ add %g3, %o2, %o0
+ENDPROC(U1_g3_8_fp)
+ENTRY(U1_o2_0_fp)
+ VISExitHalf
+ retl
+ mov %o2, %o0
+ENDPROC(U1_o2_0_fp)
+ENTRY(U1_o2_1_fp)
+ VISExitHalf
+ retl
+ add %o2, 1, %o0
+ENDPROC(U1_o2_1_fp)
+ENTRY(U1_gs_0)
+ VISExitHalf
+ retl
+ add %GLOBAL_SPARE, %o2, %o0
+ENDPROC(U1_gs_0)
+ENTRY(U1_gs_8)
+ VISExitHalf
+ add %GLOBAL_SPARE, %o2, %GLOBAL_SPARE
+ retl
+ add %GLOBAL_SPARE, 0x8, %o0
+ENDPROC(U1_gs_8)
+ENTRY(U1_gs_10)
+ VISExitHalf
+ add %GLOBAL_SPARE, %o2, %GLOBAL_SPARE
+ retl
+ add %GLOBAL_SPARE, 0x10, %o0
+ENDPROC(U1_gs_10)
+ENTRY(U1_o2_0)
+ retl
+ mov %o2, %o0
+ENDPROC(U1_o2_0)
+ENTRY(U1_o2_8)
+ retl
+ add %o2, 8, %o0
+ENDPROC(U1_o2_8)
+ENTRY(U1_o2_4)
+ retl
+ add %o2, 4, %o0
+ENDPROC(U1_o2_4)
+ENTRY(U1_o2_1)
+ retl
+ add %o2, 1, %o0
+ENDPROC(U1_o2_1)
+ENTRY(U1_g1_0)
+ retl
+ add %g1, %o2, %o0
+ENDPROC(U1_g1_0)
+ENTRY(U1_g1_1)
+ add %g1, 1, %g1
+ retl
+ add %g1, %o2, %o0
+ENDPROC(U1_g1_1)
+ENTRY(U1_gs_0_o2_adj)
+ and %o2, 7, %o2
+ retl
+ add %GLOBAL_SPARE, %o2, %o0
+ENDPROC(U1_gs_0_o2_adj)
+ENTRY(U1_gs_8_o2_adj)
+ and %o2, 7, %o2
+ add %GLOBAL_SPARE, 8, %GLOBAL_SPARE
+ retl
+ add %GLOBAL_SPARE, %o2, %o0
+ENDPROC(U1_gs_8_o2_adj)
+#endif
+
.align 64
.globl FUNC_NAME
@@ -167,8 +280,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
and %g2, 0x38, %g2
1: subcc %g1, 0x1, %g1
- EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3))
- EX_ST_FP(STORE(stb, %o3, %o1 + %GLOBAL_SPARE))
+ EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3), U1_g1_1_fp)
+ EX_ST_FP(STORE(stb, %o3, %o1 + %GLOBAL_SPARE), U1_g1_1_fp)
bgu,pt %XCC, 1b
add %o1, 0x1, %o1
@@ -179,20 +292,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
be,pt %icc, 3f
alignaddr %o1, %g0, %o1
- EX_LD_FP(LOAD(ldd, %o1, %f4))
-1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6))
+ EX_LD_FP(LOAD(ldd, %o1, %f4), U1_g2_0_fp)
+1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6), U1_g2_0_fp)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f4, %f6, %f0
- EX_ST_FP(STORE(std, %f0, %o0))
+ EX_ST_FP(STORE(std, %f0, %o0), U1_g2_8_fp)
be,pn %icc, 3f
add %o0, 0x8, %o0
- EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4), U1_g2_0_fp)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f6, %f4, %f0
- EX_ST_FP(STORE(std, %f0, %o0))
+ EX_ST_FP(STORE(std, %f0, %o0), U1_g2_8_fp)
bne,pt %icc, 1b
add %o0, 0x8, %o0
@@ -215,13 +328,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
add %g1, %GLOBAL_SPARE, %g1
subcc %o2, %g3, %o2
- EX_LD_FP(LOAD_BLK(%o1, %f0))
+ EX_LD_FP(LOAD_BLK(%o1, %f0), U1_gs_0_fp)
add %o1, 0x40, %o1
add %g1, %g3, %g1
- EX_LD_FP(LOAD_BLK(%o1, %f16))
+ EX_LD_FP(LOAD_BLK(%o1, %f16), U1_gs_0_fp)
add %o1, 0x40, %o1
sub %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE
- EX_LD_FP(LOAD_BLK(%o1, %f32))
+ EX_LD_FP(LOAD_BLK(%o1, %f32), U1_gs_80_fp)
add %o1, 0x40, %o1
/* There are 8 instances of the unrolled loop,
@@ -241,11 +354,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
.align 64
1: FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f0, %f2, %f48
1: FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
@@ -262,11 +375,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
STORE_JUMP(o0, f48, 56f)
1: FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f2, %f4, %f48
1: FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
@@ -283,11 +396,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
STORE_JUMP(o0, f48, 57f)
1: FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f4, %f6, %f48
1: FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
@@ -304,11 +417,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
STORE_JUMP(o0, f48, 58f)
1: FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f6, %f8, %f48
1: FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
@@ -325,11 +438,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
STORE_JUMP(o0, f48, 59f)
1: FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f8, %f10, %f48
1: FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
@@ -346,11 +459,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
STORE_JUMP(o0, f48, 60f)
1: FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f10, %f12, %f48
1: FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
@@ -367,11 +480,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
STORE_JUMP(o0, f48, 61f)
1: FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f12, %f14, %f48
1: FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
@@ -388,11 +501,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
STORE_JUMP(o0, f48, 62f)
1: FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f14, %f16, %f48
1: FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
@@ -408,53 +521,53 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
STORE_JUMP(o0, f48, 63f)
-40: FINISH_VISCHUNK(o0, f0, f2, g3)
-41: FINISH_VISCHUNK(o0, f2, f4, g3)
-42: FINISH_VISCHUNK(o0, f4, f6, g3)
-43: FINISH_VISCHUNK(o0, f6, f8, g3)
-44: FINISH_VISCHUNK(o0, f8, f10, g3)
-45: FINISH_VISCHUNK(o0, f10, f12, g3)
-46: FINISH_VISCHUNK(o0, f12, f14, g3)
-47: UNEVEN_VISCHUNK(o0, f14, f0, g3)
-48: FINISH_VISCHUNK(o0, f16, f18, g3)
-49: FINISH_VISCHUNK(o0, f18, f20, g3)
-50: FINISH_VISCHUNK(o0, f20, f22, g3)
-51: FINISH_VISCHUNK(o0, f22, f24, g3)
-52: FINISH_VISCHUNK(o0, f24, f26, g3)
-53: FINISH_VISCHUNK(o0, f26, f28, g3)
-54: FINISH_VISCHUNK(o0, f28, f30, g3)
-55: UNEVEN_VISCHUNK(o0, f30, f0, g3)
-56: FINISH_VISCHUNK(o0, f32, f34, g3)
-57: FINISH_VISCHUNK(o0, f34, f36, g3)
-58: FINISH_VISCHUNK(o0, f36, f38, g3)
-59: FINISH_VISCHUNK(o0, f38, f40, g3)
-60: FINISH_VISCHUNK(o0, f40, f42, g3)
-61: FINISH_VISCHUNK(o0, f42, f44, g3)
-62: FINISH_VISCHUNK(o0, f44, f46, g3)
-63: UNEVEN_VISCHUNK_LAST(o0, f46, f0, g3)
-
-93: EX_LD_FP(LOAD(ldd, %o1, %f2))
+40: FINISH_VISCHUNK(o0, f0, f2)
+41: FINISH_VISCHUNK(o0, f2, f4)
+42: FINISH_VISCHUNK(o0, f4, f6)
+43: FINISH_VISCHUNK(o0, f6, f8)
+44: FINISH_VISCHUNK(o0, f8, f10)
+45: FINISH_VISCHUNK(o0, f10, f12)
+46: FINISH_VISCHUNK(o0, f12, f14)
+47: UNEVEN_VISCHUNK(o0, f14, f0)
+48: FINISH_VISCHUNK(o0, f16, f18)
+49: FINISH_VISCHUNK(o0, f18, f20)
+50: FINISH_VISCHUNK(o0, f20, f22)
+51: FINISH_VISCHUNK(o0, f22, f24)
+52: FINISH_VISCHUNK(o0, f24, f26)
+53: FINISH_VISCHUNK(o0, f26, f28)
+54: FINISH_VISCHUNK(o0, f28, f30)
+55: UNEVEN_VISCHUNK(o0, f30, f0)
+56: FINISH_VISCHUNK(o0, f32, f34)
+57: FINISH_VISCHUNK(o0, f34, f36)
+58: FINISH_VISCHUNK(o0, f36, f38)
+59: FINISH_VISCHUNK(o0, f38, f40)
+60: FINISH_VISCHUNK(o0, f40, f42)
+61: FINISH_VISCHUNK(o0, f42, f44)
+62: FINISH_VISCHUNK(o0, f44, f46)
+63: UNEVEN_VISCHUNK_LAST(o0, f46, f0)
+
+93: EX_LD_FP(LOAD(ldd, %o1, %f2), U1_g3_0_fp)
add %o1, 8, %o1
subcc %g3, 8, %g3
faligndata %f0, %f2, %f8
- EX_ST_FP(STORE(std, %f8, %o0))
+ EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp)
bl,pn %xcc, 95f
add %o0, 8, %o0
- EX_LD_FP(LOAD(ldd, %o1, %f0))
+ EX_LD_FP(LOAD(ldd, %o1, %f0), U1_g3_0_fp)
add %o1, 8, %o1
subcc %g3, 8, %g3
faligndata %f2, %f0, %f8
- EX_ST_FP(STORE(std, %f8, %o0))
+ EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp)
bge,pt %xcc, 93b
add %o0, 8, %o0
95: brz,pt %o2, 2f
mov %g1, %o1
-1: EX_LD_FP(LOAD(ldub, %o1, %o3))
+1: EX_LD_FP(LOAD(ldub, %o1, %o3), U1_o2_0_fp)
add %o1, 1, %o1
subcc %o2, 1, %o2
- EX_ST_FP(STORE(stb, %o3, %o0))
+ EX_ST_FP(STORE(stb, %o3, %o0), U1_o2_1_fp)
bne,pt %xcc, 1b
add %o0, 1, %o0
@@ -470,27 +583,27 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
72: andn %o2, 0xf, %GLOBAL_SPARE
and %o2, 0xf, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x00, %o5))
- EX_LD(LOAD(ldx, %o1 + 0x08, %g1))
+1: EX_LD(LOAD(ldx, %o1 + 0x00, %o5), U1_gs_0)
+ EX_LD(LOAD(ldx, %o1 + 0x08, %g1), U1_gs_0)
subcc %GLOBAL_SPARE, 0x10, %GLOBAL_SPARE
- EX_ST(STORE(stx, %o5, %o1 + %o3))
+ EX_ST(STORE(stx, %o5, %o1 + %o3), U1_gs_10)
add %o1, 0x8, %o1
- EX_ST(STORE(stx, %g1, %o1 + %o3))
+ EX_ST(STORE(stx, %g1, %o1 + %o3), U1_gs_8)
bgu,pt %XCC, 1b
add %o1, 0x8, %o1
73: andcc %o2, 0x8, %g0
be,pt %XCC, 1f
nop
- EX_LD(LOAD(ldx, %o1, %o5))
+ EX_LD(LOAD(ldx, %o1, %o5), U1_o2_0)
sub %o2, 0x8, %o2
- EX_ST(STORE(stx, %o5, %o1 + %o3))
+ EX_ST(STORE(stx, %o5, %o1 + %o3), U1_o2_8)
add %o1, 0x8, %o1
1: andcc %o2, 0x4, %g0
be,pt %XCC, 1f
nop
- EX_LD(LOAD(lduw, %o1, %o5))
+ EX_LD(LOAD(lduw, %o1, %o5), U1_o2_0)
sub %o2, 0x4, %o2
- EX_ST(STORE(stw, %o5, %o1 + %o3))
+ EX_ST(STORE(stw, %o5, %o1 + %o3), U1_o2_4)
add %o1, 0x4, %o1
1: cmp %o2, 0
be,pt %XCC, 85f
@@ -504,9 +617,9 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %g0, %g1, %g1
sub %o2, %g1, %o2
-1: EX_LD(LOAD(ldub, %o1, %o5))
+1: EX_LD(LOAD(ldub, %o1, %o5), U1_g1_0)
subcc %g1, 1, %g1
- EX_ST(STORE(stb, %o5, %o1 + %o3))
+ EX_ST(STORE(stb, %o5, %o1 + %o3), U1_g1_1)
bgu,pt %icc, 1b
add %o1, 1, %o1
@@ -522,16 +635,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
8: mov 64, %o3
andn %o1, 0x7, %o1
- EX_LD(LOAD(ldx, %o1, %g2))
+ EX_LD(LOAD(ldx, %o1, %g2), U1_o2_0)
sub %o3, %g1, %o3
andn %o2, 0x7, %GLOBAL_SPARE
sllx %g2, %g1, %g2
-1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3))
+1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3), U1_gs_0_o2_adj)
subcc %GLOBAL_SPARE, 0x8, %GLOBAL_SPARE
add %o1, 0x8, %o1
srlx %g3, %o3, %o5
or %o5, %g2, %o5
- EX_ST(STORE(stx, %o5, %o0))
+ EX_ST(STORE(stx, %o5, %o0), U1_gs_8_o2_adj)
add %o0, 0x8, %o0
bgu,pt %icc, 1b
sllx %g3, %g1, %g2
@@ -549,9 +662,9 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
bne,pn %XCC, 90f
sub %o0, %o1, %o3
-1: EX_LD(LOAD(lduw, %o1, %g1))
+1: EX_LD(LOAD(lduw, %o1, %g1), U1_o2_0)
subcc %o2, 4, %o2
- EX_ST(STORE(stw, %g1, %o1 + %o3))
+ EX_ST(STORE(stw, %g1, %o1 + %o3), U1_o2_4)
bgu,pt %XCC, 1b
add %o1, 4, %o1
@@ -559,9 +672,9 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
mov EX_RETVAL(%o4), %o0
.align 32
-90: EX_LD(LOAD(ldub, %o1, %g1))
+90: EX_LD(LOAD(ldub, %o1, %g1), U1_o2_0)
subcc %o2, 1, %o2
- EX_ST(STORE(stb, %g1, %o1 + %o3))
+ EX_ST(STORE(stb, %g1, %o1 + %o3), U1_o2_1)
bgu,pt %XCC, 90b
add %o1, 1, %o1
retl
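(Illustrative note, not part of the patch: U1memcpy's new "_fp" fixups first run VISExitHalf to leave the VIS/FPU state the copy entered, and only then compute the residual, e.g. U1_gs_80_fp reports %GLOBAL_SPARE + 0x80 + %g3 + %o2. A C restatement of that arithmetic; the function name is invented for the sketch.)

/* Sketch only: bytes reported by U1_gs_80_fp after the FPU state has
 * been torn down -- the block counter plus the 0x80 bytes in flight,
 * the partial-block remainder (%g3) and the unconsumed tail (%o2).
 */
unsigned long u1_gs_80_residue(unsigned long global_spare, unsigned long g3,
			       unsigned long o2)
{
	return (global_spare + 0x80) + g3 + o2;
}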
diff --git a/arch/sparc/lib/U3copy_from_user.S b/arch/sparc/lib/U3copy_from_user.S
index 88ad73d86fe4..db73010a1af8 100644
--- a/arch/sparc/lib/U3copy_from_user.S
+++ b/arch/sparc/lib/U3copy_from_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/
-#define EX_LD(x) \
+#define EX_LD(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one; \
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_LD_FP(x) \
+#define EX_LD_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_fp;\
+ .word 98b, y##_fp; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/U3copy_to_user.S b/arch/sparc/lib/U3copy_to_user.S
index 845139d75537..c4ee858e352a 100644
--- a/arch/sparc/lib/U3copy_to_user.S
+++ b/arch/sparc/lib/U3copy_to_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/
-#define EX_ST(x) \
+#define EX_ST(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one; \
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_ST_FP(x) \
+#define EX_ST_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_fp;\
+ .word 98b, y##_fp; \
.text; \
.align 4;
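(Illustrative note, not part of the patch: unlike the U1 wrappers, U3copy_{from,to}_user paste an "_fp" suffix onto the supplied label with the preprocessor's ## operator, so one symbolic name per site in U3memcpy.S automatically selects the FPU-aware fixup in the user-copy builds. A generic, self-contained C illustration of that pasting; the demo macro and main() are not kernel code.)

#include <stdio.h>

#define PICK_FP(name)	name##_fp()	/* mirrors ".word 98b, y##_fp" */

void U3_retl_o2_fp(void)
{
	puts("fp-aware fixup selected");
}

int main(void)
{
	PICK_FP(U3_retl_o2);	/* expands to U3_retl_o2_fp() */
	return 0;
}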
diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S
index 491ee69e4995..54f98706b03b 100644
--- a/arch/sparc/lib/U3memcpy.S
+++ b/arch/sparc/lib/U3memcpy.S
@@ -4,6 +4,7 @@
*/
#ifdef __KERNEL__
+#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#define GLOBAL_SPARE %g7
@@ -22,21 +23,17 @@
#endif
#ifndef EX_LD
-#define EX_LD(x) x
+#define EX_LD(x,y) x
#endif
#ifndef EX_LD_FP
-#define EX_LD_FP(x) x
+#define EX_LD_FP(x,y) x
#endif
#ifndef EX_ST
-#define EX_ST(x) x
+#define EX_ST(x,y) x
#endif
#ifndef EX_ST_FP
-#define EX_ST_FP(x) x
-#endif
-
-#ifndef EX_RETVAL
-#define EX_RETVAL(x) x
+#define EX_ST_FP(x,y) x
#endif
#ifndef LOAD
@@ -77,6 +74,87 @@
*/
.text
+#ifndef EX_RETVAL
+#define EX_RETVAL(x) x
+__restore_fp:
+ VISExitHalf
+ retl
+ nop
+ENTRY(U3_retl_o2_plus_g2_plus_g1_plus_1_fp)
+ add %g1, 1, %g1
+ add %g2, %g1, %g2
+ ba,pt %xcc, __restore_fp
+ add %o2, %g2, %o0
+ENDPROC(U3_retl_o2_plus_g2_plus_g1_plus_1_fp)
+ENTRY(U3_retl_o2_plus_g2_fp)
+ ba,pt %xcc, __restore_fp
+ add %o2, %g2, %o0
+ENDPROC(U3_retl_o2_plus_g2_fp)
+ENTRY(U3_retl_o2_plus_g2_plus_8_fp)
+ add %g2, 8, %g2
+ ba,pt %xcc, __restore_fp
+ add %o2, %g2, %o0
+ENDPROC(U3_retl_o2_plus_g2_plus_8_fp)
+ENTRY(U3_retl_o2)
+ retl
+ mov %o2, %o0
+ENDPROC(U3_retl_o2)
+ENTRY(U3_retl_o2_plus_1)
+ retl
+ add %o2, 1, %o0
+ENDPROC(U3_retl_o2_plus_1)
+ENTRY(U3_retl_o2_plus_4)
+ retl
+ add %o2, 4, %o0
+ENDPROC(U3_retl_o2_plus_4)
+ENTRY(U3_retl_o2_plus_8)
+ retl
+ add %o2, 8, %o0
+ENDPROC(U3_retl_o2_plus_8)
+ENTRY(U3_retl_o2_plus_g1_plus_1)
+ add %g1, 1, %g1
+ retl
+ add %o2, %g1, %o0
+ENDPROC(U3_retl_o2_plus_g1_plus_1)
+ENTRY(U3_retl_o2_fp)
+ ba,pt %xcc, __restore_fp
+ mov %o2, %o0
+ENDPROC(U3_retl_o2_fp)
+ENTRY(U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
+ sll %o3, 6, %o3
+ add %o3, 0x80, %o3
+ ba,pt %xcc, __restore_fp
+ add %o2, %o3, %o0
+ENDPROC(U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
+ENTRY(U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
+ sll %o3, 6, %o3
+ add %o3, 0x40, %o3
+ ba,pt %xcc, __restore_fp
+ add %o2, %o3, %o0
+ENDPROC(U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
+ENTRY(U3_retl_o2_plus_GS_plus_0x10)
+ add GLOBAL_SPARE, 0x10, GLOBAL_SPARE
+ retl
+ add %o2, GLOBAL_SPARE, %o0
+ENDPROC(U3_retl_o2_plus_GS_plus_0x10)
+ENTRY(U3_retl_o2_plus_GS_plus_0x08)
+ add GLOBAL_SPARE, 0x08, GLOBAL_SPARE
+ retl
+ add %o2, GLOBAL_SPARE, %o0
+ENDPROC(U3_retl_o2_plus_GS_plus_0x08)
+ENTRY(U3_retl_o2_and_7_plus_GS)
+ and %o2, 7, %o2
+ retl
+ add %o2, GLOBAL_SPARE, %o2
+ENDPROC(U3_retl_o2_and_7_plus_GS)
+ENTRY(U3_retl_o2_and_7_plus_GS_plus_8)
+ add GLOBAL_SPARE, 8, GLOBAL_SPARE
+ and %o2, 7, %o2
+ retl
+ add %o2, GLOBAL_SPARE, %o2
+ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8)
+#endif
+
.align 64
/* The cheetah's flexible spine, oversized liver, enlarged heart,
@@ -126,8 +204,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
and %g2, 0x38, %g2
1: subcc %g1, 0x1, %g1
- EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3))
- EX_ST_FP(STORE(stb, %o3, %o1 + GLOBAL_SPARE))
+ EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3), U3_retl_o2_plus_g2_plus_g1_plus_1)
+ EX_ST_FP(STORE(stb, %o3, %o1 + GLOBAL_SPARE), U3_retl_o2_plus_g2_plus_g1_plus_1)
bgu,pt %XCC, 1b
add %o1, 0x1, %o1
@@ -138,20 +216,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
be,pt %icc, 3f
alignaddr %o1, %g0, %o1
- EX_LD_FP(LOAD(ldd, %o1, %f4))
-1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6))
+ EX_LD_FP(LOAD(ldd, %o1, %f4), U3_retl_o2_plus_g2)
+1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6), U3_retl_o2_plus_g2)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f4, %f6, %f0
- EX_ST_FP(STORE(std, %f0, %o0))
+ EX_ST_FP(STORE(std, %f0, %o0), U3_retl_o2_plus_g2_plus_8)
be,pn %icc, 3f
add %o0, 0x8, %o0
- EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4), U3_retl_o2_plus_g2)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f6, %f4, %f2
- EX_ST_FP(STORE(std, %f2, %o0))
+ EX_ST_FP(STORE(std, %f2, %o0), U3_retl_o2_plus_g2_plus_8)
bne,pt %icc, 1b
add %o0, 0x8, %o0
@@ -161,25 +239,25 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
LOAD(prefetch, %o1 + 0x080, #one_read)
LOAD(prefetch, %o1 + 0x0c0, #one_read)
LOAD(prefetch, %o1 + 0x100, #one_read)
- EX_LD_FP(LOAD(ldd, %o1 + 0x000, %f0))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x000, %f0), U3_retl_o2)
LOAD(prefetch, %o1 + 0x140, #one_read)
- EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2)
LOAD(prefetch, %o1 + 0x180, #one_read)
- EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2)
LOAD(prefetch, %o1 + 0x1c0, #one_read)
faligndata %f0, %f2, %f16
- EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2)
faligndata %f2, %f4, %f18
- EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2)
faligndata %f4, %f6, %f20
- EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2)
faligndata %f6, %f8, %f22
- EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2)
faligndata %f8, %f10, %f24
- EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2)
faligndata %f10, %f12, %f26
- EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2)
subcc GLOBAL_SPARE, 0x80, GLOBAL_SPARE
add %o1, 0x40, %o1
@@ -190,26 +268,26 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
.align 64
1:
- EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2_plus_o3_sll_6_plus_0x80)
faligndata %f12, %f14, %f28
- EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2_plus_o3_sll_6_plus_0x80)
faligndata %f14, %f0, %f30
- EX_ST_FP(STORE_BLK(%f16, %o0))
- EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6))
+ EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x80)
+ EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f0, %f2, %f16
add %o0, 0x40, %o0
- EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f2, %f4, %f18
- EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f4, %f6, %f20
- EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2_plus_o3_sll_6_plus_0x40)
subcc %o3, 0x01, %o3
faligndata %f6, %f8, %f22
- EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2_plus_o3_sll_6_plus_0x80)
faligndata %f8, %f10, %f24
- EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2_plus_o3_sll_6_plus_0x80)
LOAD(prefetch, %o1 + 0x1c0, #one_read)
faligndata %f10, %f12, %f26
bg,pt %XCC, 1b
@@ -217,29 +295,29 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
/* Finally we copy the last full 64-byte block. */
2:
- EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2_plus_o3_sll_6_plus_0x80)
faligndata %f12, %f14, %f28
- EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2_plus_o3_sll_6_plus_0x80)
faligndata %f14, %f0, %f30
- EX_ST_FP(STORE_BLK(%f16, %o0))
- EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6))
+ EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x80)
+ EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f0, %f2, %f16
- EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f2, %f4, %f18
- EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f4, %f6, %f20
- EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f6, %f8, %f22
- EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f8, %f10, %f24
cmp %g1, 0
be,pt %XCC, 1f
add %o0, 0x40, %o0
- EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2_plus_o3_sll_6_plus_0x40)
1: faligndata %f10, %f12, %f26
faligndata %f12, %f14, %f28
faligndata %f14, %f0, %f30
- EX_ST_FP(STORE_BLK(%f16, %o0))
+ EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x40)
add %o0, 0x40, %o0
add %o1, 0x40, %o1
membar #Sync
@@ -259,20 +337,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %o2, %g2, %o2
be,a,pt %XCC, 1f
- EX_LD_FP(LOAD(ldd, %o1 + 0x00, %f0))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x00, %f0), U3_retl_o2_plus_g2)
-1: EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f2))
+1: EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f2), U3_retl_o2_plus_g2)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f0, %f2, %f8
- EX_ST_FP(STORE(std, %f8, %o0))
+ EX_ST_FP(STORE(std, %f8, %o0), U3_retl_o2_plus_g2_plus_8)
be,pn %XCC, 2f
add %o0, 0x8, %o0
- EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f0))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f0), U3_retl_o2_plus_g2)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f2, %f0, %f8
- EX_ST_FP(STORE(std, %f8, %o0))
+ EX_ST_FP(STORE(std, %f8, %o0), U3_retl_o2_plus_g2_plus_8)
bne,pn %XCC, 1b
add %o0, 0x8, %o0
@@ -292,30 +370,33 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
andcc %o2, 0x8, %g0
be,pt %icc, 1f
nop
- EX_LD(LOAD(ldx, %o1, %o5))
- EX_ST(STORE(stx, %o5, %o1 + %o3))
+ EX_LD(LOAD(ldx, %o1, %o5), U3_retl_o2)
+ EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2)
add %o1, 0x8, %o1
+ sub %o2, 8, %o2
1: andcc %o2, 0x4, %g0
be,pt %icc, 1f
nop
- EX_LD(LOAD(lduw, %o1, %o5))
- EX_ST(STORE(stw, %o5, %o1 + %o3))
+ EX_LD(LOAD(lduw, %o1, %o5), U3_retl_o2)
+ EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2)
add %o1, 0x4, %o1
+ sub %o2, 4, %o2
1: andcc %o2, 0x2, %g0
be,pt %icc, 1f
nop
- EX_LD(LOAD(lduh, %o1, %o5))
- EX_ST(STORE(sth, %o5, %o1 + %o3))
+ EX_LD(LOAD(lduh, %o1, %o5), U3_retl_o2)
+ EX_ST(STORE(sth, %o5, %o1 + %o3), U3_retl_o2)
add %o1, 0x2, %o1
+ sub %o2, 2, %o2
1: andcc %o2, 0x1, %g0
be,pt %icc, 85f
nop
- EX_LD(LOAD(ldub, %o1, %o5))
+ EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2)
ba,pt %xcc, 85f
- EX_ST(STORE(stb, %o5, %o1 + %o3))
+ EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2)
.align 64
70: /* 16 < len <= 64 */
@@ -326,26 +407,26 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
andn %o2, 0xf, GLOBAL_SPARE
and %o2, 0xf, %o2
1: subcc GLOBAL_SPARE, 0x10, GLOBAL_SPARE
- EX_LD(LOAD(ldx, %o1 + 0x00, %o5))
- EX_LD(LOAD(ldx, %o1 + 0x08, %g1))
- EX_ST(STORE(stx, %o5, %o1 + %o3))
+ EX_LD(LOAD(ldx, %o1 + 0x00, %o5), U3_retl_o2_plus_GS_plus_0x10)
+ EX_LD(LOAD(ldx, %o1 + 0x08, %g1), U3_retl_o2_plus_GS_plus_0x10)
+ EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2_plus_GS_plus_0x10)
add %o1, 0x8, %o1
- EX_ST(STORE(stx, %g1, %o1 + %o3))
+ EX_ST(STORE(stx, %g1, %o1 + %o3), U3_retl_o2_plus_GS_plus_0x08)
bgu,pt %XCC, 1b
add %o1, 0x8, %o1
73: andcc %o2, 0x8, %g0
be,pt %XCC, 1f
nop
sub %o2, 0x8, %o2
- EX_LD(LOAD(ldx, %o1, %o5))
- EX_ST(STORE(stx, %o5, %o1 + %o3))
+ EX_LD(LOAD(ldx, %o1, %o5), U3_retl_o2_plus_8)
+ EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2_plus_8)
add %o1, 0x8, %o1
1: andcc %o2, 0x4, %g0
be,pt %XCC, 1f
nop
sub %o2, 0x4, %o2
- EX_LD(LOAD(lduw, %o1, %o5))
- EX_ST(STORE(stw, %o5, %o1 + %o3))
+ EX_LD(LOAD(lduw, %o1, %o5), U3_retl_o2_plus_4)
+ EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2_plus_4)
add %o1, 0x4, %o1
1: cmp %o2, 0
be,pt %XCC, 85f
@@ -361,8 +442,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %o2, %g1, %o2
1: subcc %g1, 1, %g1
- EX_LD(LOAD(ldub, %o1, %o5))
- EX_ST(STORE(stb, %o5, %o1 + %o3))
+ EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2_plus_g1_plus_1)
+ EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2_plus_g1_plus_1)
bgu,pt %icc, 1b
add %o1, 1, %o1
@@ -378,16 +459,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
8: mov 64, %o3
andn %o1, 0x7, %o1
- EX_LD(LOAD(ldx, %o1, %g2))
+ EX_LD(LOAD(ldx, %o1, %g2), U3_retl_o2)
sub %o3, %g1, %o3
andn %o2, 0x7, GLOBAL_SPARE
sllx %g2, %g1, %g2
-1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3))
+1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3), U3_retl_o2_and_7_plus_GS)
subcc GLOBAL_SPARE, 0x8, GLOBAL_SPARE
add %o1, 0x8, %o1
srlx %g3, %o3, %o5
or %o5, %g2, %o5
- EX_ST(STORE(stx, %o5, %o0))
+ EX_ST(STORE(stx, %o5, %o0), U3_retl_o2_and_7_plus_GS_plus_8)
add %o0, 0x8, %o0
bgu,pt %icc, 1b
sllx %g3, %g1, %g2
@@ -407,8 +488,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
1:
subcc %o2, 4, %o2
- EX_LD(LOAD(lduw, %o1, %g1))
- EX_ST(STORE(stw, %g1, %o1 + %o3))
+ EX_LD(LOAD(lduw, %o1, %g1), U3_retl_o2_plus_4)
+ EX_ST(STORE(stw, %g1, %o1 + %o3), U3_retl_o2_plus_4)
bgu,pt %XCC, 1b
add %o1, 4, %o1
@@ -418,8 +499,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
.align 32
90:
subcc %o2, 1, %o2
- EX_LD(LOAD(ldub, %o1, %g1))
- EX_ST(STORE(stb, %g1, %o1 + %o3))
+ EX_LD(LOAD(ldub, %o1, %g1), U3_retl_o2_plus_1)
+ EX_ST(STORE(stb, %g1, %o1 + %o3), U3_retl_o2_plus_1)
bgu,pt %XCC, 90b
add %o1, 1, %o1
retl
diff --git a/arch/sparc/lib/copy_in_user.S b/arch/sparc/lib/copy_in_user.S
index 482de093bdae..0252b218de45 100644
--- a/arch/sparc/lib/copy_in_user.S
+++ b/arch/sparc/lib/copy_in_user.S
@@ -9,18 +9,33 @@
#define XCC xcc
-#define EX(x,y) \
+#define EX(x,y,z) \
98: x,y; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one; \
+ .word 98b, z; \
.text; \
.align 4;
+#define EX_O4(x,y) EX(x,y,__retl_o4_plus_8)
+#define EX_O2_4(x,y) EX(x,y,__retl_o2_plus_4)
+#define EX_O2_1(x,y) EX(x,y,__retl_o2_plus_1)
+
.register %g2,#scratch
.register %g3,#scratch
.text
+__retl_o4_plus_8:
+ add %o4, %o2, %o4
+ retl
+ add %o4, 8, %o0
+__retl_o2_plus_4:
+ retl
+ add %o2, 4, %o0
+__retl_o2_plus_1:
+ retl
+ add %o2, 1, %o0
+
.align 32
/* Don't try to get too fancy here, just nice and
@@ -45,8 +60,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */
andn %o2, 0x7, %o4
and %o2, 0x7, %o2
1: subcc %o4, 0x8, %o4
- EX(ldxa [%o1] %asi, %o5)
- EX(stxa %o5, [%o0] %asi)
+ EX_O4(ldxa [%o1] %asi, %o5)
+ EX_O4(stxa %o5, [%o0] %asi)
add %o1, 0x8, %o1
bgu,pt %XCC, 1b
add %o0, 0x8, %o0
@@ -54,8 +69,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */
be,pt %XCC, 1f
nop
sub %o2, 0x4, %o2
- EX(lduwa [%o1] %asi, %o5)
- EX(stwa %o5, [%o0] %asi)
+ EX_O2_4(lduwa [%o1] %asi, %o5)
+ EX_O2_4(stwa %o5, [%o0] %asi)
add %o1, 0x4, %o1
add %o0, 0x4, %o0
1: cmp %o2, 0
@@ -71,8 +86,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */
82:
subcc %o2, 4, %o2
- EX(lduwa [%o1] %asi, %g1)
- EX(stwa %g1, [%o0] %asi)
+ EX_O2_4(lduwa [%o1] %asi, %g1)
+ EX_O2_4(stwa %g1, [%o0] %asi)
add %o1, 4, %o1
bgu,pt %XCC, 82b
add %o0, 4, %o0
@@ -83,8 +98,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */
.align 32
90:
subcc %o2, 1, %o2
- EX(lduba [%o1] %asi, %g1)
- EX(stba %g1, [%o0] %asi)
+ EX_O2_1(lduba [%o1] %asi, %g1)
+ EX_O2_1(stba %g1, [%o0] %asi)
add %o1, 1, %o1
bgu,pt %XCC, 90b
add %o0, 1, %o0
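(Illustrative note, not part of the patch: copy_in_user.S grows three local fixup stubs whose return values match the loop they service -- __retl_o4_plus_8 for the bulk 8-byte loop, __retl_o2_plus_4 and __retl_o2_plus_1 for the word and byte tails. Their arithmetic, restated as plain C with invented names.)

/* Sketch only: residual counts produced by the three stubs above. */
unsigned long retl_o4_plus_8(unsigned long o4, unsigned long o2)
{
	return o4 + o2 + 8;	/* remaining bulk + tail + the 8 bytes in flight */
}

unsigned long retl_o2_plus_4(unsigned long o2)
{
	return o2 + 4;		/* the faulting 4-byte word was not copied */
}

unsigned long retl_o2_plus_1(unsigned long o2)
{
	return o2 + 1;		/* the faulting byte was not copied */
}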
diff --git a/arch/sparc/lib/user_fixup.c b/arch/sparc/lib/user_fixup.c
deleted file mode 100644
index ac96ae236709..000000000000
--- a/arch/sparc/lib/user_fixup.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/* user_fixup.c: Fix up user copy faults.
- *
- * Copyright (C) 2004 David S. Miller <davem@redhat.com>
- */
-
-#include <linux/compiler.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-
-#include <asm/uaccess.h>
-
-/* Calculating the exact fault address when using
- * block loads and stores can be very complicated.
- *
- * Instead of trying to be clever and handling all
- * of the cases, just fix things up simply here.
- */
-
-static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset)
-{
- unsigned long fault_addr = current_thread_info()->fault_address;
- unsigned long end = start + size;
-
- if (fault_addr < start || fault_addr >= end) {
- *offset = 0;
- } else {
- *offset = fault_addr - start;
- size = end - fault_addr;
- }
- return size;
-}
-
-unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size)
-{
- unsigned long offset;
-
- size = compute_size((unsigned long) from, size, &offset);
- if (likely(size))
- memset(to + offset, 0, size);
-
- return size;
-}
-EXPORT_SYMBOL(copy_from_user_fixup);
-
-unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size)
-{
- unsigned long offset;
-
- return compute_size((unsigned long) to, size, &offset);
-}
-EXPORT_SYMBOL(copy_to_user_fixup);
-
-unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size)
-{
- unsigned long fault_addr = current_thread_info()->fault_address;
- unsigned long start = (unsigned long) to;
- unsigned long end = start + size;
-
- if (fault_addr >= start && fault_addr < end)
- return end - fault_addr;
-
- start = (unsigned long) from;
- end = start + size;
- if (fault_addr >= start && fault_addr < end)
- return end - fault_addr;
-
- return size;
-}
-EXPORT_SYMBOL(copy_in_user_fixup);
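(Illustrative note, not part of the patch: user_fixup.c can be deleted because the assembly fixups now hand back exact residual counts, whereas the removed helpers only approximated them from the recorded fault address. For contrast, the deleted compute_size() logic condensed into a single C function; the name is chosen for this sketch.)

/* Sketch only: the old fallback derived "bytes not copied" from the
 * fault address, clamped to the copy's range, instead of receiving an
 * exact count from the copy routine itself.
 */
unsigned long old_style_residue(unsigned long start, unsigned long size,
				unsigned long fault_addr)
{
	unsigned long end = start + size;

	if (fault_addr < start || fault_addr >= end)
		return size;		/* fault outside the copy: report it all */
	return end - fault_addr;	/* otherwise: everything past the fault */
}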
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index f2b77112e9d8..e20fbbafb0b0 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -27,6 +27,20 @@ static inline int tag_compare(unsigned long tag, unsigned long vaddr)
return (tag == (vaddr >> 22));
}
+static void flush_tsb_kernel_range_scan(unsigned long start, unsigned long end)
+{
+ unsigned long idx;
+
+ for (idx = 0; idx < KERNEL_TSB_NENTRIES; idx++) {
+ struct tsb *ent = &swapper_tsb[idx];
+ unsigned long match = idx << 13;
+
+ match |= (ent->tag << 22);
+ if (match >= start && match < end)
+ ent->tag = (1UL << TSB_TAG_INVALID_BIT);
+ }
+}
+
/* TSB flushes need only occur on the processor initiating the address
* space modification, not on each cpu the address space has run on.
* Only the TLB flush needs that treatment.
@@ -36,6 +50,9 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
unsigned long v;
+ if ((end - start) >> PAGE_SHIFT >= 2 * KERNEL_TSB_NENTRIES)
+ return flush_tsb_kernel_range_scan(start, end);
+
for (v = start; v < end; v += PAGE_SIZE) {
unsigned long hash = tsb_hash(v, PAGE_SHIFT,
KERNEL_TSB_NENTRIES);
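(Illustrative note, not part of the patch: flush_tsb_kernel_range() now diverts to the new flush_tsb_kernel_range_scan() once the range covers at least 2 * KERNEL_TSB_NENTRIES pages, since hashing every page would touch more entries than the table holds. The dispatch condition, restated as a standalone C predicate with symbolic parameters.)

/* Sketch only: when it is cheaper to walk every TSB entry once than
 * to hash each page of the flushed range individually.
 */
int prefer_whole_tsb_scan(unsigned long start, unsigned long end,
			  unsigned long tsb_nentries, unsigned int page_shift)
{
	return ((end - start) >> page_shift) >= 2 * tsb_nentries;
}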
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index b4f4733abc6e..5d2fd6cd3189 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -30,7 +30,7 @@
.text
.align 32
.globl __flush_tlb_mm
-__flush_tlb_mm: /* 18 insns */
+__flush_tlb_mm: /* 19 insns */
/* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
ldxa [%o1] ASI_DMMU, %g2
cmp %g2, %o0
@@ -81,7 +81,7 @@ __flush_tlb_page: /* 22 insns */
.align 32
.globl __flush_tlb_pending
-__flush_tlb_pending: /* 26 insns */
+__flush_tlb_pending: /* 27 insns */
/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
rdpr %pstate, %g7
sllx %o1, 3, %o1
@@ -113,12 +113,14 @@ __flush_tlb_pending: /* 26 insns */
.align 32
.globl __flush_tlb_kernel_range
-__flush_tlb_kernel_range: /* 16 insns */
+__flush_tlb_kernel_range: /* 31 insns */
/* %o0=start, %o1=end */
cmp %o0, %o1
be,pn %xcc, 2f
+ sub %o1, %o0, %o3
+ srlx %o3, 18, %o4
+ brnz,pn %o4, __spitfire_flush_tlb_kernel_range_slow
sethi %hi(PAGE_SIZE), %o4
- sub %o1, %o0, %o3
sub %o3, %o4, %o3
or %o0, 0x20, %o0 ! Nucleus
1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP
@@ -131,6 +133,41 @@ __flush_tlb_kernel_range: /* 16 insns */
retl
nop
nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+__spitfire_flush_tlb_kernel_range_slow:
+ mov 63 * 8, %o4
+1: ldxa [%o4] ASI_ITLB_DATA_ACCESS, %o3
+ andcc %o3, 0x40, %g0 /* _PAGE_L_4U */
+ bne,pn %xcc, 2f
+ mov TLB_TAG_ACCESS, %o3
+ stxa %g0, [%o3] ASI_IMMU
+ stxa %g0, [%o4] ASI_ITLB_DATA_ACCESS
+ membar #Sync
+2: ldxa [%o4] ASI_DTLB_DATA_ACCESS, %o3
+ andcc %o3, 0x40, %g0
+ bne,pn %xcc, 2f
+ mov TLB_TAG_ACCESS, %o3
+ stxa %g0, [%o3] ASI_DMMU
+ stxa %g0, [%o4] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+2: sub %o4, 8, %o4
+ brgez,pt %o4, 1b
+ nop
+ retl
+ nop
__spitfire_flush_tlb_mm_slow:
rdpr %pstate, %g1
@@ -285,6 +322,40 @@ __cheetah_flush_tlb_pending: /* 27 insns */
retl
wrpr %g7, 0x0, %pstate
+__cheetah_flush_tlb_kernel_range: /* 31 insns */
+ /* %o0=start, %o1=end */
+ cmp %o0, %o1
+ be,pn %xcc, 2f
+ sub %o1, %o0, %o3
+ srlx %o3, 18, %o4
+ brnz,pn %o4, 3f
+ sethi %hi(PAGE_SIZE), %o4
+ sub %o3, %o4, %o3
+ or %o0, 0x20, %o0 ! Nucleus
+1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP
+ stxa %g0, [%o0 + %o3] ASI_IMMU_DEMAP
+ membar #Sync
+ brnz,pt %o3, 1b
+ sub %o3, %o4, %o3
+2: sethi %hi(KERNBASE), %o3
+ flush %o3
+ retl
+ nop
+3: mov 0x80, %o4
+ stxa %g0, [%o4] ASI_DMMU_DEMAP
+ membar #Sync
+ stxa %g0, [%o4] ASI_IMMU_DEMAP
+ membar #Sync
+ retl
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
#ifdef DCACHE_ALIASING_POSSIBLE
__cheetah_flush_dcache_page: /* 11 insns */
sethi %hi(PAGE_OFFSET), %g1
@@ -309,19 +380,28 @@ __hypervisor_tlb_tl0_error:
ret
restore
-__hypervisor_flush_tlb_mm: /* 10 insns */
+__hypervisor_flush_tlb_mm: /* 19 insns */
mov %o0, %o2 /* ARG2: mmu context */
mov 0, %o0 /* ARG0: CPU lists unimplemented */
mov 0, %o1 /* ARG1: CPU lists unimplemented */
mov HV_MMU_ALL, %o3 /* ARG3: flags */
mov HV_FAST_MMU_DEMAP_CTX, %o5
ta HV_FAST_TRAP
- brnz,pn %o0, __hypervisor_tlb_tl0_error
+ brnz,pn %o0, 1f
mov HV_FAST_MMU_DEMAP_CTX, %o1
retl
nop
+1: sethi %hi(__hypervisor_tlb_tl0_error), %o5
+ jmpl %o5 + %lo(__hypervisor_tlb_tl0_error), %g0
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
-__hypervisor_flush_tlb_page: /* 11 insns */
+__hypervisor_flush_tlb_page: /* 22 insns */
/* %o0 = context, %o1 = vaddr */
mov %o0, %g2
mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */
@@ -330,12 +410,23 @@ __hypervisor_flush_tlb_page: /* 11 insns */
srlx %o0, PAGE_SHIFT, %o0
sllx %o0, PAGE_SHIFT, %o0
ta HV_MMU_UNMAP_ADDR_TRAP
- brnz,pn %o0, __hypervisor_tlb_tl0_error
+ brnz,pn %o0, 1f
mov HV_MMU_UNMAP_ADDR_TRAP, %o1
retl
nop
+1: sethi %hi(__hypervisor_tlb_tl0_error), %o2
+ jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
-__hypervisor_flush_tlb_pending: /* 16 insns */
+__hypervisor_flush_tlb_pending: /* 27 insns */
/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
sllx %o1, 3, %g1
mov %o2, %g2
@@ -347,31 +438,57 @@ __hypervisor_flush_tlb_pending: /* 16 insns */
srlx %o0, PAGE_SHIFT, %o0
sllx %o0, PAGE_SHIFT, %o0
ta HV_MMU_UNMAP_ADDR_TRAP
- brnz,pn %o0, __hypervisor_tlb_tl0_error
+ brnz,pn %o0, 1f
mov HV_MMU_UNMAP_ADDR_TRAP, %o1
brnz,pt %g1, 1b
nop
retl
nop
+1: sethi %hi(__hypervisor_tlb_tl0_error), %o2
+ jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
-__hypervisor_flush_tlb_kernel_range: /* 16 insns */
+__hypervisor_flush_tlb_kernel_range: /* 31 insns */
/* %o0=start, %o1=end */
cmp %o0, %o1
be,pn %xcc, 2f
- sethi %hi(PAGE_SIZE), %g3
- mov %o0, %g1
- sub %o1, %g1, %g2
+ sub %o1, %o0, %g2
+ srlx %g2, 18, %g3
+ brnz,pn %g3, 4f
+ mov %o0, %g1
+ sethi %hi(PAGE_SIZE), %g3
sub %g2, %g3, %g2
1: add %g1, %g2, %o0 /* ARG0: virtual address */
mov 0, %o1 /* ARG1: mmu context */
mov HV_MMU_ALL, %o2 /* ARG2: flags */
ta HV_MMU_UNMAP_ADDR_TRAP
- brnz,pn %o0, __hypervisor_tlb_tl0_error
+ brnz,pn %o0, 3f
mov HV_MMU_UNMAP_ADDR_TRAP, %o1
brnz,pt %g2, 1b
sub %g2, %g3, %g2
2: retl
nop
+3: sethi %hi(__hypervisor_tlb_tl0_error), %o2
+ jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0
+ nop
+4: mov 0, %o0 /* ARG0: CPU lists unimplemented */
+ mov 0, %o1 /* ARG1: CPU lists unimplemented */
+ mov 0, %o2 /* ARG2: mmu context == nucleus */
+ mov HV_MMU_ALL, %o3 /* ARG3: flags */
+ mov HV_FAST_MMU_DEMAP_CTX, %o5
+ ta HV_FAST_TRAP
+ brnz,pn %o0, 3b
+ mov HV_FAST_MMU_DEMAP_CTX, %o1
+ retl
+ nop
#ifdef DCACHE_ALIASING_POSSIBLE
/* XXX Niagara and friends have an 8K cache, so no aliasing is
@@ -394,43 +511,6 @@ tlb_patch_one:
retl
nop
- .globl cheetah_patch_cachetlbops
-cheetah_patch_cachetlbops:
- save %sp, -128, %sp
-
- sethi %hi(__flush_tlb_mm), %o0
- or %o0, %lo(__flush_tlb_mm), %o0
- sethi %hi(__cheetah_flush_tlb_mm), %o1
- or %o1, %lo(__cheetah_flush_tlb_mm), %o1
- call tlb_patch_one
- mov 19, %o2
-
- sethi %hi(__flush_tlb_page), %o0
- or %o0, %lo(__flush_tlb_page), %o0
- sethi %hi(__cheetah_flush_tlb_page), %o1
- or %o1, %lo(__cheetah_flush_tlb_page), %o1
- call tlb_patch_one
- mov 22, %o2
-
- sethi %hi(__flush_tlb_pending), %o0
- or %o0, %lo(__flush_tlb_pending), %o0
- sethi %hi(__cheetah_flush_tlb_pending), %o1
- or %o1, %lo(__cheetah_flush_tlb_pending), %o1
- call tlb_patch_one
- mov 27, %o2
-
-#ifdef DCACHE_ALIASING_POSSIBLE
- sethi %hi(__flush_dcache_page), %o0
- or %o0, %lo(__flush_dcache_page), %o0
- sethi %hi(__cheetah_flush_dcache_page), %o1
- or %o1, %lo(__cheetah_flush_dcache_page), %o1
- call tlb_patch_one
- mov 11, %o2
-#endif /* DCACHE_ALIASING_POSSIBLE */
-
- ret
- restore
-
#ifdef CONFIG_SMP
/* These are all called by the slaves of a cross call, at
* trap level 1, with interrupts fully disabled.
@@ -447,7 +527,7 @@ cheetah_patch_cachetlbops:
*/
.align 32
.globl xcall_flush_tlb_mm
-xcall_flush_tlb_mm: /* 21 insns */
+xcall_flush_tlb_mm: /* 24 insns */
mov PRIMARY_CONTEXT, %g2
ldxa [%g2] ASI_DMMU, %g3
srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4
@@ -469,9 +549,12 @@ xcall_flush_tlb_mm: /* 21 insns */
nop
nop
nop
+ nop
+ nop
+ nop
.globl xcall_flush_tlb_page
-xcall_flush_tlb_page: /* 17 insns */
+xcall_flush_tlb_page: /* 20 insns */
/* %g5=context, %g1=vaddr */
mov PRIMARY_CONTEXT, %g4
ldxa [%g4] ASI_DMMU, %g2
@@ -490,15 +573,20 @@ xcall_flush_tlb_page: /* 17 insns */
retry
nop
nop
+ nop
+ nop
+ nop
.globl xcall_flush_tlb_kernel_range
-xcall_flush_tlb_kernel_range: /* 25 insns */
+xcall_flush_tlb_kernel_range: /* 44 insns */
sethi %hi(PAGE_SIZE - 1), %g2
or %g2, %lo(PAGE_SIZE - 1), %g2
andn %g1, %g2, %g1
andn %g7, %g2, %g7
sub %g7, %g1, %g3
- add %g2, 1, %g2
+ srlx %g3, 18, %g2
+ brnz,pn %g2, 2f
+ add %g2, 1, %g2
sub %g3, %g2, %g3
or %g1, 0x20, %g1 ! Nucleus
1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
@@ -507,8 +595,25 @@ xcall_flush_tlb_kernel_range: /* 25 insns */
brnz,pt %g3, 1b
sub %g3, %g2, %g3
retry
- nop
- nop
+2: mov 63 * 8, %g1
+1: ldxa [%g1] ASI_ITLB_DATA_ACCESS, %g2
+ andcc %g2, 0x40, %g0 /* _PAGE_L_4U */
+ bne,pn %xcc, 2f
+ mov TLB_TAG_ACCESS, %g2
+ stxa %g0, [%g2] ASI_IMMU
+ stxa %g0, [%g1] ASI_ITLB_DATA_ACCESS
+ membar #Sync
+2: ldxa [%g1] ASI_DTLB_DATA_ACCESS, %g2
+ andcc %g2, 0x40, %g0
+ bne,pn %xcc, 2f
+ mov TLB_TAG_ACCESS, %g2
+ stxa %g0, [%g2] ASI_DMMU
+ stxa %g0, [%g1] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+2: sub %g1, 8, %g1
+ brgez,pt %g1, 1b
+ nop
+ retry
nop
nop
nop
@@ -637,6 +742,52 @@ xcall_fetch_glob_pmu_n4:
retry
+__cheetah_xcall_flush_tlb_kernel_range: /* 44 insns */
+ sethi %hi(PAGE_SIZE - 1), %g2
+ or %g2, %lo(PAGE_SIZE - 1), %g2
+ andn %g1, %g2, %g1
+ andn %g7, %g2, %g7
+ sub %g7, %g1, %g3
+ srlx %g3, 18, %g2
+ brnz,pn %g2, 2f
+ add %g2, 1, %g2
+ sub %g3, %g2, %g3
+ or %g1, 0x20, %g1 ! Nucleus
+1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
+ stxa %g0, [%g1 + %g3] ASI_IMMU_DEMAP
+ membar #Sync
+ brnz,pt %g3, 1b
+ sub %g3, %g2, %g3
+ retry
+2: mov 0x80, %g2
+ stxa %g0, [%g2] ASI_DMMU_DEMAP
+ membar #Sync
+ stxa %g0, [%g2] ASI_IMMU_DEMAP
+ membar #Sync
+ retry
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
#ifdef DCACHE_ALIASING_POSSIBLE
.align 32
.globl xcall_flush_dcache_page_cheetah
@@ -700,7 +851,7 @@ __hypervisor_tlb_xcall_error:
ba,a,pt %xcc, rtrap
.globl __hypervisor_xcall_flush_tlb_mm
-__hypervisor_xcall_flush_tlb_mm: /* 21 insns */
+__hypervisor_xcall_flush_tlb_mm: /* 24 insns */
/* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
mov %o0, %g2
mov %o1, %g3
@@ -714,7 +865,7 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
mov HV_FAST_MMU_DEMAP_CTX, %o5
ta HV_FAST_TRAP
mov HV_FAST_MMU_DEMAP_CTX, %g6
- brnz,pn %o0, __hypervisor_tlb_xcall_error
+ brnz,pn %o0, 1f
mov %o0, %g5
mov %g2, %o0
mov %g3, %o1
@@ -723,9 +874,12 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
mov %g7, %o5
membar #Sync
retry
+1: sethi %hi(__hypervisor_tlb_xcall_error), %g4
+ jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0
+ nop
.globl __hypervisor_xcall_flush_tlb_page
-__hypervisor_xcall_flush_tlb_page: /* 17 insns */
+__hypervisor_xcall_flush_tlb_page: /* 20 insns */
/* %g5=ctx, %g1=vaddr */
mov %o0, %g2
mov %o1, %g3
@@ -737,42 +891,64 @@ __hypervisor_xcall_flush_tlb_page: /* 17 insns */
sllx %o0, PAGE_SHIFT, %o0
ta HV_MMU_UNMAP_ADDR_TRAP
mov HV_MMU_UNMAP_ADDR_TRAP, %g6
- brnz,a,pn %o0, __hypervisor_tlb_xcall_error
+ brnz,a,pn %o0, 1f
mov %o0, %g5
mov %g2, %o0
mov %g3, %o1
mov %g4, %o2
membar #Sync
retry
+1: sethi %hi(__hypervisor_tlb_xcall_error), %g4
+ jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0
+ nop
.globl __hypervisor_xcall_flush_tlb_kernel_range
-__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
+__hypervisor_xcall_flush_tlb_kernel_range: /* 44 insns */
/* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
sethi %hi(PAGE_SIZE - 1), %g2
or %g2, %lo(PAGE_SIZE - 1), %g2
andn %g1, %g2, %g1
andn %g7, %g2, %g7
sub %g7, %g1, %g3
+ srlx %g3, 18, %g7
add %g2, 1, %g2
sub %g3, %g2, %g3
mov %o0, %g2
mov %o1, %g4
- mov %o2, %g7
+ brnz,pn %g7, 2f
+ mov %o2, %g7
1: add %g1, %g3, %o0 /* ARG0: virtual address */
mov 0, %o1 /* ARG1: mmu context */
mov HV_MMU_ALL, %o2 /* ARG2: flags */
ta HV_MMU_UNMAP_ADDR_TRAP
mov HV_MMU_UNMAP_ADDR_TRAP, %g6
- brnz,pn %o0, __hypervisor_tlb_xcall_error
+ brnz,pn %o0, 1f
mov %o0, %g5
sethi %hi(PAGE_SIZE), %o2
brnz,pt %g3, 1b
sub %g3, %o2, %g3
- mov %g2, %o0
+5: mov %g2, %o0
mov %g4, %o1
mov %g7, %o2
membar #Sync
retry
+1: sethi %hi(__hypervisor_tlb_xcall_error), %g4
+ jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0
+ nop
+2: mov %o3, %g1
+ mov %o5, %g3
+ mov 0, %o0 /* ARG0: CPU lists unimplemented */
+ mov 0, %o1 /* ARG1: CPU lists unimplemented */
+ mov 0, %o2 /* ARG2: mmu context == nucleus */
+ mov HV_MMU_ALL, %o3 /* ARG3: flags */
+ mov HV_FAST_MMU_DEMAP_CTX, %o5
+ ta HV_FAST_TRAP
+ mov %g1, %o3
+ brz,pt %o0, 5b
+ mov %g3, %o5
+ mov HV_FAST_MMU_DEMAP_CTX, %g6
+ ba,pt %xcc, 1b
+ clr %g5
/* These just get rescheduled to PIL vectors. */
.globl xcall_call_function
@@ -809,6 +985,58 @@ xcall_kgdb_capture:
#endif /* CONFIG_SMP */
+ .globl cheetah_patch_cachetlbops
+cheetah_patch_cachetlbops:
+ save %sp, -128, %sp
+
+ sethi %hi(__flush_tlb_mm), %o0
+ or %o0, %lo(__flush_tlb_mm), %o0
+ sethi %hi(__cheetah_flush_tlb_mm), %o1
+ or %o1, %lo(__cheetah_flush_tlb_mm), %o1
+ call tlb_patch_one
+ mov 19, %o2
+
+ sethi %hi(__flush_tlb_page), %o0
+ or %o0, %lo(__flush_tlb_page), %o0
+ sethi %hi(__cheetah_flush_tlb_page), %o1
+ or %o1, %lo(__cheetah_flush_tlb_page), %o1
+ call tlb_patch_one
+ mov 22, %o2
+
+ sethi %hi(__flush_tlb_pending), %o0
+ or %o0, %lo(__flush_tlb_pending), %o0
+ sethi %hi(__cheetah_flush_tlb_pending), %o1
+ or %o1, %lo(__cheetah_flush_tlb_pending), %o1
+ call tlb_patch_one
+ mov 27, %o2
+
+ sethi %hi(__flush_tlb_kernel_range), %o0
+ or %o0, %lo(__flush_tlb_kernel_range), %o0
+ sethi %hi(__cheetah_flush_tlb_kernel_range), %o1
+ or %o1, %lo(__cheetah_flush_tlb_kernel_range), %o1
+ call tlb_patch_one
+ mov 31, %o2
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+ sethi %hi(__flush_dcache_page), %o0
+ or %o0, %lo(__flush_dcache_page), %o0
+ sethi %hi(__cheetah_flush_dcache_page), %o1
+ or %o1, %lo(__cheetah_flush_dcache_page), %o1
+ call tlb_patch_one
+ mov 11, %o2
+#endif /* DCACHE_ALIASING_POSSIBLE */
+
+#ifdef CONFIG_SMP
+ sethi %hi(xcall_flush_tlb_kernel_range), %o0
+ or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
+ sethi %hi(__cheetah_xcall_flush_tlb_kernel_range), %o1
+ or %o1, %lo(__cheetah_xcall_flush_tlb_kernel_range), %o1
+ call tlb_patch_one
+ mov 44, %o2
+#endif /* CONFIG_SMP */
+
+ ret
+ restore
.globl hypervisor_patch_cachetlbops
hypervisor_patch_cachetlbops:
@@ -819,28 +1047,28 @@ hypervisor_patch_cachetlbops:
sethi %hi(__hypervisor_flush_tlb_mm), %o1
or %o1, %lo(__hypervisor_flush_tlb_mm), %o1
call tlb_patch_one
- mov 10, %o2
+ mov 19, %o2
sethi %hi(__flush_tlb_page), %o0
or %o0, %lo(__flush_tlb_page), %o0
sethi %hi(__hypervisor_flush_tlb_page), %o1
or %o1, %lo(__hypervisor_flush_tlb_page), %o1
call tlb_patch_one
- mov 11, %o2
+ mov 22, %o2
sethi %hi(__flush_tlb_pending), %o0
or %o0, %lo(__flush_tlb_pending), %o0
sethi %hi(__hypervisor_flush_tlb_pending), %o1
or %o1, %lo(__hypervisor_flush_tlb_pending), %o1
call tlb_patch_one
- mov 16, %o2
+ mov 27, %o2
sethi %hi(__flush_tlb_kernel_range), %o0
or %o0, %lo(__flush_tlb_kernel_range), %o0
sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1
or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
call tlb_patch_one
- mov 16, %o2
+ mov 31, %o2
#ifdef DCACHE_ALIASING_POSSIBLE
sethi %hi(__flush_dcache_page), %o0
@@ -857,21 +1085,21 @@ hypervisor_patch_cachetlbops:
sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1
or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
call tlb_patch_one
- mov 21, %o2
+ mov 24, %o2
sethi %hi(xcall_flush_tlb_page), %o0
or %o0, %lo(xcall_flush_tlb_page), %o0
sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1
or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
call tlb_patch_one
- mov 17, %o2
+ mov 20, %o2
sethi %hi(xcall_flush_tlb_kernel_range), %o0
or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
call tlb_patch_one
- mov 25, %o2
+ mov 44, %o2
#endif /* CONFIG_SMP */
ret
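
The kernel-range flush paths above share one heuristic: the byte length of the range is shifted right by 18 (srlx %g3, 18), and a non-zero result, meaning 256 KiB or more, takes a single demap-all instead of a per-page loop. A rough userspace C sketch of that logic, assuming an 8 KiB page size and hypothetical flush_one_page()/demap_all_nucleus() helpers:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 8192UL	/* assumption: 8 KiB base pages as on sparc64 */

static void flush_one_page(uint64_t va)
{
	printf("flush page at %#llx\n", (unsigned long long)va);
}

static void demap_all_nucleus(void)
{
	printf("demap entire nucleus context\n");
}

static void flush_kernel_range(uint64_t start, uint64_t end)
{
	uint64_t len = end - start;

	if (len >> 18) {	/* >= 256 KiB: flushing everything is cheaper */
		demap_all_nucleus();
		return;
	}
	for (uint64_t va = start; va < end; va += PAGE_SIZE)
		flush_one_page(va);
}

int main(void)
{
	flush_kernel_range(0x100000, 0x104000);	/* 16 KiB: per-page loop */
	flush_kernel_range(0x100000, 0x200000);	/* 1 MiB: single demap */
	return 0;
}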
diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile
index 77f28ce9c646..9976fcecd17e 100644
--- a/arch/x86/entry/Makefile
+++ b/arch/x86/entry/Makefile
@@ -5,8 +5,8 @@
OBJECT_FILES_NON_STANDARD_entry_$(BITS).o := y
OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y
-CFLAGS_syscall_64.o += -Wno-override-init
-CFLAGS_syscall_32.o += -Wno-override-init
+CFLAGS_syscall_64.o += $(call cc-option,-Wno-override-init,)
+CFLAGS_syscall_32.o += $(call cc-option,-Wno-override-init,)
obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
obj-y += common.o
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index eab0915f5995..a74a2dbc0180 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3607,10 +3607,14 @@ __init int intel_pmu_init(void)
/*
* Quirk: v2 perfmon does not report fixed-purpose events, so
- * assume at least 3 events:
+ * assume at least 3 events, when not running in a hypervisor:
*/
- if (version > 1)
- x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
+ if (version > 1) {
+ int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
+
+ x86_pmu.num_counters_fixed =
+ max((int)edx.split.num_counters_fixed, assume);
+ }
if (boot_cpu_has(X86_FEATURE_PDCM)) {
u64 capabilities;
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 3ca87b5a8677..4f5ac726335f 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -48,7 +48,8 @@
* Scope: Core
* MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
* perf code: 0x02
- * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL
+ * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
+ * SKL,KNL
* Scope: Core
* MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
* perf code: 0x03
@@ -56,15 +57,16 @@
* Scope: Core
* MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
* perf code: 0x00
- * Available model: SNB,IVB,HSW,BDW,SKL
+ * Available model: SNB,IVB,HSW,BDW,SKL,KNL
* Scope: Package (physical package)
* MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
* perf code: 0x01
- * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
+ * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL
* Scope: Package (physical package)
* MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
* perf code: 0x02
- * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL
+ * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
+ * SKL,KNL
* Scope: Package (physical package)
* MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
* perf code: 0x03
@@ -118,6 +120,7 @@ struct cstate_model {
/* Quirk flags */
#define SLM_PKG_C6_USE_C7_MSR (1UL << 0)
+#define KNL_CORE_C6_MSR (1UL << 1)
struct perf_cstate_msr {
u64 msr;
@@ -488,6 +491,18 @@ static const struct cstate_model slm_cstates __initconst = {
.quirks = SLM_PKG_C6_USE_C7_MSR,
};
+
+static const struct cstate_model knl_cstates __initconst = {
+ .core_events = BIT(PERF_CSTATE_CORE_C6_RES),
+
+ .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
+ BIT(PERF_CSTATE_PKG_C3_RES) |
+ BIT(PERF_CSTATE_PKG_C6_RES),
+ .quirks = KNL_CORE_C6_MSR,
+};
+
+
+
#define X86_CSTATES_MODEL(model, states) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }
@@ -523,6 +538,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE, snb_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
+
+ X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates),
{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
@@ -558,6 +575,11 @@ static int __init cstate_probe(const struct cstate_model *cm)
if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;
+ /* KNL has different MSR for CORE C6 */
+ if (cm->quirks & KNL_CORE_C6_MSR)
+ pkg_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY;
+
+
has_cstate_core = cstate_probe_msr(cm->core_events,
PERF_CSTATE_CORE_EVENT_MAX,
core_msr, core_events_attrs);
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index de25aad07853..d34bd370074b 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -351,4 +351,10 @@ extern void arch_phys_wc_del(int handle);
#define arch_phys_wc_add arch_phys_wc_add
#endif
+#ifdef CONFIG_X86_PAT
+extern int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size);
+extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size);
+#define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc
+#endif
+
#endif /* _ASM_X86_IO_H */
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 8a5abaa7d453..931ced8ca345 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -454,6 +454,7 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger,
polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
+ acpi_penalize_sci_irq(bus_irq, trigger, polarity);
/*
* stash over-ride to indicate we've been here
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 620ab06bcf45..017bda12caae 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -429,7 +429,7 @@ int __init save_microcode_in_initrd_amd(void)
* We need the physical address of the container for both bitness since
* boot_params.hdr.ramdisk_image is a physical address.
*/
- cont = __pa(container);
+ cont = __pa_nodebug(container);
cont_va = container;
#endif
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
index efe73aacf966..7b0d3da52fb4 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -18,8 +18,10 @@
#ifdef CC_USING_FENTRY
# define function_hook __fentry__
+EXPORT_SYMBOL(__fentry__)
#else
# define function_hook mcount
+EXPORT_SYMBOL(mcount)
#endif
/* All cases save the original rbp (8 bytes) */
@@ -295,7 +297,6 @@ trace:
jmp fgraph_trace
END(function_hook)
#endif /* CONFIG_DYNAMIC_FTRACE */
-EXPORT_SYMBOL(function_hook)
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 51402a7e4ca6..0bee04d41bed 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -625,8 +625,6 @@ static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
amd_disable_seq_and_redirect_scrub);
-#endif
-
#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
#include <linux/jump_label.h>
#include <asm/string_64.h>
@@ -657,3 +655,4 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap);
#endif
+#endif
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index bbfbca5fea0c..9c337b0e8ba7 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1221,11 +1221,16 @@ void __init setup_arch(char **cmdline_p)
*/
get_smp_config();
+ /*
+ * Systems without ACPI or mptables might not have the local APIC
+ * mapped yet, but prefill_possible_map() might need to access it.
+ */
+ init_apic_mappings();
+
prefill_possible_map();
init_cpu_to_node();
- init_apic_mappings();
io_apic_init_mappings();
kvm_guest_init();
diff --git a/arch/x86/kernel/unwind_guess.c b/arch/x86/kernel/unwind_guess.c
index 9298993dc8b7..2d721e533cf4 100644
--- a/arch/x86/kernel/unwind_guess.c
+++ b/arch/x86/kernel/unwind_guess.c
@@ -47,7 +47,14 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
get_stack_info(first_frame, state->task, &state->stack_info,
&state->stack_mask);
- if (!__kernel_text_address(*first_frame))
+ /*
+ * The caller can provide the address of the first frame directly
+ * (first_frame) or indirectly (regs->sp) to indicate which stack frame
+ * to start unwinding at. Skip ahead until we reach it.
+ */
+ if (!unwind_done(state) &&
+ (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
+ !__kernel_text_address(*first_frame)))
unwind_next_frame(state);
}
EXPORT_SYMBOL_GPL(__unwind_start);
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index ddd2661c4502..887e57182716 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -104,10 +104,10 @@ void __init kernel_randomize_memory(void)
* consistent with the vaddr_start/vaddr_end variables.
*/
BUILD_BUG_ON(vaddr_start >= vaddr_end);
- BUILD_BUG_ON(config_enabled(CONFIG_X86_ESPFIX64) &&
+ BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
vaddr_end >= EFI_VA_START);
- BUILD_BUG_ON((config_enabled(CONFIG_X86_ESPFIX64) ||
- config_enabled(CONFIG_EFI)) &&
+ BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
+ IS_ENABLED(CONFIG_EFI)) &&
vaddr_end >= __START_KERNEL_map);
BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 170cc4ff057b..83e701f160a9 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -730,6 +730,20 @@ void io_free_memtype(resource_size_t start, resource_size_t end)
free_memtype(start, end);
}
+int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
+{
+ enum page_cache_mode type = _PAGE_CACHE_MODE_WC;
+
+ return io_reserve_memtype(start, start + size, &type);
+}
+EXPORT_SYMBOL(arch_io_reserve_memtype_wc);
+
+void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
+{
+ io_free_memtype(start, start + size);
+}
+EXPORT_SYMBOL(arch_io_free_memtype_wc);
+
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index c0fdd57da7aa..bdd855685403 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1837,6 +1837,7 @@ static void __init init_hvm_pv_info(void)
xen_domain_type = XEN_HVM_DOMAIN;
}
+#endif
static int xen_cpu_up_prepare(unsigned int cpu)
{
@@ -1887,6 +1888,7 @@ static int xen_cpu_up_online(unsigned int cpu)
return 0;
}
+#ifdef CONFIG_XEN_PVHVM
#ifdef CONFIG_KEXEC_CORE
static void xen_hvm_shutdown(void)
{
diff --git a/block/badblocks.c b/block/badblocks.c
index 6610e282a03e..6ebcef282314 100644
--- a/block/badblocks.c
+++ b/block/badblocks.c
@@ -133,6 +133,26 @@ retry:
}
EXPORT_SYMBOL_GPL(badblocks_check);
+static void badblocks_update_acked(struct badblocks *bb)
+{
+ u64 *p = bb->page;
+ int i;
+ bool unacked = false;
+
+ if (!bb->unacked_exist)
+ return;
+
+ for (i = 0; i < bb->count ; i++) {
+ if (!BB_ACK(p[i])) {
+ unacked = true;
+ break;
+ }
+ }
+
+ if (!unacked)
+ bb->unacked_exist = 0;
+}
+
/**
* badblocks_set() - Add a range of bad blocks to the table.
* @bb: the badblocks structure that holds all badblock information
@@ -294,6 +314,8 @@ int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
bb->changed = 1;
if (!acknowledged)
bb->unacked_exist = 1;
+ else
+ badblocks_update_acked(bb);
write_sequnlock_irqrestore(&bb->lock, flags);
return rv;
@@ -401,6 +423,7 @@ int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
}
}
+ badblocks_update_acked(bb);
bb->changed = 1;
out:
write_sequnlock_irq(&bb->lock);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 6a14b68b9135..3c882cbc7541 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -343,6 +343,34 @@ static void flush_data_end_io(struct request *rq, int error)
struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
/*
+ * Update q->in_flight[] here to make this tag usable early, because
+ * blk_queue_start_tag() uses q->in_flight[BLK_RW_ASYNC] to limit
+ * async I/O and to reserve tags for sync I/O.
+ *
+ * More importantly, this avoids the following I/O deadlock:
+ *
+ * - suppose there are 40 FUA requests coming to the flush queue
+ * and the queue depth is 31
+ * - 30 rqs are scheduled, so blk_queue_start_tag() can't allocate
+ * a tag for async I/O any more
+ * - all 30 rqs are completed before FLUSH_PENDING_TIMEOUT
+ * and flush_data_end_io() is called
+ * - the other rqs still can't go ahead unless
+ * q->in_flight[BLK_RW_ASYNC] is updated here; meanwhile these rqs
+ * sit in the flush data queue and the post flush rq makes
+ * no progress
+ * - only after the post flush rq is handled can all these rqs
+ * be completed
+ */
+
+ elv_completed_request(q, rq);
+
+ /* for avoiding double accounting */
+ rq->cmd_flags &= ~REQ_STARTED;
+
+ /*
* After populating an empty queue, kick it to avoid stall. Read
* the comment in flush_end_io().
*/
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ddc2eed64771..f3d27a6dee09 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1217,9 +1217,9 @@ static struct request *blk_mq_map_request(struct request_queue *q,
blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
- hctx->queued++;
- data->hctx = hctx;
- data->ctx = ctx;
+ data->hctx = alloc_data.hctx;
+ data->ctx = alloc_data.ctx;
+ data->hctx->queued++;
return rq;
}
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index f1e6dcc7a827..54d48b90de2c 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -46,6 +46,7 @@
#include "acdispat.h"
#include "acnamesp.h"
#include "actables.h"
+#include "acinterp.h"
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dsinit")
@@ -214,23 +215,17 @@ acpi_ds_initialize_objects(u32 table_index,
/* Walk entire namespace from the supplied root */
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
/*
* We don't use acpi_walk_namespace since we do not want to acquire
* the namespace reader lock.
*/
status =
acpi_ns_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX,
- ACPI_NS_WALK_UNLOCK, acpi_ds_init_one_object,
- NULL, &info, NULL);
+ 0, acpi_ds_init_one_object, NULL, &info,
+ NULL);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
}
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
status = acpi_get_table_by_index(table_index, &table);
if (ACPI_FAILURE(status)) {
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 32e9ddc0cf2b..2b3210f42a46 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -99,14 +99,11 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
"Method auto-serialization parse [%4.4s] %p\n",
acpi_ut_get_node_name(node), node));
- acpi_ex_enter_interpreter();
-
/* Create/Init a root op for the method parse tree */
op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start);
if (!op) {
- status = AE_NO_MEMORY;
- goto unlock;
+ return_ACPI_STATUS(AE_NO_MEMORY);
}
acpi_ps_set_name(op, node->name.integer);
@@ -118,8 +115,7 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL);
if (!walk_state) {
acpi_ps_free_op(op);
- status = AE_NO_MEMORY;
- goto unlock;
+ return_ACPI_STATUS(AE_NO_MEMORY);
}
status = acpi_ds_init_aml_walk(walk_state, op, node,
@@ -138,8 +134,6 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
status = acpi_ps_parse_aml(walk_state);
acpi_ps_delete_parse_tree(op);
-unlock:
- acpi_ex_exit_interpreter();
return_ACPI_STATUS(status);
}
@@ -731,26 +725,6 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
acpi_ds_method_data_delete_all(walk_state);
/*
- * If method is serialized, release the mutex and restore the
- * current sync level for this thread
- */
- if (method_desc->method.mutex) {
-
- /* Acquisition Depth handles recursive calls */
-
- method_desc->method.mutex->mutex.acquisition_depth--;
- if (!method_desc->method.mutex->mutex.acquisition_depth) {
- walk_state->thread->current_sync_level =
- method_desc->method.mutex->mutex.
- original_sync_level;
-
- acpi_os_release_mutex(method_desc->method.
- mutex->mutex.os_mutex);
- method_desc->method.mutex->mutex.thread_id = 0;
- }
- }
-
- /*
* Delete any namespace objects created anywhere within the
* namespace by the execution of this method. Unless:
* 1) This method is a module-level executable code method, in which
@@ -786,6 +760,26 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
~ACPI_METHOD_MODIFIED_NAMESPACE;
}
}
+
+ /*
+ * If method is serialized, release the mutex and restore the
+ * current sync level for this thread
+ */
+ if (method_desc->method.mutex) {
+
+ /* Acquisition Depth handles recursive calls */
+
+ method_desc->method.mutex->mutex.acquisition_depth--;
+ if (!method_desc->method.mutex->mutex.acquisition_depth) {
+ walk_state->thread->current_sync_level =
+ method_desc->method.mutex->mutex.
+ original_sync_level;
+
+ acpi_os_release_mutex(method_desc->method.
+ mutex->mutex.os_mutex);
+ method_desc->method.mutex->mutex.thread_id = 0;
+ }
+ }
}
/* Decrement the thread count on the method */
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 028b22a3154e..e36218206bb0 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -607,11 +607,9 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
}
}
- acpi_ex_exit_interpreter();
status =
acpi_ev_initialize_region
(acpi_ns_get_attached_object(node), FALSE);
- acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status)) {
/*
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 3843f1fc5dbb..75ddd160a716 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -45,6 +45,7 @@
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"
+#include "acinterp.h"
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evrgnini")
@@ -597,9 +598,11 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
}
}
+ acpi_ex_exit_interpreter();
status =
acpi_ev_execute_reg_method(region_obj,
ACPI_REG_CONNECT);
+ acpi_ex_enter_interpreter();
if (acpi_ns_locked) {
status =
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 334d3c5ba617..d1f20143bb11 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -137,7 +137,9 @@ unlock:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"**** Begin Table Object Initialization\n"));
+ acpi_ex_enter_interpreter();
status = acpi_ds_initialize_objects(table_index, node);
+ acpi_ex_exit_interpreter();
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"**** Completed Table Object Initialization\n"));
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index f0a029e68d3e..0d099a24f776 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -662,7 +662,7 @@ static int ghes_proc(struct ghes *ghes)
ghes_do_proc(ghes, ghes->estatus);
out:
ghes_clear_estatus(ghes);
- return 0;
+ return rc;
}
static void ghes_add_timer(struct ghes *ghes)
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index c983bf733ad3..bc3d914dfc3e 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -87,6 +87,7 @@ struct acpi_pci_link {
static LIST_HEAD(acpi_link_list);
static DEFINE_MUTEX(acpi_link_lock);
+static int sci_irq = -1, sci_penalty;
/* --------------------------------------------------------------------------
PCI Link Device Management
@@ -496,25 +497,13 @@ static int acpi_irq_get_penalty(int irq)
{
int penalty = 0;
- /*
- * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict
- * with PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be
- * use for PCI IRQs.
- */
- if (irq == acpi_gbl_FADT.sci_interrupt) {
- u32 type = irq_get_trigger_type(irq) & IRQ_TYPE_SENSE_MASK;
-
- if (type != IRQ_TYPE_LEVEL_LOW)
- penalty += PIRQ_PENALTY_ISA_ALWAYS;
- else
- penalty += PIRQ_PENALTY_PCI_USING;
- }
+ if (irq == sci_irq)
+ penalty += sci_penalty;
if (irq < ACPI_MAX_ISA_IRQS)
return penalty + acpi_isa_irq_penalty[irq];
- penalty += acpi_irq_pci_sharing_penalty(irq);
- return penalty;
+ return penalty + acpi_irq_pci_sharing_penalty(irq);
}
int __init acpi_irq_penalty_init(void)
@@ -619,6 +608,10 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
acpi_device_bid(link->device));
return -ENODEV;
} else {
+ if (link->irq.active < ACPI_MAX_ISA_IRQS)
+ acpi_isa_irq_penalty[link->irq.active] +=
+ PIRQ_PENALTY_PCI_USING;
+
printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
acpi_device_name(link->device),
acpi_device_bid(link->device), link->irq.active);
@@ -849,7 +842,7 @@ static int __init acpi_irq_penalty_update(char *str, int used)
continue;
if (used)
- new_penalty = acpi_irq_get_penalty(irq) +
+ new_penalty = acpi_isa_irq_penalty[irq] +
PIRQ_PENALTY_ISA_USED;
else
new_penalty = 0;
@@ -871,7 +864,7 @@ static int __init acpi_irq_penalty_update(char *str, int used)
void acpi_penalize_isa_irq(int irq, int active)
{
if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty)))
- acpi_isa_irq_penalty[irq] = acpi_irq_get_penalty(irq) +
+ acpi_isa_irq_penalty[irq] +=
(active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING);
}
@@ -881,6 +874,17 @@ bool acpi_isa_irq_available(int irq)
acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS);
}
+void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
+{
+ sci_irq = irq;
+
+ if (trigger == ACPI_MADT_TRIGGER_LEVEL &&
+ polarity == ACPI_MADT_POLARITY_ACTIVE_LOW)
+ sci_penalty = PIRQ_PENALTY_PCI_USING;
+ else
+ sci_penalty = PIRQ_PENALTY_ISA_ALWAYS;
+}
+
/*
* Over-ride default table to reserve additional IRQs for use by ISA
* e.g. acpi_irq_isa=5
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 562af94bec35..3c71b982bf2a 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1002,7 +1002,7 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal)
static struct binder_ref *binder_get_ref(struct binder_proc *proc,
- uint32_t desc)
+ u32 desc, bool need_strong_ref)
{
struct rb_node *n = proc->refs_by_desc.rb_node;
struct binder_ref *ref;
@@ -1010,12 +1010,16 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
while (n) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (desc < ref->desc)
+ if (desc < ref->desc) {
n = n->rb_left;
- else if (desc > ref->desc)
+ } else if (desc > ref->desc) {
n = n->rb_right;
- else
+ } else if (need_strong_ref && !ref->strong) {
+ binder_user_error("tried to use weak ref as strong ref\n");
+ return NULL;
+ } else {
return ref;
+ }
}
return NULL;
}
@@ -1285,7 +1289,10 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
- struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+ struct binder_ref *ref;
+
+ ref = binder_get_ref(proc, fp->handle,
+ fp->type == BINDER_TYPE_HANDLE);
if (ref == NULL) {
pr_err("transaction release %d bad handle %d\n",
@@ -1380,7 +1387,7 @@ static void binder_transaction(struct binder_proc *proc,
if (tr->target.handle) {
struct binder_ref *ref;
- ref = binder_get_ref(proc, tr->target.handle);
+ ref = binder_get_ref(proc, tr->target.handle, true);
if (ref == NULL) {
binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid);
@@ -1577,7 +1584,9 @@ static void binder_transaction(struct binder_proc *proc,
fp->type = BINDER_TYPE_HANDLE;
else
fp->type = BINDER_TYPE_WEAK_HANDLE;
+ fp->binder = 0;
fp->handle = ref->desc;
+ fp->cookie = 0;
binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
&thread->todo);
@@ -1589,7 +1598,10 @@ static void binder_transaction(struct binder_proc *proc,
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
- struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+ struct binder_ref *ref;
+
+ ref = binder_get_ref(proc, fp->handle,
+ fp->type == BINDER_TYPE_HANDLE);
if (ref == NULL) {
binder_user_error("%d:%d got transaction with invalid handle, %d\n",
@@ -1624,7 +1636,9 @@ static void binder_transaction(struct binder_proc *proc,
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
+ fp->binder = 0;
fp->handle = new_ref->desc;
+ fp->cookie = 0;
binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
trace_binder_transaction_ref_to_ref(t, ref,
new_ref);
@@ -1678,6 +1692,7 @@ static void binder_transaction(struct binder_proc *proc,
binder_debug(BINDER_DEBUG_TRANSACTION,
" fd %d -> %d\n", fp->handle, target_fd);
/* TODO: fput? */
+ fp->binder = 0;
fp->handle = target_fd;
} break;
@@ -1800,7 +1815,9 @@ static int binder_thread_write(struct binder_proc *proc,
ref->desc);
}
} else
- ref = binder_get_ref(proc, target);
+ ref = binder_get_ref(proc, target,
+ cmd == BC_ACQUIRE ||
+ cmd == BC_RELEASE);
if (ref == NULL) {
binder_user_error("%d:%d refcount change on invalid ref %d\n",
proc->pid, thread->pid, target);
@@ -1996,7 +2013,7 @@ static int binder_thread_write(struct binder_proc *proc,
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- ref = binder_get_ref(proc, target);
+ ref = binder_get_ref(proc, target, false);
if (ref == NULL) {
binder_user_error("%d:%d %s invalid ref %d\n",
proc->pid, thread->pid,
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index ba5f11cebee2..9669fc7c19df 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1418,31 +1418,34 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
* Message mode could be enforced. In this case assume that advantage
* of multiple MSIs is negated and use single MSI mode instead.
*/
- nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX,
- PCI_IRQ_MSIX | PCI_IRQ_MSI);
- if (nvec > 0) {
- if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) {
- hpriv->get_irq_vector = ahci_get_irq_vector;
- hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
- return nvec;
+ if (n_ports > 1) {
+ nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX,
+ PCI_IRQ_MSIX | PCI_IRQ_MSI);
+ if (nvec > 0) {
+ if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) {
+ hpriv->get_irq_vector = ahci_get_irq_vector;
+ hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
+ return nvec;
+ }
+
+ /*
+ * Fallback to single MSI mode if the controller
+ * enforced MRSM mode.
+ */
+ printk(KERN_INFO
+ "ahci: MRSM is on, fallback to single MSI\n");
+ pci_free_irq_vectors(pdev);
}
/*
- * Fallback to single MSI mode if the controller enforced MRSM
- * mode.
+ * -ENOSPC indicates we don't have enough vectors. Don't bother
+ * trying a single vector for any other error:
*/
- printk(KERN_INFO "ahci: MRSM is on, fallback to single MSI\n");
- pci_free_irq_vectors(pdev);
+ if (nvec < 0 && nvec != -ENOSPC)
+ return nvec;
}
/*
- * -ENOSPC indicated we don't have enough vectors. Don't bother trying
- * a single vectors for any other error:
- */
- if (nvec < 0 && nvec != -ENOSPC)
- return nvec;
-
- /*
* If the host is not capable of supporting per-port vectors, fall
* back to single MSI before finally attempting single MSI-X.
*/
@@ -1617,7 +1620,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* legacy intx interrupts */
pci_intx(pdev, 1);
}
- hpriv->irq = pdev->irq;
+ hpriv->irq = pci_irq_vector(pdev, 0);
if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
host->flags |= ATA_HOST_PARALLEL_SCAN;
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 37bf25c6b4a6..0e40967a5d6e 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -213,14 +213,16 @@ config DEBUG_DEVRES
If you are unsure about this, Say N here.
config DEBUG_TEST_DRIVER_REMOVE
- bool "Test driver remove calls during probe"
+ bool "Test driver remove calls during probe (UNSTABLE)"
depends on DEBUG_KERNEL
help
Say Y here if you want the Driver core to test driver remove functions
by calling probe, remove, probe. This tests the remove path without
having to unbind the driver or unload the driver module.
- If you are unsure about this, say N here.
+ This option is expected to find errors and may render your system
+ unusable. You should say N here unless you are explicitly looking to
+ test this functionality.
config SYS_HYPERVISOR
bool
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 811e11c82f32..0809cda93cc0 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -2954,7 +2954,7 @@ DAC960_DetectController(struct pci_dev *PCI_Device,
case DAC960_PD_Controller:
if (!request_region(Controller->IO_Address, 0x80,
Controller->FullModelName)) {
- DAC960_Error("IO port 0x%d busy for Controller at\n",
+ DAC960_Error("IO port 0x%lx busy for Controller at\n",
Controller, Controller->IO_Address);
goto Failure;
}
@@ -2990,7 +2990,7 @@ DAC960_DetectController(struct pci_dev *PCI_Device,
case DAC960_P_Controller:
if (!request_region(Controller->IO_Address, 0x80,
Controller->FullModelName)){
- DAC960_Error("IO port 0x%d busy for Controller at\n",
+ DAC960_Error("IO port 0x%lx busy for Controller at\n",
Controller, Controller->IO_Address);
goto Failure;
}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index ba405b55329f..19a16b2dbb91 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -164,7 +164,7 @@ static void sock_shutdown(struct nbd_device *nbd)
spin_lock(&nbd->sock_lock);
if (!nbd->sock) {
- spin_unlock_irq(&nbd->sock_lock);
+ spin_unlock(&nbd->sock_lock);
return;
}
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2dc5c96c186a..5545a679abd8 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -376,7 +376,7 @@ static void virtblk_config_changed(struct virtio_device *vdev)
static int init_vq(struct virtio_blk *vblk)
{
- int err = 0;
+ int err;
int i;
vq_callback_t **callbacks;
const char **names;
@@ -390,13 +390,13 @@ static int init_vq(struct virtio_blk *vblk)
if (err)
num_vqs = 1;
- vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL);
+ vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
if (!vblk->vqs)
return -ENOMEM;
- names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL);
- callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL);
- vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL);
+ names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
+ callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
+ vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
if (!names || !callbacks || !vqs) {
err = -ENOMEM;
goto out;
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index ef51c9c864c5..b6bb58c41df5 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -310,7 +310,7 @@ static int bt_ti_probe(struct platform_device *pdev)
BT_DBG("HCI device registered (hdev %p)", hdev);
dev_set_drvdata(&pdev->dev, hst);
- return err;
+ return 0;
}
static int bt_ti_remove(struct platform_device *pdev)
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 5ccb90ef0146..8f6c23c20c52 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -643,6 +643,14 @@ static const struct dmi_system_id bcm_wrong_irq_dmi_table[] = {
},
.driver_data = &acpi_active_low,
},
+ { /* Handle ThinkPad 8 tablets with BCM2E55 chipset ACPI ID */
+ .ident = "Lenovo ThinkPad 8",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 8"),
+ },
+ .driver_data = &acpi_active_low,
+ },
{ }
};
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 7010dcac9328..78751057164a 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -111,6 +111,7 @@ config OMAP_OCP2SCP
config QCOM_EBI2
bool "Qualcomm External Bus Interface 2 (EBI2)"
depends on HAS_IOMEM
+ depends on ARCH_QCOM || COMPILE_TEST
help
Say y here to enable support for the Qualcomm External Bus
Interface 2, which can be used to connect things like NAND Flash,
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 482794526e8c..d2d2c89de5b4 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -84,14 +84,14 @@ static size_t rng_buffer_size(void)
static void add_early_randomness(struct hwrng *rng)
{
- unsigned char bytes[16];
int bytes_read;
+ size_t size = min_t(size_t, 16, rng_buffer_size());
mutex_lock(&reading_mutex);
- bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
+ bytes_read = rng_get_data(rng, rng_buffer, size, 1);
mutex_unlock(&reading_mutex);
if (bytes_read > 0)
- add_device_randomness(bytes, bytes_read);
+ add_device_randomness(rng_buffer, bytes_read);
}
static inline void cleanup_rng(struct kref *kref)
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 8de61876f633..3a9149cf0110 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -813,9 +813,6 @@ int tpm_do_selftest(struct tpm_chip *chip)
continue;
}
- if (rc < TPM_HEADER_SIZE)
- return -EFAULT;
-
if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
dev_info(&chip->dev,
"TPM is disabled/deactivated (0x%X)\n", rc);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index d433b1db1fdd..5649234b7316 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1539,19 +1539,29 @@ static void remove_port_data(struct port *port)
spin_lock_irq(&port->inbuf_lock);
/* Remove unused data this port might have received. */
discard_port_data(port);
+ spin_unlock_irq(&port->inbuf_lock);
/* Remove buffers we queued up for the Host to send us data in. */
- while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
- free_buf(buf, true);
- spin_unlock_irq(&port->inbuf_lock);
+ do {
+ spin_lock_irq(&port->inbuf_lock);
+ buf = virtqueue_detach_unused_buf(port->in_vq);
+ spin_unlock_irq(&port->inbuf_lock);
+ if (buf)
+ free_buf(buf, true);
+ } while (buf);
spin_lock_irq(&port->outvq_lock);
reclaim_consumed_buffers(port);
+ spin_unlock_irq(&port->outvq_lock);
/* Free pending buffers from the out-queue. */
- while ((buf = virtqueue_detach_unused_buf(port->out_vq)))
- free_buf(buf, true);
- spin_unlock_irq(&port->outvq_lock);
+ do {
+ spin_lock_irq(&port->outvq_lock);
+ buf = virtqueue_detach_unused_buf(port->out_vq);
+ spin_unlock_irq(&port->outvq_lock);
+ if (buf)
+ free_buf(buf, true);
+ } while (buf);
}
/*
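
The remove_port_data() rework above repeats one pattern in both loops: detach a single buffer while holding the spinlock, drop the lock, and only then free the buffer, instead of freeing inside the locked section. A minimal userspace sketch of that pattern, with a pthread mutex and a hypothetical linked list standing in for the virtqueue:

#include <pthread.h>
#include <stdlib.h>

struct buf {
	struct buf *next;
	/* payload omitted */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct buf *pending;	/* list protected by list_lock */

static void drain_pending(void)
{
	struct buf *b;

	do {
		/* detach one element under the lock ... */
		pthread_mutex_lock(&list_lock);
		b = pending;
		if (b)
			pending = b->next;
		pthread_mutex_unlock(&list_lock);

		/* ... and do the potentially slow free outside it */
		if (b)
			free(b);
	} while (b);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct buf *b = malloc(sizeof(*b));

		b->next = pending;
		pending = b;
	}
	drain_pending();
	return 0;
}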
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index 190122e64a3a..85a449cf61e3 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -203,7 +203,7 @@ at91_clk_register_programmable(struct regmap *regmap,
ret = clk_hw_register(NULL, &prog->hw);
if (ret) {
kfree(prog);
- hw = &prog->hw;
+ hw = ERR_PTR(ret);
}
return hw;
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index b68bf573dcfb..8c7763fd9efc 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -502,8 +502,12 @@ static long bcm2835_pll_rate_from_divisors(unsigned long parent_rate,
static long bcm2835_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
+ struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
+ const struct bcm2835_pll_data *data = pll->data;
u32 ndiv, fdiv;
+ rate = clamp(rate, data->min_rate, data->max_rate);
+
bcm2835_pll_choose_ndiv_and_fdiv(rate, *parent_rate, &ndiv, &fdiv);
return bcm2835_pll_rate_from_divisors(*parent_rate, ndiv, fdiv, 1);
@@ -608,13 +612,6 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
u32 ana[4];
int i;
- if (rate < data->min_rate || rate > data->max_rate) {
- dev_err(cprman->dev, "%s: rate out of spec: %lu vs (%lu, %lu)\n",
- clk_hw_get_name(hw), rate,
- data->min_rate, data->max_rate);
- return -EINVAL;
- }
-
if (rate > data->max_fb_rate) {
use_fb_prediv = true;
rate /= 2;
diff --git a/drivers/clk/clk-max77686.c b/drivers/clk/clk-max77686.c
index b637f5979023..eb953d3b0b69 100644
--- a/drivers/clk/clk-max77686.c
+++ b/drivers/clk/clk-max77686.c
@@ -216,6 +216,7 @@ static int max77686_clk_probe(struct platform_device *pdev)
return -EINVAL;
}
+ drv_data->num_clks = num_clks;
drv_data->max_clk_data = devm_kcalloc(dev, num_clks,
sizeof(*drv_data->max_clk_data),
GFP_KERNEL);
diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c
index fe364e63f8de..c0e8e1f196aa 100644
--- a/drivers/clk/hisilicon/clk-hi6220.c
+++ b/drivers/clk/hisilicon/clk-hi6220.c
@@ -195,7 +195,7 @@ static void __init hi6220_clk_sys_init(struct device_node *np)
hi6220_clk_register_divider(hi6220_div_clks_sys,
ARRAY_SIZE(hi6220_div_clks_sys), clk_data);
}
-CLK_OF_DECLARE(hi6220_clk_sys, "hisilicon,hi6220-sysctrl", hi6220_clk_sys_init);
+CLK_OF_DECLARE_DRIVER(hi6220_clk_sys, "hisilicon,hi6220-sysctrl", hi6220_clk_sys_init);
/* clocks in media controller */
@@ -252,7 +252,7 @@ static void __init hi6220_clk_media_init(struct device_node *np)
hi6220_clk_register_divider(hi6220_div_clks_media,
ARRAY_SIZE(hi6220_div_clks_media), clk_data);
}
-CLK_OF_DECLARE(hi6220_clk_media, "hisilicon,hi6220-mediactrl", hi6220_clk_media_init);
+CLK_OF_DECLARE_DRIVER(hi6220_clk_media, "hisilicon,hi6220-mediactrl", hi6220_clk_media_init);
/* clocks in pmctrl */
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 380c372d528e..f042bd2a6a99 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -8,6 +8,7 @@ config COMMON_CLK_MEDIATEK
config COMMON_CLK_MT8135
bool "Clock driver for Mediatek MT8135"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
select COMMON_CLK_MEDIATEK
default ARCH_MEDIATEK
---help---
@@ -15,6 +16,7 @@ config COMMON_CLK_MT8135
config COMMON_CLK_MT8173
bool "Clock driver for Mediatek MT8173"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
select COMMON_CLK_MEDIATEK
default ARCH_MEDIATEK
---help---
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 45905fc0d75b..cecb0fdfaef6 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -305,7 +305,7 @@ static const struct of_device_id armada_3700_periph_clock_of_match[] = {
};
static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
void __iomem *reg, spinlock_t *lock,
- struct device *dev, struct clk_hw *hw)
+ struct device *dev, struct clk_hw **hw)
{
const struct clk_ops *mux_ops = NULL, *gate_ops = NULL,
*rate_ops = NULL;
@@ -329,6 +329,7 @@ static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
gate->lock = lock;
gate_ops = gate_hw->init->ops;
gate->reg = reg + (u64)gate->reg;
+ gate->flags = CLK_GATE_SET_TO_DISABLE;
}
if (data->rate_hw) {
@@ -353,13 +354,13 @@ static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
}
}
- hw = clk_hw_register_composite(dev, data->name, data->parent_names,
+ *hw = clk_hw_register_composite(dev, data->name, data->parent_names,
data->num_parents, mux_hw,
mux_ops, rate_hw, rate_ops,
gate_hw, gate_ops, CLK_IGNORE_UNUSED);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ if (IS_ERR(*hw))
+ return PTR_ERR(*hw);
return 0;
}
@@ -400,7 +401,7 @@ static int armada_3700_periph_clock_probe(struct platform_device *pdev)
spin_lock_init(&driver_data->lock);
for (i = 0; i < num_periph; i++) {
- struct clk_hw *hw = driver_data->hw_data->hws[i];
+ struct clk_hw **hw = &driver_data->hw_data->hws[i];
if (armada_3700_add_composite_clk(&data[i], reg,
&driver_data->lock, dev, hw))
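
The armada-37xx change above is an out-parameter fix: armada_3700_add_composite_clk() used to take struct clk_hw *hw by value, so assigning the result of clk_hw_register_composite() to hw only updated the local copy and the caller's hws[i] slot stayed untouched. Passing struct clk_hw **hw lets the callee write through to the caller. A self-contained C illustration of the difference, with made-up names:

#include <stdio.h>

struct clk_hw {
	int id;
};

static struct clk_hw registered_clk = { .id = 42 };

/* Buggy shape: 'hw' is a local copy, the caller never sees the assignment. */
static int add_clk_broken(struct clk_hw *hw)
{
	hw = &registered_clk;
	(void)hw;
	return 0;
}

/* Fixed shape: write through the double pointer into the caller's slot. */
static int add_clk_fixed(struct clk_hw **hw)
{
	*hw = &registered_clk;
	return 0;
}

int main(void)
{
	struct clk_hw *slot = NULL;

	add_clk_broken(slot);
	printf("broken: slot = %p\n", (void *)slot);	/* still NULL */

	add_clk_fixed(&slot);
	printf("fixed:  slot->id = %d\n", slot->id);	/* 42 */
	return 0;
}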
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 51d152f735cc..17e68a724945 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -106,6 +106,7 @@ static const struct of_device_id exynos_audss_clk_of_match[] = {
},
{ },
};
+MODULE_DEVICE_TABLE(of, exynos_audss_clk_of_match);
static void exynos_audss_clk_teardown(void)
{
diff --git a/drivers/clk/uniphier/clk-uniphier-core.c b/drivers/clk/uniphier/clk-uniphier-core.c
index 5ffb898d0839..26c53f7963a4 100644
--- a/drivers/clk/uniphier/clk-uniphier-core.c
+++ b/drivers/clk/uniphier/clk-uniphier-core.c
@@ -79,7 +79,7 @@ static int uniphier_clk_probe(struct platform_device *pdev)
hw_data->num = clk_num;
/* avoid returning NULL for unused idx */
- for (; clk_num >= 0; clk_num--)
+ while (--clk_num >= 0)
hw_data->hws[clk_num] = ERR_PTR(-EINVAL);
for (p = data; p->name; p++) {
@@ -111,6 +111,10 @@ static int uniphier_clk_remove(struct platform_device *pdev)
static const struct of_device_id uniphier_clk_match[] = {
/* System clock */
{
+ .compatible = "socionext,uniphier-sld3-clock",
+ .data = uniphier_sld3_sys_clk_data,
+ },
+ {
.compatible = "socionext,uniphier-ld4-clock",
.data = uniphier_ld4_sys_clk_data,
},
@@ -138,7 +142,7 @@ static const struct of_device_id uniphier_clk_match[] = {
.compatible = "socionext,uniphier-ld20-clock",
.data = uniphier_ld20_sys_clk_data,
},
- /* Media I/O clock */
+ /* Media I/O clock, SD clock */
{
.compatible = "socionext,uniphier-sld3-mio-clock",
.data = uniphier_sld3_mio_clk_data,
@@ -156,20 +160,20 @@ static const struct of_device_id uniphier_clk_match[] = {
.data = uniphier_sld3_mio_clk_data,
},
{
- .compatible = "socionext,uniphier-pro5-mio-clock",
- .data = uniphier_pro5_mio_clk_data,
+ .compatible = "socionext,uniphier-pro5-sd-clock",
+ .data = uniphier_pro5_sd_clk_data,
},
{
- .compatible = "socionext,uniphier-pxs2-mio-clock",
- .data = uniphier_pro5_mio_clk_data,
+ .compatible = "socionext,uniphier-pxs2-sd-clock",
+ .data = uniphier_pro5_sd_clk_data,
},
{
.compatible = "socionext,uniphier-ld11-mio-clock",
.data = uniphier_sld3_mio_clk_data,
},
{
- .compatible = "socionext,uniphier-ld20-mio-clock",
- .data = uniphier_pro5_mio_clk_data,
+ .compatible = "socionext,uniphier-ld20-sd-clock",
+ .data = uniphier_pro5_sd_clk_data,
},
/* Peripheral clock */
{
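
The uniphier-core loop change above, from for (; clk_num >= 0; clk_num--) to while (--clk_num >= 0), is an off-by-one fix: clk_num enters that loop equal to the number of array elements, so the old form wrote hw_data->hws[clk_num] one slot past the end before counting down. A tiny C sketch of the two bounds, using a hypothetical four-entry array:

#include <stdio.h>

#define NUM_CLKS 4

int main(void)
{
	int hws[NUM_CLKS];
	int clk_num = NUM_CLKS;

	/*
	 * Old form (commented out because it is the bug):
	 *
	 *	for (; clk_num >= 0; clk_num--)
	 *		hws[clk_num] = -1;	// first store hits hws[NUM_CLKS]
	 */

	/* Fixed form: pre-decrement first, so indices run NUM_CLKS-1 .. 0. */
	while (--clk_num >= 0)
		hws[clk_num] = -1;

	for (int i = 0; i < NUM_CLKS; i++)
		printf("hws[%d] = %d\n", i, hws[i]);
	return 0;
}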
diff --git a/drivers/clk/uniphier/clk-uniphier-mio.c b/drivers/clk/uniphier/clk-uniphier-mio.c
index 6aa7ec768d0b..218d20f099ce 100644
--- a/drivers/clk/uniphier/clk-uniphier-mio.c
+++ b/drivers/clk/uniphier/clk-uniphier-mio.c
@@ -93,7 +93,7 @@ const struct uniphier_clk_data uniphier_sld3_mio_clk_data[] = {
{ /* sentinel */ }
};
-const struct uniphier_clk_data uniphier_pro5_mio_clk_data[] = {
+const struct uniphier_clk_data uniphier_pro5_sd_clk_data[] = {
UNIPHIER_MIO_CLK_SD_FIXED,
UNIPHIER_MIO_CLK_SD(0, 0),
UNIPHIER_MIO_CLK_SD(1, 1),
diff --git a/drivers/clk/uniphier/clk-uniphier-mux.c b/drivers/clk/uniphier/clk-uniphier-mux.c
index 15a2f2cbe0d9..2c243a894f3b 100644
--- a/drivers/clk/uniphier/clk-uniphier-mux.c
+++ b/drivers/clk/uniphier/clk-uniphier-mux.c
@@ -42,7 +42,7 @@ static u8 uniphier_clk_mux_get_parent(struct clk_hw *hw)
struct uniphier_clk_mux *mux = to_uniphier_clk_mux(hw);
int num_parents = clk_hw_get_num_parents(hw);
int ret;
- u32 val;
+ unsigned int val;
u8 i;
ret = regmap_read(mux->regmap, mux->reg, &val);
diff --git a/drivers/clk/uniphier/clk-uniphier.h b/drivers/clk/uniphier/clk-uniphier.h
index 3ae184062388..0244dba1f4cf 100644
--- a/drivers/clk/uniphier/clk-uniphier.h
+++ b/drivers/clk/uniphier/clk-uniphier.h
@@ -115,7 +115,7 @@ extern const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[];
extern const struct uniphier_clk_data uniphier_ld11_sys_clk_data[];
extern const struct uniphier_clk_data uniphier_ld20_sys_clk_data[];
extern const struct uniphier_clk_data uniphier_sld3_mio_clk_data[];
-extern const struct uniphier_clk_data uniphier_pro5_mio_clk_data[];
+extern const struct uniphier_clk_data uniphier_pro5_sd_clk_data[];
extern const struct uniphier_clk_data uniphier_ld4_peri_clk_data[];
extern const struct uniphier_clk_data uniphier_pro4_peri_clk_data[];
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index f535f8123258..4737520ec823 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -179,6 +179,7 @@ struct _pid {
/**
* struct cpudata - Per CPU instance data storage
* @cpu: CPU number for this instance data
+ * @policy: CPUFreq policy value
* @update_util: CPUFreq utility callback information
* @update_util_set: CPUFreq utility callback is set
* @iowait_boost: iowait-related boost fraction
@@ -201,6 +202,7 @@ struct _pid {
struct cpudata {
int cpu;
+ unsigned int policy;
struct update_util_data update_util;
bool update_util_set;
@@ -1142,10 +1144,8 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
-static void intel_pstate_set_min_pstate(struct cpudata *cpu)
+static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
- int pstate = cpu->pstate.min_pstate;
-
trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
cpu->pstate.current_pstate = pstate;
/*
@@ -1157,6 +1157,20 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
pstate_funcs.get_val(cpu, pstate));
}
+static void intel_pstate_set_min_pstate(struct cpudata *cpu)
+{
+ intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
+}
+
+static void intel_pstate_max_within_limits(struct cpudata *cpu)
+{
+ int min_pstate, max_pstate;
+
+ update_turbo_state();
+ intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
+ intel_pstate_set_pstate(cpu, max_pstate);
+}
+
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
cpu->pstate.min_pstate = pstate_funcs.get_min();
@@ -1325,7 +1339,8 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
from = cpu->pstate.current_pstate;
- target_pstate = pstate_funcs.get_target_pstate(cpu);
+ target_pstate = cpu->policy == CPUFREQ_POLICY_PERFORMANCE ?
+ cpu->pstate.turbo_pstate : pstate_funcs.get_target_pstate(cpu);
intel_pstate_update_pstate(cpu, target_pstate);
@@ -1491,7 +1506,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
policy->cpuinfo.max_freq, policy->max);
- cpu = all_cpu_data[0];
+ cpu = all_cpu_data[policy->cpu];
+ cpu->policy = policy->policy;
+
if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
policy->max < policy->cpuinfo.max_freq &&
policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
@@ -1499,7 +1516,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
policy->max = policy->cpuinfo.max_freq;
}
- if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
limits = &performance_limits;
if (policy->max >= policy->cpuinfo.max_freq) {
pr_debug("set performance\n");
@@ -1535,6 +1552,15 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
out:
+ if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ /*
+ * NOHZ_FULL CPUs need this as the governor callback may not
+ * be invoked on them.
+ */
+ intel_pstate_clear_update_util_hook(policy->cpu);
+ intel_pstate_max_within_limits(cpu);
+ }
+
intel_pstate_set_update_util_hook(policy->cpu);
intel_pstate_hwp_set_policy(policy);
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index daadd20aa936..3e2ab3b14eea 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -14,7 +14,7 @@ if DEV_DAX
config DEV_DAX_PMEM
tristate "PMEM DAX: direct access to persistent memory"
- depends on NVDIMM_DAX
+ depends on LIBNVDIMM && NVDIMM_DAX
default DEV_DAX
help
Support raw access to persistent memory. Note that this
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 9630d8837ba9..4a15fa5df98b 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -44,7 +44,6 @@ static void dax_pmem_percpu_exit(void *data)
dev_dbg(dax_pmem->dev, "%s\n", __func__);
percpu_ref_exit(ref);
- wait_for_completion(&dax_pmem->cmp);
}
static void dax_pmem_percpu_kill(void *data)
@@ -54,6 +53,7 @@ static void dax_pmem_percpu_kill(void *data)
dev_dbg(dax_pmem->dev, "%s\n", __func__);
percpu_ref_kill(ref);
+ wait_for_completion(&dax_pmem->cmp);
}
static int dax_pmem_probe(struct device *dev)
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 3a7bf009c21c..0212af7997d9 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -161,9 +161,6 @@ dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
if (WARN_ON(timeout < 0))
return -EINVAL;
- if (timeout == 0)
- return dma_fence_is_signaled(fence);
-
trace_dma_fence_wait_start(fence);
ret = fence->ops->wait(fence, intr, timeout);
trace_dma_fence_wait_end(fence);
@@ -339,18 +336,20 @@ dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
* @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
*
* Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
- * remaining timeout in jiffies on success.
+ * remaining timeout in jiffies on success. If timeout is zero, one is
+ * returned when the fence is already signaled, for consistency with
+ * other functions taking a jiffies timeout.
*/
signed long
dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
struct default_wait_cb cb;
unsigned long flags;
- signed long ret = timeout;
+ signed long ret = timeout ? timeout : 1;
bool was_set;
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- return timeout;
+ return ret;
spin_lock_irqsave(fence->lock, flags);
@@ -403,14 +402,18 @@ out:
EXPORT_SYMBOL(dma_fence_default_wait);
static bool
-dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count)
+dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
+ uint32_t *idx)
{
int i;
for (i = 0; i < count; ++i) {
struct dma_fence *fence = fences[i];
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ if (idx)
+ *idx = i;
return true;
+ }
}
return false;
}
@@ -422,6 +425,8 @@ dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count)
* @count: [in] number of fences to wait on
* @intr: [in] if true, do an interruptible wait
* @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ * @idx: [out] the first signaled fence index, meaningful only on
+ * positive return
*
* Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
* interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
@@ -433,7 +438,7 @@ dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count)
*/
signed long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
- bool intr, signed long timeout)
+ bool intr, signed long timeout, uint32_t *idx)
{
struct default_wait_cb *cb;
signed long ret = timeout;
@@ -444,8 +449,11 @@ dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
if (timeout == 0) {
for (i = 0; i < count; ++i)
- if (dma_fence_is_signaled(fences[i]))
+ if (dma_fence_is_signaled(fences[i])) {
+ if (idx)
+ *idx = i;
return 1;
+ }
return 0;
}
@@ -468,6 +476,8 @@ dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
if (dma_fence_add_callback(fence, &cb[i].base,
dma_fence_default_wait_cb)) {
/* This fence is already signaled */
+ if (idx)
+ *idx = i;
goto fence_rm_cb;
}
}
@@ -478,7 +488,7 @@ dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
else
set_current_state(TASK_UNINTERRUPTIBLE);
- if (dma_fence_test_signaled_any(fences, count))
+ if (dma_fence_test_signaled_any(fences, count, idx))
break;
ret = schedule_timeout(ret);
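
With the new out-parameter, dma_fence_wait_any_timeout() reports which fence satisfied the wait. A minimal caller sketch against the signature shown above; the fence array, count and timeout value here are illustrative, not taken from any driver:

/*
 * Illustrative caller of the extended dma_fence_wait_any_timeout().
 * The index is only meaningful when the return value is positive.
 */
#include <linux/dma-fence.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/types.h>

static int wait_for_first_fence(struct dma_fence **fences, uint32_t count)
{
	uint32_t first = ~0;
	signed long r;

	r = dma_fence_wait_any_timeout(fences, count, true,
				       msecs_to_jiffies(100), &first);
	if (r < 0)
		return r;		/* e.g. -ERESTARTSYS on a signal */
	if (r == 0)
		return -ETIMEDOUT;	/* nothing signaled in time */

	pr_info("fence %u signaled first\n", first);
	return 0;
}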
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 7ed56f3edfb7..393817e849ed 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -370,10 +370,7 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
{
struct dma_fence *fence;
unsigned seq, shared_count, i = 0;
- long ret = timeout;
-
- if (!timeout)
- return reservation_object_test_signaled_rcu(obj, wait_all);
+ long ret = timeout ? timeout : 1;
retry:
fence = NULL;
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 82e0ca4dd0c1..69c5ff36e2f9 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -84,7 +84,7 @@ static inline struct sync_pt *dma_fence_to_sync_pt(struct dma_fence *fence)
* Creates a new sync_timeline. Returns the sync_timeline object or NULL in
* case of error.
*/
-struct sync_timeline *sync_timeline_create(const char *name)
+static struct sync_timeline *sync_timeline_create(const char *name)
{
struct sync_timeline *obj;
@@ -316,8 +316,8 @@ static long sw_sync_ioctl_create_fence(struct sync_timeline *obj,
}
sync_file = sync_file_create(&pt->base);
+ dma_fence_put(&pt->base);
if (!sync_file) {
- dma_fence_put(&pt->base);
err = -ENOMEM;
goto err;
}
diff --git a/drivers/extcon/extcon-qcom-spmi-misc.c b/drivers/extcon/extcon-qcom-spmi-misc.c
index ca957a5f4291..b8cde096a808 100644
--- a/drivers/extcon/extcon-qcom-spmi-misc.c
+++ b/drivers/extcon/extcon-qcom-spmi-misc.c
@@ -51,7 +51,7 @@ static void qcom_usb_extcon_detect_cable(struct work_struct *work)
if (ret)
return;
- extcon_set_state(info->edev, EXTCON_USB_HOST, !id);
+ extcon_set_state_sync(info->edev, EXTCON_USB_HOST, !id);
}
static irqreturn_t qcom_usb_irq_handler(int irq, void *dev_id)
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 309311b1faae..15475892af0c 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -73,13 +73,13 @@ struct rfc2734_header {
#define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30)
#define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff))
-#define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16)
+#define fwnet_get_hdr_dg_size(h) ((((h)->w0 & 0x0fff0000) >> 16) + 1)
#define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff))
#define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16)
-#define fwnet_set_hdr_lf(lf) ((lf) << 30)
+#define fwnet_set_hdr_lf(lf) ((lf) << 30)
#define fwnet_set_hdr_ether_type(et) (et)
-#define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16)
+#define fwnet_set_hdr_dg_size(dgs) (((dgs) - 1) << 16)
#define fwnet_set_hdr_fg_off(fgo) (fgo)
#define fwnet_set_hdr_dgl(dgl) ((dgl) << 16)
@@ -578,6 +578,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
int retval;
u16 ether_type;
+ if (len <= RFC2374_UNFRAG_HDR_SIZE)
+ return 0;
+
hdr.w0 = be32_to_cpu(buf[0]);
lf = fwnet_get_hdr_lf(&hdr);
if (lf == RFC2374_HDR_UNFRAG) {
@@ -602,7 +605,12 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
return fwnet_finish_incoming_packet(net, skb, source_node_id,
is_broadcast, ether_type);
}
+
/* A datagram fragment has been received, now the fun begins. */
+
+ if (len <= RFC2374_FRAG_HDR_SIZE)
+ return 0;
+
hdr.w1 = ntohl(buf[1]);
buf += 2;
len -= RFC2374_FRAG_HDR_SIZE;
@@ -614,7 +622,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
fg_off = fwnet_get_hdr_fg_off(&hdr);
}
datagram_label = fwnet_get_hdr_dgl(&hdr);
- dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */
+ dg_size = fwnet_get_hdr_dg_size(&hdr);
+
+ if (fg_off + len > dg_size)
+ return 0;
spin_lock_irqsave(&dev->lock, flags);
@@ -722,6 +733,22 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
fw_send_response(card, r, rcode);
}
+static int gasp_source_id(__be32 *p)
+{
+ return be32_to_cpu(p[0]) >> 16;
+}
+
+static u32 gasp_specifier_id(__be32 *p)
+{
+ return (be32_to_cpu(p[0]) & 0xffff) << 8 |
+ (be32_to_cpu(p[1]) & 0xff000000) >> 24;
+}
+
+static u32 gasp_version(__be32 *p)
+{
+ return be32_to_cpu(p[1]) & 0xffffff;
+}
+
static void fwnet_receive_broadcast(struct fw_iso_context *context,
u32 cycle, size_t header_length, void *header, void *data)
{
@@ -731,9 +758,6 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
__be32 *buf_ptr;
int retval;
u32 length;
- u16 source_node_id;
- u32 specifier_id;
- u32 ver;
unsigned long offset;
unsigned long flags;
@@ -750,22 +774,17 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
spin_unlock_irqrestore(&dev->lock, flags);
- specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8
- | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24;
- ver = be32_to_cpu(buf_ptr[1]) & 0xffffff;
- source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;
-
- if (specifier_id == IANA_SPECIFIER_ID &&
- (ver == RFC2734_SW_VERSION
+ if (length > IEEE1394_GASP_HDR_SIZE &&
+ gasp_specifier_id(buf_ptr) == IANA_SPECIFIER_ID &&
+ (gasp_version(buf_ptr) == RFC2734_SW_VERSION
#if IS_ENABLED(CONFIG_IPV6)
- || ver == RFC3146_SW_VERSION
+ || gasp_version(buf_ptr) == RFC3146_SW_VERSION
#endif
- )) {
- buf_ptr += 2;
- length -= IEEE1394_GASP_HDR_SIZE;
- fwnet_incoming_packet(dev, buf_ptr, length, source_node_id,
+ ))
+ fwnet_incoming_packet(dev, buf_ptr + 2,
+ length - IEEE1394_GASP_HDR_SIZE,
+ gasp_source_id(buf_ptr),
context->card->generation, true);
- }
packet.payload_length = dev->rcv_buffer_size;
packet.interrupt = 1;
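
The new gasp_*() helpers above simply name the bit layout of the two-quadlet GASP header: the source node ID sits in the top half of the first quadlet, the specifier ID spans its low 16 bits plus the top byte of the second quadlet, and the version occupies the remaining 24 bits. A stand-alone sketch of the same extraction, assuming the quadlets have already been converted to host order (the sample values are illustrative):

/* Stand-alone illustration of the GASP header fields extracted above. */
#include <stdint.h>
#include <stdio.h>

struct gasp_hdr {
	uint16_t source_node_id;
	uint32_t specifier_id;
	uint32_t version;
};

static struct gasp_hdr parse_gasp(uint32_t q0, uint32_t q1)
{
	struct gasp_hdr h;

	h.source_node_id = q0 >> 16;
	h.specifier_id = (q0 & 0xffff) << 8 | (q1 & 0xff000000) >> 24;
	h.version = q1 & 0xffffff;
	return h;
}

int main(void)
{
	/* Illustrative quadlets: IANA specifier 0x00005e, version 0x000001 */
	struct gasp_hdr h = parse_gasp(0xffc00000, 0x5e000001);

	printf("src=0x%04x spec=0x%06x ver=0x%06x\n",
	       h.source_node_id, h.specifier_id, h.version);
	return 0;
}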
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 26ee00f6bd58..d011cb89d25e 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -284,7 +284,7 @@ config GPIO_MM_LANTIQ
config GPIO_MOCKUP
tristate "GPIO Testing Driver"
- depends on GPIOLIB
+ depends on GPIOLIB && SYSFS
select GPIO_SYSFS
help
This enables GPIO Testing driver, which provides a way to test GPIO
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index 9457e2022bf6..dc37dbe4b46d 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -219,6 +219,7 @@ static const struct of_device_id ath79_gpio_of_match[] = {
{ .compatible = "qca,ar9340-gpio" },
{},
};
+MODULE_DEVICE_TABLE(of, ath79_gpio_of_match);
static int ath79_gpio_probe(struct platform_device *pdev)
{
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 425501c39527..793518a30afe 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -239,7 +239,7 @@ static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int irq,
irq_hw_number_t hwirq)
{
irq_set_chip_data(irq, h->host_data);
- irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_level_irq);
+ irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_edge_irq);
return 0;
}
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index cd5dc27320a2..1ed6132b993c 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -293,10 +293,10 @@ static void mvebu_gpio_irq_ack(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mvebu_gpio_chip *mvchip = gc->private;
- u32 mask = ~(1 << (d->irq - gc->irq_base));
+ u32 mask = d->mask;
irq_gc_lock(gc);
- writel_relaxed(mask, mvebu_gpioreg_edge_cause(mvchip));
+ writel_relaxed(~mask, mvebu_gpioreg_edge_cause(mvchip));
irq_gc_unlock(gc);
}
@@ -305,7 +305,7 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mvebu_gpio_chip *mvchip = gc->private;
struct irq_chip_type *ct = irq_data_get_chip_type(d);
- u32 mask = 1 << (d->irq - gc->irq_base);
+ u32 mask = d->mask;
irq_gc_lock(gc);
ct->mask_cache_priv &= ~mask;
@@ -319,8 +319,7 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mvebu_gpio_chip *mvchip = gc->private;
struct irq_chip_type *ct = irq_data_get_chip_type(d);
-
- u32 mask = 1 << (d->irq - gc->irq_base);
+ u32 mask = d->mask;
irq_gc_lock(gc);
ct->mask_cache_priv |= mask;
@@ -333,8 +332,7 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mvebu_gpio_chip *mvchip = gc->private;
struct irq_chip_type *ct = irq_data_get_chip_type(d);
-
- u32 mask = 1 << (d->irq - gc->irq_base);
+ u32 mask = d->mask;
irq_gc_lock(gc);
ct->mask_cache_priv &= ~mask;
@@ -347,8 +345,7 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mvebu_gpio_chip *mvchip = gc->private;
struct irq_chip_type *ct = irq_data_get_chip_type(d);
-
- u32 mask = 1 << (d->irq - gc->irq_base);
+ u32 mask = d->mask;
irq_gc_lock(gc);
ct->mask_cache_priv |= mask;
@@ -462,7 +459,7 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc)
for (i = 0; i < mvchip->chip.ngpio; i++) {
int irq;
- irq = mvchip->irqbase + i;
+ irq = irq_find_mapping(mvchip->domain, i);
if (!(cause & (1 << i)))
continue;
@@ -655,6 +652,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
struct irq_chip_type *ct;
struct clk *clk;
unsigned int ngpios;
+ bool have_irqs;
int soc_variant;
int i, cpu, id;
int err;
@@ -665,6 +663,9 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
else
soc_variant = MVEBU_GPIO_SOC_VARIANT_ORION;
+ /* Some gpio controllers do not provide irq support */
+ have_irqs = of_irq_count(np) != 0;
+
mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip),
GFP_KERNEL);
if (!mvchip)
@@ -697,7 +698,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip->chip.get = mvebu_gpio_get;
mvchip->chip.direction_output = mvebu_gpio_direction_output;
mvchip->chip.set = mvebu_gpio_set;
- mvchip->chip.to_irq = mvebu_gpio_to_irq;
+ if (have_irqs)
+ mvchip->chip.to_irq = mvebu_gpio_to_irq;
mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK;
mvchip->chip.ngpio = ngpios;
mvchip->chip.can_sleep = false;
@@ -758,34 +760,30 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip);
/* Some gpio controllers do not provide irq support */
- if (!of_irq_count(np))
+ if (!have_irqs)
return 0;
- /* Setup the interrupt handlers. Each chip can have up to 4
- * interrupt handlers, with each handler dealing with 8 GPIO
- * pins. */
- for (i = 0; i < 4; i++) {
- int irq = platform_get_irq(pdev, i);
-
- if (irq < 0)
- continue;
- irq_set_chained_handler_and_data(irq, mvebu_gpio_irq_handler,
- mvchip);
- }
-
- mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1);
- if (mvchip->irqbase < 0) {
- dev_err(&pdev->dev, "no irqs\n");
- return mvchip->irqbase;
+ mvchip->domain =
+ irq_domain_add_linear(np, ngpios, &irq_generic_chip_ops, NULL);
+ if (!mvchip->domain) {
+ dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
+ mvchip->chip.label);
+ return -ENODEV;
}
- gc = irq_alloc_generic_chip("mvebu_gpio_irq", 2, mvchip->irqbase,
- mvchip->membase, handle_level_irq);
- if (!gc) {
- dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n");
- return -ENOMEM;
+ err = irq_alloc_domain_generic_chips(
+ mvchip->domain, ngpios, 2, np->name, handle_level_irq,
+ IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_LEVEL, 0, 0);
+ if (err) {
+ dev_err(&pdev->dev, "couldn't allocate irq chips %s (DT).\n",
+ mvchip->chip.label);
+ goto err_domain;
}
+ /* NOTE: The common accessors cannot be used because of the percpu
+ * access to the mask registers
+ */
+ gc = irq_get_domain_generic_chip(mvchip->domain, 0);
gc->private = mvchip;
ct = &gc->chip_types[0];
ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW;
@@ -803,27 +801,23 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
ct->handler = handle_edge_irq;
ct->chip.name = mvchip->chip.label;
- irq_setup_generic_chip(gc, IRQ_MSK(ngpios), 0,
- IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE);
+ /* Setup the interrupt handlers. Each chip can have up to 4
+ * interrupt handlers, with each handler dealing with 8 GPIO
+ * pins.
+ */
+ for (i = 0; i < 4; i++) {
+ int irq = platform_get_irq(pdev, i);
- /* Setup irq domain on top of the generic chip. */
- mvchip->domain = irq_domain_add_simple(np, mvchip->chip.ngpio,
- mvchip->irqbase,
- &irq_domain_simple_ops,
- mvchip);
- if (!mvchip->domain) {
- dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
- mvchip->chip.label);
- err = -ENODEV;
- goto err_generic_chip;
+ if (irq < 0)
+ continue;
+ irq_set_chained_handler_and_data(irq, mvebu_gpio_irq_handler,
+ mvchip);
}
return 0;
-err_generic_chip:
- irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST,
- IRQ_LEVEL | IRQ_NOPROBE);
- kfree(gc);
+err_domain:
+ irq_domain_remove(mvchip->domain);
return err;
}
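
The gpio-mvebu conversion above drops the manually allocated descriptor range and irq_domain_add_simple() in favour of a linear domain that owns its generic chips. A minimal sketch of that pattern, with "my_gpio", "np" and "ngpios" as illustrative placeholders and error handling trimmed; a real driver also fills in the second chip type and registers the chained handlers, as the patch does:

/*
 * Sketch of the linear-domain + generic-chip setup: one generic chip
 * covering all ngpios, with two chip types (level and edge).
 */
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_domain *my_gpio_irq_setup(struct device_node *np,
					    unsigned int ngpios)
{
	struct irq_domain *domain;
	struct irq_chip_generic *gc;
	int err;

	domain = irq_domain_add_linear(np, ngpios,
				       &irq_generic_chip_ops, NULL);
	if (!domain)
		return NULL;

	err = irq_alloc_domain_generic_chips(domain, ngpios, 2, np->name,
					     handle_level_irq,
					     IRQ_NOREQUEST | IRQ_NOPROBE |
					     IRQ_LEVEL, 0, 0);
	if (err) {
		irq_domain_remove(domain);
		return NULL;
	}

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->chip_types[0].type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW;
	gc->chip_types[0].handler = handle_level_irq;

	return domain;
}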
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index b9daa0bf32a4..ee1724806f46 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -308,8 +308,10 @@ static int mxs_gpio_probe(struct platform_device *pdev)
writel(~0U, port->base + PINCTRL_IRQSTAT(port) + MXS_CLR);
irq_base = irq_alloc_descs(-1, 0, 32, numa_node_id());
- if (irq_base < 0)
- return irq_base;
+ if (irq_base < 0) {
+ err = irq_base;
+ goto out_iounmap;
+ }
port->domain = irq_domain_add_legacy(np, 32, irq_base, 0,
&irq_domain_simple_ops, NULL);
@@ -349,6 +351,8 @@ out_irqdomain_remove:
irq_domain_remove(port->domain);
out_irqdesc_free:
irq_free_descs(irq_base, 32);
+out_iounmap:
+ iounmap(port->base);
return err;
}
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index e7d422a6b90b..5b0042776ec7 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -409,7 +409,7 @@ static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
* 801/1801/1600, bits are cleared when read.
* Edge detect register is not present on 801/1600/1801
*/
- if (stmpe->partnum != STMPE801 || stmpe->partnum != STMPE1600 ||
+ if (stmpe->partnum != STMPE801 && stmpe->partnum != STMPE1600 &&
stmpe->partnum != STMPE1801) {
stmpe_reg_write(stmpe, statmsbreg + i, status[i]);
stmpe_reg_write(stmpe,
diff --git a/drivers/gpio/gpio-ts4800.c b/drivers/gpio/gpio-ts4800.c
index 99256115bea5..c2a80b4cbf32 100644
--- a/drivers/gpio/gpio-ts4800.c
+++ b/drivers/gpio/gpio-ts4800.c
@@ -66,6 +66,7 @@ static const struct of_device_id ts4800_gpio_of_match[] = {
{ .compatible = "technologic,ts4800-gpio", },
{},
};
+MODULE_DEVICE_TABLE(of, ts4800_gpio_of_match);
static struct platform_driver ts4800_gpio_driver = {
.driver = {
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 58ece201b8e6..72a4b326fd0d 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -653,14 +653,17 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
{
int idx, i;
unsigned int irq_flags;
+ int ret = -ENOENT;
for (i = 0, idx = 0; idx <= index; i++) {
struct acpi_gpio_info info;
struct gpio_desc *desc;
desc = acpi_get_gpiod_by_index(adev, NULL, i, &info);
- if (IS_ERR(desc))
+ if (IS_ERR(desc)) {
+ ret = PTR_ERR(desc);
break;
+ }
if (info.gpioint && idx++ == index) {
int irq = gpiod_to_irq(desc);
@@ -679,7 +682,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
}
}
- return -ENOENT;
+ return ret;
}
EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_get);
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index ecad3f0e3b77..193f15d50bba 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -26,14 +26,18 @@
#include "gpiolib.h"
-static int of_gpiochip_match_node(struct gpio_chip *chip, void *data)
+static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data)
{
- return chip->gpiodev->dev.of_node == data;
+ struct of_phandle_args *gpiospec = data;
+
+ return chip->gpiodev->dev.of_node == gpiospec->np &&
+ chip->of_xlate(chip, gpiospec, NULL) >= 0;
}
-static struct gpio_chip *of_find_gpiochip_by_node(struct device_node *np)
+static struct gpio_chip *of_find_gpiochip_by_xlate(
+ struct of_phandle_args *gpiospec)
{
- return gpiochip_find(np, of_gpiochip_match_node);
+ return gpiochip_find(gpiospec, of_gpiochip_match_node_and_xlate);
}
static struct gpio_desc *of_xlate_and_get_gpiod_flags(struct gpio_chip *chip,
@@ -79,7 +83,7 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
return ERR_PTR(ret);
}
- chip = of_find_gpiochip_by_node(gpiospec.np);
+ chip = of_find_gpiochip_by_xlate(&gpiospec);
if (!chip) {
desc = ERR_PTR(-EPROBE_DEFER);
goto out;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index f0fc3a0d37c8..93ed0e00c578 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -21,6 +21,7 @@
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/anon_inodes.h>
+#include <linux/file.h>
#include <linux/kfifo.h>
#include <linux/poll.h>
#include <linux/timekeeping.h>
@@ -333,6 +334,13 @@ struct linehandle_state {
u32 numdescs;
};
+#define GPIOHANDLE_REQUEST_VALID_FLAGS \
+ (GPIOHANDLE_REQUEST_INPUT | \
+ GPIOHANDLE_REQUEST_OUTPUT | \
+ GPIOHANDLE_REQUEST_ACTIVE_LOW | \
+ GPIOHANDLE_REQUEST_OPEN_DRAIN | \
+ GPIOHANDLE_REQUEST_OPEN_SOURCE)
+
static long linehandle_ioctl(struct file *filep, unsigned int cmd,
unsigned long arg)
{
@@ -344,6 +352,8 @@ static long linehandle_ioctl(struct file *filep, unsigned int cmd,
if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
int val;
+ memset(&ghd, 0, sizeof(ghd));
+
/* TODO: check if descriptors are really input */
for (i = 0; i < lh->numdescs; i++) {
val = gpiod_get_value_cansleep(lh->descs[i]);
@@ -414,6 +424,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
{
struct gpiohandle_request handlereq;
struct linehandle_state *lh;
+ struct file *file;
int fd, i, ret;
if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
@@ -444,6 +455,17 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
u32 lflags = handlereq.flags;
struct gpio_desc *desc;
+ if (offset >= gdev->ngpio) {
+ ret = -EINVAL;
+ goto out_free_descs;
+ }
+
+ /* Return an error if an unknown flag is set */
+ if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) {
+ ret = -EINVAL;
+ goto out_free_descs;
+ }
+
desc = &gdev->descs[offset];
ret = gpiod_request(desc, lh->label);
if (ret)
@@ -479,26 +501,41 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
i--;
lh->numdescs = handlereq.lines;
- fd = anon_inode_getfd("gpio-linehandle",
- &linehandle_fileops,
- lh,
- O_RDONLY | O_CLOEXEC);
+ fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
if (fd < 0) {
ret = fd;
goto out_free_descs;
}
+ file = anon_inode_getfile("gpio-linehandle",
+ &linehandle_fileops,
+ lh,
+ O_RDONLY | O_CLOEXEC);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto out_put_unused_fd;
+ }
+
handlereq.fd = fd;
if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
- ret = -EFAULT;
- goto out_free_descs;
+ /*
+ * fput() will trigger the release() callback, so do not go onto
+ * the regular error cleanup path here.
+ */
+ fput(file);
+ put_unused_fd(fd);
+ return -EFAULT;
}
+ fd_install(fd, file);
+
dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
lh->numdescs);
return 0;
+out_put_unused_fd:
+ put_unused_fd(fd);
out_free_descs:
for (; i >= 0; i--)
gpiod_free(lh->descs[i]);
@@ -536,6 +573,10 @@ struct lineevent_state {
struct mutex read_lock;
};
+#define GPIOEVENT_REQUEST_VALID_FLAGS \
+ (GPIOEVENT_REQUEST_RISING_EDGE | \
+ GPIOEVENT_REQUEST_FALLING_EDGE)
+
static unsigned int lineevent_poll(struct file *filep,
struct poll_table_struct *wait)
{
@@ -623,6 +664,8 @@ static long lineevent_ioctl(struct file *filep, unsigned int cmd,
if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
int val;
+ memset(&ghd, 0, sizeof(ghd));
+
val = gpiod_get_value_cansleep(le->desc);
if (val < 0)
return val;
@@ -695,6 +738,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
struct gpioevent_request eventreq;
struct lineevent_state *le;
struct gpio_desc *desc;
+ struct file *file;
u32 offset;
u32 lflags;
u32 eflags;
@@ -726,6 +770,18 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
lflags = eventreq.handleflags;
eflags = eventreq.eventflags;
+ if (offset >= gdev->ngpio) {
+ ret = -EINVAL;
+ goto out_free_label;
+ }
+
+ /* Return an error if an unknown flag is set */
+ if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
+ (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS)) {
+ ret = -EINVAL;
+ goto out_free_label;
+ }
+
/* This is just wrong: we don't look for events on output lines */
if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
ret = -EINVAL;
@@ -777,23 +833,38 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
if (ret)
goto out_free_desc;
- fd = anon_inode_getfd("gpio-event",
- &lineevent_fileops,
- le,
- O_RDONLY | O_CLOEXEC);
+ fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
if (fd < 0) {
ret = fd;
goto out_free_irq;
}
+ file = anon_inode_getfile("gpio-event",
+ &lineevent_fileops,
+ le,
+ O_RDONLY | O_CLOEXEC);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto out_put_unused_fd;
+ }
+
eventreq.fd = fd;
if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
- ret = -EFAULT;
- goto out_free_irq;
+ /*
+ * fput() will trigger the release() callback, so do not go onto
+ * the regular error cleanup path here.
+ */
+ fput(file);
+ put_unused_fd(fd);
+ return -EFAULT;
}
+ fd_install(fd, file);
+
return 0;
+out_put_unused_fd:
+ put_unused_fd(fd);
out_free_irq:
free_irq(le->irq, le);
out_free_desc:
@@ -823,6 +894,8 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (cmd == GPIO_GET_CHIPINFO_IOCTL) {
struct gpiochip_info chipinfo;
+ memset(&chipinfo, 0, sizeof(chipinfo));
+
strncpy(chipinfo.name, dev_name(&gdev->dev),
sizeof(chipinfo.name));
chipinfo.name[sizeof(chipinfo.name)-1] = '\0';
@@ -839,7 +912,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
return -EFAULT;
- if (lineinfo.line_offset > gdev->ngpio)
+ if (lineinfo.line_offset >= gdev->ngpio)
return -EINVAL;
desc = &gdev->descs[lineinfo.line_offset];
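
The chardev changes above replace anon_inode_getfd() with a reserve/attach/publish sequence so that a failing copy_to_user() can back out before the descriptor ever becomes visible to userspace. A minimal sketch of that sequence; "my_fops", "priv" and "uarg" are illustrative placeholders, not names from gpiolib.c:

/*
 * Race-free fd publication: reserve an fd number, create the anonymous
 * file, copy the number to userspace, and only then install the file.
 */
#include <linux/anon_inodes.h>
#include <linux/err.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

static int publish_anon_fd(const struct file_operations *my_fops,
			   void *priv, int __user *uarg)
{
	struct file *file;
	int fd;

	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = anon_inode_getfile("my-handle", my_fops, priv,
				  O_RDONLY | O_CLOEXEC);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}

	if (copy_to_user(uarg, &fd, sizeof(fd))) {
		/* fput() drops the file; the fd was never installed. */
		fput(file);
		put_unused_fd(fd);
		return -EFAULT;
	}

	fd_install(fd, file);	/* point of no return */
	return 0;
}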
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 483059a22b1b..95fc0410e129 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -12,6 +12,7 @@ menuconfig DRM
select I2C
select I2C_ALGOBIT
select DMA_SHARED_BUFFER
+ select SYNC_FILE
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
introduced in XFree86 4.0. If you say Y here, you need to select
@@ -33,6 +34,20 @@ config DRM_DP_AUX_CHARDEV
read and write values to arbitrary DPCD registers on the DP aux
channel.
+config DRM_DEBUG_MM
+ bool "Insert extra checks and debug info into the DRM range managers"
+ default n
+ depends on DRM=y
+ depends on STACKTRACE_SUPPORT
+ select STACKDEPOT
+ help
+ Enable allocation tracking of memory manager and leak detection on
+ shutdown.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
config DRM_KMS_HELPER
tristate
depends on DRM
@@ -223,6 +238,8 @@ source "drivers/gpu/drm/hisilicon/Kconfig"
source "drivers/gpu/drm/mediatek/Kconfig"
+source "drivers/gpu/drm/zte/Kconfig"
+
# Keep legacy drivers last
menuconfig DRM_LEGACY
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 74579d2e796e..883f3e75cfbc 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -15,7 +15,8 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_modeset_lock.o drm_atomic.o drm_bridge.o \
drm_framebuffer.o drm_connector.o drm_blend.o \
drm_encoder.o drm_mode_object.o drm_property.o \
- drm_plane.o drm_color_mgmt.o
+ drm_plane.o drm_color_mgmt.o drm_print.o \
+ drm_dumb_buffers.o drm_mode_config.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@@ -87,3 +88,4 @@ obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
obj-$(CONFIG_DRM_ARCPGU)+= arc/
obj-y += hisilicon/
+obj-$(CONFIG_DRM_ZTE) += zte/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 2ec7b3baeec2..c2b8496cdf63 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1212,6 +1212,8 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index a024217896fd..78da52f90099 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -530,7 +530,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
&duplicates);
if (unlikely(r != 0)) {
- DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
goto error_free_pages;
}
@@ -1140,6 +1141,180 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
}
/**
+ * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
+ *
+ * @adev: amdgpu device
+ * @filp: file private
+ * @user: drm_amdgpu_fence copied from user space
+ */
+static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
+ struct drm_file *filp,
+ struct drm_amdgpu_fence *user)
+{
+ struct amdgpu_ring *ring;
+ struct amdgpu_ctx *ctx;
+ struct dma_fence *fence;
+ int r;
+
+ r = amdgpu_cs_get_ring(adev, user->ip_type, user->ip_instance,
+ user->ring, &ring);
+ if (r)
+ return ERR_PTR(r);
+
+ ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
+ if (ctx == NULL)
+ return ERR_PTR(-EINVAL);
+
+ fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
+ amdgpu_ctx_put(ctx);
+
+ return fence;
+}
+
+/**
+ * amdgpu_cs_wait_all_fence - wait on all fences to signal
+ *
+ * @adev: amdgpu device
+ * @filp: file private
+ * @wait: wait parameters
+ * @fences: array of drm_amdgpu_fence
+ */
+static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
+ struct drm_file *filp,
+ union drm_amdgpu_wait_fences *wait,
+ struct drm_amdgpu_fence *fences)
+{
+ uint32_t fence_count = wait->in.fence_count;
+ unsigned int i;
+ long r = 1;
+
+ for (i = 0; i < fence_count; i++) {
+ struct dma_fence *fence;
+ unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
+
+ fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+ else if (!fence)
+ continue;
+
+ r = dma_fence_wait_timeout(fence, true, timeout);
+ if (r < 0)
+ return r;
+
+ if (r == 0)
+ break;
+ }
+
+ memset(wait, 0, sizeof(*wait));
+ wait->out.status = (r > 0);
+
+ return 0;
+}
+
+/**
+ * amdgpu_cs_wait_any_fence - wait on any fence to signal
+ *
+ * @adev: amdgpu device
+ * @filp: file private
+ * @wait: wait parameters
+ * @fences: array of drm_amdgpu_fence
+ */
+static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
+ struct drm_file *filp,
+ union drm_amdgpu_wait_fences *wait,
+ struct drm_amdgpu_fence *fences)
+{
+ unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
+ uint32_t fence_count = wait->in.fence_count;
+ uint32_t first = ~0;
+ struct dma_fence **array;
+ unsigned int i;
+ long r;
+
+ /* Prepare the fence array */
+ array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
+
+ if (array == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < fence_count; i++) {
+ struct dma_fence *fence;
+
+ fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
+ if (IS_ERR(fence)) {
+ r = PTR_ERR(fence);
+ goto err_free_fence_array;
+ } else if (fence) {
+ array[i] = fence;
+ } else { /* NULL, the fence has been already signaled */
+ r = 1;
+ goto out;
+ }
+ }
+
+ r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
+ &first);
+ if (r < 0)
+ goto err_free_fence_array;
+
+out:
+ memset(wait, 0, sizeof(*wait));
+ wait->out.status = (r > 0);
+ wait->out.first_signaled = first;
+ /* set return value 0 to indicate success */
+ r = 0;
+
+err_free_fence_array:
+ for (i = 0; i < fence_count; i++)
+ dma_fence_put(array[i]);
+ kfree(array);
+
+ return r;
+}
+
+/**
+ * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
+ *
+ * @dev: drm device
+ * @data: data from userspace
+ * @filp: file private
+ */
+int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct amdgpu_device *adev = dev->dev_private;
+ union drm_amdgpu_wait_fences *wait = data;
+ uint32_t fence_count = wait->in.fence_count;
+ struct drm_amdgpu_fence *fences_user;
+ struct drm_amdgpu_fence *fences;
+ int r;
+
+ /* Get the fences from userspace */
+ fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
+ GFP_KERNEL);
+ if (fences == NULL)
+ return -ENOMEM;
+
+ fences_user = (void __user *)(unsigned long)(wait->in.fences);
+ if (copy_from_user(fences, fences_user,
+ sizeof(struct drm_amdgpu_fence) * fence_count)) {
+ r = -EFAULT;
+ goto err_free_fences;
+ }
+
+ if (wait->in.wait_all)
+ r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
+ else
+ r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
+
+err_free_fences:
+ kfree(fences);
+
+ return r;
+}
+
+/**
* amdgpu_cs_find_bo_va - find bo_va for VM address
*
* @parser: command submission parser context
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6958d4af017f..9e16e975f31a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1981,6 +1981,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
*/
amdgpu_bo_evict_vram(adev);
+ amdgpu_atombios_scratch_regs_save(adev);
pci_save_state(dev->pdev);
if (suspend) {
/* Shut down the device */
@@ -2032,6 +2033,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
return r;
}
}
+ amdgpu_atombios_scratch_regs_restore(adev);
/* post card */
if (!amdgpu_card_posted(adev) || !resume) {
@@ -2290,8 +2292,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
}
if (need_full_reset) {
- /* save scratch */
- amdgpu_atombios_scratch_regs_save(adev);
r = amdgpu_suspend(adev);
retry:
@@ -2301,8 +2301,9 @@ retry:
amdgpu_display_stop_mc_access(adev, &save);
amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
}
-
+ amdgpu_atombios_scratch_regs_save(adev);
r = amdgpu_asic_reset(adev);
+ amdgpu_atombios_scratch_regs_restore(adev);
/* post card */
amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -2310,8 +2311,6 @@ retry:
dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
r = amdgpu_resume(adev);
}
- /* restore scratch */
- amdgpu_atombios_scratch_regs_restore(adev);
}
if (!r) {
amdgpu_irq_gpu_reset_resume_helper(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 38bdc2d300a3..f1c9e59a7c87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -75,18 +75,12 @@ amdgpufb_release(struct fb_info *info, int user)
static struct fb_ops amdgpufb_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_open = amdgpufb_open,
.fb_release = amdgpufb_release,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 57552c79ec58..97928d7281f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -68,6 +68,7 @@ int amdgpu_fence_slab_init(void)
void amdgpu_fence_slab_fini(void)
{
+ rcu_barrier();
kmem_cache_destroy(amdgpu_fence_slab);
}
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 278708f5a744..9fa809876339 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -239,6 +239,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
if (r) {
adev->irq.installed = false;
flush_work(&adev->hotplug_work);
+ cancel_work_sync(&adev->reset_work);
return r;
}
@@ -264,6 +265,7 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
if (adev->irq.msi_enabled)
pci_disable_msi(adev->pdev);
flush_work(&adev->hotplug_work);
+ cancel_work_sync(&adev->reset_work);
}
for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 78392671046a..ad908612aff9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -489,10 +489,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
/* return all clocks in KHz */
dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
if (adev->pm.dpm_enabled) {
- dev_info.max_engine_clock =
- adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
- dev_info.max_memory_clock =
- adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk * 10;
+ dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
+ dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
} else {
dev_info.max_engine_clock = adev->pm.default_sclk * 10;
dev_info.max_memory_clock = adev->pm.default_mclk * 10;
@@ -825,6 +823,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index f0a0513ef4c2..1479d09bd4dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -766,6 +766,10 @@ static const char *amdgpu_vram_names[] = {
int amdgpu_bo_init(struct amdgpu_device *adev)
{
+ /* reserve PAT memory space to WC for VRAM */
+ arch_io_reserve_memtype_wc(adev->mc.aper_base,
+ adev->mc.aper_size);
+
/* Add an MTRR for the VRAM */
adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
adev->mc.aper_size);
@@ -781,6 +785,7 @@ void amdgpu_bo_fini(struct amdgpu_device *adev)
{
amdgpu_ttm_fini(adev);
arch_phys_wc_del(adev->mc.vram_mtrr);
+ arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size);
}
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index fd26c4b8d793..34a795463988 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -361,7 +361,8 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
if (count) {
spin_unlock(&sa_manager->wq.lock);
t = dma_fence_wait_any_timeout(fences, count, false,
- MAX_SCHEDULE_TIMEOUT);
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
for (i = 0; i < count; ++i)
dma_fence_put(fences[i]);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index e480263387e1..337c5b31d18d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1775,5 +1775,6 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
dma_fence_put(adev->vm_manager.ids[i].first);
amdgpu_sync_free(&adev->vm_manager.ids[i].active);
dma_fence_put(id->flushed_updates);
+ dma_fence_put(id->last_flush);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index e9b1964d4e61..1caff75ab9fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -4083,7 +4083,7 @@ static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
}
} else {
- if (pi->last_mclk_dpm_enable_mask & 0x1) {
+ if (pi->uvd_enabled) {
pi->uvd_enabled = false;
pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
amdgpu_ci_send_msg_to_smc_with_parameter(adev,
@@ -6308,6 +6308,8 @@ static int ci_dpm_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ flush_work(&adev->pm.dpm.thermal.work);
+
mutex_lock(&adev->pm.mutex);
amdgpu_pm_sysfs_fini(adev);
ci_dpm_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 199d3f7235d6..65a954cb69ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2032,7 +2032,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- char *format_name;
+ struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -2144,9 +2144,8 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- format_name = drm_get_format_name(target_fb->pixel_format);
- DRM_ERROR("Unsupported screen format %s\n", format_name);
- kfree(format_name);
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->pixel_format, &format_name));
return -EINVAL;
}
@@ -3068,10 +3067,6 @@ static int dce_v10_0_hw_fini(void *handle)
static int dce_v10_0_suspend(void *handle)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- amdgpu_atombios_scratch_regs_save(adev);
-
return dce_v10_0_hw_fini(handle);
}
@@ -3082,8 +3077,6 @@ static int dce_v10_0_resume(void *handle)
ret = dce_v10_0_hw_init(handle);
- amdgpu_atombios_scratch_regs_restore(adev);
-
/* turn on the BL */
if (adev->mode_info.bl_encoder) {
u8 bl_level = amdgpu_display_backlight_get_level(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index ecd000e35981..d807e876366b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2013,7 +2013,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- char *format_name;
+ struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -2125,9 +2125,8 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- format_name = drm_get_format_name(target_fb->pixel_format);
- DRM_ERROR("Unsupported screen format %s\n", format_name);
- kfree(format_name);
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->pixel_format, &format_name));
return -EINVAL;
}
@@ -3132,10 +3131,6 @@ static int dce_v11_0_hw_fini(void *handle)
static int dce_v11_0_suspend(void *handle)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- amdgpu_atombios_scratch_regs_save(adev);
-
return dce_v11_0_hw_fini(handle);
}
@@ -3146,8 +3141,6 @@ static int dce_v11_0_resume(void *handle)
ret = dce_v11_0_hw_init(handle);
- amdgpu_atombios_scratch_regs_restore(adev);
-
/* turn on the BL */
if (adev->mode_info.bl_encoder) {
u8 bl_level = amdgpu_display_backlight_get_level(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 44547f951d92..bc9f2f423270 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1456,6 +1456,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
+ struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1559,7 +1560,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
break;
default:
DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->pixel_format));
+ drm_get_format_name(target_fb->pixel_format, &format_name));
return -EINVAL;
}
@@ -2039,13 +2040,13 @@ static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
- drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_on(crtc);
dce_v6_0_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_off(crtc);
if (amdgpu_crtc->enabled)
amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
@@ -2403,10 +2404,6 @@ static int dce_v6_0_hw_fini(void *handle)
static int dce_v6_0_suspend(void *handle)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- amdgpu_atombios_scratch_regs_save(adev);
-
return dce_v6_0_hw_fini(handle);
}
@@ -2417,8 +2414,6 @@ static int dce_v6_0_resume(void *handle)
ret = dce_v6_0_hw_init(handle);
- amdgpu_atombios_scratch_regs_restore(adev);
-
/* turn on the BL */
if (adev->mode_info.bl_encoder) {
u8 bl_level = amdgpu_display_backlight_get_level(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 979aedf4b74d..4ae59914bc32 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1910,7 +1910,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- char *format_name;
+ struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -2015,9 +2015,8 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- format_name = drm_get_format_name(target_fb->pixel_format);
- DRM_ERROR("Unsupported screen format %s\n", format_name);
- kfree(format_name);
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->pixel_format, &format_name));
return -EINVAL;
}
@@ -2913,10 +2912,6 @@ static int dce_v8_0_hw_fini(void *handle)
static int dce_v8_0_suspend(void *handle)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- amdgpu_atombios_scratch_regs_save(adev);
-
return dce_v8_0_hw_fini(handle);
}
@@ -2927,8 +2922,6 @@ static int dce_v8_0_resume(void *handle)
ret = dce_v8_0_hw_init(handle);
- amdgpu_atombios_scratch_regs_restore(adev);
-
/* turn on the BL */
if (adev->mode_info.bl_encoder) {
u8 bl_level = amdgpu_display_backlight_get_level(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index cc85676a68d9..81cbf0b05dff 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -214,12 +214,12 @@ static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
/* Make sure VBLANK interrupts are still enabled */
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
- drm_vblank_on(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_on(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_off(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_off(crtc);
amdgpu_crtc->enabled = false;
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 86a7ca5d8511..23f1bc94ad3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -640,7 +640,6 @@ static const u32 stoney_mgcg_cgcg_init[] =
mmCP_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
mmRLC_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
- mmATC_MISC_CG, 0xffffffff, 0x000c0200,
};
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 74d7cc3f7e8c..f7372d32b8e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -100,6 +100,7 @@ static const u32 cz_mgcg_cgcg_init[] =
static const u32 stoney_mgcg_cgcg_init[] =
{
+ mmATC_MISC_CG, 0xffffffff, 0x000c0200,
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index b6f2e50636a9..61172d4a0657 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -3063,6 +3063,8 @@ static int kv_dpm_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ flush_work(&adev->pm.dpm.thermal.work);
+
mutex_lock(&adev->pm.mutex);
amdgpu_pm_sysfs_fini(adev);
kv_dpm_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 917213396787..f0f2f6c9718e 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3479,6 +3479,49 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
int i;
struct si_dpm_quirk *p = si_dpm_quirk_list;
+ /* limit all SI kickers */
+ if (adev->asic_type == CHIP_PITCAIRN) {
+ if ((adev->pdev->revision == 0x81) ||
+ (adev->pdev->device == 0x6810) ||
+ (adev->pdev->device == 0x6811) ||
+ (adev->pdev->device == 0x6816) ||
+ (adev->pdev->device == 0x6817) ||
+ (adev->pdev->device == 0x6806))
+ max_mclk = 120000;
+ } else if (adev->asic_type == CHIP_VERDE) {
+ if ((adev->pdev->revision == 0x81) ||
+ (adev->pdev->revision == 0x83) ||
+ (adev->pdev->revision == 0x87) ||
+ (adev->pdev->device == 0x6820) ||
+ (adev->pdev->device == 0x6821) ||
+ (adev->pdev->device == 0x6822) ||
+ (adev->pdev->device == 0x6823) ||
+ (adev->pdev->device == 0x682A) ||
+ (adev->pdev->device == 0x682B)) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
+ } else if (adev->asic_type == CHIP_OLAND) {
+ if ((adev->pdev->revision == 0xC7) ||
+ (adev->pdev->revision == 0x80) ||
+ (adev->pdev->revision == 0x81) ||
+ (adev->pdev->revision == 0x83) ||
+ (adev->pdev->device == 0x6604) ||
+ (adev->pdev->device == 0x6605)) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
+ } else if (adev->asic_type == CHIP_HAINAN) {
+ if ((adev->pdev->revision == 0x81) ||
+ (adev->pdev->revision == 0x83) ||
+ (adev->pdev->revision == 0xC3) ||
+ (adev->pdev->device == 0x6664) ||
+ (adev->pdev->device == 0x6665) ||
+ (adev->pdev->device == 0x6667)) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
+ }
/* Apply dpm quirks */
while (p && p->chip_device != 0) {
if (adev->pdev->vendor == p->chip_vendor &&
@@ -3491,22 +3534,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
}
++p;
}
- /* limit mclk on all R7 370 parts for stability */
- if (adev->pdev->device == 0x6811 &&
- adev->pdev->revision == 0x81)
- max_mclk = 120000;
- /* limit sclk/mclk on Jet parts for stability */
- if (adev->pdev->device == 0x6665 &&
- adev->pdev->revision == 0xc3) {
- max_sclk = 75000;
- max_mclk = 80000;
- }
- /* Limit clocks for some HD8600 parts */
- if (adev->pdev->device == 0x6660 &&
- adev->pdev->revision == 0x83) {
- max_sclk = 75000;
- max_mclk = 80000;
- }
if (rps->vce_active) {
rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
@@ -7779,6 +7806,8 @@ static int si_dpm_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ flush_work(&adev->pm.dpm.thermal.work);
+
mutex_lock(&adev->pm.mutex);
amdgpu_pm_sysfs_fini(adev);
si_dpm_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 5ed2930a8568..39f03f137a56 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -52,6 +52,8 @@
#define VCE_V3_0_STACK_SIZE (64 * 1024)
#define VCE_V3_0_DATA_SIZE ((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))
+#define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8))
+
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -382,6 +384,10 @@ static int vce_v3_0_sw_init(void *handle)
if (r)
return r;
+ /* 52.8.3 required for 3 ring support */
+ if (adev->vce.fw_version < FW_52_8_3)
+ adev->vce.num_rings = 2;
+
r = amdgpu_vce_resume(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 25c0a71b257d..52d0a83e6ad1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -988,7 +988,7 @@ static int vi_common_early_init(void *handle)
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_VCE_MGCG;
- adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+ adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_PIPELINE |
AMD_PG_SUPPORT_UVD |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 1167205057b3..2ba7937d2545 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -716,7 +716,7 @@ int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
*voltage = 1150;
} else {
ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
- *voltage = (uint16_t)vol/100;
+ *voltage = (uint16_t)(vol/100);
}
return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index 1126bd4f74dc..0894527d932f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -1320,7 +1320,8 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
if (0 != result)
return result;
- *voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel);
+ *voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)
+ (&get_voltage_info_param_space))->ulVoltageLevel);
return result;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index baf0f3d4c2f0..c45bd2560468 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -1201,12 +1201,15 @@ static uint32_t make_classification_flags(struct pp_hwmgr *hwmgr,
static int ppt_get_num_of_vce_state_table_entries_v1_0(struct pp_hwmgr *hwmgr)
{
const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
- const ATOM_Tonga_VCE_State_Table *vce_state_table =
- (ATOM_Tonga_VCE_State_Table *)(((unsigned long)pp_table) + le16_to_cpu(pp_table->usVCEStateTableOffset));
+ const ATOM_Tonga_VCE_State_Table *vce_state_table;
- if (vce_state_table == NULL)
+
+ if (pp_table == NULL)
return 0;
+ vce_state_table = (void *)pp_table +
+ le16_to_cpu(pp_table->usVCEStateTableOffset);
+
return vce_state_table->ucNumEntries;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 073e0bfa22a0..9e49f2777143 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -1168,8 +1168,8 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
tmp_result = (!smum_is_dpm_running(hwmgr)) ? 0 : -1;
PP_ASSERT_WITH_CODE(tmp_result == 0,
- "DPM is already running right now, no need to enable DPM!",
- return 0);
+ "DPM is already running",
+ );
if (smu7_voltage_control(hwmgr)) {
tmp_result = smu7_enable_voltage_control(hwmgr);
@@ -2141,15 +2141,18 @@ static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
}
static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
- struct phm_clock_and_voltage_limits *tab)
+ struct phm_clock_and_voltage_limits *tab)
{
+ uint32_t vddc, vddci;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (tab) {
- smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddc,
- &data->vddc_leakage);
- smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddci,
- &data->vddci_leakage);
+ smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc,
+ &data->vddc_leakage);
+ tab->vddc = vddc;
+ smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci,
+ &data->vddci_leakage);
+ tab->vddci = vddci;
}
return 0;
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 5364e6a7ec8f..09b2cf6ccfa4 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -646,6 +646,7 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
if (sched->thread)
kthread_stop(sched->thread);
+ rcu_barrier();
if (atomic_dec_and_test(&sched_fence_slab_ref))
kmem_cache_destroy(sched_fence_slab);
}
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index c26fa298fe9e..91530e25aaff 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -107,7 +107,7 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
}
/**
- * amd_sched_fence_release - callback that fence can be freed
+ * amd_sched_fence_release_scheduled - callback that fence can be freed
*
* @fence: fence
*
@@ -122,7 +122,7 @@ static void amd_sched_fence_release_scheduled(struct dma_fence *f)
}
/**
- * amd_sched_fence_release_scheduled - drop extra reference
+ * amd_sched_fence_release_finished - drop extra reference
*
* @f: fence
*
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index 28e6471257d0..0b6eaa49a1db 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -65,9 +65,7 @@ static const struct file_operations arcpgu_drm_ops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 6477d1a65266..faab7f9bd3b7 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -268,9 +268,7 @@ static const struct file_operations fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = noop_llseek,
@@ -337,14 +335,10 @@ static int hdlcd_drm_bind(struct device *dev)
if (ret)
goto err_free;
- ret = drm_dev_register(drm, 0);
- if (ret)
- goto err_unload;
-
ret = component_bind_all(dev, drm);
if (ret) {
DRM_ERROR("Failed to bind all components\n");
- goto err_unregister;
+ goto err_unload;
}
ret = pm_runtime_set_active(dev);
@@ -371,8 +365,17 @@ static int hdlcd_drm_bind(struct device *dev)
goto err_fbdev;
}
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto err_register;
+
return 0;
+err_register:
+ if (hdlcd->fbdev) {
+ drm_fbdev_cma_fini(hdlcd->fbdev);
+ hdlcd->fbdev = NULL;
+ }
err_fbdev:
drm_kms_helper_poll_fini(drm);
drm_mode_config_cleanup(drm);
@@ -381,8 +384,6 @@ err_vblank:
pm_runtime_disable(drm->dev);
err_pm_active:
component_unbind_all(dev, drm);
-err_unregister:
- drm_dev_unregister(drm);
err_unload:
drm_irq_uninstall(drm);
of_reserved_mem_device_release(drm->dev);
@@ -398,6 +399,7 @@ static void hdlcd_drm_unbind(struct device *dev)
struct drm_device *drm = dev_get_drvdata(dev);
struct hdlcd_drm_private *hdlcd = drm->dev_private;
+ drm_dev_unregister(drm);
if (hdlcd->fbdev) {
drm_fbdev_cma_fini(hdlcd->fbdev);
hdlcd->fbdev = NULL;
@@ -411,7 +413,6 @@ static void hdlcd_drm_unbind(struct device *dev)
pm_runtime_disable(drm->dev);
of_reserved_mem_device_release(drm->dev);
drm_mode_config_cleanup(drm);
- drm_dev_unregister(drm);
drm_dev_unref(drm);
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
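The hdlcd reordering above (and the matching malidp change below) applies the usual rule that drm_dev_register() should be the very last step of bind, once the device is fully initialised, and drm_dev_unregister() the very first step of unbind, so userspace can never reach a half-constructed or half-torn-down device. A condensed sketch of the shape this gives a component-based driver, with hypothetical foo_* helpers standing in for the real setup code:

static int foo_bind(struct device *dev)
{
	struct drm_device *drm;
	int ret;

	/* allocate the drm_device, init KMS, IRQs, fbdev, ... */
	ret = foo_setup(dev, &drm);		/* hypothetical helper */
	if (ret)
		return ret;

	/* register last, when everything userspace may touch is ready */
	ret = drm_dev_register(drm, 0);
	if (ret)
		goto err_teardown;

	return 0;

err_teardown:
	foo_teardown(drm);			/* hypothetical helper */
	return ret;
}

static void foo_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	/* unregister first, so no new userspace access can come in */
	drm_dev_unregister(drm);
	foo_teardown(drm);
}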
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 9f4739452a25..32f746e31379 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -42,6 +42,7 @@ static int malidp_set_and_wait_config_valid(struct drm_device *drm)
struct malidp_hw_device *hwdev = malidp->dev;
int ret;
+ atomic_set(&malidp->config_valid, 0);
hwdev->set_config_valid(hwdev);
/* don't wait for config_valid flag if we are in config mode */
if (hwdev->in_config_mode(hwdev))
@@ -91,8 +92,7 @@ static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_modeset_disables(drm, state);
drm_atomic_helper_commit_modeset_enables(drm, state);
- drm_atomic_helper_commit_planes(drm, state,
- DRM_PLANE_COMMIT_ACTIVE_ONLY);
+ drm_atomic_helper_commit_planes(drm, state, 0);
malidp_atomic_commit_hw_done(state);
@@ -155,6 +155,12 @@ static int malidp_init(struct drm_device *drm)
return 0;
}
+static void malidp_fini(struct drm_device *drm)
+{
+ malidp_de_planes_destroy(drm);
+ drm_mode_config_cleanup(drm);
+}
+
static int malidp_irq_init(struct platform_device *pdev)
{
int irq_de, irq_se, ret = 0;
@@ -197,9 +203,7 @@ static const struct file_operations fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = noop_llseek,
@@ -355,10 +359,6 @@ static int malidp_bind(struct device *dev)
if (ret < 0)
goto init_fail;
- ret = drm_dev_register(drm, 0);
- if (ret)
- goto register_fail;
-
/* Set the CRTC's port so that the encoder component can find it */
ep = of_graph_get_next_endpoint(dev->of_node, NULL);
if (!ep) {
@@ -377,6 +377,8 @@ static int malidp_bind(struct device *dev)
if (ret < 0)
goto irq_init_fail;
+ drm->irq_enabled = true;
+
ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (ret < 0) {
DRM_ERROR("failed to initialise vblank\n");
@@ -395,23 +397,31 @@ static int malidp_bind(struct device *dev)
}
drm_kms_helper_poll_init(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto register_fail;
+
return 0;
+register_fail:
+ if (malidp->fbdev) {
+ drm_fbdev_cma_fini(malidp->fbdev);
+ malidp->fbdev = NULL;
+ }
fbdev_fail:
drm_vblank_cleanup(drm);
vblank_fail:
malidp_se_irq_fini(drm);
malidp_de_irq_fini(drm);
+ drm->irq_enabled = false;
irq_init_fail:
component_unbind_all(dev, drm);
bind_fail:
of_node_put(malidp->crtc.port);
malidp->crtc.port = NULL;
port_fail:
- drm_dev_unregister(drm);
-register_fail:
- malidp_de_planes_destroy(drm);
- drm_mode_config_cleanup(drm);
+ malidp_fini(drm);
init_fail:
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
@@ -432,6 +442,7 @@ static void malidp_unbind(struct device *dev)
struct malidp_drm *malidp = drm->dev_private;
struct malidp_hw_device *hwdev = malidp->dev;
+ drm_dev_unregister(drm);
if (malidp->fbdev) {
drm_fbdev_cma_fini(malidp->fbdev);
malidp->fbdev = NULL;
@@ -443,9 +454,7 @@ static void malidp_unbind(struct device *dev)
component_unbind_all(dev, drm);
of_node_put(malidp->crtc.port);
malidp->crtc.port = NULL;
- drm_dev_unregister(drm);
- malidp_de_planes_destroy(drm);
- drm_mode_config_cleanup(drm);
+ malidp_fini(drm);
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
clk_disable_unprepare(hwdev->mclk);
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
index 271d2fb9711c..9fc8a2e405e4 100644
--- a/drivers/gpu/drm/arm/malidp_drv.h
+++ b/drivers/gpu/drm/arm/malidp_drv.h
@@ -39,6 +39,9 @@ struct malidp_plane_state {
/* size of the required rotation memory if plane is rotated */
u32 rotmem_size;
+ /* internal format ID */
+ u8 format;
+ u8 n_planes;
};
#define to_malidp_plane(x) container_of(x, struct malidp_plane, base)
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index be815d0cc772..4bdf531f7844 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -125,6 +125,7 @@ static void malidp500_leave_config_mode(struct malidp_hw_device *hwdev)
{
u32 status, count = 100;
+ malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
malidp_hw_clearbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
while (count) {
status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
@@ -266,6 +267,7 @@ static void malidp550_leave_config_mode(struct malidp_hw_device *hwdev)
{
u32 status, count = 100;
+ malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
malidp_hw_clearbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
while (count) {
status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
@@ -436,6 +438,7 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
},
.input_formats = malidp500_de_formats,
.n_input_formats = ARRAY_SIZE(malidp500_de_formats),
+ .bus_align_bytes = 8,
},
.query_hw = malidp500_query_hw,
.enter_config_mode = malidp500_enter_config_mode,
@@ -468,6 +471,7 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
},
.input_formats = malidp550_de_formats,
.n_input_formats = ARRAY_SIZE(malidp550_de_formats),
+ .bus_align_bytes = 8,
},
.query_hw = malidp550_query_hw,
.enter_config_mode = malidp550_enter_config_mode,
@@ -501,6 +505,7 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
},
.input_formats = malidp550_de_formats,
.n_input_formats = ARRAY_SIZE(malidp550_de_formats),
+ .bus_align_bytes = 16,
},
.query_hw = malidp650_query_hw,
.enter_config_mode = malidp550_enter_config_mode,
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index 141743e9f3a6..087e1202db3d 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -88,6 +88,9 @@ struct malidp_hw_regmap {
/* list of supported input formats for each layer */
const struct malidp_input_format *input_formats;
const u8 n_input_formats;
+
+ /* pitch alignment requirement in bytes */
+ const u8 bus_align_bytes;
};
struct malidp_hw_device {
@@ -229,6 +232,12 @@ void malidp_se_irq_fini(struct drm_device *drm);
u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
u8 layer_id, u32 format);
+static inline bool malidp_hw_pitch_valid(struct malidp_hw_device *hwdev,
+ unsigned int pitch)
+{
+ return !(pitch & (hwdev->map.bus_align_bytes - 1));
+}
+
/*
* background color components are defined as 12bits values,
* they will be shifted right when stored on hardware that
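The new malidp_hw_pitch_valid() helper relies on bus_align_bytes being a power of two (the values added in malidp_hw.c are 8 and 16), which lets the divisibility test collapse into a single mask. A standalone illustration of the check:

#include <stdbool.h>
#include <stdint.h>

/* true if pitch is a multiple of align; align must be a power of two */
static bool pitch_is_aligned(uint32_t pitch, uint32_t align)
{
	return (pitch & (align - 1)) == 0;
}

/*
 * pitch_is_aligned(256, 8)  -> true   (256 is 32 * 8)
 * pitch_is_aligned(250, 8)  -> false  (250 % 8 == 2)
 * pitch_is_aligned(256, 16) -> true
 */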
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index abaca03b9d36..63eec8f37cfc 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -27,6 +27,10 @@
#define LAYER_H_FLIP (1 << 10)
#define LAYER_V_FLIP (1 << 11)
#define LAYER_ROT_MASK (0xf << 8)
+#define LAYER_COMP_MASK (0x3 << 12)
+#define LAYER_COMP_PIXEL (0x3 << 12)
+#define LAYER_COMP_PLANE (0x2 << 12)
+#define MALIDP_LAYER_COMPOSE 0x008
#define MALIDP_LAYER_SIZE 0x00c
#define LAYER_H_VAL(x) (((x) & 0x1fff) << 0)
#define LAYER_V_VAL(x) (((x) & 0x1fff) << 16)
@@ -34,6 +38,14 @@
#define MALIDP_LAYER_OFFSET 0x014
#define MALIDP_LAYER_STRIDE 0x018
+/*
+ * This 4-entry look-up-table is used to determine the full 8-bit alpha value
+ * for formats with 1- or 2-bit alpha channels.
+ * We set it to give 100%/0% opacity for 1-bit formats and 100%/66%/33%/0%
+ * opacity for 2-bit formats.
+ */
+#define MALIDP_ALPHA_LUT 0xffaa5500
+
static void malidp_de_plane_destroy(struct drm_plane *plane)
{
struct malidp_plane *mp = to_malidp_plane(plane);
@@ -46,7 +58,8 @@ static void malidp_de_plane_destroy(struct drm_plane *plane)
devm_kfree(plane->dev->dev, mp);
}
-struct drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane)
+static struct
+drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane)
{
struct malidp_plane_state *state, *m_state;
@@ -58,13 +71,15 @@ struct drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane)
m_state = to_malidp_plane_state(plane->state);
__drm_atomic_helper_plane_duplicate_state(plane, &state->base);
state->rotmem_size = m_state->rotmem_size;
+ state->format = m_state->format;
+ state->n_planes = m_state->n_planes;
}
return &state->base;
}
-void malidp_destroy_plane_state(struct drm_plane *plane,
- struct drm_plane_state *state)
+static void malidp_destroy_plane_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
{
struct malidp_plane_state *m_state = to_malidp_plane_state(state);
@@ -75,6 +90,7 @@ void malidp_destroy_plane_state(struct drm_plane *plane,
static const struct drm_plane_funcs malidp_de_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
+ .set_property = drm_atomic_helper_plane_set_property,
.destroy = malidp_de_plane_destroy,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = malidp_duplicate_plane_state,
@@ -86,17 +102,29 @@ static int malidp_de_plane_check(struct drm_plane *plane,
{
struct malidp_plane *mp = to_malidp_plane(plane);
struct malidp_plane_state *ms = to_malidp_plane_state(state);
- u8 format_id;
+ struct drm_framebuffer *fb;
+ int i;
u32 src_w, src_h;
if (!state->crtc || !state->fb)
return 0;
- format_id = malidp_hw_get_format_id(&mp->hwdev->map, mp->layer->id,
- state->fb->pixel_format);
- if (format_id == MALIDP_INVALID_FORMAT_ID)
+ fb = state->fb;
+
+ ms->format = malidp_hw_get_format_id(&mp->hwdev->map, mp->layer->id,
+ fb->pixel_format);
+ if (ms->format == MALIDP_INVALID_FORMAT_ID)
return -EINVAL;
+ ms->n_planes = drm_format_num_planes(fb->pixel_format);
+ for (i = 0; i < ms->n_planes; i++) {
+ if (!malidp_hw_pitch_valid(mp->hwdev, fb->pitches[i])) {
+ DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n",
+ fb->pitches[i], i);
+ return -EINVAL;
+ }
+ }
+
src_w = state->src_w >> 16;
src_h = state->src_h >> 16;
@@ -135,17 +163,13 @@ static void malidp_de_plane_update(struct drm_plane *plane,
struct drm_gem_cma_object *obj;
struct malidp_plane *mp;
const struct malidp_hw_regmap *map;
- u8 format_id;
+ struct malidp_plane_state *ms = to_malidp_plane_state(plane->state);
u16 ptr;
- u32 format, src_w, src_h, dest_w, dest_h, val = 0;
- int num_planes, i;
+ u32 src_w, src_h, dest_w, dest_h, val;
+ int i;
mp = to_malidp_plane(plane);
-
map = &mp->hwdev->map;
- format = plane->state->fb->pixel_format;
- format_id = malidp_hw_get_format_id(map, mp->layer->id, format);
- num_planes = drm_format_num_planes(format);
/* convert src values from Q16 fixed point to integer */
src_w = plane->state->src_w >> 16;
@@ -158,9 +182,9 @@ static void malidp_de_plane_update(struct drm_plane *plane,
dest_h = plane->state->crtc_h;
}
- malidp_hw_write(mp->hwdev, format_id, mp->layer->base);
+ malidp_hw_write(mp->hwdev, ms->format, mp->layer->base);
- for (i = 0; i < num_planes; i++) {
+ for (i = 0; i < ms->n_planes; i++) {
/* calculate the offset for the layer's plane registers */
ptr = mp->layer->ptr + (i << 4);
@@ -181,9 +205,9 @@ static void malidp_de_plane_update(struct drm_plane *plane,
LAYER_V_VAL(plane->state->crtc_y),
mp->layer->base + MALIDP_LAYER_OFFSET);
- /* first clear the rotation bits in the register */
- malidp_hw_clearbits(mp->hwdev, LAYER_ROT_MASK,
- mp->layer->base + MALIDP_LAYER_CONTROL);
+ /* first clear the rotation bits */
+ val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL);
+ val &= ~LAYER_ROT_MASK;
/* setup the rotation and axis flip bits */
if (plane->state->rotation & DRM_ROTATE_MASK)
@@ -193,11 +217,18 @@ static void malidp_de_plane_update(struct drm_plane *plane,
if (plane->state->rotation & DRM_REFLECT_Y)
val |= LAYER_H_FLIP;
+ /*
+ * always enable pixel alpha blending until we have a way to change
+ * blend modes
+ */
+ val &= ~LAYER_COMP_MASK;
+ val |= LAYER_COMP_PIXEL;
+
/* set the 'enable layer' bit */
val |= LAYER_ENABLE;
- malidp_hw_setbits(mp->hwdev, val,
- mp->layer->base + MALIDP_LAYER_CONTROL);
+ malidp_hw_write(mp->hwdev, val,
+ mp->layer->base + MALIDP_LAYER_CONTROL);
}
static void malidp_de_plane_disable(struct drm_plane *plane,
@@ -222,6 +253,8 @@ int malidp_de_planes_init(struct drm_device *drm)
struct malidp_plane *plane = NULL;
enum drm_plane_type plane_type;
unsigned long crtcs = 1 << drm->mode_config.num_crtc;
+ unsigned long flags = DRM_ROTATE_0 | DRM_ROTATE_90 | DRM_ROTATE_180 |
+ DRM_ROTATE_270 | DRM_REFLECT_X | DRM_REFLECT_Y;
u32 *formats;
int ret, i, j, n;
@@ -254,23 +287,18 @@ int malidp_de_planes_init(struct drm_device *drm)
if (ret < 0)
goto cleanup;
- /* SMART layer can't be rotated */
- if (id != DE_SMART) {
- unsigned long flags = DRM_ROTATE_0 |
- DRM_ROTATE_90 |
- DRM_ROTATE_180 |
- DRM_ROTATE_270 |
- DRM_REFLECT_X |
- DRM_REFLECT_Y;
- drm_plane_create_rotation_property(&plane->base,
- DRM_ROTATE_0,
- flags);
- }
-
drm_plane_helper_add(&plane->base,
&malidp_de_plane_helper_funcs);
plane->hwdev = malidp->dev;
plane->layer = &map->layers[i];
+
+ /* Skip the features which the SMART layer doesn't have */
+ if (id == DE_SMART)
+ continue;
+
+ drm_plane_create_rotation_property(&plane->base, DRM_ROTATE_0, flags);
+ malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT,
+ plane->layer->base + MALIDP_LAYER_COMPOSE);
}
kfree(formats);
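The MALIDP_ALPHA_LUT value written to MALIDP_LAYER_COMPOSE above packs one 8-bit alpha value per 2-bit index: 0xffaa5500 decodes to 0x00, 0x55, 0xaa and 0xff for indices 0..3, i.e. roughly 0%, 33%, 66% and 100% opacity, with 1-bit formats presumably using only the two extremes. A small sketch of how such a packed LUT decodes, assuming the hardware simply indexes bytes (the helper name is made up for illustration):

#include <stdint.h>

/* extract the 8-bit alpha for a 2-bit index from a packed 4-entry LUT */
static uint8_t alpha_from_lut(uint32_t lut, unsigned int index)
{
	return (lut >> (index * 8)) & 0xff;
}

/*
 * With lut = 0xffaa5500:
 *   alpha_from_lut(lut, 0) == 0x00   (  0% opaque)
 *   alpha_from_lut(lut, 1) == 0x55   (~33% opaque)
 *   alpha_from_lut(lut, 2) == 0xaa   (~66% opaque)
 *   alpha_from_lut(lut, 3) == 0xff   (100% opaque)
 */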
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index ca73ad8614fe..c5dc06a55883 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -19,16 +19,10 @@
static /*const*/ struct fb_ops armada_fb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
static int armada_fb_create(struct drm_fb_helper *fbh,
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index f54afd2113a9..fd7c9eec92e4 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -188,9 +188,7 @@ static const struct file_operations ast_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = ast_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.read = drm_read,
};
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 7a86e24e2687..d6f5ec64c667 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -253,7 +253,7 @@ static int astfb_create(struct drm_fb_helper *helper,
err_release_fbi:
drm_fb_helper_release_fbi(helper);
err_free_vram:
- vfree(afbdev->sysram);
+ vfree(sysram);
return ret;
}
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 7134fdf49210..2a1368fac1d1 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -268,6 +268,8 @@ int ast_mm_init(struct ast_private *ast)
return ret;
}
+ arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
@@ -276,11 +278,15 @@ int ast_mm_init(struct ast_private *ast)
void ast_mm_fini(struct ast_private *ast)
{
+ struct drm_device *dev = ast->dev;
+
ttm_bo_device_release(&ast->ttm.bdev);
ast_ttm_global_release(ast);
arch_phys_wc_del(ast->fb_mtrr);
+ arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
}
void ast_ttm_placement(struct ast_bo *bo, int domain)
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 9f6222895212..cbd0070265c9 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -749,9 +749,7 @@ static const struct file_operations fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 534227df23f3..15a293e65b31 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -70,9 +70,7 @@ static const struct file_operations bochs_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index e1ec498a6b6e..da790a1c302a 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -22,14 +22,10 @@ static int bochsfb_mmap(struct fb_info *info,
static struct fb_ops bochsfb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_sys_fillrect,
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
.fb_mmap = bochsfb_mmap,
};
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index cd37ac058675..303083ad28e3 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -1162,5 +1162,5 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
(msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_READ)
msg->reply = DP_AUX_NATIVE_REPLY_ACK;
- return num_transferred;
+ return num_transferred > 0 ? num_transferred : -EBUSY;
}
diff --git a/drivers/gpu/drm/bridge/dw-hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c
index ab7023e5dfde..b71088dab268 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/dw-hdmi.c
@@ -1,14 +1,15 @@
/*
+ * DesignWare High-Definition Multimedia Interface (HDMI) driver
+ *
+ * Copyright (C) 2013-2015 Mentor Graphics Inc.
* Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
+ * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * Designware High-Definition Multimedia Interface (HDMI) driver
- *
- * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
*/
#include <linux/module.h>
#include <linux/irq.h>
@@ -101,6 +102,17 @@ struct hdmi_data_info {
struct hdmi_vmode video_mode;
};
+struct dw_hdmi_i2c {
+ struct i2c_adapter adap;
+
+ struct mutex lock; /* used to serialize data transfers */
+ struct completion cmp;
+ u8 stat;
+
+ u8 slave_reg;
+ bool is_regaddr;
+};
+
struct dw_hdmi {
struct drm_connector connector;
struct drm_encoder *encoder;
@@ -111,6 +123,7 @@ struct dw_hdmi {
struct device *dev;
struct clk *isfr_clk;
struct clk *iahb_clk;
+ struct dw_hdmi_i2c *i2c;
struct hdmi_data_info hdmi_data;
const struct dw_hdmi_plat_data *plat_data;
@@ -198,6 +211,201 @@ static void hdmi_mask_writeb(struct dw_hdmi *hdmi, u8 data, unsigned int reg,
hdmi_modb(hdmi, data << shift, mask, reg);
}
+static void dw_hdmi_i2c_init(struct dw_hdmi *hdmi)
+{
+ /* Software reset */
+ hdmi_writeb(hdmi, 0x00, HDMI_I2CM_SOFTRSTZ);
+
+ /* Set Standard Mode speed (determined to be 100KHz on iMX6) */
+ hdmi_writeb(hdmi, 0x00, HDMI_I2CM_DIV);
+
+ /* Set done, not acknowledged and arbitration interrupt polarities */
+ hdmi_writeb(hdmi, HDMI_I2CM_INT_DONE_POL, HDMI_I2CM_INT);
+ hdmi_writeb(hdmi, HDMI_I2CM_CTLINT_NAC_POL | HDMI_I2CM_CTLINT_ARB_POL,
+ HDMI_I2CM_CTLINT);
+
+ /* Clear DONE and ERROR interrupts */
+ hdmi_writeb(hdmi, HDMI_IH_I2CM_STAT0_ERROR | HDMI_IH_I2CM_STAT0_DONE,
+ HDMI_IH_I2CM_STAT0);
+
+ /* Mute DONE and ERROR interrupts */
+ hdmi_writeb(hdmi, HDMI_IH_I2CM_STAT0_ERROR | HDMI_IH_I2CM_STAT0_DONE,
+ HDMI_IH_MUTE_I2CM_STAT0);
+}
+
+static int dw_hdmi_i2c_read(struct dw_hdmi *hdmi,
+ unsigned char *buf, unsigned int length)
+{
+ struct dw_hdmi_i2c *i2c = hdmi->i2c;
+ int stat;
+
+ if (!i2c->is_regaddr) {
+ dev_dbg(hdmi->dev, "set read register address to 0\n");
+ i2c->slave_reg = 0x00;
+ i2c->is_regaddr = true;
+ }
+
+ while (length--) {
+ reinit_completion(&i2c->cmp);
+
+ hdmi_writeb(hdmi, i2c->slave_reg++, HDMI_I2CM_ADDRESS);
+ hdmi_writeb(hdmi, HDMI_I2CM_OPERATION_READ,
+ HDMI_I2CM_OPERATION);
+
+ stat = wait_for_completion_timeout(&i2c->cmp, HZ / 10);
+ if (!stat)
+ return -EAGAIN;
+
+ /* Check for error condition on the bus */
+ if (i2c->stat & HDMI_IH_I2CM_STAT0_ERROR)
+ return -EIO;
+
+ *buf++ = hdmi_readb(hdmi, HDMI_I2CM_DATAI);
+ }
+
+ return 0;
+}
+
+static int dw_hdmi_i2c_write(struct dw_hdmi *hdmi,
+ unsigned char *buf, unsigned int length)
+{
+ struct dw_hdmi_i2c *i2c = hdmi->i2c;
+ int stat;
+
+ if (!i2c->is_regaddr) {
+ /* Use the first write byte as register address */
+ i2c->slave_reg = buf[0];
+ length--;
+ buf++;
+ i2c->is_regaddr = true;
+ }
+
+ while (length--) {
+ reinit_completion(&i2c->cmp);
+
+ hdmi_writeb(hdmi, *buf++, HDMI_I2CM_DATAO);
+ hdmi_writeb(hdmi, i2c->slave_reg++, HDMI_I2CM_ADDRESS);
+ hdmi_writeb(hdmi, HDMI_I2CM_OPERATION_WRITE,
+ HDMI_I2CM_OPERATION);
+
+ stat = wait_for_completion_timeout(&i2c->cmp, HZ / 10);
+ if (!stat)
+ return -EAGAIN;
+
+ /* Check for error condition on the bus */
+ if (i2c->stat & HDMI_IH_I2CM_STAT0_ERROR)
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int dw_hdmi_i2c_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct dw_hdmi *hdmi = i2c_get_adapdata(adap);
+ struct dw_hdmi_i2c *i2c = hdmi->i2c;
+ u8 addr = msgs[0].addr;
+ int i, ret = 0;
+
+ dev_dbg(hdmi->dev, "xfer: num: %d, addr: %#x\n", num, addr);
+
+ for (i = 0; i < num; i++) {
+ if (msgs[i].addr != addr) {
+ dev_warn(hdmi->dev,
+ "unsupported transfer, changed slave address\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (msgs[i].len == 0) {
+ dev_dbg(hdmi->dev,
+ "unsupported transfer %d/%d, no data\n",
+ i + 1, num);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ mutex_lock(&i2c->lock);
+
+ /* Unmute DONE and ERROR interrupts */
+ hdmi_writeb(hdmi, 0x00, HDMI_IH_MUTE_I2CM_STAT0);
+
+ /* Set slave device address taken from the first I2C message */
+ hdmi_writeb(hdmi, addr, HDMI_I2CM_SLAVE);
+
+ /* Set slave device register address on transfer */
+ i2c->is_regaddr = false;
+
+ for (i = 0; i < num; i++) {
+ dev_dbg(hdmi->dev, "xfer: num: %d/%d, len: %d, flags: %#x\n",
+ i + 1, num, msgs[i].len, msgs[i].flags);
+
+ if (msgs[i].flags & I2C_M_RD)
+ ret = dw_hdmi_i2c_read(hdmi, msgs[i].buf, msgs[i].len);
+ else
+ ret = dw_hdmi_i2c_write(hdmi, msgs[i].buf, msgs[i].len);
+
+ if (ret < 0)
+ break;
+ }
+
+ if (!ret)
+ ret = num;
+
+ /* Mute DONE and ERROR interrupts */
+ hdmi_writeb(hdmi, HDMI_IH_I2CM_STAT0_ERROR | HDMI_IH_I2CM_STAT0_DONE,
+ HDMI_IH_MUTE_I2CM_STAT0);
+
+ mutex_unlock(&i2c->lock);
+
+ return ret;
+}
+
+static u32 dw_hdmi_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm dw_hdmi_algorithm = {
+ .master_xfer = dw_hdmi_i2c_xfer,
+ .functionality = dw_hdmi_i2c_func,
+};
+
+static struct i2c_adapter *dw_hdmi_i2c_adapter(struct dw_hdmi *hdmi)
+{
+ struct i2c_adapter *adap;
+ struct dw_hdmi_i2c *i2c;
+ int ret;
+
+ i2c = devm_kzalloc(hdmi->dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&i2c->lock);
+ init_completion(&i2c->cmp);
+
+ adap = &i2c->adap;
+ adap->class = I2C_CLASS_DDC;
+ adap->owner = THIS_MODULE;
+ adap->dev.parent = hdmi->dev;
+ adap->algo = &dw_hdmi_algorithm;
+ strlcpy(adap->name, "DesignWare HDMI", sizeof(adap->name));
+ i2c_set_adapdata(adap, hdmi);
+
+ ret = i2c_add_adapter(adap);
+ if (ret) {
+ dev_warn(hdmi->dev, "cannot add %s I2C adapter\n", adap->name);
+ devm_kfree(hdmi->dev, i2c);
+ return ERR_PTR(ret);
+ }
+
+ hdmi->i2c = i2c;
+
+ dev_info(hdmi->dev, "registered %s I2C bus driver\n", adap->name);
+
+ return adap;
+}
+
static void hdmi_set_cts_n(struct dw_hdmi *hdmi, unsigned int cts,
unsigned int n)
{
@@ -1512,16 +1720,40 @@ static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
.mode_set = dw_hdmi_bridge_mode_set,
};
+static irqreturn_t dw_hdmi_i2c_irq(struct dw_hdmi *hdmi)
+{
+ struct dw_hdmi_i2c *i2c = hdmi->i2c;
+ unsigned int stat;
+
+ stat = hdmi_readb(hdmi, HDMI_IH_I2CM_STAT0);
+ if (!stat)
+ return IRQ_NONE;
+
+ hdmi_writeb(hdmi, stat, HDMI_IH_I2CM_STAT0);
+
+ i2c->stat = stat;
+
+ complete(&i2c->cmp);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t dw_hdmi_hardirq(int irq, void *dev_id)
{
struct dw_hdmi *hdmi = dev_id;
u8 intr_stat;
+ irqreturn_t ret = IRQ_NONE;
+
+ if (hdmi->i2c)
+ ret = dw_hdmi_i2c_irq(hdmi);
intr_stat = hdmi_readb(hdmi, HDMI_IH_PHY_STAT0);
- if (intr_stat)
+ if (intr_stat) {
hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0);
+ return IRQ_WAKE_THREAD;
+ }
- return intr_stat ? IRQ_WAKE_THREAD : IRQ_NONE;
+ return ret;
}
static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
@@ -1681,7 +1913,7 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
if (ddc_node) {
- hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node);
+ hdmi->ddc = of_get_i2c_adapter_by_node(ddc_node);
of_node_put(ddc_node);
if (!hdmi->ddc) {
dev_dbg(hdmi->dev, "failed to read ddc node\n");
@@ -1693,20 +1925,22 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
}
hdmi->regs = devm_ioremap_resource(dev, iores);
- if (IS_ERR(hdmi->regs))
- return PTR_ERR(hdmi->regs);
+ if (IS_ERR(hdmi->regs)) {
+ ret = PTR_ERR(hdmi->regs);
+ goto err_res;
+ }
hdmi->isfr_clk = devm_clk_get(hdmi->dev, "isfr");
if (IS_ERR(hdmi->isfr_clk)) {
ret = PTR_ERR(hdmi->isfr_clk);
dev_err(hdmi->dev, "Unable to get HDMI isfr clk: %d\n", ret);
- return ret;
+ goto err_res;
}
ret = clk_prepare_enable(hdmi->isfr_clk);
if (ret) {
dev_err(hdmi->dev, "Cannot enable HDMI isfr clock: %d\n", ret);
- return ret;
+ goto err_res;
}
hdmi->iahb_clk = devm_clk_get(hdmi->dev, "iahb");
@@ -1744,6 +1978,13 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
*/
hdmi_init_clk_regenerator(hdmi);
+ /* If DDC bus is not specified, try to register HDMI I2C bus */
+ if (!hdmi->ddc) {
+ hdmi->ddc = dw_hdmi_i2c_adapter(hdmi);
+ if (IS_ERR(hdmi->ddc))
+ hdmi->ddc = NULL;
+ }
+
/*
* Configure registers related to HDMI interrupt
* generation before registering IRQ.
@@ -1784,14 +2025,25 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
hdmi->audio = platform_device_register_full(&pdevinfo);
}
+ /* Reset HDMI DDC I2C master controller and mute I2CM interrupts */
+ if (hdmi->i2c)
+ dw_hdmi_i2c_init(hdmi);
+
dev_set_drvdata(dev, hdmi);
return 0;
err_iahb:
+ if (hdmi->i2c) {
+ i2c_del_adapter(&hdmi->i2c->adap);
+ hdmi->ddc = NULL;
+ }
+
clk_disable_unprepare(hdmi->iahb_clk);
err_isfr:
clk_disable_unprepare(hdmi->isfr_clk);
+err_res:
+ i2c_put_adapter(hdmi->ddc);
return ret;
}
@@ -1809,13 +2061,18 @@ void dw_hdmi_unbind(struct device *dev, struct device *master, void *data)
clk_disable_unprepare(hdmi->iahb_clk);
clk_disable_unprepare(hdmi->isfr_clk);
- i2c_put_adapter(hdmi->ddc);
+
+ if (hdmi->i2c)
+ i2c_del_adapter(&hdmi->i2c->adap);
+ else
+ i2c_put_adapter(hdmi->ddc);
}
EXPORT_SYMBOL_GPL(dw_hdmi_unbind);
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_AUTHOR("Andy Yan <andy.yan@rock-chips.com>");
MODULE_AUTHOR("Yakir Yang <ykk@rock-chips.com>");
+MODULE_AUTHOR("Vladimir Zapolskiy <vladimir_zapolskiy@mentor.com>");
MODULE_DESCRIPTION("DW HDMI transmitter driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dw-hdmi");
diff --git a/drivers/gpu/drm/bridge/dw-hdmi.h b/drivers/gpu/drm/bridge/dw-hdmi.h
index fc9a560429d6..6aadc840e888 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi.h
+++ b/drivers/gpu/drm/bridge/dw-hdmi.h
@@ -566,6 +566,10 @@ enum {
HDMI_IH_PHY_STAT0_TX_PHY_LOCK = 0x2,
HDMI_IH_PHY_STAT0_HPD = 0x1,
+/* IH_I2CM_STAT0 and IH_MUTE_I2CM_STAT0 field values */
+ HDMI_IH_I2CM_STAT0_DONE = 0x2,
+ HDMI_IH_I2CM_STAT0_ERROR = 0x1,
+
/* IH_MUTE_I2CMPHY_STAT0 field values */
HDMI_IH_MUTE_I2CMPHY_STAT0_I2CMPHYDONE = 0x2,
HDMI_IH_MUTE_I2CMPHY_STAT0_I2CMPHYERROR = 0x1,
@@ -1032,6 +1036,21 @@ enum {
HDMI_A_VIDPOLCFG_HSYNCPOL_MASK = 0x2,
HDMI_A_VIDPOLCFG_HSYNCPOL_ACTIVE_HIGH = 0x2,
HDMI_A_VIDPOLCFG_HSYNCPOL_ACTIVE_LOW = 0x0,
+
+/* I2CM_OPERATION field values */
+ HDMI_I2CM_OPERATION_WRITE = 0x10,
+ HDMI_I2CM_OPERATION_READ_EXT = 0x2,
+ HDMI_I2CM_OPERATION_READ = 0x1,
+
+/* I2CM_INT field values */
+ HDMI_I2CM_INT_DONE_POL = 0x8,
+ HDMI_I2CM_INT_DONE_MASK = 0x4,
+
+/* I2CM_CTLINT field values */
+ HDMI_I2CM_CTLINT_NAC_POL = 0x80,
+ HDMI_I2CM_CTLINT_NAC_MASK = 0x40,
+ HDMI_I2CM_CTLINT_ARB_POL = 0x8,
+ HDMI_I2CM_CTLINT_ARB_MASK = 0x4,
};
#endif /* __DW_HDMI_H__ */
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 6c76d125995b..d893ea21a359 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -126,9 +126,7 @@ static const struct file_operations cirrus_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = cirrus_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
};
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM,
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index de52b20800e1..d6da848f7c6f 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -268,6 +268,9 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
return ret;
}
+ arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
+
cirrus->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
@@ -277,6 +280,8 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
void cirrus_mm_fini(struct cirrus_device *cirrus)
{
+ struct drm_device *dev = cirrus->dev;
+
if (!cirrus->mm_inited)
return;
@@ -286,6 +291,8 @@ void cirrus_mm_fini(struct cirrus_device *cirrus)
arch_phys_wc_del(cirrus->fb_mtrr);
cirrus->fb_mtrr = 0;
+ arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
}
void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index c32fb3c1d6f0..b476ec585547 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -30,6 +30,8 @@
#include <drm/drm_atomic.h>
#include <drm/drm_mode.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
+#include <linux/sync_file.h>
#include "drm_crtc_internal.h"
@@ -288,6 +290,23 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
}
EXPORT_SYMBOL(drm_atomic_get_crtc_state);
+static void set_out_fence_for_crtc(struct drm_atomic_state *state,
+ struct drm_crtc *crtc, s64 __user *fence_ptr)
+{
+ state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
+}
+
+static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ s64 __user *fence_ptr;
+
+ fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
+ state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
+
+ return fence_ptr;
+}
+
/**
* drm_atomic_set_mode_for_crtc - set mode for CRTC
* @state: the CRTC whose incoming state to update
@@ -416,18 +435,21 @@ drm_atomic_replace_property_blob_from_id(struct drm_crtc *crtc,
ssize_t expected_size,
bool *replaced)
{
- struct drm_device *dev = crtc->dev;
struct drm_property_blob *new_blob = NULL;
if (blob_id != 0) {
- new_blob = drm_property_lookup_blob(dev, blob_id);
+ new_blob = drm_property_lookup_blob(crtc->dev, blob_id);
if (new_blob == NULL)
return -EINVAL;
- if (expected_size > 0 && expected_size != new_blob->length)
+
+ if (expected_size > 0 && expected_size != new_blob->length) {
+ drm_property_unreference_blob(new_blob);
return -EINVAL;
+ }
}
drm_atomic_replace_property_blob(blob, new_blob, replaced);
+ drm_property_unreference_blob(new_blob);
return 0;
}
@@ -489,6 +511,16 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
&replaced);
state->color_mgmt_changed |= replaced;
return ret;
+ } else if (property == config->prop_out_fence_ptr) {
+ s64 __user *fence_ptr = u64_to_user_ptr(val);
+
+ if (!fence_ptr)
+ return 0;
+
+ if (put_user(-1, fence_ptr))
+ return -EFAULT;
+
+ set_out_fence_for_crtc(state->state, crtc, fence_ptr);
} else if (crtc->funcs->atomic_set_property)
return crtc->funcs->atomic_set_property(crtc, state, property, val);
else
@@ -531,6 +563,8 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc,
*val = (state->ctm) ? state->ctm->base.id : 0;
else if (property == config->gamma_lut_property)
*val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
+ else if (property == config->prop_out_fence_ptr)
+ *val = 0;
else if (crtc->funcs->atomic_get_property)
return crtc->funcs->atomic_get_property(crtc, state, property, val);
else
@@ -602,6 +636,28 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
return 0;
}
+static void drm_atomic_crtc_print_state(struct drm_printer *p,
+ const struct drm_crtc_state *state)
+{
+ struct drm_crtc *crtc = state->crtc;
+
+ drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name);
+ drm_printf(p, "\tenable=%d\n", state->enable);
+ drm_printf(p, "\tactive=%d\n", state->active);
+ drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed);
+ drm_printf(p, "\tmode_changed=%d\n", state->mode_changed);
+ drm_printf(p, "\tactive_changed=%d\n", state->active_changed);
+ drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed);
+ drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
+ drm_printf(p, "\tplane_mask=%x\n", state->plane_mask);
+ drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask);
+ drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask);
+ drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode));
+
+ if (crtc->funcs->atomic_print_state)
+ crtc->funcs->atomic_print_state(p, state);
+}
+
/**
* drm_atomic_get_plane_state - get plane state
* @state: global atomic state object
@@ -686,6 +742,17 @@ int drm_atomic_plane_set_property(struct drm_plane *plane,
drm_atomic_set_fb_for_plane(state, fb);
if (fb)
drm_framebuffer_unreference(fb);
+ } else if (property == config->prop_in_fence_fd) {
+ if (state->fence)
+ return -EINVAL;
+
+ if (U642I64(val) == -1)
+ return 0;
+
+ state->fence = sync_file_get_fence(val);
+ if (!state->fence)
+ return -EINVAL;
+
} else if (property == config->prop_crtc_id) {
struct drm_crtc *crtc = drm_crtc_find(dev, val);
return drm_atomic_set_crtc_for_plane(state, crtc);
@@ -747,6 +814,8 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
if (property == config->prop_fb_id) {
*val = (state->fb) ? state->fb->base.id : 0;
+ } else if (property == config->prop_in_fence_fd) {
+ *val = -1;
} else if (property == config->prop_crtc_id) {
*val = (state->crtc) ? state->crtc->base.id : 0;
} else if (property == config->prop_crtc_x) {
@@ -835,9 +904,10 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
/* Check whether this plane supports the fb pixel format. */
ret = drm_plane_check_pixel_format(plane, state->fb->pixel_format);
if (ret) {
- char *format_name = drm_get_format_name(state->fb->pixel_format);
- DRM_DEBUG_ATOMIC("Invalid pixel format %s\n", format_name);
- kfree(format_name);
+ struct drm_format_name_buf format_name;
+ DRM_DEBUG_ATOMIC("Invalid pixel format %s\n",
+ drm_get_format_name(state->fb->pixel_format,
+ &format_name));
return ret;
}
@@ -878,6 +948,39 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
return 0;
}
+static void drm_atomic_plane_print_state(struct drm_printer *p,
+ const struct drm_plane_state *state)
+{
+ struct drm_plane *plane = state->plane;
+ struct drm_rect src = drm_plane_state_src(state);
+ struct drm_rect dest = drm_plane_state_dest(state);
+
+ drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name);
+ drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
+ drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0);
+ if (state->fb) {
+ struct drm_framebuffer *fb = state->fb;
+ int i, n = drm_format_num_planes(fb->pixel_format);
+ struct drm_format_name_buf format_name;
+
+ drm_printf(p, "\t\tformat=%s\n",
+ drm_get_format_name(fb->pixel_format, &format_name));
+ drm_printf(p, "\t\tsize=%dx%d\n", fb->width, fb->height);
+ drm_printf(p, "\t\tlayers:\n");
+ for (i = 0; i < n; i++) {
+ drm_printf(p, "\t\t\tpitch[%d]=%u\n", i, fb->pitches[i]);
+ drm_printf(p, "\t\t\toffset[%d]=%u\n", i, fb->offsets[i]);
+ drm_printf(p, "\t\t\tmodifier[%d]=0x%llx\n", i, fb->modifier[i]);
+ }
+ }
+ drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
+ drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
+ drm_printf(p, "\trotation=%x\n", state->rotation);
+
+ if (plane->funcs->atomic_print_state)
+ plane->funcs->atomic_print_state(p, state);
+}
+
/**
* drm_atomic_get_connector_state - get connector state
* @state: global atomic state object
@@ -993,6 +1096,18 @@ int drm_atomic_connector_set_property(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_atomic_connector_set_property);
+static void drm_atomic_connector_print_state(struct drm_printer *p,
+ const struct drm_connector_state *state)
+{
+ struct drm_connector *connector = state->connector;
+
+ drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
+ drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
+
+ if (connector->funcs->atomic_print_state)
+ connector->funcs->atomic_print_state(p, state);
+}
+
/**
* drm_atomic_connector_get_property - get property value from connector state
* @connector: the drm connector to set a property on
@@ -1147,6 +1262,36 @@ drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
/**
+ * drm_atomic_set_fence_for_plane - set fence for plane
+ * @plane_state: atomic state object for the plane
+ * @fence: dma_fence to use for the plane
+ *
+ * Helper to set up the plane_state fence if it is not set yet.
+ * With this helper, drivers don't need to care whether userspace
+ * chose implicit or explicit fencing.
+ *
+ * This function does not overwrite the fence if it was already set
+ * via the explicit fencing interfaces of the atomic ioctl; in that
+ * case it just drops the reference to the passed-in fence, since it
+ * is not stored anywhere.
+ *
+ * Otherwise, if plane_state->fence is not set, this function simply
+ * sets it to the received implicit fence.
+ */
+void
+drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
+ struct dma_fence *fence)
+{
+ if (plane_state->fence) {
+ dma_fence_put(fence);
+ return;
+ }
+
+ plane_state->fence = fence;
+}
+EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);
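A typical user of this helper is a driver's ->prepare_fb() hook, which pulls the implicit (exclusive) fence out of the framebuffer's dma-buf and hands it over; if userspace already attached an explicit fence, the helper just drops the reference again. A sketch under the assumption of a CMA-backed framebuffer and the reservation_object API of this kernel generation; the foo_ name is hypothetical:

static int foo_plane_prepare_fb(struct drm_plane *plane,
				struct drm_plane_state *state)
{
	struct drm_gem_cma_object *obj;
	struct dma_fence *fence;

	if (!state->fb)
		return 0;

	obj = drm_fb_cma_get_gem_obj(state->fb, 0);
	if (obj->base.dma_buf) {
		/* the buffer's exclusive fence is the implicit fence */
		fence = reservation_object_get_excl_rcu(obj->base.dma_buf->resv);
		drm_atomic_set_fence_for_plane(state, fence);
	}

	return 0;
}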
+
+/**
* drm_atomic_set_crtc_for_connector - set crtc for connector
* @conn_state: atomic state object for the connector
* @crtc: crtc to use for the connector
@@ -1457,16 +1602,100 @@ int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
}
EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
+static void drm_atomic_print_state(const struct drm_atomic_state *state)
+{
+ struct drm_printer p = drm_info_printer(state->dev->dev);
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
+ int i;
+
+ DRM_DEBUG_ATOMIC("checking %p\n", state);
+
+ for_each_plane_in_state(state, plane, plane_state, i)
+ drm_atomic_plane_print_state(&p, plane_state);
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i)
+ drm_atomic_crtc_print_state(&p, crtc_state);
+
+ for_each_connector_in_state(state, connector, connector_state, i)
+ drm_atomic_connector_print_state(&p, connector_state);
+}
+
+/**
+ * drm_state_dump - dump entire device atomic state
+ * @dev: the drm device
+ * @p: where to print the state to
+ *
+ * Just for debugging: drivers might want an option to dump state
+ * to dmesg in case of error IRQs. (Hint: you probably want to
+ * rate-limit this!)
+ *
+ * The caller must hold drm_modeset_lock_all(); alternatively, if this
+ * is called from an error IRQ handler, it should not be enabled by
+ * default. (I.e. if you are debugging errors you might not care that
+ * this is racy, but calling this without all modeset locks held is
+ * not inherently safe.)
+ */
+void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
+{
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_connector *connector;
+
+ if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
+ return;
+
+ list_for_each_entry(plane, &config->plane_list, head)
+ drm_atomic_plane_print_state(p, plane->state);
+
+ list_for_each_entry(crtc, &config->crtc_list, head)
+ drm_atomic_crtc_print_state(p, crtc->state);
+
+ list_for_each_entry(connector, &config->connector_list, head)
+ drm_atomic_connector_print_state(p, connector->state);
+}
+EXPORT_SYMBOL(drm_state_dump);
+
+#ifdef CONFIG_DEBUG_FS
+static int drm_state_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ drm_modeset_lock_all(dev);
+ drm_state_dump(dev, &p);
+ drm_modeset_unlock_all(dev);
+
+ return 0;
+}
+
+/* any use in debugfs files to dump individual planes/crtc/etc? */
+static const struct drm_info_list drm_atomic_debugfs_list[] = {
+ {"state", drm_state_info, 0},
+};
+
+int drm_atomic_debugfs_init(struct drm_minor *minor)
+{
+ return drm_debugfs_create_files(drm_atomic_debugfs_list,
+ ARRAY_SIZE(drm_atomic_debugfs_list),
+ minor->debugfs_root, minor);
+}
+#endif
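With CONFIG_DEBUG_FS enabled, the table above exposes the same dump as a per-minor "state" file under the DRM debugfs directory, so the full atomic state can be inspected at runtime. Drivers can also call drm_state_dump() directly; a minimal sketch from an error path, assuming drm is the driver's struct drm_device pointer and heeding the locking caveat documented above:

	if (printk_ratelimit()) {
		struct drm_printer p = drm_info_printer(drm->dev);

		/* racy without the modeset locks; debugging aid only */
		drm_state_dump(drm, &p);
	}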
+
/*
 * The big monster ioctl
*/
static struct drm_pending_vblank_event *create_vblank_event(
- struct drm_device *dev, struct drm_file *file_priv,
- struct dma_fence *fence, uint64_t user_data)
+ struct drm_device *dev, uint64_t user_data)
{
struct drm_pending_vblank_event *e = NULL;
- int ret;
e = kzalloc(sizeof *e, GFP_KERNEL);
if (!e)
@@ -1476,17 +1705,6 @@ static struct drm_pending_vblank_event *create_vblank_event(
e->event.base.length = sizeof(e->event);
e->event.user_data = user_data;
- if (file_priv) {
- ret = drm_event_reserve_init(dev, file_priv, &e->base,
- &e->event.base);
- if (ret) {
- kfree(e);
- return NULL;
- }
- }
-
- e->base.fence = fence;
-
return e;
}
@@ -1591,6 +1809,165 @@ void drm_atomic_clean_old_fb(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_atomic_clean_old_fb);
+static struct dma_fence *get_crtc_fence(struct drm_crtc *crtc)
+{
+ struct dma_fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return NULL;
+
+ dma_fence_init(fence, &drm_crtc_fence_ops, &crtc->fence_lock,
+ crtc->fence_context, ++crtc->fence_seqno);
+
+ return fence;
+}
+
+struct drm_out_fence_state {
+ s64 __user *out_fence_ptr;
+ struct sync_file *sync_file;
+ int fd;
+};
+
+static int setup_out_fence(struct drm_out_fence_state *fence_state,
+ struct dma_fence *fence)
+{
+ fence_state->fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fence_state->fd < 0)
+ return fence_state->fd;
+
+ if (put_user(fence_state->fd, fence_state->out_fence_ptr))
+ return -EFAULT;
+
+ fence_state->sync_file = sync_file_create(fence);
+ if (!fence_state->sync_file)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int prepare_crtc_signaling(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ struct drm_mode_atomic *arg,
+ struct drm_file *file_priv,
+ struct drm_out_fence_state **fence_state,
+ unsigned int *num_fences)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i, ret;
+
+ if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
+ return 0;
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ u64 __user *fence_ptr;
+
+ fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
+
+ if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) {
+ struct drm_pending_vblank_event *e;
+
+ e = create_vblank_event(dev, arg->user_data);
+ if (!e)
+ return -ENOMEM;
+
+ crtc_state->event = e;
+ }
+
+ if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+ struct drm_pending_vblank_event *e = crtc_state->event;
+
+ if (!file_priv)
+ continue;
+
+ ret = drm_event_reserve_init(dev, file_priv, &e->base,
+ &e->event.base);
+ if (ret) {
+ kfree(e);
+ crtc_state->event = NULL;
+ return ret;
+ }
+ }
+
+ if (fence_ptr) {
+ struct dma_fence *fence;
+ struct drm_out_fence_state *f;
+
+ f = krealloc(*fence_state, sizeof(**fence_state) *
+ (*num_fences + 1), GFP_KERNEL);
+ if (!f)
+ return -ENOMEM;
+
+ memset(&f[*num_fences], 0, sizeof(*f));
+
+ f[*num_fences].out_fence_ptr = fence_ptr;
+ *fence_state = f;
+
+ fence = get_crtc_fence(crtc);
+ if (!fence)
+ return -ENOMEM;
+
+ ret = setup_out_fence(&f[(*num_fences)++], fence);
+ if (ret) {
+ dma_fence_put(fence);
+ return ret;
+ }
+
+ crtc_state->event->base.fence = fence;
+ }
+ }
+
+ return 0;
+}
+
+static void complete_crtc_signaling(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ struct drm_out_fence_state *fence_state,
+ unsigned int num_fences,
+ bool install_fds)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i;
+
+ if (install_fds) {
+ for (i = 0; i < num_fences; i++)
+ fd_install(fence_state[i].fd,
+ fence_state[i].sync_file->file);
+
+ kfree(fence_state);
+ return;
+ }
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ /*
+ * TEST_ONLY and PAGE_FLIP_EVENT are mutually
+ * exclusive, if they weren't, this code should be
+ * called on success for TEST_ONLY too.
+ */
+ if (crtc_state->event)
+ drm_event_cancel_free(dev, &crtc_state->event->base);
+ }
+
+ if (!fence_state)
+ return;
+
+ for (i = 0; i < num_fences; i++) {
+ if (fence_state[i].sync_file)
+ fput(fence_state[i].sync_file->file);
+ if (fence_state[i].fd >= 0)
+ put_unused_fd(fence_state[i].fd);
+
+ /* If this fails log error to the user */
+ if (fence_state[i].out_fence_ptr &&
+ put_user(-1, fence_state[i].out_fence_ptr))
+ DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n");
+ }
+
+ kfree(fence_state);
+}
+
int drm_mode_atomic_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
@@ -1603,11 +1980,10 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
struct drm_plane *plane;
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
+ struct drm_out_fence_state *fence_state = NULL;
unsigned plane_mask;
int ret = 0;
- unsigned int i, j;
+ unsigned int i, j, num_fences = 0;
/* disallow for drivers not supporting atomic: */
if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
@@ -1722,20 +2098,10 @@ retry:
drm_mode_object_unreference(obj);
}
- if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- struct drm_pending_vblank_event *e;
-
- e = create_vblank_event(dev, file_priv, NULL,
- arg->user_data);
- if (!e) {
- ret = -ENOMEM;
- goto out;
- }
-
- crtc_state->event = e;
- }
- }
+ ret = prepare_crtc_signaling(dev, state, arg, file_priv, &fence_state,
+ &num_fences);
+ if (ret)
+ goto out;
if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
/*
@@ -1746,26 +2112,16 @@ retry:
} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
ret = drm_atomic_nonblocking_commit(state);
} else {
+ if (unlikely(drm_debug & DRM_UT_STATE))
+ drm_atomic_print_state(state);
+
ret = drm_atomic_commit(state);
}
out:
drm_atomic_clean_old_fb(dev, plane_mask, ret);
- if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
- /*
- * TEST_ONLY and PAGE_FLIP_EVENT are mutually exclusive,
- * if they weren't, this code should be called on success
- * for TEST_ONLY too.
- */
-
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (!crtc_state->event)
- continue;
-
- drm_event_cancel_free(dev, &crtc_state->event->base);
- }
- }
+ complete_crtc_signaling(dev, state, fence_state, num_fences, !ret);
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);
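From userspace, the explicit-fencing plumbing added here is driven through two properties: the plane property backed by prop_in_fence_fd takes a sync_file fd that the kernel waits on, and the CRTC property backed by prop_out_fence_ptr takes a user pointer into which the kernel writes a freshly created sync_file fd for the commit. A hedged libdrm-style sketch; the CRTC, plane and property IDs are assumed to have been looked up beforehand:

#include <errno.h>
#include <poll.h>
#include <stdint.h>
#include <unistd.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int commit_with_fences(int fd, uint32_t crtc_id, uint32_t plane_id,
			      uint32_t out_fence_prop, uint32_t in_fence_prop,
			      int in_fence_fd)
{
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	struct pollfd pfd;
	int out_fence_fd = -1;
	int ret;

	if (!req)
		return -ENOMEM;

	/* the kernel waits on this sync_file before using the new fb */
	drmModeAtomicAddProperty(req, plane_id, in_fence_prop, in_fence_fd);
	/* the kernel writes a sync_file fd for this commit in here */
	drmModeAtomicAddProperty(req, crtc_id, out_fence_prop,
				 (uint64_t)(uintptr_t)&out_fence_fd);

	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
	drmModeAtomicFree(req);
	if (ret)
		return ret;

	/* the returned fd becomes readable once the commit has completed */
	pfd.fd = out_fence_fd;
	pfd.events = POLLIN;
	poll(&pfd, 1, -1);
	close(out_fence_fd);

	return 0;
}

A real commit would of course also set FB_ID, CRTC_ID and the plane coordinates in the same request; they are left out here to keep the fencing part in focus.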
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 75ad01d595fd..0b16587cdc62 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -595,10 +595,6 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
struct drm_plane_state *plane_state;
int i, ret = 0;
- ret = drm_atomic_normalize_zpos(dev, state);
- if (ret)
- return ret;
-
for_each_plane_in_state(state, plane, plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
@@ -3076,6 +3072,8 @@ void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
if (state->fb)
drm_framebuffer_reference(state->fb);
+
+ state->fence = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
@@ -3114,6 +3112,9 @@ void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state)
{
if (state->fb)
drm_framebuffer_unreference(state->fb);
+
+ if (state->fence)
+ dma_fence_put(state->fence);
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index d28ffdd2b929..6543ebde501a 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -41,6 +41,10 @@
* nor use all the elements of the LUT (for example the hardware might
* choose to interpolate between LUT[0] and LUT[4]).
*
+ * Setting this to NULL (blob property value set to 0) means a
+ * linear/pass-thru gamma table should be used. This is generally the
+ * driver boot-up state too.
+ *
* “DEGAMMA_LUT_SIZE”:
 * Unsigned range property to give the size of the lookup table to be set
* on the DEGAMMA_LUT property (the size depends on the underlying
@@ -54,6 +58,10 @@
* lookup through the gamma LUT. The data is interpreted as a struct
* &drm_color_ctm.
*
+ * Setting this to NULL (blob property value set to 0) means a
+ * unit/pass-thru matrix should be used. This is generally the driver
+ * boot-up state too.
+ *
* “GAMMA_LUT”:
* Blob property to set the gamma lookup table (LUT) mapping pixel data
* after the transformation matrix to data sent to the connector. The
@@ -62,6 +70,10 @@
* nor use all the elements of the LUT (for example the hardware might
* choose to interpolate between LUT[0] and LUT[4]).
*
+ * Setting this to NULL (blob property value set to 0) means a
+ * linear/pass-thru gamma table should be used. This is generally the
+ * driver boot-up state too.
+ *
* “GAMMA_LUT_SIZE”:
* Unsigned range property to give the size of the lookup table to be set
* on the GAMMA_LUT property (the size depends on the underlying hardware).
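As the paragraphs above explain, DEGAMMA_LUT/GAMMA_LUT are blob properties holding an array of struct drm_color_lut entries, and writing blob id 0 selects the pass-through behaviour. A hedged userspace sketch that uploads a linear gamma LUT via libdrm; the property ID lookup is assumed to have happened already, and struct drm_color_lut comes in via the DRM uapi headers pulled in by xf86drm.h:

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* upload a linear (identity) gamma LUT of 'size' entries to a CRTC */
static int set_linear_gamma(int fd, uint32_t crtc_id,
			    uint32_t gamma_lut_prop, uint32_t size)
{
	struct drm_color_lut *lut;
	uint32_t blob_id = 0;
	uint32_t i;
	int ret;

	if (size < 2)
		return -EINVAL;

	lut = calloc(size, sizeof(*lut));
	if (!lut)
		return -ENOMEM;

	for (i = 0; i < size; i++) {
		uint16_t v = 0xffff * i / (size - 1);

		lut[i].red = lut[i].green = lut[i].blue = v;
	}

	ret = drmModeCreatePropertyBlob(fd, lut, size * sizeof(*lut), &blob_id);
	free(lut);
	if (ret)
		return ret;

	/* attach the blob to the CRTC's GAMMA_LUT property */
	ret = drmModeObjectSetProperty(fd, crtc_id, DRM_MODE_OBJECT_CRTC,
				       gamma_lut_prop, blob_id);
	drmModeDestroyPropertyBlob(fd, blob_id);
	return ret;
}

Setting gamma_lut_prop to 0 instead of a blob id restores the linear/pass-through state described above.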
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 2db7fb510b6c..b5c6a8ee831e 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -1121,3 +1121,107 @@ out_unlock:
return ret;
}
+
+/**
+ * DOC: Tile group
+ *
+ * Tile groups are used to represent tiled monitors with a unique integer
+ * identifier. Tiled monitors using DisplayID v1.3 have a unique 8-byte handle;
+ * we store this in a tile group so that all tiles in a monitor group share a
+ * common identifier. The property is called "TILE". Drivers can manage tile
+ * groups using drm_mode_create_tile_group(), drm_mode_put_tile_group() and
+ * drm_mode_get_tile_group(), but this is only needed for internal panels where
+ * the tile group information is exposed in a non-standard way.
+ */
+
+static void drm_tile_group_free(struct kref *kref)
+{
+ struct drm_tile_group *tg = container_of(kref, struct drm_tile_group, refcount);
+ struct drm_device *dev = tg->dev;
+ mutex_lock(&dev->mode_config.idr_mutex);
+ idr_remove(&dev->mode_config.tile_idr, tg->id);
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ kfree(tg);
+}
+
+/**
+ * drm_mode_put_tile_group - drop a reference to a tile group.
+ * @dev: DRM device
+ * @tg: tile group to drop reference to.
+ *
+ * Drop a reference to the tile group; the group is freed once its refcount reaches zero.
+ */
+void drm_mode_put_tile_group(struct drm_device *dev,
+ struct drm_tile_group *tg)
+{
+ kref_put(&tg->refcount, drm_tile_group_free);
+}
+EXPORT_SYMBOL(drm_mode_put_tile_group);
+
+/**
+ * drm_mode_get_tile_group - get a reference to an existing tile group
+ * @dev: DRM device
+ * @topology: 8-bytes unique per monitor.
+ *
+ * Use the unique bytes to get a reference to an existing tile group.
+ *
+ * RETURNS:
+ * tile group or NULL if not found.
+ */
+struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
+ char topology[8])
+{
+ struct drm_tile_group *tg;
+ int id;
+ mutex_lock(&dev->mode_config.idr_mutex);
+ idr_for_each_entry(&dev->mode_config.tile_idr, tg, id) {
+ if (!memcmp(tg->group_data, topology, 8)) {
+ if (!kref_get_unless_zero(&tg->refcount))
+ tg = NULL;
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ return tg;
+ }
+ }
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ return NULL;
+}
+EXPORT_SYMBOL(drm_mode_get_tile_group);
+
+/**
+ * drm_mode_create_tile_group - create a tile group from a displayid description
+ * @dev: DRM device
+ * @topology: 8-bytes unique per monitor.
+ *
+ * Create a tile group for the unique monitor, and get a unique
+ * identifier for the tile group.
+ *
+ * RETURNS:
+ * new tile group or error.
+ */
+struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
+ char topology[8])
+{
+ struct drm_tile_group *tg;
+ int ret;
+
+ tg = kzalloc(sizeof(*tg), GFP_KERNEL);
+ if (!tg)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&tg->refcount);
+ memcpy(tg->group_data, topology, 8);
+ tg->dev = dev;
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+ ret = idr_alloc(&dev->mode_config.tile_idr, tg, 1, 0, GFP_KERNEL);
+ if (ret >= 0) {
+ tg->id = ret;
+ } else {
+ kfree(tg);
+ tg = ERR_PTR(ret);
+ }
+
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ return tg;
+}
+EXPORT_SYMBOL(drm_mode_create_tile_group);
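Callers of these helpers normally use a look-up-or-create pattern keyed on the 8-byte DisplayID topology, roughly as below (a fragment, assuming dev, connector and topology are in scope as in a connector probe path):

	struct drm_tile_group *tg;

	/* reuse an existing group for this topology, or create one */
	tg = drm_mode_get_tile_group(dev, topology);
	if (!tg) {
		tg = drm_mode_create_tile_group(dev, topology);
		if (IS_ERR(tg))
			return PTR_ERR(tg);
	}

	connector->tile_group = tg;	/* the connector now holds the reference */

	/* ... and on teardown ... */
	drm_mode_put_tile_group(dev, connector->tile_group);
	connector->tile_group = NULL;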
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 13441e21117c..90931e039731 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -33,6 +33,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/dma-fence.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
@@ -45,18 +46,6 @@
#include "drm_crtc_internal.h"
#include "drm_internal.h"
-/*
- * Global properties
- */
-static const struct drm_prop_enum_list drm_plane_type_enum_list[] = {
- { DRM_PLANE_TYPE_OVERLAY, "Overlay" },
- { DRM_PLANE_TYPE_PRIMARY, "Primary" },
- { DRM_PLANE_TYPE_CURSOR, "Cursor" },
-};
-
-/*
- * Optional properties
- */
/**
* drm_crtc_force_disable - Forcibly turn off a CRTC
* @crtc: CRTC to turn off
@@ -102,8 +91,6 @@ out:
}
EXPORT_SYMBOL(drm_crtc_force_disable_all);
-DEFINE_WW_CLASS(crtc_ww_class);
-
static unsigned int drm_num_crtcs(struct drm_device *dev)
{
unsigned int num = 0;
@@ -116,7 +103,7 @@ static unsigned int drm_num_crtcs(struct drm_device *dev)
return num;
}
-static int drm_crtc_register_all(struct drm_device *dev)
+int drm_crtc_register_all(struct drm_device *dev)
{
struct drm_crtc *crtc;
int ret = 0;
@@ -135,7 +122,7 @@ static int drm_crtc_register_all(struct drm_device *dev)
return 0;
}
-static void drm_crtc_unregister_all(struct drm_device *dev)
+void drm_crtc_unregister_all(struct drm_device *dev)
{
struct drm_crtc *crtc;
@@ -165,6 +152,38 @@ static void drm_crtc_crc_fini(struct drm_crtc *crtc)
#endif
}
+static struct drm_crtc *fence_to_crtc(struct dma_fence *fence)
+{
+ BUG_ON(fence->ops != &drm_crtc_fence_ops);
+ return container_of(fence->lock, struct drm_crtc, fence_lock);
+}
+
+static const char *drm_crtc_fence_get_driver_name(struct dma_fence *fence)
+{
+ struct drm_crtc *crtc = fence_to_crtc(fence);
+
+ return crtc->dev->driver->name;
+}
+
+static const char *drm_crtc_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct drm_crtc *crtc = fence_to_crtc(fence);
+
+ return crtc->timeline_name;
+}
+
+static bool drm_crtc_fence_enable_signaling(struct dma_fence *fence)
+{
+ return true;
+}
+
+const struct dma_fence_ops drm_crtc_fence_ops = {
+ .get_driver_name = drm_crtc_fence_get_driver_name,
+ .get_timeline_name = drm_crtc_fence_get_timeline_name,
+ .enable_signaling = drm_crtc_fence_enable_signaling,
+ .wait = dma_fence_default_wait,
+};
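For context, a rough sketch of how a fence on this new per-CRTC timeline could be instantiated; the helper name and the caller-managed seqno are assumptions, the actual allocation happens later in the atomic commit path:

static struct dma_fence *example_crtc_alloc_fence(struct drm_crtc *crtc,
						  unsigned int *seqno)
{
	struct dma_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return NULL;

	/* tie the fence to the per-CRTC context, lock and ops set up here */
	dma_fence_init(fence, &drm_crtc_fence_ops, &crtc->fence_lock,
		       crtc->fence_context, ++(*seqno));
	return fence;
}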
+
/**
* drm_crtc_init_with_planes - Initialise a new CRTC object with
* specified primary and cursor planes.
@@ -222,6 +241,11 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
return -ENOMEM;
}
+ crtc->fence_context = dma_fence_context_alloc(1);
+ spin_lock_init(&crtc->fence_lock);
+ snprintf(crtc->timeline_name, sizeof(crtc->timeline_name),
+ "CRTC:%d-%s", crtc->base.id, crtc->name);
+
crtc->base.properties = &crtc->properties;
list_add_tail(&crtc->head, &config->crtc_list);
@@ -229,9 +253,9 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
crtc->primary = primary;
crtc->cursor = cursor;
- if (primary)
+ if (primary && !primary->possible_crtcs)
primary->possible_crtcs = 1 << drm_crtc_index(crtc);
- if (cursor)
+ if (cursor && !cursor->possible_crtcs)
cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
ret = drm_crtc_crc_init(crtc);
@@ -243,6 +267,8 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
drm_object_attach_property(&crtc->base, config->prop_active, 0);
drm_object_attach_property(&crtc->base, config->prop_mode_id, 0);
+ drm_object_attach_property(&crtc->base,
+ config->prop_out_fence_ptr, 0);
}
return 0;
@@ -287,301 +313,6 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
}
EXPORT_SYMBOL(drm_crtc_cleanup);
-int drm_modeset_register_all(struct drm_device *dev)
-{
- int ret;
-
- ret = drm_plane_register_all(dev);
- if (ret)
- goto err_plane;
-
- ret = drm_crtc_register_all(dev);
- if (ret)
- goto err_crtc;
-
- ret = drm_encoder_register_all(dev);
- if (ret)
- goto err_encoder;
-
- ret = drm_connector_register_all(dev);
- if (ret)
- goto err_connector;
-
- return 0;
-
-err_connector:
- drm_encoder_unregister_all(dev);
-err_encoder:
- drm_crtc_unregister_all(dev);
-err_crtc:
- drm_plane_unregister_all(dev);
-err_plane:
- return ret;
-}
-
-void drm_modeset_unregister_all(struct drm_device *dev)
-{
- drm_connector_unregister_all(dev);
- drm_encoder_unregister_all(dev);
- drm_crtc_unregister_all(dev);
- drm_plane_unregister_all(dev);
-}
-
-static int drm_mode_create_standard_properties(struct drm_device *dev)
-{
- struct drm_property *prop;
- int ret;
-
- ret = drm_connector_create_standard_properties(dev);
- if (ret)
- return ret;
-
- prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
- "type", drm_plane_type_enum_list,
- ARRAY_SIZE(drm_plane_type_enum_list));
- if (!prop)
- return -ENOMEM;
- dev->mode_config.plane_type_property = prop;
-
- prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
- "SRC_X", 0, UINT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_src_x = prop;
-
- prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
- "SRC_Y", 0, UINT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_src_y = prop;
-
- prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
- "SRC_W", 0, UINT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_src_w = prop;
-
- prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
- "SRC_H", 0, UINT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_src_h = prop;
-
- prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
- "CRTC_X", INT_MIN, INT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_crtc_x = prop;
-
- prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
- "CRTC_Y", INT_MIN, INT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_crtc_y = prop;
-
- prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
- "CRTC_W", 0, INT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_crtc_w = prop;
-
- prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
- "CRTC_H", 0, INT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_crtc_h = prop;
-
- prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
- "FB_ID", DRM_MODE_OBJECT_FB);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_fb_id = prop;
-
- prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
- "CRTC_ID", DRM_MODE_OBJECT_CRTC);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_crtc_id = prop;
-
- prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC,
- "ACTIVE");
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_active = prop;
-
- prop = drm_property_create(dev,
- DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB,
- "MODE_ID", 0);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_mode_id = prop;
-
- prop = drm_property_create(dev,
- DRM_MODE_PROP_BLOB,
- "DEGAMMA_LUT", 0);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.degamma_lut_property = prop;
-
- prop = drm_property_create_range(dev,
- DRM_MODE_PROP_IMMUTABLE,
- "DEGAMMA_LUT_SIZE", 0, UINT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.degamma_lut_size_property = prop;
-
- prop = drm_property_create(dev,
- DRM_MODE_PROP_BLOB,
- "CTM", 0);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.ctm_property = prop;
-
- prop = drm_property_create(dev,
- DRM_MODE_PROP_BLOB,
- "GAMMA_LUT", 0);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.gamma_lut_property = prop;
-
- prop = drm_property_create_range(dev,
- DRM_MODE_PROP_IMMUTABLE,
- "GAMMA_LUT_SIZE", 0, UINT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.gamma_lut_size_property = prop;
-
- return 0;
-}
-
-/**
- * drm_mode_getresources - get graphics configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Construct a set of configuration description structures and return
- * them to the user, including CRTC, connector and framebuffer configuration.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getresources(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_mode_card_res *card_res = data;
- struct list_head *lh;
- struct drm_framebuffer *fb;
- struct drm_connector *connector;
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
- int ret = 0;
- int connector_count = 0;
- int crtc_count = 0;
- int fb_count = 0;
- int encoder_count = 0;
- int copied = 0;
- uint32_t __user *fb_id;
- uint32_t __user *crtc_id;
- uint32_t __user *connector_id;
- uint32_t __user *encoder_id;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
-
- mutex_lock(&file_priv->fbs_lock);
- /*
- * For the non-control nodes we need to limit the list of resources
- * by IDs in the group list for this node
- */
- list_for_each(lh, &file_priv->fbs)
- fb_count++;
-
- /* handle this in 4 parts */
- /* FBs */
- if (card_res->count_fbs >= fb_count) {
- copied = 0;
- fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
- list_for_each_entry(fb, &file_priv->fbs, filp_head) {
- if (put_user(fb->base.id, fb_id + copied)) {
- mutex_unlock(&file_priv->fbs_lock);
- return -EFAULT;
- }
- copied++;
- }
- }
- card_res->count_fbs = fb_count;
- mutex_unlock(&file_priv->fbs_lock);
-
- /* mode_config.mutex protects the connector list against e.g. DP MST
- * connector hot-adding. CRTC/Plane lists are invariant. */
- mutex_lock(&dev->mode_config.mutex);
- drm_for_each_crtc(crtc, dev)
- crtc_count++;
-
- drm_for_each_connector(connector, dev)
- connector_count++;
-
- drm_for_each_encoder(encoder, dev)
- encoder_count++;
-
- card_res->max_height = dev->mode_config.max_height;
- card_res->min_height = dev->mode_config.min_height;
- card_res->max_width = dev->mode_config.max_width;
- card_res->min_width = dev->mode_config.min_width;
-
- /* CRTCs */
- if (card_res->count_crtcs >= crtc_count) {
- copied = 0;
- crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
- drm_for_each_crtc(crtc, dev) {
- if (put_user(crtc->base.id, crtc_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- card_res->count_crtcs = crtc_count;
-
- /* Encoders */
- if (card_res->count_encoders >= encoder_count) {
- copied = 0;
- encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
- drm_for_each_encoder(encoder, dev) {
- if (put_user(encoder->base.id, encoder_id +
- copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- card_res->count_encoders = encoder_count;
-
- /* Connectors */
- if (card_res->count_connectors >= connector_count) {
- copied = 0;
- connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
- drm_for_each_connector(connector, dev) {
- if (put_user(connector->base.id,
- connector_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- card_res->count_connectors = connector_count;
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
/**
* drm_mode_getcrtc - get CRTC configuration
* @dev: drm device for the ioctl
@@ -827,9 +558,10 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
ret = drm_plane_check_pixel_format(crtc->primary,
fb->pixel_format);
if (ret) {
- char *format_name = drm_get_format_name(fb->pixel_format);
- DRM_DEBUG_KMS("Invalid pixel format %s\n", format_name);
- kfree(format_name);
+ struct drm_format_name_buf format_name;
+ DRM_DEBUG_KMS("Invalid pixel format %s\n",
+ drm_get_format_name(fb->pixel_format,
+ &format_name));
goto out;
}
}
@@ -933,362 +665,3 @@ int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
return ret;
}
-
-/**
- * drm_mode_config_reset - call ->reset callbacks
- * @dev: drm device
- *
- * This functions calls all the crtc's, encoder's and connector's ->reset
- * callback. Drivers can use this in e.g. their driver load or resume code to
- * reset hardware and software state.
- */
-void drm_mode_config_reset(struct drm_device *dev)
-{
- struct drm_crtc *crtc;
- struct drm_plane *plane;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
-
- drm_for_each_plane(plane, dev)
- if (plane->funcs->reset)
- plane->funcs->reset(plane);
-
- drm_for_each_crtc(crtc, dev)
- if (crtc->funcs->reset)
- crtc->funcs->reset(crtc);
-
- drm_for_each_encoder(encoder, dev)
- if (encoder->funcs->reset)
- encoder->funcs->reset(encoder);
-
- mutex_lock(&dev->mode_config.mutex);
- drm_for_each_connector(connector, dev)
- if (connector->funcs->reset)
- connector->funcs->reset(connector);
- mutex_unlock(&dev->mode_config.mutex);
-}
-EXPORT_SYMBOL(drm_mode_config_reset);
-
-/**
- * drm_mode_create_dumb_ioctl - create a dumb backing storage buffer
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This creates a new dumb buffer in the driver's backing storage manager (GEM,
- * TTM or something else entirely) and returns the resulting buffer handle. This
- * handle can then be wrapped up into a framebuffer modeset object.
- *
- * Note that userspace is not allowed to use such objects for render
- * acceleration - drivers must create their own private ioctls for such a use
- * case.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_create_dumb_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_create_dumb *args = data;
- u32 cpp, stride, size;
-
- if (!dev->driver->dumb_create)
- return -ENOSYS;
- if (!args->width || !args->height || !args->bpp)
- return -EINVAL;
-
- /* overflow checks for 32bit size calculations */
- /* NOTE: DIV_ROUND_UP() can overflow */
- cpp = DIV_ROUND_UP(args->bpp, 8);
- if (!cpp || cpp > 0xffffffffU / args->width)
- return -EINVAL;
- stride = cpp * args->width;
- if (args->height > 0xffffffffU / stride)
- return -EINVAL;
-
- /* test for wrap-around */
- size = args->height * stride;
- if (PAGE_ALIGN(size) == 0)
- return -EINVAL;
-
- /*
- * handle, pitch and size are output parameters. Zero them out to
- * prevent drivers from accidentally using uninitialized data. Since
- * not all existing userspace is clearing these fields properly we
- * cannot reject IOCTL with garbage in them.
- */
- args->handle = 0;
- args->pitch = 0;
- args->size = 0;
-
- return dev->driver->dumb_create(file_priv, dev, args);
-}
-
-/**
- * drm_mode_mmap_dumb_ioctl - create an mmap offset for a dumb backing storage buffer
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * Allocate an offset in the drm device node's address space to be able to
- * memory map a dumb buffer.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_map_dumb *args = data;
-
- /* call driver ioctl to get mmap offset */
- if (!dev->driver->dumb_map_offset)
- return -ENOSYS;
-
- return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
-}
-
-/**
- * drm_mode_destroy_dumb_ioctl - destroy a dumb backing strage buffer
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This destroys the userspace handle for the given dumb backing storage buffer.
- * Since buffer objects must be reference counted in the kernel a buffer object
- * won't be immediately freed if a framebuffer modeset object still uses it.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_destroy_dumb *args = data;
-
- if (!dev->driver->dumb_destroy)
- return -ENOSYS;
-
- return dev->driver->dumb_destroy(file_priv, dev, args->handle);
-}
-
-/**
- * drm_mode_config_init - initialize DRM mode_configuration structure
- * @dev: DRM device
- *
- * Initialize @dev's mode_config structure, used for tracking the graphics
- * configuration of @dev.
- *
- * Since this initializes the modeset locks, no locking is possible. Which is no
- * problem, since this should happen single threaded at init time. It is the
- * driver's problem to ensure this guarantee.
- *
- */
-void drm_mode_config_init(struct drm_device *dev)
-{
- mutex_init(&dev->mode_config.mutex);
- drm_modeset_lock_init(&dev->mode_config.connection_mutex);
- mutex_init(&dev->mode_config.idr_mutex);
- mutex_init(&dev->mode_config.fb_lock);
- mutex_init(&dev->mode_config.blob_lock);
- INIT_LIST_HEAD(&dev->mode_config.fb_list);
- INIT_LIST_HEAD(&dev->mode_config.crtc_list);
- INIT_LIST_HEAD(&dev->mode_config.connector_list);
- INIT_LIST_HEAD(&dev->mode_config.encoder_list);
- INIT_LIST_HEAD(&dev->mode_config.property_list);
- INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
- INIT_LIST_HEAD(&dev->mode_config.plane_list);
- idr_init(&dev->mode_config.crtc_idr);
- idr_init(&dev->mode_config.tile_idr);
- ida_init(&dev->mode_config.connector_ida);
-
- drm_modeset_lock_all(dev);
- drm_mode_create_standard_properties(dev);
- drm_modeset_unlock_all(dev);
-
- /* Just to be sure */
- dev->mode_config.num_fb = 0;
- dev->mode_config.num_connector = 0;
- dev->mode_config.num_crtc = 0;
- dev->mode_config.num_encoder = 0;
- dev->mode_config.num_overlay_plane = 0;
- dev->mode_config.num_total_plane = 0;
-}
-EXPORT_SYMBOL(drm_mode_config_init);
-
-/**
- * drm_mode_config_cleanup - free up DRM mode_config info
- * @dev: DRM device
- *
- * Free up all the connectors and CRTCs associated with this DRM device, then
- * free up the framebuffers and associated buffer objects.
- *
- * Note that since this /should/ happen single-threaded at driver/device
- * teardown time, no locking is required. It's the driver's job to ensure that
- * this guarantee actually holds true.
- *
- * FIXME: cleanup any dangling user buffer objects too
- */
-void drm_mode_config_cleanup(struct drm_device *dev)
-{
- struct drm_connector *connector, *ot;
- struct drm_crtc *crtc, *ct;
- struct drm_encoder *encoder, *enct;
- struct drm_framebuffer *fb, *fbt;
- struct drm_property *property, *pt;
- struct drm_property_blob *blob, *bt;
- struct drm_plane *plane, *plt;
-
- list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
- head) {
- encoder->funcs->destroy(encoder);
- }
-
- list_for_each_entry_safe(connector, ot,
- &dev->mode_config.connector_list, head) {
- connector->funcs->destroy(connector);
- }
-
- list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
- head) {
- drm_property_destroy(dev, property);
- }
-
- list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
- head) {
- plane->funcs->destroy(plane);
- }
-
- list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
- crtc->funcs->destroy(crtc);
- }
-
- list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list,
- head_global) {
- drm_property_unreference_blob(blob);
- }
-
- /*
- * Single-threaded teardown context, so it's not required to grab the
- * fb_lock to protect against concurrent fb_list access. Contrary, it
- * would actually deadlock with the drm_framebuffer_cleanup function.
- *
- * Also, if there are any framebuffers left, that's a driver leak now,
- * so politely WARN about this.
- */
- WARN_ON(!list_empty(&dev->mode_config.fb_list));
- list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
- drm_framebuffer_free(&fb->base.refcount);
- }
-
- ida_destroy(&dev->mode_config.connector_ida);
- idr_destroy(&dev->mode_config.tile_idr);
- idr_destroy(&dev->mode_config.crtc_idr);
- drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
-}
-EXPORT_SYMBOL(drm_mode_config_cleanup);
-
-/**
- * DOC: Tile group
- *
- * Tile groups are used to represent tiled monitors with a unique
- * integer identifier. Tiled monitors using DisplayID v1.3 have
- * a unique 8-byte handle, we store this in a tile group, so we
- * have a common identifier for all tiles in a monitor group.
- */
-static void drm_tile_group_free(struct kref *kref)
-{
- struct drm_tile_group *tg = container_of(kref, struct drm_tile_group, refcount);
- struct drm_device *dev = tg->dev;
- mutex_lock(&dev->mode_config.idr_mutex);
- idr_remove(&dev->mode_config.tile_idr, tg->id);
- mutex_unlock(&dev->mode_config.idr_mutex);
- kfree(tg);
-}
-
-/**
- * drm_mode_put_tile_group - drop a reference to a tile group.
- * @dev: DRM device
- * @tg: tile group to drop reference to.
- *
- * drop reference to tile group and free if 0.
- */
-void drm_mode_put_tile_group(struct drm_device *dev,
- struct drm_tile_group *tg)
-{
- kref_put(&tg->refcount, drm_tile_group_free);
-}
-
-/**
- * drm_mode_get_tile_group - get a reference to an existing tile group
- * @dev: DRM device
- * @topology: 8-bytes unique per monitor.
- *
- * Use the unique bytes to get a reference to an existing tile group.
- *
- * RETURNS:
- * tile group or NULL if not found.
- */
-struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
- char topology[8])
-{
- struct drm_tile_group *tg;
- int id;
- mutex_lock(&dev->mode_config.idr_mutex);
- idr_for_each_entry(&dev->mode_config.tile_idr, tg, id) {
- if (!memcmp(tg->group_data, topology, 8)) {
- if (!kref_get_unless_zero(&tg->refcount))
- tg = NULL;
- mutex_unlock(&dev->mode_config.idr_mutex);
- return tg;
- }
- }
- mutex_unlock(&dev->mode_config.idr_mutex);
- return NULL;
-}
-EXPORT_SYMBOL(drm_mode_get_tile_group);
-
-/**
- * drm_mode_create_tile_group - create a tile group from a displayid description
- * @dev: DRM device
- * @topology: 8-bytes unique per monitor.
- *
- * Create a tile group for the unique monitor, and get a unique
- * identifier for the tile group.
- *
- * RETURNS:
- * new tile group or error.
- */
-struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
- char topology[8])
-{
- struct drm_tile_group *tg;
- int ret;
-
- tg = kzalloc(sizeof(*tg), GFP_KERNEL);
- if (!tg)
- return ERR_PTR(-ENOMEM);
-
- kref_init(&tg->refcount);
- memcpy(tg->group_data, topology, 8);
- tg->dev = dev;
-
- mutex_lock(&dev->mode_config.idr_mutex);
- ret = idr_alloc(&dev->mode_config.tile_idr, tg, 1, 0, GFP_KERNEL);
- if (ret >= 0) {
- tg->id = ret;
- } else {
- kfree(tg);
- tg = ERR_PTR(ret);
- }
-
- mutex_unlock(&dev->mode_config.idr_mutex);
- return tg;
-}
-EXPORT_SYMBOL(drm_mode_create_tile_group);
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index c48ba02c5365..33b17d0b127e 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -40,10 +40,29 @@ int drm_crtc_check_viewport(const struct drm_crtc *crtc,
int x, int y,
const struct drm_display_mode *mode,
const struct drm_framebuffer *fb);
+int drm_crtc_register_all(struct drm_device *dev);
+void drm_crtc_unregister_all(struct drm_device *dev);
-void drm_fb_release(struct drm_file *file_priv);
+extern const struct dma_fence_ops drm_crtc_fence_ops;
+
+/* IOCTLs */
+int drm_mode_getcrtc(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_setcrtc(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+
+
+/* drm_mode_config.c */
+int drm_modeset_register_all(struct drm_device *dev);
+void drm_modeset_unregister_all(struct drm_device *dev);
+
+/* IOCTLs */
+int drm_mode_getresources(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
-/* dumb buffer support IOCTLs */
+
+/* drm_dumb_buffers.c */
+/* IOCTLs */
int drm_mode_create_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
@@ -51,14 +70,6 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
-/* IOCTLs */
-int drm_mode_getresources(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-int drm_mode_getcrtc(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-int drm_mode_setcrtc(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-
/* drm_color_mgmt.c */
/* IOCTLs */
@@ -147,6 +158,8 @@ void drm_framebuffer_free(struct kref *kref);
int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h,
const struct drm_framebuffer *fb);
+void drm_fb_release(struct drm_file *file_priv);
+
/* IOCTL */
int drm_mode_addfb(struct drm_device *dev,
@@ -166,9 +179,6 @@ int drm_atomic_get_property(struct drm_mode_object *obj,
int drm_mode_atomic_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
-int drm_modeset_register_all(struct drm_device *dev);
-void drm_modeset_unregister_all(struct drm_device *dev);
-
/* drm_plane.c */
int drm_plane_register_all(struct drm_device *dev);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 800055c39cdb..206a4fe7ea26 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -36,6 +36,7 @@
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>
+#include <drm/drm_atomic.h>
#include "drm_internal.h"
#if defined(CONFIG_DEBUG_FS)
@@ -163,6 +164,14 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
return ret;
}
+ if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
+ ret = drm_atomic_debugfs_init(minor);
+ if (ret) {
+ DRM_ERROR("Failed to create atomic debugfs files\n");
+ return ret;
+ }
+ }
+
if (dev->driver->debugfs_init) {
ret = dev->driver->debugfs_init(minor);
if (ret) {
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 04e457117980..aa644487749c 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -914,6 +914,7 @@ static void drm_dp_destroy_port(struct kref *kref)
/* no need to clean up vcpi
* as if we have no connector we never setup a vcpi */
drm_dp_port_teardown_pdt(port, port->pdt);
+ port->pdt = DP_PEER_DEVICE_NONE;
}
kfree(port);
}
@@ -1159,7 +1160,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
drm_dp_put_port(port);
goto out;
}
- if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
+ if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
+ port->pdt == DP_PEER_DEVICE_SST_SINK) &&
+ port->port_num >= DP_MST_LOGICAL_PORT_0) {
port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
drm_mode_connector_set_tile_property(port->connector);
}
@@ -2919,6 +2922,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
mgr->cbs->destroy_connector(mgr, port->connector);
drm_dp_port_teardown_pdt(port, port->pdt);
+ port->pdt = DP_PEER_DEVICE_NONE;
if (!port->input && port->vcpi.vcpi > 0) {
drm_dp_mst_reset_vcpi_slots(mgr, port);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 6efdba4993fc..6dbb98649795 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -32,7 +32,10 @@
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/slab.h>
+
+#include <drm/drm_drv.h>
#include <drm/drmP.h>
+
#include "drm_crtc_internal.h"
#include "drm_legacy.h"
#include "drm_internal.h"
@@ -257,10 +260,7 @@ static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
drm_debugfs_cleanup(minor);
}
-/**
- * drm_minor_acquire - Acquire a DRM minor
- * @minor_id: Minor ID of the DRM-minor
- *
+/*
* Looks up the given minor-ID and returns the respective DRM-minor object. The
* reference-count of the underlying device is increased, so you must release this
* object with drm_minor_release().
@@ -268,10 +268,6 @@ static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
* As long as you hold this minor, it is guaranteed that the object and the
* minor->dev pointer will stay valid! However, the device may get unplugged and
* unregistered while you hold the minor.
- *
- * Returns:
- * Pointer to minor-object with increased device-refcount, or PTR_ERR on
- * failure.
*/
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
@@ -294,12 +290,6 @@ struct drm_minor *drm_minor_acquire(unsigned int minor_id)
return minor;
}
-/**
- * drm_minor_release - Release DRM minor
- * @minor: Pointer to DRM minor object
- *
- * Release a minor that was previously acquired via drm_minor_acquire().
- */
void drm_minor_release(struct drm_minor *minor)
{
drm_dev_unref(minor->dev);
@@ -568,6 +558,9 @@ err_minors:
drm_fs_inode_free(dev->anon_inode);
err_free:
mutex_destroy(&dev->master_mutex);
+ mutex_destroy(&dev->ctxlist_mutex);
+ mutex_destroy(&dev->filelist_mutex);
+ mutex_destroy(&dev->struct_mutex);
return ret;
}
EXPORT_SYMBOL(drm_dev_init);
@@ -630,6 +623,9 @@ static void drm_dev_release(struct kref *ref)
drm_minor_free(dev, DRM_MINOR_CONTROL);
mutex_destroy(&dev->master_mutex);
+ mutex_destroy(&dev->ctxlist_mutex);
+ mutex_destroy(&dev->filelist_mutex);
+ mutex_destroy(&dev->struct_mutex);
kfree(dev->unique);
kfree(dev);
}
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
new file mode 100644
index 000000000000..8ac5a1c1d811
--- /dev/null
+++ b/drivers/gpu/drm/drm_dumb_buffers.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2006-2008 Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2008 Red Hat Inc.
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * The KMS API doesn't standardize backing storage object creation and leaves it
+ * to driver-specific ioctls. Furthermore, actually creating a buffer object, even
+ * for GEM-based drivers, is done through a driver-specific ioctl - GEM only has
+ * a common userspace interface for sharing and destroying objects. While not an
+ * issue for full-fledged graphics stacks that include device-specific userspace
+ * components (in libdrm for instance), this limitation makes DRM-based early boot
+ * graphics unnecessarily complex.
+ *
+ * Dumb objects partly alleviate the problem by providing a standard API to
+ * create dumb buffers suitable for scanout, which can then be used to create
+ * KMS frame buffers.
+ *
+ * To support dumb objects drivers must implement the dumb_create,
+ * dumb_destroy and dumb_map_offset operations from struct &drm_driver. See
+ * there for further details.
+ *
+ * Note that dumb objects may not be used for gpu acceleration, as has been
+ * attempted on some ARM embedded platforms. Such drivers really must have
+ * a hardware-specific ioctl to allocate suitable buffer objects.
+ */
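As a hedged illustration (not part of this file), a CMA-backed driver would typically route these ioctls by filling in the corresponding &drm_driver callbacks with the stock GEM/CMA helpers:

static struct drm_driver example_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	/* dumb-buffer support: the three hooks dispatched by this file */
	.dumb_create		= drm_gem_cma_dumb_create,
	.dumb_map_offset	= drm_gem_cma_dumb_map_offset,
	.dumb_destroy		= drm_gem_dumb_destroy,
	/* fops, name, ioctls etc. omitted for brevity */
};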
+
+int drm_mode_create_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_create_dumb *args = data;
+ u32 cpp, stride, size;
+
+ if (!dev->driver->dumb_create)
+ return -ENOSYS;
+ if (!args->width || !args->height || !args->bpp)
+ return -EINVAL;
+
+ /* overflow checks for 32bit size calculations */
+ /* NOTE: DIV_ROUND_UP() can overflow */
+ cpp = DIV_ROUND_UP(args->bpp, 8);
+ if (!cpp || cpp > 0xffffffffU / args->width)
+ return -EINVAL;
+ stride = cpp * args->width;
+ if (args->height > 0xffffffffU / stride)
+ return -EINVAL;
+
+ /* test for wrap-around */
+ size = args->height * stride;
+ if (PAGE_ALIGN(size) == 0)
+ return -EINVAL;
+
+ /*
+ * handle, pitch and size are output parameters. Zero them out to
+ * prevent drivers from accidentally using uninitialized data. Since
+ * not all existing userspace is clearing these fields properly we
+ * cannot reject IOCTL with garbage in them.
+ */
+ args->handle = 0;
+ args->pitch = 0;
+ args->size = 0;
+
+ return dev->driver->dumb_create(file_priv, dev, args);
+}
+
+/**
+ * drm_mode_mmap_dumb_ioctl - create an mmap offset for a dumb backing storage buffer
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Allocate an offset in the drm device node's address space to be able to
+ * memory map a dumb buffer.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_map_dumb *args = data;
+
+ /* call driver ioctl to get mmap offset */
+ if (!dev->driver->dumb_map_offset)
+ return -ENOSYS;
+
+ return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
+}
+
+int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_destroy_dumb *args = data;
+
+ if (!dev->driver->dumb_destroy)
+ return -ENOSYS;
+
+ return dev->driver->dumb_destroy(file_priv, dev, args->handle);
+}
+
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 1801e9c0e41b..6798c3ad9d53 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -957,13 +957,13 @@ static const struct drm_display_mode edid_cea_modes[] = {
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 58 - 720(1440)x480i@240 */
+ /* 58 - 720(1440)x480i@240Hz */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 59 - 720(1440)x480i@240 */
+ /* 59 - 720(1440)x480i@240Hz */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 4c6664407bfb..81b3558302b5 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -18,13 +18,16 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
+#include <linux/reservation.h>
#define DEFAULT_FBDEFIO_DELAY_MS 50
@@ -265,6 +268,38 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
}
EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
+/**
+ * drm_fb_cma_prepare_fb() - Prepare CMA framebuffer
+ * @plane: Which plane
+ * @state: Plane state the fence will be attached to
+ *
+ * This should be set as the prepare_fb hook in struct &drm_plane_helper_funcs.
+ *
+ * This function checks if the plane FB has a dma-buf attached, extracts
+ * the exclusive fence and attaches it to the plane state for the atomic
+ * helper to wait on.
+ *
+ * There is no need for a cleanup_fb hook for CMA based framebuffer drivers.
+ */
+int drm_fb_cma_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct dma_buf *dma_buf;
+ struct dma_fence *fence;
+
+ if ((plane->state->fb == state->fb) || !state->fb)
+ return 0;
+
+ dma_buf = drm_fb_cma_get_gem_obj(state->fb, 0)->base.dma_buf;
+ if (dma_buf) {
+ fence = reservation_object_get_excl_rcu(dma_buf->resv);
+ drm_atomic_set_fence_for_plane(state, fence);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_prepare_fb);
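A usage sketch under the assumption of a CMA-based driver: the helper simply becomes the plane's prepare_fb hook, with the remaining (driver-specific) hooks omitted:

static const struct drm_plane_helper_funcs example_plane_helper_funcs = {
	/* other atomic hooks are driver-specific and left out here */
	.prepare_fb = drm_fb_cma_prepare_fb,
};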
+
#ifdef CONFIG_DEBUG_FS
static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
@@ -314,14 +349,10 @@ static int drm_fb_cma_mmap(struct fb_info *info, struct vm_area_struct *vma)
static struct fb_ops drm_fbdev_cma_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_sys_fillrect,
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_blank = drm_fb_helper_blank,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_setcmap = drm_fb_helper_setcmap,
.fb_mmap = drm_fb_cma_mmap,
};
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 83dbae0fabcf..14547817566d 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -131,7 +131,12 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
return 0;
fail:
for (i = 0; i < fb_helper->connector_count; i++) {
- kfree(fb_helper->connector_info[i]);
+ struct drm_fb_helper_connector *fb_helper_connector =
+ fb_helper->connector_info[i];
+
+ drm_connector_unreference(fb_helper_connector->connector);
+
+ kfree(fb_helper_connector);
fb_helper->connector_info[i] = NULL;
}
fb_helper->connector_count = 0;
@@ -251,6 +256,9 @@ int drm_fb_helper_debug_enter(struct fb_info *info)
continue;
funcs = mode_set->crtc->helper_private;
+ if (funcs->mode_set_base_atomic == NULL)
+ continue;
+
drm_fb_helper_save_lut_atomic(mode_set->crtc, helper);
funcs->mode_set_base_atomic(mode_set->crtc,
mode_set->fb,
@@ -304,6 +312,9 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
continue;
}
+ if (funcs->mode_set_base_atomic == NULL)
+ continue;
+
drm_fb_helper_restore_lut_atomic(mode_set->crtc);
funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x,
crtc->y, LEAVE_ATOMIC_MODE_SET);
@@ -600,6 +611,24 @@ int drm_fb_helper_blank(int blank, struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_blank);
+static void drm_fb_helper_modeset_release(struct drm_fb_helper *helper,
+ struct drm_mode_set *modeset)
+{
+ int i;
+
+ for (i = 0; i < modeset->num_connectors; i++) {
+ drm_connector_unreference(modeset->connectors[i]);
+ modeset->connectors[i] = NULL;
+ }
+ modeset->num_connectors = 0;
+
+ drm_mode_destroy(helper->dev, modeset->mode);
+ modeset->mode = NULL;
+
+ /* FIXME should hold a ref? */
+ modeset->fb = NULL;
+}
+
static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
{
int i;
@@ -609,10 +638,12 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
kfree(helper->connector_info[i]);
}
kfree(helper->connector_info);
+
for (i = 0; i < helper->crtc_count; i++) {
- kfree(helper->crtc_info[i].mode_set.connectors);
- if (helper->crtc_info[i].mode_set.mode)
- drm_mode_destroy(helper->dev, helper->crtc_info[i].mode_set.mode);
+ struct drm_mode_set *modeset = &helper->crtc_info[i].mode_set;
+
+ drm_fb_helper_modeset_release(helper, modeset);
+ kfree(modeset->connectors);
}
kfree(helper->crtc_info);
}
@@ -641,7 +672,9 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
clip->x2 = clip->y2 = 0;
spin_unlock_irqrestore(&helper->dirty_lock, flags);
- helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
+ /* call dirty callback only when it has been really touched */
+ if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2)
+ helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
}
/**
@@ -2085,7 +2118,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
struct drm_fb_helper_crtc **crtcs;
struct drm_display_mode **modes;
struct drm_fb_offset *offsets;
- struct drm_mode_set *modeset;
bool *enabled;
int width, height;
int i;
@@ -2133,45 +2165,35 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
/* need to set the modesets up here for use later */
/* fill out the connector<->crtc mappings into the modesets */
- for (i = 0; i < fb_helper->crtc_count; i++) {
- modeset = &fb_helper->crtc_info[i].mode_set;
- modeset->num_connectors = 0;
- modeset->fb = NULL;
- }
+ for (i = 0; i < fb_helper->crtc_count; i++)
+ drm_fb_helper_modeset_release(fb_helper,
+ &fb_helper->crtc_info[i].mode_set);
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_display_mode *mode = modes[i];
struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
struct drm_fb_offset *offset = &offsets[i];
- modeset = &fb_crtc->mode_set;
+ struct drm_mode_set *modeset = &fb_crtc->mode_set;
if (mode && fb_crtc) {
+ struct drm_connector *connector =
+ fb_helper->connector_info[i]->connector;
+
DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n",
mode->name, fb_crtc->mode_set.crtc->base.id, offset->x, offset->y);
+
fb_crtc->desired_mode = mode;
fb_crtc->x = offset->x;
fb_crtc->y = offset->y;
- if (modeset->mode)
- drm_mode_destroy(dev, modeset->mode);
modeset->mode = drm_mode_duplicate(dev,
fb_crtc->desired_mode);
- modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
+ drm_connector_reference(connector);
+ modeset->connectors[modeset->num_connectors++] = connector;
modeset->fb = fb_helper->fb;
modeset->x = offset->x;
modeset->y = offset->y;
}
}
-
- /* Clear out any old modes if there are no more connected outputs. */
- for (i = 0; i < fb_helper->crtc_count; i++) {
- modeset = &fb_helper->crtc_info[i].mode_set;
- if (modeset->num_connectors == 0) {
- BUG_ON(modeset->fb);
- if (modeset->mode)
- drm_mode_destroy(dev, modeset->mode);
- modeset->mode = NULL;
- }
- }
out:
kfree(crtcs);
kfree(modes);
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index cf993dbf602e..5d96de40b63f 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -51,10 +51,11 @@ DEFINE_MUTEX(drm_global_mutex);
* Drivers must define the file operations structure that forms the DRM
* userspace API entry point, even though most of those operations are
* implemented in the DRM core. The mandatory functions are drm_open(),
- * drm_read(), drm_ioctl() and drm_compat_ioctl if CONFIG_COMPAT is enabled.
- * Drivers which implement private ioctls that require 32/64 bit compatibility
- * support must provided their onw .compat_ioctl() handler that processes
- * private ioctls and calls drm_compat_ioctl() for core ioctls.
+ * drm_read(), drm_ioctl() and drm_compat_ioctl() if CONFIG_COMPAT is enabled
+ * (note that drm_compat_ioctl will be NULL if CONFIG_COMPAT=n). Drivers which
+ * implement private ioctls that require 32/64 bit compatibility support must
+ * provide their own .compat_ioctl() handler that processes private ioctls and
+ * calls drm_compat_ioctl() for core ioctls.
*
* In addition drm_read() and drm_poll() provide support for DRM events. DRM
* events are a generic and extensible means to send asynchronous events to
@@ -75,9 +76,7 @@ DEFINE_MUTEX(drm_global_mutex);
* .open = drm_open,
* .release = drm_release,
* .unlocked_ioctl = drm_ioctl,
- * #ifdef CONFIG_COMPAT
- * .compat_ioctl = drm_compat_ioctl,
- * #endif
+ * .compat_ioctl = drm_compat_ioctl, // NULL if CONFIG_COMPAT=n
* .poll = drm_poll,
* .read = drm_read,
* .llseek = no_llseek,
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index cbb8b77c363c..90d2cc8da8eb 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -79,17 +79,13 @@ uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
EXPORT_SYMBOL(drm_mode_legacy_fb_format);
/**
- * drm_get_format_name - return a string for drm fourcc format
+ * drm_get_format_name - fill a string with a drm fourcc format's name
* @format: format to compute name of
- *
- * Note that the buffer returned by this function is owned by the caller
- * and will need to be freed using kfree().
+ * @buf: caller-supplied buffer
*/
-char *drm_get_format_name(uint32_t format)
+const char *drm_get_format_name(uint32_t format, struct drm_format_name_buf *buf)
{
- char *buf = kmalloc(32, GFP_KERNEL);
-
- snprintf(buf, 32,
+ snprintf(buf->str, sizeof(buf->str),
"%c%c%c%c %s-endian (0x%08x)",
printable_char(format & 0xff),
printable_char((format >> 8) & 0xff),
@@ -98,7 +94,7 @@ char *drm_get_format_name(uint32_t format)
format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little",
format);
- return buf;
+ return buf->str;
}
EXPORT_SYMBOL(drm_get_format_name);
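The new calling convention, as exercised by the other hunks in this series, keeps the name buffer on the caller's stack so nothing has to be freed; a small sketch:

static void example_log_format(u32 format)
{
	struct drm_format_name_buf buf;

	/* no kfree() needed any more, the formatted name lives in 'buf' */
	DRM_DEBUG_KMS("format %s\n", drm_get_format_name(format, &buf));
}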
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 49fd7db758e0..06ad3d1350c4 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -133,9 +133,10 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
info = __drm_format_info(r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN);
if (!info) {
- char *format_name = drm_get_format_name(r->pixel_format);
- DRM_DEBUG_KMS("bad framebuffer format %s\n", format_name);
- kfree(format_name);
+ struct drm_format_name_buf format_name;
+ DRM_DEBUG_KMS("bad framebuffer format %s\n",
+ drm_get_format_name(r->pixel_format,
+ &format_name));
return -EINVAL;
}
@@ -673,6 +674,11 @@ EXPORT_SYMBOL(drm_framebuffer_lookup);
* those used for fbdev. Note that the caller must hold a reference of its own,
* i.e. the object may not be destroyed through this call (since it'll lead to a
* locking inversion).
+ *
+ * NOTE: This function is deprecated. For driver-private framebuffers it is not
+ * recommended to embed a framebuffer struct into the fbdev struct; instead, a
+ * framebuffer pointer is preferred, and drm_framebuffer_unreference() should be
+ * called when the framebuffer is to be cleaned up.
*/
void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
{
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index abd209863ef4..db80ec860e33 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -24,9 +24,6 @@
#define DRM_IF_MAJOR 1
#define DRM_IF_MINOR 4
-/* drm_irq.c */
-extern unsigned int drm_timestamp_monotonic;
-
/* drm_fops.c */
extern struct mutex drm_global_mutex;
void drm_lastclose(struct drm_device *dev);
@@ -46,12 +43,21 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
struct dma_buf *dma_buf);
+/* drm_drv.c */
+struct drm_minor *drm_minor_acquire(unsigned int minor_id);
+void drm_minor_release(struct drm_minor *minor);
+
/* drm_info.c */
int drm_name_info(struct seq_file *m, void *data);
int drm_clients_info(struct seq_file *m, void* data);
int drm_gem_name_info(struct seq_file *m, void *data);
/* drm_irq.c */
+extern unsigned int drm_timestamp_monotonic;
+
+/* IOCTLS */
+int drm_wait_vblank(struct drm_device *dev, void *data,
+ struct drm_file *filp);
int drm_control(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_modeset_ctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 48a6167f5e7b..273625a85036 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -93,7 +93,7 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
* Reset the stored timestamp for the current vblank count to correspond
* to the last vblank occurred.
*
- * Only to be called from drm_vblank_on().
+ * Only to be called from drm_crtc_vblank_on().
*
* Note: caller must hold dev->vbl_lock since this reads & writes
* device vblank fields.
@@ -234,6 +234,16 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
store_vblank(dev, pipe, diff, &t_vblank, cur_vblank);
}
+static u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
+{
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+
+ if (WARN_ON(pipe >= dev->num_crtcs))
+ return 0;
+
+ return vblank->count;
+}
+
/**
* drm_accurate_vblank_count - retrieve the master vblank counter
* @crtc: which counter to retrieve
@@ -296,7 +306,7 @@ static void vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
* Always update the count and timestamp to maintain the
* appearance that the counter has been ticking all along until
* this time. This makes the count account for the entire time
- * between drm_vblank_on() and drm_vblank_off().
+ * between drm_crtc_vblank_on() and drm_crtc_vblank_off().
*/
drm_update_vblank_count(dev, pipe, 0);
@@ -888,31 +898,6 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
}
/**
- * drm_vblank_count - retrieve "cooked" vblank counter value
- * @dev: DRM device
- * @pipe: index of CRTC for which to retrieve the counter
- *
- * Fetches the "cooked" vblank count value that represents the number of
- * vblank events since the system was booted, including lost events due to
- * modesetting activity.
- *
- * This is the legacy version of drm_crtc_vblank_count().
- *
- * Returns:
- * The software vblank counter.
- */
-u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
-{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
-
- if (WARN_ON(pipe >= dev->num_crtcs))
- return 0;
-
- return vblank->count;
-}
-EXPORT_SYMBOL(drm_vblank_count);
-
-/**
* drm_crtc_vblank_count - retrieve "cooked" vblank counter value
* @crtc: which counter to retrieve
*
@@ -920,8 +905,6 @@ EXPORT_SYMBOL(drm_vblank_count);
* vblank events since the system was booted, including lost events due to
* modesetting activity.
*
- * This is the native KMS version of drm_vblank_count().
- *
* Returns:
* The software vblank counter.
*/
@@ -1272,21 +1255,20 @@ void drm_crtc_wait_one_vblank(struct drm_crtc *crtc)
EXPORT_SYMBOL(drm_crtc_wait_one_vblank);
/**
- * drm_vblank_off - disable vblank events on a CRTC
- * @dev: DRM device
- * @pipe: CRTC index
+ * drm_crtc_vblank_off - disable vblank events on a CRTC
+ * @crtc: CRTC in question
*
* Drivers can use this function to shut down the vblank interrupt handling when
* disabling a crtc. This function ensures that the latest vblank frame count is
- * stored so that drm_vblank_on() can restore it again.
+ * stored so that drm_crtc_vblank_on() can restore it again.
*
* Drivers must use this function when the hardware vblank counter can get
* reset, e.g. when suspending.
- *
- * This is the legacy version of drm_crtc_vblank_off().
*/
-void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
+void drm_crtc_vblank_off(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = drm_crtc_index(crtc);
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
struct drm_pending_vblank_event *e, *t;
struct timeval now;
@@ -1302,7 +1284,8 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
pipe, vblank->enabled, vblank->inmodeset);
- /* Avoid redundant vblank disables without previous drm_vblank_on(). */
+ /* Avoid redundant vblank disables without previous
+ * drm_crtc_vblank_on(). */
if (drm_core_check_feature(dev, DRIVER_ATOMIC) || !vblank->inmodeset)
vblank_disable_and_save(dev, pipe);
@@ -1333,25 +1316,6 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
}
spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
-EXPORT_SYMBOL(drm_vblank_off);
-
-/**
- * drm_crtc_vblank_off - disable vblank events on a CRTC
- * @crtc: CRTC in question
- *
- * Drivers can use this function to shut down the vblank interrupt handling when
- * disabling a crtc. This function ensures that the latest vblank frame count is
- * stored so that drm_vblank_on can restore it again.
- *
- * Drivers must use this function when the hardware vblank counter can get
- * reset, e.g. when suspending.
- *
- * This is the native kms version of drm_vblank_off().
- */
-void drm_crtc_vblank_off(struct drm_crtc *crtc)
-{
- drm_vblank_off(crtc->dev, drm_crtc_index(crtc));
-}
EXPORT_SYMBOL(drm_crtc_vblank_off);
/**
@@ -1387,19 +1351,18 @@ void drm_crtc_vblank_reset(struct drm_crtc *crtc)
EXPORT_SYMBOL(drm_crtc_vblank_reset);
/**
- * drm_vblank_on - enable vblank events on a CRTC
- * @dev: DRM device
- * @pipe: CRTC index
+ * drm_crtc_vblank_on - enable vblank events on a CRTC
+ * @crtc: CRTC in question
*
* This functions restores the vblank interrupt state captured with
- * drm_vblank_off() again. Note that calls to drm_vblank_on() and
- * drm_vblank_off() can be unbalanced and so can also be unconditionally called
+ * drm_crtc_vblank_off() again. Note that calls to drm_crtc_vblank_on() and
+ * drm_crtc_vblank_off() can be unbalanced and so can also be unconditionally called
* in driver load code to reflect the current hardware state of the crtc.
- *
- * This is the legacy version of drm_crtc_vblank_on().
*/
-void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
+void drm_crtc_vblank_on(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = drm_crtc_index(crtc);
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
unsigned long irqflags;
@@ -1426,49 +1389,10 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
WARN_ON(drm_vblank_enable(dev, pipe));
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
-EXPORT_SYMBOL(drm_vblank_on);
-
-/**
- * drm_crtc_vblank_on - enable vblank events on a CRTC
- * @crtc: CRTC in question
- *
- * This functions restores the vblank interrupt state captured with
- * drm_vblank_off() again. Note that calls to drm_vblank_on() and
- * drm_vblank_off() can be unbalanced and so can also be unconditionally called
- * in driver load code to reflect the current hardware state of the crtc.
- *
- * This is the native kms version of drm_vblank_on().
- */
-void drm_crtc_vblank_on(struct drm_crtc *crtc)
-{
- drm_vblank_on(crtc->dev, drm_crtc_index(crtc));
-}
EXPORT_SYMBOL(drm_crtc_vblank_on);
-/**
- * drm_vblank_pre_modeset - account for vblanks across mode sets
- * @dev: DRM device
- * @pipe: CRTC index
- *
- * Account for vblank events across mode setting events, which will likely
- * reset the hardware frame counter.
- *
- * This is done by grabbing a temporary vblank reference to ensure that the
- * vblank interrupt keeps running across the modeset sequence. With this the
- * software-side vblank frame counting will ensure that there are no jumps or
- * discontinuities.
- *
- * Unfortunately this approach is racy and also doesn't work when the vblank
- * interrupt stops running, e.g. across system suspend resume. It is therefore
- * highly recommended that drivers use the newer drm_vblank_off() and
- * drm_vblank_on() instead. drm_vblank_pre_modeset() only works correctly when
- * using "cooked" software vblank frame counters and not relying on any hardware
- * counters.
- *
- * Drivers must call drm_vblank_post_modeset() when re-enabling the same crtc
- * again.
- */
-void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe)
+static void drm_legacy_vblank_pre_modeset(struct drm_device *dev,
+ unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
@@ -1492,17 +1416,9 @@ void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe)
vblank->inmodeset |= 0x2;
}
}
-EXPORT_SYMBOL(drm_vblank_pre_modeset);
-/**
- * drm_vblank_post_modeset - undo drm_vblank_pre_modeset changes
- * @dev: DRM device
- * @pipe: CRTC index
- *
- * This function again drops the temporary vblank reference acquired in
- * drm_vblank_pre_modeset.
- */
-void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
+static void drm_legacy_vblank_post_modeset(struct drm_device *dev,
+ unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
unsigned long irqflags;
@@ -1525,7 +1441,6 @@ void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
vblank->inmodeset = 0;
}
}
-EXPORT_SYMBOL(drm_vblank_post_modeset);
/*
* drm_modeset_ctl - handle vblank event counter changes across mode switch
@@ -1558,10 +1473,10 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
switch (modeset->cmd) {
case _DRM_PRE_MODESET:
- drm_vblank_pre_modeset(dev, pipe);
+ drm_legacy_vblank_pre_modeset(dev, pipe);
break;
case _DRM_POST_MODESET:
- drm_vblank_post_modeset(dev, pipe);
+ drm_legacy_vblank_post_modeset(dev, pipe);
break;
default:
return -EINVAL;
@@ -1596,11 +1511,10 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
spin_lock_irqsave(&dev->event_lock, flags);
/*
- * drm_vblank_off() might have been called after we called
- * drm_vblank_get(). drm_vblank_off() holds event_lock
- * around the vblank disable, so no need for further locking.
- * The reference from drm_vblank_get() protects against
- * vblank disable from another source.
+ * drm_crtc_vblank_off() might have been called after we called
+ * drm_vblank_get(). drm_crtc_vblank_off() holds event_lock around the
+ * vblank disable, so no need for further locking. The reference from
+ * drm_vblank_get() protects against vblank disable from another source.
*/
if (!vblank->enabled) {
ret = -EINVAL;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 11d44a1e0ab3..632473beb40c 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -104,6 +104,68 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
u64 end,
enum drm_mm_search_flags flags);
+#ifdef CONFIG_DRM_DEBUG_MM
+#include <linux/stackdepot.h>
+
+#define STACKDEPTH 32
+#define BUFSZ 4096
+
+static noinline void save_stack(struct drm_mm_node *node)
+{
+ unsigned long entries[STACKDEPTH];
+ struct stack_trace trace = {
+ .entries = entries,
+ .max_entries = STACKDEPTH,
+ .skip = 1
+ };
+
+ save_stack_trace(&trace);
+ if (trace.nr_entries != 0 &&
+ trace.entries[trace.nr_entries-1] == ULONG_MAX)
+ trace.nr_entries--;
+
+ /* May be called under spinlock, so avoid sleeping */
+ node->stack = depot_save_stack(&trace, GFP_NOWAIT);
+}
+
+static void show_leaks(struct drm_mm *mm)
+{
+ struct drm_mm_node *node;
+ unsigned long entries[STACKDEPTH];
+ char *buf;
+
+ buf = kmalloc(BUFSZ, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ list_for_each_entry(node, &mm->head_node.node_list, node_list) {
+ struct stack_trace trace = {
+ .entries = entries,
+ .max_entries = STACKDEPTH
+ };
+
+ if (!node->stack) {
+ DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
+ node->start, node->size);
+ continue;
+ }
+
+ depot_fetch_stack(node->stack, &trace);
+ snprint_stack_trace(buf, BUFSZ, &trace, 0);
+ DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
+ node->start, node->size, buf);
+ }
+
+ kfree(buf);
+}
+
+#undef STACKDEPTH
+#undef BUFSZ
+#else
+static void save_stack(struct drm_mm_node *node) { }
+static void show_leaks(struct drm_mm *mm) { }
+#endif
+
#define START(node) ((node)->start)
#define LAST(node) ((node)->start + (node)->size - 1)
@@ -228,6 +290,8 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
}
+
+ save_stack(node);
}
/**
@@ -293,6 +357,8 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
node->hole_follows = 1;
}
+ save_stack(node);
+
return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
@@ -397,6 +463,8 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
}
+
+ save_stack(node);
}
/**
@@ -861,10 +929,12 @@ EXPORT_SYMBOL(drm_mm_init);
* Note that it is a bug to call this function on an allocator which is not
* clean.
*/
-void drm_mm_takedown(struct drm_mm * mm)
+void drm_mm_takedown(struct drm_mm *mm)
{
- WARN(!list_empty(&mm->head_node.node_list),
- "Memory manager not clean during takedown.\n");
+ if (WARN(!list_empty(&mm->head_node.node_list),
+ "Memory manager not clean during takedown.\n"))
+ show_leaks(mm);
+
}
EXPORT_SYMBOL(drm_mm_takedown);
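
To see what the new CONFIG_DRM_DEBUG_MM instrumentation reports, here is a minimal sketch of the allocator lifecycle it hooks into, assuming the drm_mm_insert_node(mm, node, size, alignment, flags) interface this tree still uses; the node is deliberately never removed so drm_mm_takedown() has a leak to print:

#include <drm/drm_mm.h>

static int example_leak_one_node(void)
{
	static struct drm_mm mm;
	static struct drm_mm_node node;
	int ret;

	drm_mm_init(&mm, 0, 1 << 20);			/* manage offsets [0, 1M) */

	ret = drm_mm_insert_node(&mm, &node, 4096, 0,	/* save_stack() records this call site */
				 DRM_MM_SEARCH_DEFAULT);
	if (ret)
		return ret;

	/* drm_mm_remove_node(&node) intentionally missing ... */

	drm_mm_takedown(&mm);	/* WARNs, then show_leaks() dumps the saved stack */
	return 0;
}
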
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
new file mode 100644
index 000000000000..2735a5847ffa
--- /dev/null
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -0,0 +1,494 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <drm/drm_mode_config.h>
+#include <drm/drmP.h>
+
+#include "drm_crtc_internal.h"
+#include "drm_internal.h"
+
+int drm_modeset_register_all(struct drm_device *dev)
+{
+ int ret;
+
+ ret = drm_plane_register_all(dev);
+ if (ret)
+ goto err_plane;
+
+ ret = drm_crtc_register_all(dev);
+ if (ret)
+ goto err_crtc;
+
+ ret = drm_encoder_register_all(dev);
+ if (ret)
+ goto err_encoder;
+
+ ret = drm_connector_register_all(dev);
+ if (ret)
+ goto err_connector;
+
+ return 0;
+
+err_connector:
+ drm_encoder_unregister_all(dev);
+err_encoder:
+ drm_crtc_unregister_all(dev);
+err_crtc:
+ drm_plane_unregister_all(dev);
+err_plane:
+ return ret;
+}
+
+void drm_modeset_unregister_all(struct drm_device *dev)
+{
+ drm_connector_unregister_all(dev);
+ drm_encoder_unregister_all(dev);
+ drm_crtc_unregister_all(dev);
+ drm_plane_unregister_all(dev);
+}
+
+/**
+ * drm_mode_getresources - get graphics configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Construct a set of configuration description structures and return
+ * them to the user, including CRTC, connector and framebuffer configuration.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_getresources(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_card_res *card_res = data;
+ struct list_head *lh;
+ struct drm_framebuffer *fb;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ int ret = 0;
+ int connector_count = 0;
+ int crtc_count = 0;
+ int fb_count = 0;
+ int encoder_count = 0;
+ int copied = 0;
+ uint32_t __user *fb_id;
+ uint32_t __user *crtc_id;
+ uint32_t __user *connector_id;
+ uint32_t __user *encoder_id;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+
+ mutex_lock(&file_priv->fbs_lock);
+ /*
+ * For the non-control nodes we need to limit the list of resources
+ * by IDs in the group list for this node
+ */
+ list_for_each(lh, &file_priv->fbs)
+ fb_count++;
+
+ /* handle this in 4 parts */
+ /* FBs */
+ if (card_res->count_fbs >= fb_count) {
+ copied = 0;
+ fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
+ list_for_each_entry(fb, &file_priv->fbs, filp_head) {
+ if (put_user(fb->base.id, fb_id + copied)) {
+ mutex_unlock(&file_priv->fbs_lock);
+ return -EFAULT;
+ }
+ copied++;
+ }
+ }
+ card_res->count_fbs = fb_count;
+ mutex_unlock(&file_priv->fbs_lock);
+
+ /* mode_config.mutex protects the connector list against e.g. DP MST
+ * connector hot-adding. CRTC/Plane lists are invariant. */
+ mutex_lock(&dev->mode_config.mutex);
+ drm_for_each_crtc(crtc, dev)
+ crtc_count++;
+
+ drm_for_each_connector(connector, dev)
+ connector_count++;
+
+ drm_for_each_encoder(encoder, dev)
+ encoder_count++;
+
+ card_res->max_height = dev->mode_config.max_height;
+ card_res->min_height = dev->mode_config.min_height;
+ card_res->max_width = dev->mode_config.max_width;
+ card_res->min_width = dev->mode_config.min_width;
+
+ /* CRTCs */
+ if (card_res->count_crtcs >= crtc_count) {
+ copied = 0;
+ crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
+ drm_for_each_crtc(crtc, dev) {
+ if (put_user(crtc->base.id, crtc_id + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ card_res->count_crtcs = crtc_count;
+
+ /* Encoders */
+ if (card_res->count_encoders >= encoder_count) {
+ copied = 0;
+ encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
+ drm_for_each_encoder(encoder, dev) {
+ if (put_user(encoder->base.id, encoder_id +
+ copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ card_res->count_encoders = encoder_count;
+
+ /* Connectors */
+ if (card_res->count_connectors >= connector_count) {
+ copied = 0;
+ connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
+ drm_for_each_connector(connector, dev) {
+ if (put_user(connector->base.id,
+ connector_id + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ card_res->count_connectors = connector_count;
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+/**
+ * drm_mode_config_reset - call ->reset callbacks
+ * @dev: drm device
+ *
+ * This function calls the ->reset callback of all planes, CRTCs, encoders and
+ * connectors. Drivers can use this in e.g. their driver load or resume code to
+ * reset hardware and software state.
+ */
+void drm_mode_config_reset(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+ struct drm_plane *plane;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+
+ drm_for_each_plane(plane, dev)
+ if (plane->funcs->reset)
+ plane->funcs->reset(plane);
+
+ drm_for_each_crtc(crtc, dev)
+ if (crtc->funcs->reset)
+ crtc->funcs->reset(crtc);
+
+ drm_for_each_encoder(encoder, dev)
+ if (encoder->funcs->reset)
+ encoder->funcs->reset(encoder);
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_for_each_connector(connector, dev)
+ if (connector->funcs->reset)
+ connector->funcs->reset(connector);
+ mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_mode_config_reset);
+
+/*
+ * Global properties
+ */
+static const struct drm_prop_enum_list drm_plane_type_enum_list[] = {
+ { DRM_PLANE_TYPE_OVERLAY, "Overlay" },
+ { DRM_PLANE_TYPE_PRIMARY, "Primary" },
+ { DRM_PLANE_TYPE_CURSOR, "Cursor" },
+};
+
+static int drm_mode_create_standard_properties(struct drm_device *dev)
+{
+ struct drm_property *prop;
+ int ret;
+
+ ret = drm_connector_create_standard_properties(dev);
+ if (ret)
+ return ret;
+
+ prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+ "type", drm_plane_type_enum_list,
+ ARRAY_SIZE(drm_plane_type_enum_list));
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.plane_type_property = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "SRC_X", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_src_x = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "SRC_Y", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_src_y = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "SRC_W", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_src_w = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "SRC_H", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_src_h = prop;
+
+ prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_X", INT_MIN, INT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_x = prop;
+
+ prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_Y", INT_MIN, INT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_y = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_W", 0, INT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_w = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_H", 0, INT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_h = prop;
+
+ prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
+ "FB_ID", DRM_MODE_OBJECT_FB);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_fb_id = prop;
+
+ prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
+ "IN_FENCE_FD", -1, INT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_in_fence_fd = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "OUT_FENCE_PTR", 0, U64_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_out_fence_ptr = prop;
+
+ prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_ID", DRM_MODE_OBJECT_CRTC);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_id = prop;
+
+ prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC,
+ "ACTIVE");
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_active = prop;
+
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB,
+ "MODE_ID", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_mode_id = prop;
+
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB,
+ "DEGAMMA_LUT", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.degamma_lut_property = prop;
+
+ prop = drm_property_create_range(dev,
+ DRM_MODE_PROP_IMMUTABLE,
+ "DEGAMMA_LUT_SIZE", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.degamma_lut_size_property = prop;
+
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB,
+ "CTM", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.ctm_property = prop;
+
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB,
+ "GAMMA_LUT", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.gamma_lut_property = prop;
+
+ prop = drm_property_create_range(dev,
+ DRM_MODE_PROP_IMMUTABLE,
+ "GAMMA_LUT_SIZE", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.gamma_lut_size_property = prop;
+
+ return 0;
+}
+
+/**
+ * drm_mode_config_init - initialize DRM mode_configuration structure
+ * @dev: DRM device
+ *
+ * Initialize @dev's mode_config structure, used for tracking the graphics
+ * configuration of @dev.
+ *
+ * Since this initializes the modeset locks themselves, no locking is possible
+ * here. That is no problem, since this should happen single-threaded at init
+ * time. It is the driver's job to ensure this guarantee holds.
+ *
+ */
+void drm_mode_config_init(struct drm_device *dev)
+{
+ mutex_init(&dev->mode_config.mutex);
+ drm_modeset_lock_init(&dev->mode_config.connection_mutex);
+ mutex_init(&dev->mode_config.idr_mutex);
+ mutex_init(&dev->mode_config.fb_lock);
+ mutex_init(&dev->mode_config.blob_lock);
+ INIT_LIST_HEAD(&dev->mode_config.fb_list);
+ INIT_LIST_HEAD(&dev->mode_config.crtc_list);
+ INIT_LIST_HEAD(&dev->mode_config.connector_list);
+ INIT_LIST_HEAD(&dev->mode_config.encoder_list);
+ INIT_LIST_HEAD(&dev->mode_config.property_list);
+ INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
+ INIT_LIST_HEAD(&dev->mode_config.plane_list);
+ idr_init(&dev->mode_config.crtc_idr);
+ idr_init(&dev->mode_config.tile_idr);
+ ida_init(&dev->mode_config.connector_ida);
+
+ drm_modeset_lock_all(dev);
+ drm_mode_create_standard_properties(dev);
+ drm_modeset_unlock_all(dev);
+
+ /* Just to be sure */
+ dev->mode_config.num_fb = 0;
+ dev->mode_config.num_connector = 0;
+ dev->mode_config.num_crtc = 0;
+ dev->mode_config.num_encoder = 0;
+ dev->mode_config.num_overlay_plane = 0;
+ dev->mode_config.num_total_plane = 0;
+}
+EXPORT_SYMBOL(drm_mode_config_init);
+
+/**
+ * drm_mode_config_cleanup - free up DRM mode_config info
+ * @dev: DRM device
+ *
+ * Free up all the connectors and CRTCs associated with this DRM device, then
+ * free up the framebuffers and associated buffer objects.
+ *
+ * Note that since this /should/ happen single-threaded at driver/device
+ * teardown time, no locking is required. It's the driver's job to ensure that
+ * this guarantee actually holds true.
+ *
+ * FIXME: cleanup any dangling user buffer objects too
+ */
+void drm_mode_config_cleanup(struct drm_device *dev)
+{
+ struct drm_connector *connector, *ot;
+ struct drm_crtc *crtc, *ct;
+ struct drm_encoder *encoder, *enct;
+ struct drm_framebuffer *fb, *fbt;
+ struct drm_property *property, *pt;
+ struct drm_property_blob *blob, *bt;
+ struct drm_plane *plane, *plt;
+
+ list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
+ head) {
+ encoder->funcs->destroy(encoder);
+ }
+
+ list_for_each_entry_safe(connector, ot,
+ &dev->mode_config.connector_list, head) {
+ connector->funcs->destroy(connector);
+ }
+
+ list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
+ head) {
+ drm_property_destroy(dev, property);
+ }
+
+ list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
+ head) {
+ plane->funcs->destroy(plane);
+ }
+
+ list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+ crtc->funcs->destroy(crtc);
+ }
+
+ list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list,
+ head_global) {
+ drm_property_unreference_blob(blob);
+ }
+
+ /*
+ * Single-threaded teardown context, so it's not required to grab the
+	 * fb_lock to protect against concurrent fb_list access. On the contrary,
+	 * it would actually deadlock with the drm_framebuffer_cleanup function.
+ *
+ * Also, if there are any framebuffers left, that's a driver leak now,
+ * so politely WARN about this.
+ */
+ WARN_ON(!list_empty(&dev->mode_config.fb_list));
+ list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
+ drm_framebuffer_free(&fb->base.refcount);
+ }
+
+ ida_destroy(&dev->mode_config.connector_ida);
+ idr_destroy(&dev->mode_config.tile_idr);
+ idr_destroy(&dev->mode_config.crtc_idr);
+ drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
+}
+EXPORT_SYMBOL(drm_mode_config_cleanup);
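
A hedged sketch of how a KMS driver is expected to bracket its modeset setup with the entry points gathered in this new file; the foo_* names, the 4096 limits and the error path are illustrative only:

#include <drm/drmP.h>
#include <drm/drm_mode_config.h>

extern const struct drm_mode_config_funcs foo_mode_config_funcs;	/* hypothetical */
int foo_create_planes_crtcs_outputs(struct drm_device *dev);		/* hypothetical */

static int foo_modeset_init(struct drm_device *dev)
{
	int ret;

	drm_mode_config_init(dev);	/* locks, lists, IDRs and the standard properties */

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 4096;
	dev->mode_config.max_height = 4096;
	dev->mode_config.funcs = &foo_mode_config_funcs;

	ret = foo_create_planes_crtcs_outputs(dev);
	if (ret) {
		drm_mode_config_cleanup(dev);	/* tears down whatever was created so far */
		return ret;
	}

	drm_mode_config_reset(dev);	/* run every object's ->reset hook once */
	return 0;
}
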
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index f64ac86deb84..ac6a35212501 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -49,13 +49,7 @@
*/
void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
{
- DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
- "0x%x 0x%x\n",
- mode->base.id, mode->name, mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+ DRM_DEBUG_KMS("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
}
EXPORT_SYMBOL(drm_mode_debug_printmodeline);
@@ -1001,7 +995,6 @@ bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
mode1->vsync_end == mode2->vsync_end &&
mode1->vtotal == mode2->vtotal &&
mode1->vscan == mode2->vscan &&
- mode1->picture_aspect_ratio == mode2->picture_aspect_ratio &&
(mode1->flags & ~DRM_MODE_FLAG_3D_MASK) ==
(mode2->flags & ~DRM_MODE_FLAG_3D_MASK))
return true;
@@ -1504,27 +1497,6 @@ void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
out->vrefresh = in->vrefresh;
out->flags = in->flags;
out->type = in->type;
- out->flags &= ~DRM_MODE_FLAG_PIC_AR_MASK;
-
- switch (in->picture_aspect_ratio) {
- case HDMI_PICTURE_ASPECT_4_3:
- out->flags |= DRM_MODE_FLAG_PIC_AR_4_3;
- break;
- case HDMI_PICTURE_ASPECT_16_9:
- out->flags |= DRM_MODE_FLAG_PIC_AR_16_9;
- break;
- case HDMI_PICTURE_ASPECT_64_27:
- out->flags |= DRM_MODE_FLAG_PIC_AR_64_27;
- break;
- case DRM_MODE_PICTURE_ASPECT_256_135:
- out->flags |= DRM_MODE_FLAG_PIC_AR_256_135;
- break;
- case HDMI_PICTURE_ASPECT_RESERVED:
- default:
- out->flags |= DRM_MODE_FLAG_PIC_AR_NONE;
- break;
- }
-
strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
}
@@ -1570,27 +1542,6 @@ int drm_mode_convert_umode(struct drm_display_mode *out,
strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
- /* Clearing picture aspect ratio bits from out flags */
- out->flags &= ~DRM_MODE_FLAG_PIC_AR_MASK;
-
- switch (in->flags & DRM_MODE_FLAG_PIC_AR_MASK) {
- case DRM_MODE_FLAG_PIC_AR_4_3:
- out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_4_3;
- break;
- case DRM_MODE_FLAG_PIC_AR_16_9:
- out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_16_9;
- break;
- case DRM_MODE_FLAG_PIC_AR_64_27:
- out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_64_27;
- break;
- case DRM_MODE_FLAG_PIC_AR_256_135:
- out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_256_135;
- break;
- default:
- out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
- break;
- }
-
out->status = drm_mode_validate_basic(out);
if (out->status != MODE_OK)
goto out;
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
index 2544dfe7354c..2f452b3dd40e 100644
--- a/drivers/gpu/drm/drm_modeset_helper.c
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -75,10 +75,11 @@ void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
info = drm_format_info(mode_cmd->pixel_format);
if (!info || !info->depth) {
- char *format_name = drm_get_format_name(mode_cmd->pixel_format);
+ struct drm_format_name_buf format_name;
- DRM_DEBUG_KMS("non-RGB pixel format %s\n", format_name);
- kfree(format_name);
+ DRM_DEBUG_KMS("non-RGB pixel format %s\n",
+ drm_get_format_name(mode_cmd->pixel_format,
+ &format_name));
fb->depth = 0;
fb->bits_per_pixel = 0;
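
The same drm_get_format_name() conversion repeats through the rest of this diff. Relying only on what these hunks show (a caller-provided struct drm_format_name_buf and a pointer returned into it), the new convention boils down to:

#include <drm/drmP.h>
#include <drm/drm_fourcc.h>

static void example_log_pixel_format(u32 pixel_format)
{
	struct drm_format_name_buf buf;

	/* The returned pointer aliases buf.str, so no kfree() is needed any more. */
	DRM_DEBUG_KMS("pixel format %s\n",
		      drm_get_format_name(pixel_format, &buf));
}
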
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 61146f5b4f56..9059fe3145a1 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -60,6 +60,8 @@
* lists and lookup data structures.
*/
+static DEFINE_WW_CLASS(crtc_ww_class);
+
/**
* drm_modeset_lock_all - take all modeset locks
* @dev: DRM device
@@ -398,6 +400,17 @@ int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx)
EXPORT_SYMBOL(drm_modeset_backoff_interruptible);
/**
+ * drm_modeset_lock_init - initialize lock
+ * @lock: lock to init
+ */
+void drm_modeset_lock_init(struct drm_modeset_lock *lock)
+{
+ ww_mutex_init(&lock->mutex, &crtc_ww_class);
+ INIT_LIST_HEAD(&lock->head);
+}
+EXPORT_SYMBOL(drm_modeset_lock_init);
+
+/**
* drm_modeset_lock - take modeset lock
* @lock: lock to take
* @ctx: acquire ctx
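
Un-inlining drm_modeset_lock_init() (and making crtc_ww_class static) does not change how a driver-private lock is used; a sketch with hypothetical foo_* names, relying only on the existing drm_modeset_lock()/-EDEADLK backoff contract:

#include <drm/drm_modeset_lock.h>

struct foo_priv {
	struct drm_modeset_lock audio_lock;	/* hypothetical driver-private lock */
};

static void foo_locks_init(struct foo_priv *priv)
{
	drm_modeset_lock_init(&priv->audio_lock);
}

static int foo_update_audio(struct foo_priv *priv,
			    struct drm_modeset_acquire_ctx *ctx)
{
	int ret;

	ret = drm_modeset_lock(&priv->audio_lock, ctx);
	if (ret)	/* usually -EDEADLK: caller backs off via drm_modeset_backoff() */
		return ret;

	/* ... touch the state protected by audio_lock ... */
	return 0;
}
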
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 249c0ae52c6d..419ac313c36f 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -137,6 +137,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
drm_object_attach_property(&plane->base, config->prop_fb_id, 0);
+ drm_object_attach_property(&plane->base, config->prop_in_fence_fd, -1);
drm_object_attach_property(&plane->base, config->prop_crtc_id, 0);
drm_object_attach_property(&plane->base, config->prop_crtc_x, 0);
drm_object_attach_property(&plane->base, config->prop_crtc_y, 0);
@@ -479,9 +480,10 @@ static int __setplane_internal(struct drm_plane *plane,
/* Check whether this plane supports the fb pixel format. */
ret = drm_plane_check_pixel_format(plane, fb->pixel_format);
if (ret) {
- char *format_name = drm_get_format_name(fb->pixel_format);
- DRM_DEBUG_KMS("Invalid pixel format %s\n", format_name);
- kfree(format_name);
+ struct drm_format_name_buf format_name;
+ DRM_DEBUG_KMS("Invalid pixel format %s\n",
+ drm_get_format_name(fb->pixel_format,
+ &format_name));
goto out;
}
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 7899fc1dcdb0..7a7dddf604d7 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -130,15 +130,8 @@ int drm_plane_helper_check_state(struct drm_plane_state *state,
unsigned int rotation = state->rotation;
int hscale, vscale;
- src->x1 = state->src_x;
- src->y1 = state->src_y;
- src->x2 = state->src_x + state->src_w;
- src->y2 = state->src_y + state->src_h;
-
- dst->x1 = state->crtc_x;
- dst->y1 = state->crtc_y;
- dst->x2 = state->crtc_x + state->crtc_w;
- dst->y2 = state->crtc_y + state->crtc_h;
+ *src = drm_plane_state_src(state);
+ *dst = drm_plane_state_dest(state);
if (!fb) {
state->visible = false;
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
new file mode 100644
index 000000000000..ad3caaa1f48b
--- /dev/null
+++ b/drivers/gpu/drm/drm_print.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ */
+
+#include <stdarg.h>
+#include <linux/seq_file.h>
+#include <drm/drmP.h>
+#include <drm/drm_print.h>
+
+void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf)
+{
+ seq_printf(p->arg, "%pV", vaf);
+}
+EXPORT_SYMBOL(__drm_printfn_seq_file);
+
+void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf)
+{
+ dev_printk(KERN_INFO, p->arg, "[" DRM_NAME "] %pV", vaf);
+}
+EXPORT_SYMBOL(__drm_printfn_info);
+
+/**
+ * drm_printf - print to a &drm_printer stream
+ * @p: the &drm_printer
+ * @f: format string
+ */
+void drm_printf(struct drm_printer *p, const char *f, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, f);
+ vaf.fmt = f;
+ vaf.va = &args;
+ p->printfn(p, &vaf);
+ va_end(args);
+}
+EXPORT_SYMBOL(drm_printf);
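
What the two exported printfn helpers buy drivers is a single dump routine usable from both debugfs and the kernel log; a sketch assuming the drm_seq_file_printer()/drm_info_printer() constructors that the accompanying drm_print.h pairs with these helpers (foo_* names are hypothetical):

#include <linux/seq_file.h>
#include <drm/drm_print.h>

static void foo_dump_state(struct drm_printer *p)
{
	drm_printf(p, "engines: %d\n", 2);
	drm_printf(p, "status: %s\n", "idle");
}

static int foo_debugfs_show(struct seq_file *m, void *unused)
{
	struct drm_printer p = drm_seq_file_printer(m);

	foo_dump_state(&p);	/* the same routine could be fed a drm_info_printer() */
	return 0;
}
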
diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
index 73e53a8d1b37..e6057d8cdcd5 100644
--- a/drivers/gpu/drm/drm_rect.c
+++ b/drivers/gpu/drm/drm_rect.c
@@ -281,17 +281,10 @@ EXPORT_SYMBOL(drm_rect_calc_vscale_relaxed);
*/
void drm_rect_debug_print(const char *prefix, const struct drm_rect *r, bool fixed_point)
{
- int w = drm_rect_width(r);
- int h = drm_rect_height(r);
-
if (fixed_point)
- DRM_DEBUG_KMS("%s%d.%06ux%d.%06u%+d.%06u%+d.%06u\n", prefix,
- w >> 16, ((w & 0xffff) * 15625) >> 10,
- h >> 16, ((h & 0xffff) * 15625) >> 10,
- r->x1 >> 16, ((r->x1 & 0xffff) * 15625) >> 10,
- r->y1 >> 16, ((r->y1 & 0xffff) * 15625) >> 10);
+ DRM_DEBUG_KMS("%s" DRM_RECT_FP_FMT "\n", prefix, DRM_RECT_FP_ARG(r));
else
- DRM_DEBUG_KMS("%s%dx%d%+d%+d\n", prefix, w, h, r->x1, r->y1);
+ DRM_DEBUG_KMS("%s" DRM_RECT_FMT "\n", prefix, DRM_RECT_ARG(r));
}
EXPORT_SYMBOL(drm_rect_debug_print);
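
The DRM_RECT_FMT/DRM_RECT_ARG macros (and their fixed-point _FP_ variants) used above are plain printf building blocks that drivers can reuse; a one-line sketch:

#include <drm/drmP.h>
#include <drm/drm_rect.h>

static void example_log_clip(const struct drm_rect *clip)
{
	/* prints e.g. "clipped to 1920x1080+0+0" */
	DRM_DEBUG_KMS("clipped to " DRM_RECT_FMT "\n", DRM_RECT_ARG(clip));
}
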
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 0dee6acbd880..a6799b0aa3d9 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -479,9 +479,7 @@ static const struct file_operations fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 4a21a745c373..739180ac3da5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -263,6 +263,26 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
return 0;
}
+int exynos_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int ret;
+
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_normalize_zpos(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_exynos_file_private *file_priv;
@@ -346,9 +366,7 @@ static const struct file_operations exynos_drm_driver_fops = {
.poll = drm_poll,
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.release = drm_release,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index d215149e737b..80c4d5b81689 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -301,6 +301,7 @@ static inline int exynos_dpi_bind(struct drm_device *dev,
int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
bool nonblock);
+int exynos_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
extern struct platform_driver fimd_driver;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 40ce841eb952..23cce0a3f5fc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -190,7 +190,7 @@ dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
.fb_create = exynos_user_fb_create,
.output_poll_changed = exynos_drm_output_poll_changed,
- .atomic_check = drm_atomic_helper_check,
+ .atomic_check = exynos_atomic_check,
.atomic_commit = exynos_atomic_commit,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 4cfb39d543b4..9f35deb56170 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -63,15 +63,11 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
static struct fb_ops exynos_drm_fb_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_mmap = exynos_drm_fb_mmap,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_blank = drm_fb_helper_blank,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_setcmap = drm_fb_helper_setcmap,
};
static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
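
The DRM_FB_HELPER_DEFAULT_OPS conversion recurs in several drivers below. The pattern relies on later designated initializers overriding the macro's stock drm_fb_helper_* hooks, as intel_fbdev does with .fb_set_par further down; a sketch with a hypothetical foo_* override:

#include <linux/fb.h>
#include <drm/drm_fb_helper.h>

static int foo_fb_set_par(struct fb_info *info)
{
	return drm_fb_helper_set_par(info);	/* plus driver-specific work */
}

static struct fb_ops foo_fb_ops = {
	.owner		= THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,		/* stock drm_fb_helper_* defaults */
	.fb_set_par	= foo_fb_set_par,	/* later initializer wins over the default */
	.fb_fillrect	= drm_fb_helper_cfb_fillrect,
	.fb_copyarea	= drm_fb_helper_cfb_copyarea,
	.fb_imageblit	= drm_fb_helper_cfb_imageblit,
};
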
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index e04efbed1a54..0b0d1cb11641 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -180,9 +180,7 @@ static const struct file_operations fsl_dcu_drm_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 97daf23f3fef..4071b2d1e8cf 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -185,9 +185,7 @@ static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
static struct fb_ops psbfb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_blank = drm_fb_helper_blank,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_setcolreg = psbfb_setcolreg,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = psbfb_copyarea,
@@ -198,9 +196,7 @@ static struct fb_ops psbfb_ops = {
static struct fb_ops psbfb_roll_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_blank = drm_fb_helper_blank,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_setcolreg = psbfb_setcolreg,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
@@ -211,9 +207,7 @@ static struct fb_ops psbfb_roll_ops = {
static struct fb_ops psbfb_unaccel_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_blank = drm_fb_helper_blank,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_setcolreg = psbfb_setcolreg,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 76aea2e7fb9d..3f4f424196b2 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -131,7 +131,7 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
* page table entries with the dummy page. This is protected via the gtt
* mutex which the caller must hold.
*/
-void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
+static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
{
struct drm_psb_private *dev_priv = dev->dev_private;
u32 __iomem *gtt_slot;
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 50eb944fb78a..ff37ea585664 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -473,6 +473,7 @@ static const struct file_operations psb_gem_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = psb_unlocked_ioctl,
+ .compat_ioctl = drm_compat_ioctl,
.mmap = drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index b74372760d7f..05d7aaf47eea 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -753,10 +753,6 @@ extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-extern int psb_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
-extern int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
/* psb_device.c */
extern const struct psb_ops psb_chip_ops;
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 7e7a4d43d6b6..afc2b5d2d5f0 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -608,17 +608,16 @@ static void ade_rdma_set(void __iomem *base, struct drm_framebuffer *fb,
u32 ch, u32 y, u32 in_h, u32 fmt)
{
struct drm_gem_cma_object *obj = drm_fb_cma_get_gem_obj(fb, 0);
- char *format_name;
+ struct drm_format_name_buf format_name;
u32 reg_ctrl, reg_addr, reg_size, reg_stride, reg_space, reg_en;
u32 stride = fb->pitches[0];
u32 addr = (u32)obj->paddr + y * stride;
DRM_DEBUG_DRIVER("rdma%d: (y=%d, height=%d), stride=%d, paddr=0x%x\n",
ch + 1, y, in_h, stride, (u32)obj->paddr);
- format_name = drm_get_format_name(fb->pixel_format);
DRM_DEBUG_DRIVER("addr=0x%x, fb:%dx%d, pixel_format=%d(%s)\n",
- addr, fb->width, fb->height, fmt, format_name);
- kfree(format_name);
+ addr, fb->width, fb->height, fmt,
+ drm_get_format_name(fb->pixel_format, &format_name));
/* get reg offset */
reg_ctrl = RD_CH_CTRL(ch);
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index e88fde18c946..ebd5f4fe4c23 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -152,9 +152,7 @@ static const struct file_operations kirin_drm_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index af8683e0dd54..027521f30b6e 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -1585,7 +1585,6 @@ const struct drm_connector_helper_funcs tda998x_connector_helper_funcs = {
static void tda998x_connector_destroy(struct drm_connector *connector)
{
- drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
@@ -1657,16 +1656,10 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
if (ret)
goto err_connector;
- ret = drm_connector_register(&priv->connector);
- if (ret)
- goto err_sysfs;
-
drm_mode_connector_attach_encoder(&priv->connector, &priv->encoder);
return 0;
-err_sysfs:
- drm_connector_cleanup(&priv->connector);
err_connector:
drm_encoder_cleanup(&priv->encoder);
err_encoder:
@@ -1679,7 +1672,6 @@ static void tda998x_unbind(struct device *dev, struct device *master,
{
struct tda998x_priv *priv = dev_get_drvdata(dev);
- drm_connector_unregister(&priv->connector);
drm_connector_cleanup(&priv->connector);
drm_encoder_cleanup(&priv->encoder);
tda998x_destroy(priv);
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index d91856779beb..ab4e6cbe1f8b 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -113,9 +113,7 @@ static const struct file_operations i810_buffer_fops = {
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
.mmap = i810_mmap_buffers,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index 0be55dc1ef4b..02504a7cfaf2 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -49,9 +49,7 @@ static const struct file_operations i810_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_legacy_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index cee87bfd10c4..51ba630a134b 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -21,6 +21,7 @@ config DRM_I915_DEBUG
select PREEMPT_COUNT
select X86_MSR # used by igt/pm_rpm
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
+ select DRM_DEBUG_MM if DRM=y
default n
help
Choose this option to turn on extra driver debugging that may affect
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 93e9a0e8c0f5..1cc971cb6cb1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3033,7 +3033,7 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
struct drm_plane_state *state;
struct drm_plane *plane = &intel_plane->base;
- char *format_name;
+ struct drm_format_name_buf format_name;
if (!plane->state) {
seq_puts(m, "plane->state is NULL!\n");
@@ -3043,9 +3043,9 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
state = plane->state;
if (state->fb) {
- format_name = drm_get_format_name(state->fb->pixel_format);
+ drm_get_format_name(state->fb->pixel_format, &format_name);
} else {
- format_name = kstrdup("N/A", GFP_KERNEL);
+ sprintf(format_name.str, "N/A");
}
seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
@@ -3061,10 +3061,8 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
((state->src_w & 0xffff) * 15625) >> 10,
(state->src_h >> 16),
((state->src_h & 0xffff) * 15625) >> 10,
- format_name,
+ format_name.str,
plane_rotation(state->rotation));
-
- kfree(format_name);
}
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 874fe857a345..4f0e56d3b441 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2501,9 +2501,7 @@ static const struct file_operations i915_driver_fops = {
.mmap = drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
-#ifdef CONFIG_COMPAT
.compat_ioctl = i915_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 21b2be8981e3..5192206c62e2 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2695,6 +2695,8 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
+#else
+#define i915_compat_ioctl NULL
#endif
extern const struct dev_pm_ops i915_pm_ops;
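
This #else branch is what lets the fops hunks in this series drop their #ifdef CONFIG_COMPAT blocks: with the handler name defined to NULL when compat support is off, the .compat_ioctl assignment can stay unconditional. The pattern in isolation (foo_* names are hypothetical):

/* header */
#ifdef CONFIG_COMPAT
long foo_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
#else
#define foo_compat_ioctl NULL
#endif

/* any fops user, no #ifdef needed */
static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= drm_ioctl,
	.compat_ioctl	= foo_compat_ioctl,	/* NULL on !CONFIG_COMPAT */
	.llseek		= noop_llseek,
};
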
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 71a0433cd6c3..ff821649486e 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -142,7 +142,7 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
crtc_state->base.enable ? crtc_state->pipe_src_h : 0;
if (state->fb && drm_rotation_90_or_270(state->rotation)) {
- char *format_name;
+ struct drm_format_name_buf format_name;
if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
state->fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)) {
@@ -158,9 +158,9 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
switch (state->fb->pixel_format) {
case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
- format_name = drm_get_format_name(state->fb->pixel_format);
- DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n", format_name);
- kfree(format_name);
+ DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
+ drm_get_format_name(state->fb->pixel_format,
+ &format_name));
return -EINVAL;
default:
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b2cedb94f534..2ebb8b833395 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2821,14 +2821,8 @@ valid_fb:
plane_state->crtc_w = fb->width;
plane_state->crtc_h = fb->height;
- intel_state->base.src.x1 = plane_state->src_x;
- intel_state->base.src.y1 = plane_state->src_y;
- intel_state->base.src.x2 = plane_state->src_x + plane_state->src_w;
- intel_state->base.src.y2 = plane_state->src_y + plane_state->src_h;
- intel_state->base.dst.x1 = plane_state->crtc_x;
- intel_state->base.dst.y1 = plane_state->crtc_y;
- intel_state->base.dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
- intel_state->base.dst.y2 = plane_state->crtc_y + plane_state->crtc_h;
+ intel_state->base.src = drm_plane_state_src(plane_state);
+ intel_state->base.dst = drm_plane_state_dest(plane_state);
obj = intel_fb_obj(fb);
if (i915_gem_object_is_tiled(obj))
@@ -12852,7 +12846,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
DRM_DEBUG_KMS("planes on this crtc\n");
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
- char *format_name;
+ struct drm_format_name_buf format_name;
intel_plane = to_intel_plane(plane);
if (intel_plane->pipe != crtc->pipe)
continue;
@@ -12865,12 +12859,11 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
continue;
}
- format_name = drm_get_format_name(fb->pixel_format);
-
DRM_DEBUG_KMS("[PLANE:%d:%s] enabled",
plane->base.id, plane->name);
DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s",
- fb->base.id, fb->width, fb->height, format_name);
+ fb->base.id, fb->width, fb->height,
+ drm_get_format_name(fb->pixel_format, &format_name));
DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
state->scaler_id,
state->base.src.x1 >> 16,
@@ -12880,8 +12873,6 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
state->base.dst.x1, state->base.dst.y1,
drm_rect_width(&state->base.dst),
drm_rect_height(&state->base.dst));
-
- kfree(format_name);
}
}
@@ -15775,7 +15766,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
unsigned int tiling = i915_gem_object_get_tiling(obj);
int ret;
u32 pitch_limit, stride_alignment;
- char *format_name;
+ struct drm_format_name_buf format_name;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -15866,18 +15857,16 @@ static int intel_framebuffer_init(struct drm_device *dev,
break;
case DRM_FORMAT_XRGB1555:
if (INTEL_INFO(dev)->gen > 3) {
- format_name = drm_get_format_name(mode_cmd->pixel_format);
- DRM_DEBUG("unsupported pixel format: %s\n", format_name);
- kfree(format_name);
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
break;
case DRM_FORMAT_ABGR8888:
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
INTEL_INFO(dev)->gen < 9) {
- format_name = drm_get_format_name(mode_cmd->pixel_format);
- DRM_DEBUG("unsupported pixel format: %s\n", format_name);
- kfree(format_name);
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
break;
@@ -15885,17 +15874,15 @@ static int intel_framebuffer_init(struct drm_device *dev,
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
if (INTEL_INFO(dev)->gen < 4) {
- format_name = drm_get_format_name(mode_cmd->pixel_format);
- DRM_DEBUG("unsupported pixel format: %s\n", format_name);
- kfree(format_name);
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
break;
case DRM_FORMAT_ABGR2101010:
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
- format_name = drm_get_format_name(mode_cmd->pixel_format);
- DRM_DEBUG("unsupported pixel format: %s\n", format_name);
- kfree(format_name);
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
break;
@@ -15904,16 +15891,14 @@ static int intel_framebuffer_init(struct drm_device *dev,
case DRM_FORMAT_YVYU:
case DRM_FORMAT_VYUY:
if (INTEL_INFO(dev)->gen < 5) {
- format_name = drm_get_format_name(mode_cmd->pixel_format);
- DRM_DEBUG("unsupported pixel format: %s\n", format_name);
- kfree(format_name);
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
break;
default:
- format_name = drm_get_format_name(mode_cmd->pixel_format);
- DRM_DEBUG("unsupported pixel format: %s\n", format_name);
- kfree(format_name);
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index e3cf655bec3b..fc958d5ed0dc 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -102,16 +102,13 @@ static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_set_par = intel_fbdev_set_par,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_pan_display = intel_fbdev_pan_display,
.fb_blank = intel_fbdev_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
static int intelfb_alloc(struct drm_fb_helper *helper,
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 3ba7d59df00d..c2b629c922e7 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -762,15 +762,8 @@ intel_check_sprite_plane(struct drm_plane *plane,
bool can_scale;
int ret;
- src->x1 = state->base.src_x;
- src->y1 = state->base.src_y;
- src->x2 = state->base.src_x + state->base.src_w;
- src->y2 = state->base.src_y + state->base.src_h;
-
- dst->x1 = state->base.crtc_x;
- dst->y1 = state->base.crtc_y;
- dst->x2 = state->base.crtc_x + state->base.crtc_w;
- dst->y2 = state->base.crtc_y + state->base.crtc_h;
+ *src = drm_plane_state_src(&state->base);
+ *dst = drm_plane_state_dest(&state->base);
if (!fb) {
state->base.visible = false;
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 98df09c2b388..33404295b447 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -18,7 +18,6 @@
#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/reservation.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -151,38 +150,11 @@ static int imx_drm_atomic_check(struct drm_device *dev,
return ret;
}
-static int imx_drm_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock)
-{
- struct drm_plane_state *plane_state;
- struct drm_plane *plane;
- struct dma_buf *dma_buf;
- int i;
-
- /*
- * If the plane fb has an dma-buf attached, fish out the exclusive
- * fence for the atomic helper to wait on.
- */
- for_each_plane_in_state(state, plane, plane_state, i) {
- if ((plane->state->fb != plane_state->fb) && plane_state->fb) {
- dma_buf = drm_fb_cma_get_gem_obj(plane_state->fb,
- 0)->base.dma_buf;
- if (!dma_buf)
- continue;
- plane_state->fence =
- reservation_object_get_excl_rcu(dma_buf->resv);
- }
- }
-
- return drm_atomic_helper_commit(dev, state, nonblock);
-}
-
static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
.fb_create = drm_fb_cma_create,
.output_poll_changed = imx_drm_output_poll_changed,
.atomic_check = imx_drm_atomic_check,
- .atomic_commit = imx_drm_atomic_commit,
+ .atomic_commit = drm_atomic_helper_commit,
};
static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
@@ -357,8 +329,8 @@ static int imx_drm_bind(struct device *dev)
int ret;
drm = drm_dev_alloc(&imx_drm_driver, dev);
- if (!drm)
- return -ENOMEM;
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
imxdrm = devm_kzalloc(dev, sizeof(*imxdrm), GFP_KERNEL);
if (!imxdrm) {
@@ -436,9 +408,11 @@ static int imx_drm_bind(struct device *dev)
err_fbhelper:
drm_kms_helper_poll_fini(drm);
+#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
if (imxdrm->fbhelper)
drm_fbdev_cma_fini(imxdrm->fbhelper);
err_unbind:
+#endif
component_unbind_all(drm->dev, drm);
err_vblank:
drm_vblank_cleanup(drm);
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 3ce391c239b0..b300998dce7d 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -319,18 +319,6 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
struct imx_ldb *ldb = imx_ldb_ch->ldb;
int mux, ret;
- /*
- * imx_ldb_encoder_disable is called by
- * drm_helper_disable_unused_functions without
- * the encoder being enabled before.
- */
- if (imx_ldb_ch == &ldb->channel[0] &&
- (ldb->ldb_ctrl & LDB_CH0_MODE_EN_MASK) == 0)
- return;
- else if (imx_ldb_ch == &ldb->channel[1] &&
- (ldb->ldb_ctrl & LDB_CH1_MODE_EN_MASK) == 0)
- return;
-
drm_panel_disable(imx_ldb_ch->panel);
if (imx_ldb_ch == &ldb->channel[0])
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index ce22d0a0ddc8..e74a0ad52950 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -50,6 +50,12 @@ static const uint32_t ipu_plane_formats[] = {
DRM_FORMAT_YVYU,
DRM_FORMAT_YUV420,
DRM_FORMAT_YVU420,
+ DRM_FORMAT_YUV422,
+ DRM_FORMAT_YVU422,
+ DRM_FORMAT_YUV444,
+ DRM_FORMAT_YVU444,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV16,
DRM_FORMAT_RGB565,
};
@@ -64,13 +70,14 @@ drm_plane_state_to_eba(struct drm_plane_state *state)
{
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *cma_obj;
+ int x = state->src_x >> 16;
+ int y = state->src_y >> 16;
cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
BUG_ON(!cma_obj);
- return cma_obj->paddr + fb->offsets[0] +
- fb->pitches[0] * (state->src_y >> 16) +
- (fb->bits_per_pixel >> 3) * (state->src_x >> 16);
+ return cma_obj->paddr + fb->offsets[0] + fb->pitches[0] * y +
+ drm_format_plane_cpp(fb->pixel_format, 0) * x;
}
static inline unsigned long
@@ -79,13 +86,17 @@ drm_plane_state_to_ubo(struct drm_plane_state *state)
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *cma_obj;
unsigned long eba = drm_plane_state_to_eba(state);
+ int x = state->src_x >> 16;
+ int y = state->src_y >> 16;
cma_obj = drm_fb_cma_get_gem_obj(fb, 1);
BUG_ON(!cma_obj);
- return cma_obj->paddr + fb->offsets[1] +
- fb->pitches[1] * (state->src_y >> 16) / 2 +
- (state->src_x >> 16) / 2 - eba;
+ x /= drm_format_horz_chroma_subsampling(fb->pixel_format);
+ y /= drm_format_vert_chroma_subsampling(fb->pixel_format);
+
+ return cma_obj->paddr + fb->offsets[1] + fb->pitches[1] * y +
+ drm_format_plane_cpp(fb->pixel_format, 1) * x - eba;
}
static inline unsigned long
@@ -94,69 +105,17 @@ drm_plane_state_to_vbo(struct drm_plane_state *state)
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *cma_obj;
unsigned long eba = drm_plane_state_to_eba(state);
+ int x = state->src_x >> 16;
+ int y = state->src_y >> 16;
cma_obj = drm_fb_cma_get_gem_obj(fb, 2);
BUG_ON(!cma_obj);
- return cma_obj->paddr + fb->offsets[2] +
- fb->pitches[2] * (state->src_y >> 16) / 2 +
- (state->src_x >> 16) / 2 - eba;
-}
-
-static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane,
- struct drm_plane_state *old_state)
-{
- struct drm_plane *plane = &ipu_plane->base;
- struct drm_plane_state *state = plane->state;
- struct drm_framebuffer *fb = state->fb;
- unsigned long eba, ubo, vbo;
- int active;
-
- eba = drm_plane_state_to_eba(state);
-
- switch (fb->pixel_format) {
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- if (old_state->fb)
- break;
-
- /*
- * Multiplanar formats have to meet the following restrictions:
- * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
- * - EBA, UBO and VBO are a multiple of 8
- * - UBO and VBO are unsigned and not larger than 0xfffff8
- * - Only EBA may be changed while scanout is active
- * - The strides of U and V planes must be identical.
- */
- ubo = drm_plane_state_to_ubo(state);
- vbo = drm_plane_state_to_vbo(state);
-
- if (fb->pixel_format == DRM_FORMAT_YUV420)
- ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
- fb->pitches[1], ubo, vbo);
- else
- ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
- fb->pitches[1], vbo, ubo);
-
- dev_dbg(ipu_plane->base.dev->dev,
- "phy = %lu %lu %lu, x = %d, y = %d", eba, ubo, vbo,
- state->src_x >> 16, state->src_y >> 16);
- break;
- default:
- dev_dbg(ipu_plane->base.dev->dev, "phys = %lu, x = %d, y = %d",
- eba, state->src_x >> 16, state->src_y >> 16);
-
- break;
- }
+ x /= drm_format_horz_chroma_subsampling(fb->pixel_format);
+ y /= drm_format_vert_chroma_subsampling(fb->pixel_format);
- if (old_state->fb) {
- active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
- ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
- ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
- } else {
- ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba);
- ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba);
- }
+ return cma_obj->paddr + fb->offsets[2] + fb->pitches[2] * y +
+ drm_format_plane_cpp(fb->pixel_format, 2) * x - eba;
}
void ipu_plane_put_resources(struct ipu_plane *ipu_plane)
@@ -259,6 +218,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
struct drm_framebuffer *fb = state->fb;
struct drm_framebuffer *old_fb = old_state->fb;
unsigned long eba, ubo, vbo, old_ubo, old_vbo;
+ int hsub, vsub;
/* Ok to disable */
if (!fb)
@@ -338,6 +298,10 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
switch (fb->pixel_format) {
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
/*
* Multiplanar formats have to meet the following restrictions:
* - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
@@ -346,30 +310,49 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
* - Only EBA may be changed while scanout is active
* - The strides of U and V planes must be identical.
*/
- ubo = drm_plane_state_to_ubo(state);
vbo = drm_plane_state_to_vbo(state);
- if ((ubo & 0x7) || (vbo & 0x7))
+ if (vbo & 0x7 || vbo > 0xfffff8)
return -EINVAL;
- if ((ubo > 0xfffff8) || (vbo > 0xfffff8))
- return -EINVAL;
-
- if (old_fb) {
- old_ubo = drm_plane_state_to_ubo(old_state);
+ if (old_fb && (fb->pixel_format == old_fb->pixel_format)) {
old_vbo = drm_plane_state_to_vbo(old_state);
- if (ubo != old_ubo || vbo != old_vbo)
- return -EINVAL;
+ if (vbo != old_vbo)
+ crtc_state->mode_changed = true;
}
if (fb->pitches[1] != fb->pitches[2])
return -EINVAL;
+ /* fall-through */
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ ubo = drm_plane_state_to_ubo(state);
+
+ if (ubo & 0x7 || ubo > 0xfffff8)
+ return -EINVAL;
+
+ if (old_fb && (fb->pixel_format == old_fb->pixel_format)) {
+ old_ubo = drm_plane_state_to_ubo(old_state);
+ if (ubo != old_ubo)
+ crtc_state->mode_changed = true;
+ }
+
if (fb->pitches[1] < 1 || fb->pitches[1] > 16384)
return -EINVAL;
if (old_fb && old_fb->pitches[1] != fb->pitches[1])
crtc_state->mode_changed = true;
+
+ /*
+ * The x/y offsets must be even in case of horizontal/vertical
+ * chroma subsampling.
+ */
+ hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
+ vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
+ if (((state->src_x >> 16) & (hsub - 1)) ||
+ ((state->src_y >> 16) & (vsub - 1)))
+ return -EINVAL;
}
return 0;
@@ -386,15 +369,19 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
{
struct ipu_plane *ipu_plane = to_ipu_plane(plane);
struct drm_plane_state *state = plane->state;
+ struct drm_crtc_state *crtc_state = state->crtc->state;
+ struct drm_framebuffer *fb = state->fb;
+ unsigned long eba, ubo, vbo;
enum ipu_color_space ics;
+ int active;
- if (old_state->fb) {
- struct drm_crtc_state *crtc_state = state->crtc->state;
+ eba = drm_plane_state_to_eba(state);
- if (!drm_atomic_crtc_needs_modeset(crtc_state)) {
- ipu_plane_atomic_set_base(ipu_plane, old_state);
- return;
- }
+ if (old_state->fb && !drm_atomic_crtc_needs_modeset(crtc_state)) {
+ active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
+ ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
+ ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
+ return;
}
switch (ipu_plane->dp_flow) {
@@ -424,6 +411,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false);
break;
default:
+ ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
break;
}
}
@@ -437,11 +425,50 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ipu_cpmem_set_high_priority(ipu_plane->ipu_ch);
ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1);
ipu_cpmem_set_stride(ipu_plane->ipu_ch, state->fb->pitches[0]);
- ipu_plane_atomic_set_base(ipu_plane, old_state);
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ ubo = drm_plane_state_to_ubo(state);
+ vbo = drm_plane_state_to_vbo(state);
+ if (fb->pixel_format == DRM_FORMAT_YVU420 ||
+ fb->pixel_format == DRM_FORMAT_YVU422 ||
+ fb->pixel_format == DRM_FORMAT_YVU444)
+ swap(ubo, vbo);
+
+ ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+ fb->pitches[1], ubo, vbo);
+
+ dev_dbg(ipu_plane->base.dev->dev,
+ "phy = %lu %lu %lu, x = %d, y = %d", eba, ubo, vbo,
+ state->src_x >> 16, state->src_y >> 16);
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ ubo = drm_plane_state_to_ubo(state);
+
+ ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+ fb->pitches[1], ubo, ubo);
+
+ dev_dbg(ipu_plane->base.dev->dev,
+ "phy = %lu %lu, x = %d, y = %d", eba, ubo,
+ state->src_x >> 16, state->src_y >> 16);
+ break;
+ default:
+ dev_dbg(ipu_plane->base.dev->dev, "phys = %lu, x = %d, y = %d",
+ eba, state->src_x >> 16, state->src_y >> 16);
+ break;
+ }
+ ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba);
+ ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba);
ipu_plane_enable(ipu_plane);
}
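
For a plain page flip (no modeset needed) the update hook now only reprograms the half of the IDMAC double buffer that is not being scanned out and then selects it; the hardware switches over at the next frame boundary. A toy model of that ping-pong, with the ipu_cpmem/ipu_idmac calls replaced by a plain struct (everything below is illustrative, not the IPU API):

#include <stdio.h>

/* Toy stand-in for the channel's double buffer. */
struct chan {
	unsigned long buf[2];	/* programmed scanout addresses */
	int active;		/* index currently being scanned out */
};

static void flip(struct chan *ch, unsigned long eba)
{
	int active = ch->active;	/* ~ ipu_idmac_get_current_buffer() */

	ch->buf[!active] = eba;		/* ~ ipu_cpmem_set_buffer(ch, !active, eba) */
	ch->active = !active;		/* ~ ipu_idmac_select_buffer(ch, !active);
					 *   real hardware flips at the next vblank */
}

int main(void)
{
	struct chan ch = { { 0x1000, 0x1000 }, 0 };

	flip(&ch, 0x2000);
	printf("now scanning out buffer %d at 0x%lx\n", ch.active, ch.buf[ch.active]);
	return 0;
}
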
static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = {
+ .prepare_fb = drm_fb_cma_prepare_fb,
.atomic_check = ipu_plane_atomic_check,
.atomic_disable = ipu_plane_atomic_disable,
.atomic_update = ipu_plane_atomic_update,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 296f541fbe2f..4b7fe7eaec01 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -249,16 +249,14 @@ static const struct file_operations mtk_drm_fops = {
.mmap = mtk_drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
};
static struct drm_driver mtk_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
DRIVER_ATOMIC,
- .get_vblank_counter = drm_vblank_count,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = mtk_drm_crtc_enable_vblank,
.disable_vblank = mtk_drm_crtc_disable_vblank,
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 1443b3a34775..b0b874264f9d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -82,9 +82,7 @@ static const struct file_operations mgag200_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = mgag200_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.read = drm_read,
};
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 83272b456329..5e20220ef4c6 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -267,6 +267,9 @@ int mgag200_mm_init(struct mga_device *mdev)
return ret;
}
+ arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
+
mdev->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
@@ -275,10 +278,14 @@ int mgag200_mm_init(struct mga_device *mdev)
void mgag200_mm_fini(struct mga_device *mdev)
{
+ struct drm_device *dev = mdev->dev;
+
ttm_bo_device_release(&mdev->ttm.bdev);
mgag200_ttm_global_release(mdev);
+ arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
arch_phys_wc_del(mdev->fb_mtrr);
mdev->fb_mtrr = 0;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
index a521207db8a1..b764d7f10312 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
@@ -15,6 +15,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <drm/drm_print.h>
#include "msm_drv.h"
#include "mdp4_kms.h"
@@ -29,7 +30,16 @@ void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
{
+ struct mdp4_kms *mdp4_kms = container_of(irq, struct mdp4_kms, error_handler);
+ static DEFINE_RATELIMIT_STATE(rs, 5*HZ, 1);
+ extern bool dumpstate;
+
DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus);
+
+ if (dumpstate && __ratelimit(&rs)) {
+ struct drm_printer p = drm_info_printer(mdp4_kms->dev->dev);
+ drm_state_dump(mdp4_kms->dev, &p);
+ }
}
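
The mdp4 and mdp5 error handlers gain the same body: always print the ratelimited error word, and, when the new `dumpstate` module parameter (added in msm_drv.c further down) is set, dump the full atomic state at most once per five-second window. The shared shape, written out once for clarity (kernel-style sketch; `kms_drm_dev` is a placeholder for the driver's drm_device):

static void kms_error_handler(struct drm_device *kms_drm_dev, uint32_t irqstatus)
{
	/* At most one state dump every 5 seconds, however often errors fire. */
	static DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 1);
	extern bool dumpstate;	/* module parameter, see msm_drv.c below */

	DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus);

	if (dumpstate && __ratelimit(&rs)) {
		struct drm_printer p = drm_info_printer(kms_drm_dev->dev);

		drm_state_dump(kms_drm_dev, &p);
	}
}
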
void mdp4_irq_preinstall(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index d53e5510fd7c..5c5940db898e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -17,6 +17,8 @@
#include <linux/irq.h>
+#include <drm/drm_print.h>
+
#include "msm_drv.h"
#include "mdp5_kms.h"
@@ -30,7 +32,16 @@ void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
{
+ struct mdp5_kms *mdp5_kms = container_of(irq, struct mdp5_kms, error_handler);
+ static DEFINE_RATELIMIT_STATE(rs, 5*HZ, 1);
+ extern bool dumpstate;
+
DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus);
+
+ if (dumpstate && __ratelimit(&rs)) {
+ struct drm_printer p = drm_info_printer(mdp5_kms->dev->dev);
+ drm_state_dump(mdp5_kms->dev, &p);
+ }
}
void mdp5_irq_preinstall(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 03738927be10..c6fbcfad2d59 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -114,6 +114,18 @@ static inline u32 mdp5_read(struct mdp5_kms *mdp5_kms, u32 reg)
return msm_readl(mdp5_kms->mmio + reg);
}
+static inline const char *stage2name(enum mdp_mixer_stage_id stage)
+{
+ static const char *names[] = {
+#define NAME(n) [n] = #n
+ NAME(STAGE_UNUSED), NAME(STAGE_BASE),
+ NAME(STAGE0), NAME(STAGE1), NAME(STAGE2),
+ NAME(STAGE3), NAME(STAGE4), NAME(STAGE6),
+#undef NAME
+ };
+ return names[stage];
+}
+
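
stage2name() relies on designated array initializers (`[n] = #n`), so each string lands at the index of its enum value and gaps or reordering in the enum cannot shift the names; an unnamed value simply maps to NULL. The same idiom in isolation, with a made-up enum:

#include <stdio.h>

enum color { RED = 1, GREEN = 4, BLUE = 7 };

static const char *color_name(enum color c)
{
	static const char *names[] = {
#define NAME(n) [n] = #n	/* index by value, stringify the identifier */
		NAME(RED), NAME(GREEN), NAME(BLUE),
#undef NAME
	};
	return names[c];	/* holes are NULL; callers must pass a named value */
}

int main(void)
{
	printf("%s\n", color_name(GREEN));
	return 0;
}
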
static inline const char *pipe2name(enum mdp5_pipe pipe)
{
static const char *names[] = {
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index cf50d3ec8d1b..8bf55e3450c5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -16,6 +16,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <drm/drm_print.h>
#include "mdp5_kms.h"
struct mdp5_plane {
@@ -181,6 +182,20 @@ done:
#undef SET_PROPERTY
}
+static void
+mdp5_plane_atomic_print_state(struct drm_printer *p,
+ const struct drm_plane_state *state)
+{
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
+
+ drm_printf(p, "\tpremultiplied=%u\n", pstate->premultiplied);
+ drm_printf(p, "\tzpos=%u\n", pstate->zpos);
+ drm_printf(p, "\talpha=%u\n", pstate->alpha);
+ drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage));
+ drm_printf(p, "\tmode_changed=%u\n", pstate->mode_changed);
+ drm_printf(p, "\tpending=%u\n", pstate->pending);
+}
+
static void mdp5_plane_reset(struct drm_plane *plane)
{
struct mdp5_plane_state *mdp5_state;
@@ -244,6 +259,7 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
.reset = mdp5_plane_reset,
.atomic_duplicate_state = mdp5_plane_duplicate_state,
.atomic_destroy_state = mdp5_plane_destroy_state,
+ .atomic_print_state = mdp5_plane_atomic_print_state,
};
static int mdp5_plane_prepare_fb(struct drm_plane *plane,
@@ -913,7 +929,7 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
mdp5_plane->formats, mdp5_plane->nformats,
- type, NULL);
+ type, "%s", mdp5_plane->name);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index db193f835298..4e21e1d72378 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -217,8 +217,9 @@ int msm_atomic_commit(struct drm_device *dev,
if ((plane->state->fb != plane_state->fb) && plane_state->fb) {
struct drm_gem_object *obj = msm_framebuffer_bo(plane_state->fb, 0);
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct dma_fence *fence = reservation_object_get_excl_rcu(msm_obj->resv);
- plane_state->fence = reservation_object_get_excl_rcu(msm_obj->resv);
+ drm_atomic_set_fence_for_plane(plane_state, fence);
}
}
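
Replacing the direct assignment with drm_atomic_set_fence_for_plane() matters because the plane state may already carry an explicit fence supplied by userspace; the helper keeps that one and drops the implicit fence instead of clobbering it. A toy model of that guard (model only, with stand-in types, not the kernel implementation):

#include <stdlib.h>

/* Stand-ins for struct dma_fence and struct drm_plane_state. */
struct fence { int refs; };
struct plane_state { struct fence *fence; };

static void fence_put(struct fence *f)
{
	if (f && --f->refs == 0)
		free(f);
}

static void set_fence_for_plane(struct plane_state *state, struct fence *fence)
{
	if (state->fence) {
		fence_put(fence);	/* keep the explicit fence, drop the implicit one */
		return;
	}
	state->fence = fence;		/* no fence yet: install the implicit one */
}
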
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 84d38eaea585..8d21fb27a401 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -79,6 +79,10 @@ static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);
+bool dumpstate = false;
+MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
+module_param(dumpstate, bool, 0600);
+
/*
* Util/helpers:
*/
@@ -768,9 +772,7 @@ static const struct file_operations fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index ffd4a338ca12..d29f5e82a410 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -39,6 +39,7 @@ struct msm_fbdev {
static struct fb_ops msm_fb_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
/* Note: to properly handle manual update displays, we wrap the
* basic fbdev ops which write to the framebuffer
@@ -49,12 +50,6 @@ static struct fb_ops msm_fb_ops = {
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
.fb_mmap = msm_fbdev_mmap,
-
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
};
static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 0cb7a18cde26..59d1d1c5de5f 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -702,7 +702,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc)
if (nv_two_heads(dev))
NVSetOwner(dev, nv_crtc->index);
- drm_vblank_pre_modeset(dev, nv_crtc->index);
+ drm_crtc_vblank_off(crtc);
funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
NVBlankScreen(dev, nv_crtc->index, true);
@@ -734,7 +734,7 @@ static void nv_crtc_commit(struct drm_crtc *crtc)
#endif
funcs->dpms(crtc, DRM_MODE_DPMS_ON);
- drm_vblank_post_modeset(dev, nv_crtc->index);
+ drm_crtc_vblank_on(crtc);
}
static void nv_crtc_destroy(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index ec444eac6258..a79514d440b3 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -33,7 +33,7 @@
#include "nouveau_connector.h"
#include "nouveau_display.h"
#include "nvreg.h"
-
+#include "disp.h"
struct nouveau_plane {
struct drm_plane base;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl5070.h b/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
index d15c296b5f33..ae49dfd1f97b 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
@@ -34,6 +34,8 @@ struct nv50_disp_mthd_v1 {
#define NV50_DISP_MTHD_V1_SOR_HDMI_PWR 0x22
#define NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT 0x23
#define NV50_DISP_MTHD_V1_SOR_DP_PWR 0x24
+#define NV50_DISP_MTHD_V1_SOR_DP_MST_LINK 0x25
+#define NV50_DISP_MTHD_V1_SOR_DP_MST_VCPI 0x26
#define NV50_DISP_MTHD_V1_PIOR_PWR 0x30
__u8 method;
__u16 hasht;
@@ -90,6 +92,21 @@ struct nv50_disp_sor_dp_pwr_v0 {
__u8 pad02[6];
};
+struct nv50_disp_sor_dp_mst_link_v0 {
+ __u8 version;
+ __u8 state;
+ __u8 pad02[6];
+};
+
+struct nv50_disp_sor_dp_mst_vcpi_v0 {
+ __u8 version;
+ __u8 pad01[1];
+ __u8 start_slot;
+ __u8 num_slots;
+ __u16 pbn;
+ __u16 aligned_pbn;
+};
+
struct nv50_disp_pior_pwr_v0 {
__u8 version;
__u8 state;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index 8d815967767f..9e58b305b020 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -66,6 +66,35 @@ void nvif_object_unmap(struct nvif_object *);
#define nvif_mthd(a,b,c,d) nvif_object_mthd((a), (b), (c), (d))
+struct nvif_mclass {
+ s32 oclass;
+ int version;
+};
+
+#define nvif_mclass(o,m) ({ \
+ struct nvif_object *object = (o); \
+ struct nvif_sclass *sclass; \
+ const typeof(m[0]) *mclass = (m); \
+ int ret = -ENODEV; \
+ int cnt, i, j; \
+ \
+ cnt = nvif_object_sclass_get(object, &sclass); \
+ if (cnt >= 0) { \
+ for (i = 0; ret < 0 && mclass[i].oclass; i++) { \
+ for (j = 0; j < cnt; j++) { \
+ if (mclass[i].oclass == sclass[j].oclass && \
+ mclass[i].version >= sclass[j].minver && \
+ mclass[i].version <= sclass[j].maxver) { \
+ ret = i; \
+ break; \
+ } \
+ } \
+ } \
+ nvif_object_sclass_put(&sclass); \
+ } \
+ ret; \
+})
+
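
nvif_mclass() expects a caller-built table of candidate classes, ordered from most to least preferred and terminated by a zero oclass; it returns the index of the first candidate the object's parent actually exposes, or -ENODEV. A hypothetical caller would look like this (the class ids and the `disp` object are made up for illustration; kernel-style fragment):

	static const struct nvif_mclass kinds[] = {
		{ 0x947d, 0 },	/* newest, preferred */
		{ 0x917d, 0 },
		{ 0x827d, 0 },
		{}		/* zero oclass terminates the table */
	};
	int cl;

	cl = nvif_mclass(&disp->object, kinds);
	if (cl < 0)
		return cl;	/* -ENODEV: none of the candidates is supported */
	/* kinds[cl].oclass is the preferred class that is actually available. */
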
/*XXX*/
#include <core/object.h>
#define nvxx_object(a) ({ \
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 3a410275fa71..65ce79a85d37 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -93,6 +93,7 @@ int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gm20b_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gp104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
@@ -156,4 +157,6 @@ struct nvkm_ram_func {
int (*prog)(struct nvkm_ram *);
void (*tidy)(struct nvkm_ram *);
};
+
+extern const u8 gf100_pte_storage_type_map[256];
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index dc57b628e074..193573d191e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -240,7 +240,8 @@ static bool nouveau_pr3_present(struct pci_dev *pdev)
if (!parent_adev)
return false;
- return acpi_has_method(parent_adev->handle, "_PR3");
+ return parent_adev->power.flags.power_resources &&
+ acpi_has_method(parent_adev->handle, "_PR3");
}
static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out,
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index a1570b109434..23ffe8571a99 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -333,6 +333,9 @@ get_fp_strap(struct drm_device *dev, struct nvbios *bios)
if (bios->major_version < 5 && bios->data[0x48] & 0x4)
return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_MAXWELL)
+ return nvif_rd32(device, 0x001800) & 0x0000000f;
+ else
if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
else
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index c1084088f9e4..947c200655b4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -30,6 +30,7 @@
#include <linux/vga_switcheroo.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_crtc_helper.h>
@@ -47,6 +48,301 @@
#include <nvif/cl0046.h>
#include <nvif/event.h>
+struct drm_display_mode *
+nouveau_conn_native_mode(struct drm_connector *connector)
+{
+ const struct drm_connector_helper_funcs *helper = connector->helper_private;
+ struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode, *largest = NULL;
+ int high_w = 0, high_h = 0, high_v = 0;
+
+ list_for_each_entry(mode, &connector->probed_modes, head) {
+ mode->vrefresh = drm_mode_vrefresh(mode);
+ if (helper->mode_valid(connector, mode) != MODE_OK ||
+ (mode->flags & DRM_MODE_FLAG_INTERLACE))
+ continue;
+
+ /* Use preferred mode if there is one. */

+ if (mode->type & DRM_MODE_TYPE_PREFERRED) {
+ NV_DEBUG(drm, "native mode from preferred\n");
+ return drm_mode_duplicate(dev, mode);
+ }
+
+ /* Otherwise, take the resolution with the largest width, then
+ * height, then vertical refresh
+ */
+ if (mode->hdisplay < high_w)
+ continue;
+
+ if (mode->hdisplay == high_w && mode->vdisplay < high_h)
+ continue;
+
+ if (mode->hdisplay == high_w && mode->vdisplay == high_h &&
+ mode->vrefresh < high_v)
+ continue;
+
+ high_w = mode->hdisplay;
+ high_h = mode->vdisplay;
+ high_v = mode->vrefresh;
+ largest = mode;
+ }
+
+ NV_DEBUG(drm, "native mode from largest: %dx%d@%d\n",
+ high_w, high_h, high_v);
+ return largest ? drm_mode_duplicate(dev, largest) : NULL;
+}
+
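
nouveau_conn_native_mode() is unchanged in substance (it moves out of the old set_property path), and its selection rule is: an EDID-preferred mode wins outright, otherwise pick the lexicographically largest (width, height, vrefresh) among the valid, non-interlaced probed modes. A standalone model of just that ordering (types and fields are simplified stand-ins):

#include <stddef.h>

struct mode { int w, h, refresh, preferred; };

static const struct mode *pick_native(const struct mode *m, size_t n)
{
	const struct mode *largest = NULL;
	int high_w = 0, high_h = 0, high_v = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		if (m[i].preferred)
			return &m[i];		/* preferred mode wins outright */
		if (m[i].w < high_w)
			continue;
		if (m[i].w == high_w && m[i].h < high_h)
			continue;
		if (m[i].w == high_w && m[i].h == high_h &&
		    m[i].refresh < high_v)
			continue;
		high_w = m[i].w;
		high_h = m[i].h;
		high_v = m[i].refresh;
		largest = &m[i];
	}
	return largest;
}
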
+int
+nouveau_conn_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property, u64 *val)
+{
+ struct nouveau_conn_atom *asyc = nouveau_conn_atom(state);
+ struct nouveau_display *disp = nouveau_display(connector->dev);
+ struct drm_device *dev = connector->dev;
+
+ if (property == dev->mode_config.scaling_mode_property)
+ *val = asyc->scaler.mode;
+ else if (property == disp->underscan_property)
+ *val = asyc->scaler.underscan.mode;
+ else if (property == disp->underscan_hborder_property)
+ *val = asyc->scaler.underscan.hborder;
+ else if (property == disp->underscan_vborder_property)
+ *val = asyc->scaler.underscan.vborder;
+ else if (property == disp->dithering_mode)
+ *val = asyc->dither.mode;
+ else if (property == disp->dithering_depth)
+ *val = asyc->dither.depth;
+ else if (property == disp->vibrant_hue_property)
+ *val = asyc->procamp.vibrant_hue;
+ else if (property == disp->color_vibrance_property)
+ *val = asyc->procamp.color_vibrance;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+int
+nouveau_conn_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property, u64 val)
+{
+ struct drm_device *dev = connector->dev;
+ struct nouveau_conn_atom *asyc = nouveau_conn_atom(state);
+ struct nouveau_display *disp = nouveau_display(dev);
+
+ if (property == dev->mode_config.scaling_mode_property) {
+ switch (val) {
+ case DRM_MODE_SCALE_NONE:
+ /* We allow 'None' for EDID modes, even on a fixed
+ * panel (some exist with support for lower refresh
+ * rates, which people might want to use for power-
+ * saving purposes).
+ *
+ * Non-EDID modes will force the use of GPU scaling
+ * to the native mode regardless of this setting.
+ */
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_eDP:
+ /* ... except prior to G80, where the code
+ * doesn't support such things.
+ */
+ if (disp->disp.oclass < NV50_DISP)
+ return -EINVAL;
+ break;
+ default:
+ break;
+ }
+ case DRM_MODE_SCALE_FULLSCREEN:
+ case DRM_MODE_SCALE_CENTER:
+ case DRM_MODE_SCALE_ASPECT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (asyc->scaler.mode != val) {
+ asyc->scaler.mode = val;
+ asyc->set.scaler = true;
+ }
+ } else
+ if (property == disp->underscan_property) {
+ if (asyc->scaler.underscan.mode != val) {
+ asyc->scaler.underscan.mode = val;
+ asyc->set.scaler = true;
+ }
+ } else
+ if (property == disp->underscan_hborder_property) {
+ if (asyc->scaler.underscan.hborder != val) {
+ asyc->scaler.underscan.hborder = val;
+ asyc->set.scaler = true;
+ }
+ } else
+ if (property == disp->underscan_vborder_property) {
+ if (asyc->scaler.underscan.vborder != val) {
+ asyc->scaler.underscan.vborder = val;
+ asyc->set.scaler = true;
+ }
+ } else
+ if (property == disp->dithering_mode) {
+ if (asyc->dither.mode != val) {
+ asyc->dither.mode = val;
+ asyc->set.dither = true;
+ }
+ } else
+ if (property == disp->dithering_depth) {
+ if (asyc->dither.depth != val) {
+ asyc->dither.depth = val;
+ asyc->set.dither = true;
+ }
+ } else
+ if (property == disp->vibrant_hue_property) {
+ if (asyc->procamp.vibrant_hue != val) {
+ asyc->procamp.vibrant_hue = val;
+ asyc->set.procamp = true;
+ }
+ } else
+ if (property == disp->color_vibrance_property) {
+ if (asyc->procamp.color_vibrance != val) {
+ asyc->procamp.color_vibrance = val;
+ asyc->set.procamp = true;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void
+nouveau_conn_atomic_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ struct nouveau_conn_atom *asyc = nouveau_conn_atom(state);
+ __drm_atomic_helper_connector_destroy_state(&asyc->state);
+ kfree(asyc);
+}
+
+struct drm_connector_state *
+nouveau_conn_atomic_duplicate_state(struct drm_connector *connector)
+{
+ struct nouveau_conn_atom *armc = nouveau_conn_atom(connector->state);
+ struct nouveau_conn_atom *asyc;
+ if (!(asyc = kmalloc(sizeof(*asyc), GFP_KERNEL)))
+ return NULL;
+ __drm_atomic_helper_connector_duplicate_state(connector, &asyc->state);
+ asyc->dither = armc->dither;
+ asyc->scaler = armc->scaler;
+ asyc->procamp = armc->procamp;
+ asyc->set.mask = 0;
+ return &asyc->state;
+}
+
+void
+nouveau_conn_reset(struct drm_connector *connector)
+{
+ struct nouveau_conn_atom *asyc;
+
+ if (WARN_ON(!(asyc = kzalloc(sizeof(*asyc), GFP_KERNEL))))
+ return;
+
+ if (connector->state)
+ __drm_atomic_helper_connector_destroy_state(connector->state);
+ __drm_atomic_helper_connector_reset(connector, &asyc->state);
+ asyc->dither.mode = DITHERING_MODE_AUTO;
+ asyc->dither.depth = DITHERING_DEPTH_AUTO;
+ asyc->scaler.mode = DRM_MODE_SCALE_NONE;
+ asyc->scaler.underscan.mode = UNDERSCAN_OFF;
+ asyc->procamp.color_vibrance = 150;
+ asyc->procamp.vibrant_hue = 90;
+
+ if (nouveau_display(connector->dev)->disp.oclass < NV50_DISP) {
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_LVDS:
+ /* See note in nouveau_conn_atomic_set_property(). */
+ asyc->scaler.mode = DRM_MODE_SCALE_FULLSCREEN;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+void
+nouveau_conn_attach_properties(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct nouveau_conn_atom *armc = nouveau_conn_atom(connector->state);
+ struct nouveau_display *disp = nouveau_display(dev);
+
+ /* Init DVI-I specific properties. */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DVII)
+ drm_object_attach_property(&connector->base, dev->mode_config.
+ dvi_i_subconnector_property, 0);
+
+ /* Add overscan compensation options to digital outputs. */
+ if (disp->underscan_property &&
+ (connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)) {
+ drm_object_attach_property(&connector->base,
+ disp->underscan_property,
+ UNDERSCAN_OFF);
+ drm_object_attach_property(&connector->base,
+ disp->underscan_hborder_property, 0);
+ drm_object_attach_property(&connector->base,
+ disp->underscan_vborder_property, 0);
+ }
+
+ /* Add hue and saturation options. */
+ if (disp->vibrant_hue_property)
+ drm_object_attach_property(&connector->base,
+ disp->vibrant_hue_property,
+ armc->procamp.vibrant_hue);
+ if (disp->color_vibrance_property)
+ drm_object_attach_property(&connector->base,
+ disp->color_vibrance_property,
+ armc->procamp.color_vibrance);
+
+ /* Scaling mode property. */
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_TV:
+ break;
+ case DRM_MODE_CONNECTOR_VGA:
+ if (disp->disp.oclass < NV50_DISP)
+ break; /* Can only scale on DFPs. */
+ /* Fall-through. */
+ default:
+ drm_object_attach_property(&connector->base, dev->mode_config.
+ scaling_mode_property,
+ armc->scaler.mode);
+ break;
+ }
+
+ /* Dithering properties. */
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_TV:
+ case DRM_MODE_CONNECTOR_VGA:
+ break;
+ default:
+ if (disp->dithering_mode) {
+ drm_object_attach_property(&connector->base,
+ disp->dithering_mode,
+ armc->dither.mode);
+ }
+ if (disp->dithering_depth) {
+ drm_object_attach_property(&connector->base,
+ disp->dithering_depth,
+ armc->dither.depth);
+ }
+ break;
+ }
+}
+
MODULE_PARM_DESC(tv_disable, "Disable TV-out detection");
int nouveau_tv_disable = 0;
module_param_named(tv_disable, nouveau_tv_disable, int, 0400);
@@ -151,7 +447,9 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
int ret = nouveau_dp_detect(nv_encoder);
- if (ret == 0)
+ if (ret == NOUVEAU_DP_MST)
+ return NULL;
+ if (ret == NOUVEAU_DP_SST)
break;
} else
if ((vga_switcheroo_handler_flags() &
@@ -465,199 +763,39 @@ static int
nouveau_connector_set_property(struct drm_connector *connector,
struct drm_property *property, uint64_t value)
{
- struct nouveau_display *disp = nouveau_display(connector->dev);
+ struct nouveau_conn_atom *asyc = nouveau_conn_atom(connector->state);
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
- struct drm_device *dev = connector->dev;
- struct nouveau_crtc *nv_crtc;
int ret;
- nv_crtc = NULL;
- if (connector->encoder && connector->encoder->crtc)
- nv_crtc = nouveau_crtc(connector->encoder->crtc);
-
- /* Scaling mode */
- if (property == dev->mode_config.scaling_mode_property) {
- bool modeset = false;
-
- switch (value) {
- case DRM_MODE_SCALE_NONE:
- /* We allow 'None' for EDID modes, even on a fixed
- * panel (some exist with support for lower refresh
- * rates, which people might want to use for power
- * saving purposes).
- *
- * Non-EDID modes will force the use of GPU scaling
- * to the native mode regardless of this setting.
- */
- switch (nv_connector->type) {
- case DCB_CONNECTOR_LVDS:
- case DCB_CONNECTOR_LVDS_SPWG:
- case DCB_CONNECTOR_eDP:
- /* ... except prior to G80, where the code
- * doesn't support such things.
- */
- if (disp->disp.oclass < NV50_DISP)
- return -EINVAL;
- break;
- default:
- break;
- }
- break;
- case DRM_MODE_SCALE_FULLSCREEN:
- case DRM_MODE_SCALE_CENTER:
- case DRM_MODE_SCALE_ASPECT:
- break;
- default:
- return -EINVAL;
- }
-
- /* Changing between GPU and panel scaling requires a full
- * modeset
- */
- if ((nv_connector->scaling_mode == DRM_MODE_SCALE_NONE) ||
- (value == DRM_MODE_SCALE_NONE))
- modeset = true;
- nv_connector->scaling_mode = value;
-
- if (!nv_crtc)
- return 0;
-
- if (modeset || !nv_crtc->set_scale) {
- ret = drm_crtc_helper_set_mode(&nv_crtc->base,
- &nv_crtc->base.mode,
- nv_crtc->base.x,
- nv_crtc->base.y, NULL);
- if (!ret)
- return -EINVAL;
- } else {
- ret = nv_crtc->set_scale(nv_crtc, true);
- if (ret)
- return ret;
- }
-
- return 0;
- }
-
- /* Underscan */
- if (property == disp->underscan_property) {
- if (nv_connector->underscan != value) {
- nv_connector->underscan = value;
- if (!nv_crtc || !nv_crtc->set_scale)
- return 0;
-
- return nv_crtc->set_scale(nv_crtc, true);
- }
-
- return 0;
- }
-
- if (property == disp->underscan_hborder_property) {
- if (nv_connector->underscan_hborder != value) {
- nv_connector->underscan_hborder = value;
- if (!nv_crtc || !nv_crtc->set_scale)
- return 0;
-
- return nv_crtc->set_scale(nv_crtc, true);
- }
-
- return 0;
- }
-
- if (property == disp->underscan_vborder_property) {
- if (nv_connector->underscan_vborder != value) {
- nv_connector->underscan_vborder = value;
- if (!nv_crtc || !nv_crtc->set_scale)
- return 0;
-
- return nv_crtc->set_scale(nv_crtc, true);
- }
-
- return 0;
- }
+ if (connector->dev->mode_config.funcs->atomic_commit)
+ return drm_atomic_helper_connector_set_property(connector, property, value);
- /* Dithering */
- if (property == disp->dithering_mode) {
- nv_connector->dithering_mode = value;
- if (!nv_crtc || !nv_crtc->set_dither)
- return 0;
-
- return nv_crtc->set_dither(nv_crtc, true);
- }
-
- if (property == disp->dithering_depth) {
- nv_connector->dithering_depth = value;
- if (!nv_crtc || !nv_crtc->set_dither)
- return 0;
-
- return nv_crtc->set_dither(nv_crtc, true);
- }
-
- if (nv_crtc && nv_crtc->set_color_vibrance) {
- /* Hue */
- if (property == disp->vibrant_hue_property) {
- nv_crtc->vibrant_hue = value - 90;
- return nv_crtc->set_color_vibrance(nv_crtc, true);
- }
- /* Saturation */
- if (property == disp->color_vibrance_property) {
- nv_crtc->color_vibrance = value - 100;
- return nv_crtc->set_color_vibrance(nv_crtc, true);
- }
+ ret = connector->funcs->atomic_set_property(&nv_connector->base,
+ &asyc->state,
+ property, value);
+ if (ret) {
+ if (nv_encoder && nv_encoder->dcb->type == DCB_OUTPUT_TV)
+ return get_slave_funcs(encoder)->set_property(
+ encoder, connector, property, value);
+ return ret;
}
- if (nv_encoder && nv_encoder->dcb->type == DCB_OUTPUT_TV)
- return get_slave_funcs(encoder)->set_property(
- encoder, connector, property, value);
-
- return -EINVAL;
-}
-
-static struct drm_display_mode *
-nouveau_connector_native_mode(struct drm_connector *connector)
-{
- const struct drm_connector_helper_funcs *helper = connector->helper_private;
- struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nouveau_connector *nv_connector = nouveau_connector(connector);
- struct drm_device *dev = connector->dev;
- struct drm_display_mode *mode, *largest = NULL;
- int high_w = 0, high_h = 0, high_v = 0;
+ nv_connector->scaling_mode = asyc->scaler.mode;
+ nv_connector->dithering_mode = asyc->dither.mode;
- list_for_each_entry(mode, &nv_connector->base.probed_modes, head) {
- mode->vrefresh = drm_mode_vrefresh(mode);
- if (helper->mode_valid(connector, mode) != MODE_OK ||
- (mode->flags & DRM_MODE_FLAG_INTERLACE))
- continue;
-
- /* Use preferred mode if there is one.. */
- if (mode->type & DRM_MODE_TYPE_PREFERRED) {
- NV_DEBUG(drm, "native mode from preferred\n");
- return drm_mode_duplicate(dev, mode);
- }
-
- /* Otherwise, take the resolution with the largest width, then
- * height, then vertical refresh
- */
- if (mode->hdisplay < high_w)
- continue;
-
- if (mode->hdisplay == high_w && mode->vdisplay < high_h)
- continue;
-
- if (mode->hdisplay == high_w && mode->vdisplay == high_h &&
- mode->vrefresh < high_v)
- continue;
-
- high_w = mode->hdisplay;
- high_h = mode->vdisplay;
- high_v = mode->vrefresh;
- largest = mode;
+ if (connector->encoder && connector->encoder->crtc) {
+ ret = drm_crtc_helper_set_mode(connector->encoder->crtc,
+ &connector->encoder->crtc->mode,
+ connector->encoder->crtc->x,
+ connector->encoder->crtc->y,
+ NULL);
+ if (!ret)
+ return -EINVAL;
}
- NV_DEBUG(drm, "native mode from largest: %dx%d@%d\n",
- high_w, high_h, high_v);
- return largest ? drm_mode_duplicate(dev, largest) : NULL;
+ return 0;
}
struct moderec {
@@ -805,8 +943,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
* the list of modes.
*/
if (!nv_connector->native_mode)
- nv_connector->native_mode =
- nouveau_connector_native_mode(connector);
+ nv_connector->native_mode = nouveau_conn_native_mode(connector);
if (ret == 0 && nv_connector->native_mode) {
struct drm_display_mode *mode;
@@ -934,56 +1071,42 @@ nouveau_connector_helper_funcs = {
.best_encoder = nouveau_connector_best_encoder,
};
+static int
+nouveau_connector_dpms(struct drm_connector *connector, int mode)
+{
+ if (connector->dev->mode_config.funcs->atomic_commit)
+ return drm_atomic_helper_connector_dpms(connector, mode);
+ return drm_helper_connector_dpms(connector, mode);
+}
+
static const struct drm_connector_funcs
nouveau_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = nouveau_connector_dpms,
+ .reset = nouveau_conn_reset,
.detect = nouveau_connector_detect,
- .destroy = nouveau_connector_destroy,
+ .force = nouveau_connector_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = nouveau_connector_set_property,
- .force = nouveau_connector_force
+ .destroy = nouveau_connector_destroy,
+ .atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
+ .atomic_destroy_state = nouveau_conn_atomic_destroy_state,
+ .atomic_set_property = nouveau_conn_atomic_set_property,
+ .atomic_get_property = nouveau_conn_atomic_get_property,
};
static const struct drm_connector_funcs
nouveau_connector_funcs_lvds = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = nouveau_connector_dpms,
+ .reset = nouveau_conn_reset,
.detect = nouveau_connector_detect_lvds,
- .destroy = nouveau_connector_destroy,
+ .force = nouveau_connector_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = nouveau_connector_set_property,
- .force = nouveau_connector_force
-};
-
-static int
-nouveau_connector_dp_dpms(struct drm_connector *connector, int mode)
-{
- struct nouveau_encoder *nv_encoder = NULL;
-
- if (connector->encoder)
- nv_encoder = nouveau_encoder(connector->encoder);
- if (nv_encoder && nv_encoder->dcb &&
- nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- if (mode == DRM_MODE_DPMS_ON) {
- u8 data = DP_SET_POWER_D0;
- nvkm_wraux(nv_encoder->aux, DP_SET_POWER, &data, 1);
- usleep_range(1000, 2000);
- } else {
- u8 data = DP_SET_POWER_D3;
- nvkm_wraux(nv_encoder->aux, DP_SET_POWER, &data, 1);
- }
- }
-
- return drm_helper_connector_dpms(connector, mode);
-}
-
-static const struct drm_connector_funcs
-nouveau_connector_funcs_dp = {
- .dpms = nouveau_connector_dp_dpms,
- .detect = nouveau_connector_detect,
.destroy = nouveau_connector_destroy,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .set_property = nouveau_connector_set_property,
- .force = nouveau_connector_force
+ .atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
+ .atomic_destroy_state = nouveau_conn_atomic_destroy_state,
+ .atomic_set_property = nouveau_conn_atomic_set_property,
+ .atomic_get_property = nouveau_conn_atomic_get_property,
};
static int
@@ -995,19 +1118,20 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
struct nouveau_drm *drm = nouveau_drm(connector->dev);
const struct nvif_notify_conn_rep_v0 *rep = notify->data;
const char *name = connector->name;
+ struct nouveau_encoder *nv_encoder;
if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
+ NV_DEBUG(drm, "service %s\n", name);
+ if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP)))
+ nv50_mstm_service(nv_encoder->dp.mstm);
} else {
bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG);
NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
-
- mutex_lock(&drm->dev->mode_config.mutex);
- if (plugged)
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- else
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- mutex_unlock(&drm->dev->mode_config.mutex);
+ if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) {
+ if (!plugged)
+ nv50_mstm_remove(nv_encoder->dp.mstm);
+ }
drm_helper_hpd_irq_event(connector->dev);
}
@@ -1188,7 +1312,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
return ERR_PTR(ret);
}
- funcs = &nouveau_connector_funcs_dp;
+ funcs = &nouveau_connector_funcs;
break;
default:
funcs = &nouveau_connector_funcs;
@@ -1202,38 +1326,10 @@ nouveau_connector_create(struct drm_device *dev, int index)
drm_connector_init(dev, connector, funcs, type);
drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
- /* Init DVI-I specific properties */
- if (nv_connector->type == DCB_CONNECTOR_DVI_I)
- drm_object_attach_property(&connector->base, dev->mode_config.dvi_i_subconnector_property, 0);
+ connector->funcs->reset(connector);
+ nouveau_conn_attach_properties(connector);
- /* Add overscan compensation options to digital outputs */
- if (disp->underscan_property &&
- (type == DRM_MODE_CONNECTOR_DVID ||
- type == DRM_MODE_CONNECTOR_DVII ||
- type == DRM_MODE_CONNECTOR_HDMIA ||
- type == DRM_MODE_CONNECTOR_DisplayPort)) {
- drm_object_attach_property(&connector->base,
- disp->underscan_property,
- UNDERSCAN_OFF);
- drm_object_attach_property(&connector->base,
- disp->underscan_hborder_property,
- 0);
- drm_object_attach_property(&connector->base,
- disp->underscan_vborder_property,
- 0);
- }
-
- /* Add hue and saturation options */
- if (disp->vibrant_hue_property)
- drm_object_attach_property(&connector->base,
- disp->vibrant_hue_property,
- 90);
- if (disp->color_vibrance_property)
- drm_object_attach_property(&connector->base,
- disp->color_vibrance_property,
- 150);
-
- /* default scaling mode */
+ /* Default scaling mode */
switch (nv_connector->type) {
case DCB_CONNECTOR_LVDS:
case DCB_CONNECTOR_LVDS_SPWG:
@@ -1250,23 +1346,6 @@ nouveau_connector_create(struct drm_device *dev, int index)
break;
}
- /* scaling mode property */
- switch (nv_connector->type) {
- case DCB_CONNECTOR_TV_0:
- case DCB_CONNECTOR_TV_1:
- case DCB_CONNECTOR_TV_3:
- break;
- case DCB_CONNECTOR_VGA:
- if (disp->disp.oclass < NV50_DISP)
- break; /* can only scale on DFPs */
- /* fall-through */
- default:
- drm_object_attach_property(&connector->base, dev->mode_config.
- scaling_mode_property,
- nv_connector->scaling_mode);
- break;
- }
-
/* dithering properties */
switch (nv_connector->type) {
case DCB_CONNECTOR_TV_0:
@@ -1275,20 +1354,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
case DCB_CONNECTOR_VGA:
break;
default:
- if (disp->dithering_mode) {
- nv_connector->dithering_mode = DITHERING_MODE_AUTO;
- drm_object_attach_property(&connector->base,
- disp->dithering_mode,
- nv_connector->
- dithering_mode);
- }
- if (disp->dithering_depth) {
- nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
- drm_object_attach_property(&connector->base,
- disp->dithering_depth,
- nv_connector->
- dithering_depth);
- }
+ nv_connector->dithering_mode = DITHERING_MODE_AUTO;
break;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 7446ee66ea04..096983c42a1f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -35,30 +35,6 @@
struct nvkm_i2c_port;
-enum nouveau_underscan_type {
- UNDERSCAN_OFF,
- UNDERSCAN_ON,
- UNDERSCAN_AUTO,
-};
-
-/* the enum values specifically defined here match nv50/nvd0 hw values, and
- * the code relies on this
- */
-enum nouveau_dithering_mode {
- DITHERING_MODE_OFF = 0x00,
- DITHERING_MODE_ON = 0x01,
- DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
- DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
- DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
- DITHERING_MODE_AUTO
-};
-
-enum nouveau_dithering_depth {
- DITHERING_DEPTH_6BPC = 0x00,
- DITHERING_DEPTH_8BPC = 0x02,
- DITHERING_DEPTH_AUTO
-};
-
struct nouveau_connector {
struct drm_connector base;
enum dcb_connector_type type;
@@ -70,12 +46,7 @@ struct nouveau_connector {
struct drm_dp_aux aux;
int dithering_mode;
- int dithering_depth;
int scaling_mode;
- bool scaling_full;
- enum nouveau_underscan_type underscan;
- u32 underscan_hborder;
- u32 underscan_vborder;
struct nouveau_encoder *detected_encoder;
struct edid *edid;
@@ -109,5 +80,74 @@ nouveau_connector_create(struct drm_device *, int index);
extern int nouveau_tv_disable;
extern int nouveau_ignorelid;
extern int nouveau_duallink;
+extern int nouveau_hdmimhz;
+
+#include <drm/drm_crtc.h>
+#define nouveau_conn_atom(p) \
+ container_of((p), struct nouveau_conn_atom, state)
+
+struct nouveau_conn_atom {
+ struct drm_connector_state state;
+
+ struct {
+ /* The enum values specifically defined here match nv50/gf119
+ * hw values, and the code relies on this.
+ */
+ enum {
+ DITHERING_MODE_OFF = 0x00,
+ DITHERING_MODE_ON = 0x01,
+ DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
+ DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
+ DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
+ DITHERING_MODE_AUTO
+ } mode;
+ enum {
+ DITHERING_DEPTH_6BPC = 0x00,
+ DITHERING_DEPTH_8BPC = 0x02,
+ DITHERING_DEPTH_AUTO
+ } depth;
+ } dither;
+
+ struct {
+ int mode; /* DRM_MODE_SCALE_* */
+ struct {
+ enum {
+ UNDERSCAN_OFF,
+ UNDERSCAN_ON,
+ UNDERSCAN_AUTO,
+ } mode;
+ u32 hborder;
+ u32 vborder;
+ } underscan;
+ bool full;
+ } scaler;
+
+ struct {
+ int color_vibrance;
+ int vibrant_hue;
+ } procamp;
+
+ union {
+ struct {
+ bool dither:1;
+ bool scaler:1;
+ bool procamp:1;
+ };
+ u8 mask;
+ } set;
+};
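
The `set` member is an anonymous-struct/byte union: the atomic_set_property code flips individual flags by name, while duplicate_state clears the whole group with `asyc->set.mask = 0` and the commit path can test "anything dirty?" through the same byte. A small standalone illustration of the idiom (it relies on the compiler packing the bitfields into the byte aliased by `mask`, which holds for the ABIs the kernel targets):

#include <assert.h>

union dirty {
	struct {
		unsigned char dither:1;
		unsigned char scaler:1;
		unsigned char procamp:1;
	};
	unsigned char mask;
};

int main(void)
{
	union dirty set = { .mask = 0 };	/* clear every flag at once */

	set.scaler = 1;				/* mark one group dirty by name */
	assert(set.mask != 0);			/* "anything dirty?" via the byte */
	return 0;
}
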
+void nouveau_conn_attach_properties(struct drm_connector *);
+void nouveau_conn_reset(struct drm_connector *);
+struct drm_connector_state *
+nouveau_conn_atomic_duplicate_state(struct drm_connector *);
+void nouveau_conn_atomic_destroy_state(struct drm_connector *,
+ struct drm_connector_state *);
+int nouveau_conn_atomic_set_property(struct drm_connector *,
+ struct drm_connector_state *,
+ struct drm_property *, u64);
+int nouveau_conn_atomic_get_property(struct drm_connector *,
+ const struct drm_connector_state *,
+ struct drm_property *, u64 *);
+struct drm_display_mode *nouveau_conn_native_mode(struct drm_connector *);
#endif /* __NOUVEAU_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index 863f10b8d818..050fcf30a0d2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -38,8 +38,6 @@ struct nouveau_crtc {
uint32_t dpms_saved_fp_control;
uint32_t fp_users;
int saturation;
- int color_vibrance;
- int vibrant_hue;
int sharpness;
int last_dpms;
@@ -54,7 +52,6 @@ struct nouveau_crtc {
struct {
struct nouveau_bo *nvbo;
- bool visible;
uint32_t offset;
void (*set_offset)(struct nouveau_crtc *, uint32_t offset);
void (*set_pos)(struct nouveau_crtc *, int x, int y);
@@ -70,10 +67,6 @@ struct nouveau_crtc {
int depth;
} lut;
- int (*set_dither)(struct nouveau_crtc *crtc, bool update);
- int (*set_scale)(struct nouveau_crtc *crtc, bool update);
- int (*set_color_vibrance)(struct nouveau_crtc *crtc, bool update);
-
void (*save)(struct drm_crtc *crtc);
void (*restore)(struct drm_crtc *crtc);
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index afbf557b23d4..5698687bc197 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -25,6 +25,8 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <nvif/class.h>
@@ -92,7 +94,7 @@ calc(int blanks, int blanke, int total, int line)
return line;
}
-int
+static int
nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime)
{
@@ -158,9 +160,13 @@ nouveau_display_vblstamp(struct drm_device *dev, unsigned int pipe,
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (nouveau_crtc(crtc)->index == pipe) {
+ struct drm_display_mode *mode;
+ if (dev->mode_config.funcs->atomic_commit)
+ mode = &crtc->state->adjusted_mode;
+ else
+ mode = &crtc->hwmode;
return drm_calc_vbltimestamp_from_scanoutpos(dev,
- pipe, max_error, time, flags,
- &crtc->hwmode);
+ pipe, max_error, time, flags, mode);
}
}
@@ -217,10 +223,6 @@ static void
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
{
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
- struct nouveau_display *disp = nouveau_display(drm_fb->dev);
-
- if (disp->fb_dtor)
- disp->fb_dtor(drm_fb);
if (fb->nvbo)
drm_gem_object_unreference_unlocked(&fb->nvbo->gem);
@@ -245,57 +247,45 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
};
int
-nouveau_framebuffer_init(struct drm_device *dev,
- struct nouveau_framebuffer *nv_fb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct nouveau_bo *nvbo)
+nouveau_framebuffer_new(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct nouveau_bo *nvbo,
+ struct nouveau_framebuffer **pfb)
{
- struct nouveau_display *disp = nouveau_display(dev);
- struct drm_framebuffer *fb = &nv_fb->base;
+ struct nouveau_framebuffer *fb;
int ret;
- drm_helper_mode_fill_fb_struct(fb, mode_cmd);
- nv_fb->nvbo = nvbo;
-
- ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
- if (ret)
- return ret;
+ if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL)))
+ return -ENOMEM;
- if (disp->fb_ctor) {
- ret = disp->fb_ctor(fb);
- if (ret)
- disp->fb_dtor(fb);
- }
+ drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
+ fb->nvbo = nvbo;
+ ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs);
+ if (ret)
+ kfree(fb);
return ret;
}
-static struct drm_framebuffer *
+struct drm_framebuffer *
nouveau_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct nouveau_framebuffer *nouveau_fb;
+ struct nouveau_framebuffer *fb;
+ struct nouveau_bo *nvbo;
struct drm_gem_object *gem;
- int ret = -ENOMEM;
+ int ret;
gem = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
if (!gem)
return ERR_PTR(-ENOENT);
+ nvbo = nouveau_gem_object(gem);
- nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
- if (!nouveau_fb)
- goto err_unref;
-
- ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem));
- if (ret)
- goto err;
-
- return &nouveau_fb->base;
+ ret = nouveau_framebuffer_new(dev, mode_cmd, nvbo, &fb);
+ if (ret == 0)
+ return &fb->base;
-err:
- kfree(nouveau_fb);
-err_unref:
drm_gem_object_unreference_unlocked(gem);
return ERR_PTR(ret);
}
@@ -385,16 +375,19 @@ nouveau_display_init(struct drm_device *dev)
}
void
-nouveau_display_fini(struct drm_device *dev)
+nouveau_display_fini(struct drm_device *dev, bool suspend)
{
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_connector *connector;
- int head;
+ struct drm_crtc *crtc;
+
+ if (!suspend)
+ drm_crtc_force_disable_all(dev);
/* Make sure that drm and hw vblank irqs get properly disabled. */
- for (head = 0; head < dev->mode_config.num_crtc; head++)
- drm_vblank_off(dev, head);
+ drm_for_each_crtc(crtc, dev)
+ drm_crtc_vblank_off(crtc);
/* disable flip completion events */
nvif_notify_put(&drm->flip);
@@ -530,6 +523,8 @@ nouveau_display_create(struct drm_device *dev)
if (ret)
goto disp_create_err;
+ drm_mode_config_reset(dev);
+
if (dev->mode_config.num_crtc) {
ret = nouveau_display_vblank_init(dev);
if (ret)
@@ -556,7 +551,6 @@ nouveau_display_destroy(struct drm_device *dev)
nouveau_display_vblank_fini(dev);
drm_kms_helper_poll_fini(dev);
- drm_crtc_force_disable_all(dev);
drm_mode_config_cleanup(dev);
if (disp->dtor)
@@ -568,12 +562,138 @@ nouveau_display_destroy(struct drm_device *dev)
kfree(disp);
}
+static int
+nouveau_atomic_disable_connector(struct drm_atomic_state *state,
+ struct drm_connector *connector)
+{
+ struct drm_connector_state *connector_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane_state *plane_state;
+ struct drm_plane *plane;
+ int ret;
+
+ if (!(crtc = connector->state->crtc))
+ return 0;
+
+ connector_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(connector_state))
+ return PTR_ERR(connector_state);
+
+ ret = drm_atomic_set_crtc_for_connector(connector_state, NULL);
+ if (ret)
+ return ret;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
+ if (ret)
+ return ret;
+
+ crtc_state->active = false;
+
+ drm_for_each_plane_mask(plane, connector->dev, crtc_state->plane_mask) {
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
+ if (ret)
+ return ret;
+
+ drm_atomic_set_fb_for_plane(plane_state, NULL);
+ }
+
+ return 0;
+}
+
+static int
+nouveau_atomic_disable(struct drm_device *dev,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ struct drm_connector *connector;
+ int ret;
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ctx;
+
+ drm_for_each_connector(connector, dev) {
+ ret = nouveau_atomic_disable_connector(state, connector);
+ if (ret)
+ break;
+ }
+
+ if (ret == 0)
+ ret = drm_atomic_commit(state);
+ drm_atomic_state_put(state);
+ return ret;
+}
+
+static struct drm_atomic_state *
+nouveau_atomic_suspend(struct drm_device *dev)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ int ret;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+ ret = drm_modeset_lock_all_ctx(dev, &ctx);
+ if (ret < 0) {
+ state = ERR_PTR(ret);
+ goto unlock;
+ }
+
+ state = drm_atomic_helper_duplicate_state(dev, &ctx);
+ if (IS_ERR(state))
+ goto unlock;
+
+ ret = nouveau_atomic_disable(dev, &ctx);
+ if (ret < 0) {
+ drm_atomic_state_put(state);
+ state = ERR_PTR(ret);
+ goto unlock;
+ }
+
+unlock:
+ if (PTR_ERR(state) == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ return state;
+}
+
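
nouveau_atomic_suspend() follows the standard modeset-lock backoff dance: take every lock under one acquire context, and when any step reports -EDEADLK, drop the locks with drm_modeset_backoff() and retry from the top. Stripped of the nouveau specifics, the skeleton looks like this (kernel-style sketch; do_locked_work() is a hypothetical placeholder for whatever needs the locks):

	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == 0)
		ret = do_locked_work(dev, &ctx);	/* hypothetical; may return -EDEADLK */
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);		/* wait for the contended lock, drop ours */
		goto retry;
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
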
int
nouveau_display_suspend(struct drm_device *dev, bool runtime)
{
+ struct nouveau_display *disp = nouveau_display(dev);
struct drm_crtc *crtc;
- nouveau_display_fini(dev);
+ if (dev->mode_config.funcs->atomic_commit) {
+ if (!runtime) {
+ disp->suspend = nouveau_atomic_suspend(dev);
+ if (IS_ERR(disp->suspend)) {
+ int ret = PTR_ERR(disp->suspend);
+ disp->suspend = NULL;
+ return ret;
+ }
+ }
+
+ nouveau_display_fini(dev, true);
+ return 0;
+ }
+
+ nouveau_display_fini(dev, true);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_framebuffer *nouveau_fb;
@@ -600,9 +720,19 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime)
void
nouveau_display_resume(struct drm_device *dev, bool runtime)
{
+ struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_crtc *crtc;
- int ret, head;
+ int ret;
+
+ if (dev->mode_config.funcs->atomic_commit) {
+ nouveau_display_init(dev);
+ if (disp->suspend) {
+ drm_atomic_helper_resume(dev, disp->suspend);
+ disp->suspend = NULL;
+ }
+ return;
+ }
/* re-pin fb/cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -647,10 +777,6 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
drm_helper_resume_force_mode(dev);
- /* Make sure that drm and hw vblank irqs get resumed if needed. */
- for (head = 0; head < dev->mode_config.num_crtc; head++)
- drm_vblank_on(dev, head);
-
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
@@ -692,10 +818,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
if (ret)
goto fail;
- if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI)
- BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
- else
- BEGIN_NVC0(chan, FermiSw, NV_SW_PAGE_FLIP, 1);
+ BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
OUT_RING (chan, 0x00000000);
FIRE_RING (chan);
@@ -724,6 +847,8 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct nouveau_channel *chan;
struct nouveau_cli *cli;
struct nouveau_fence *fence;
+ struct nv04_display *dispnv04 = nv04_display(dev);
+ int head = nouveau_crtc(crtc)->index;
int ret;
chan = drm->channel;
@@ -770,32 +895,23 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
drm_crtc_vblank_get(crtc);
/* Emit a page flip */
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- ret = nv50_display_flip_next(crtc, fb, chan, swap_interval);
+ if (swap_interval) {
+ ret = RING_SPACE(chan, 8);
if (ret)
goto fail_unreserve;
- } else {
- struct nv04_display *dispnv04 = nv04_display(dev);
- int head = nouveau_crtc(crtc)->index;
-
- if (swap_interval) {
- ret = RING_SPACE(chan, 8);
- if (ret)
- goto fail_unreserve;
-
- BEGIN_NV04(chan, NvSubImageBlit, 0x012c, 1);
- OUT_RING (chan, 0);
- BEGIN_NV04(chan, NvSubImageBlit, 0x0134, 1);
- OUT_RING (chan, head);
- BEGIN_NV04(chan, NvSubImageBlit, 0x0100, 1);
- OUT_RING (chan, 0);
- BEGIN_NV04(chan, NvSubImageBlit, 0x0130, 1);
- OUT_RING (chan, 0);
- }
- nouveau_bo_ref(new_bo, &dispnv04->image[head]);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x012c, 1);
+ OUT_RING (chan, 0);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0134, 1);
+ OUT_RING (chan, head);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0100, 1);
+ OUT_RING (chan, 0);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0130, 1);
+ OUT_RING (chan, 0);
}
+ nouveau_bo_ref(new_bo, &dispnv04->image[head]);
+
ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
if (ret)
goto fail_unreserve;
@@ -843,16 +959,8 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
if (s->event) {
- if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
- drm_crtc_arm_vblank_event(s->crtc, s->event);
- } else {
- drm_crtc_send_vblank_event(s->crtc, s->event);
-
- /* Give up ownership of vblank for page-flipped crtc */
- drm_crtc_vblank_put(s->crtc);
- }
- }
- else {
+ drm_crtc_arm_vblank_event(s->crtc, s->event);
+ } else {
/* Give up ownership of vblank for page-flipped crtc */
drm_crtc_vblank_put(s->crtc);
}
@@ -874,12 +982,10 @@ nouveau_flip_complete(struct nvif_notify *notify)
struct nouveau_page_flip_state state;
if (!nouveau_finish_page_flip(chan, &state)) {
- if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
- nv_set_crtc_base(drm->dev, drm_crtc_index(state.crtc),
- state.offset + state.crtc->y *
- state.pitch + state.crtc->x *
- state.bpp / 8);
- }
+ nv_set_crtc_base(drm->dev, drm_crtc_index(state.crtc),
+ state.offset + state.crtc->y *
+ state.pitch + state.crtc->x *
+ state.bpp / 8);
}
return NVIF_NOTIFY_KEEP;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 0420ee861ea4..330fe0fc5c11 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -22,8 +22,9 @@ nouveau_framebuffer(struct drm_framebuffer *fb)
return container_of(fb, struct nouveau_framebuffer, base);
}
-int nouveau_framebuffer_init(struct drm_device *, struct nouveau_framebuffer *,
- const struct drm_mode_fb_cmd2 *, struct nouveau_bo *);
+int nouveau_framebuffer_new(struct drm_device *,
+ const struct drm_mode_fb_cmd2 *,
+ struct nouveau_bo *, struct nouveau_framebuffer **);
struct nouveau_page_flip_state {
struct list_head head;
@@ -39,9 +40,6 @@ struct nouveau_display {
int (*init)(struct drm_device *);
void (*fini)(struct drm_device *);
- int (*fb_ctor)(struct drm_framebuffer *);
- void (*fb_dtor)(struct drm_framebuffer *);
-
struct nvif_object disp;
struct drm_property *dithering_mode;
@@ -52,6 +50,8 @@ struct nouveau_display {
/* not really hue and saturation: */
struct drm_property *vibrant_hue_property;
struct drm_property *color_vibrance_property;
+
+ struct drm_atomic_state *suspend;
};
static inline struct nouveau_display *
@@ -63,7 +63,7 @@ nouveau_display(struct drm_device *dev)
int nouveau_display_create(struct drm_device *dev);
void nouveau_display_destroy(struct drm_device *dev);
int nouveau_display_init(struct drm_device *dev);
-void nouveau_display_fini(struct drm_device *dev);
+void nouveau_display_fini(struct drm_device *dev, bool suspend);
int nouveau_display_suspend(struct drm_device *dev, bool runtime);
void nouveau_display_resume(struct drm_device *dev, bool runtime);
int nouveau_display_vblank_enable(struct drm_device *, unsigned int);
@@ -103,4 +103,7 @@ nouveau_backlight_exit(struct drm_device *dev) {
}
#endif
+struct drm_framebuffer *
+nouveau_user_framebuffer_create(struct drm_device *, struct drm_file *,
+ const struct drm_mode_fb_cmd2 *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 87d52d36f4fc..0d052e1660f8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -30,6 +30,13 @@
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
+#include <nvif/class.h>
+#include <nvif/cl5070.h>
+
+MODULE_PARM_DESC(mst, "Enable DisplayPort multi-stream (default: enabled)");
+static int nouveau_mst = 1;
+module_param_named(mst, nouveau_mst, int, 0400);
+
static void
nouveau_dp_probe_oui(struct drm_device *dev, struct nvkm_i2c_aux *aux, u8 *dpcd)
{
@@ -55,14 +62,14 @@ nouveau_dp_detect(struct nouveau_encoder *nv_encoder)
struct drm_device *dev = nv_encoder->base.base.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_i2c_aux *aux;
- u8 *dpcd = nv_encoder->dp.dpcd;
+ u8 dpcd[8];
int ret;
aux = nv_encoder->aux;
if (!aux)
return -ENODEV;
- ret = nvkm_rdaux(aux, DP_DPCD_REV, dpcd, 8);
+ ret = nvkm_rdaux(aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
if (ret)
return ret;
@@ -84,5 +91,11 @@ nouveau_dp_detect(struct nouveau_encoder *nv_encoder)
nv_encoder->dp.link_nr, nv_encoder->dp.link_bw);
nouveau_dp_probe_oui(dev, aux, dpcd);
- return 0;
+
+ ret = nv50_mstm_detect(nv_encoder->dp.mstm, dpcd, nouveau_mst);
+ if (ret == 1)
+ return NOUVEAU_DP_MST;
+ if (ret == 0)
+ return NOUVEAU_DP_SST;
+ return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 6adf94789417..9876e6fcfcf0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -519,7 +519,7 @@ nouveau_drm_unload(struct drm_device *dev)
nouveau_debugfs_fini(drm);
if (dev->mode_config.num_crtc)
- nouveau_display_fini(dev);
+ nouveau_display_fini(dev, false);
nouveau_display_destroy(dev);
nouveau_bios_takedown(dev);
@@ -1037,6 +1037,7 @@ static void nouveau_display_options(void)
DRM_DEBUG_DRIVER("... modeset : %d\n", nouveau_modeset);
DRM_DEBUG_DRIVER("... runpm : %d\n", nouveau_runtime_pm);
DRM_DEBUG_DRIVER("... vram_pushbuf : %d\n", nouveau_vram_pushbuf);
+ DRM_DEBUG_DRIVER("... hdmimhz : %d\n", nouveau_hdmimhz);
}
static const struct dev_pm_ops nouveau_pm_ops = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index c0e2b3207503..4cd47bae73c7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -204,6 +204,10 @@ void nouveau_drm_device_remove(struct drm_device *dev);
if (unlikely(drm_debug & DRM_UT_DRIVER)) \
NV_PRINTK(info, &(drm)->client, f, ##a); \
} while(0)
+#define NV_ATOMIC(drm,f,a...) do { \
+ if (unlikely(drm_debug & DRM_UT_ATOMIC)) \
+ NV_PRINTK(info, &(drm)->client, f, ##a); \
+} while(0)
extern int nouveau_modeset;
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index ee6a6d3fc80f..198e5f27682f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -30,6 +30,7 @@
#include <subdev/bios/dcb.h>
#include <drm/drm_encoder_slave.h>
+#include <drm/drm_dp_mst_helper.h>
#include "dispnv04/disp.h"
#define NV_DPMS_CLEARED 0x80
@@ -57,15 +58,16 @@ struct nouveau_encoder {
union {
struct {
- u8 dpcd[8];
+ struct nv50_mstm *mstm;
int link_nr;
int link_bw;
- u32 datarate;
} dp;
};
void (*enc_save)(struct drm_encoder *encoder);
void (*enc_restore)(struct drm_encoder *encoder);
+ void (*update)(struct nouveau_encoder *, u8 head,
+ struct drm_display_mode *, u8 proto, u8 depth);
};
struct nouveau_encoder *
@@ -90,9 +92,17 @@ get_slave_funcs(struct drm_encoder *enc)
}
/* nouveau_dp.c */
+enum nouveau_dp_status {
+ NOUVEAU_DP_SST,
+ NOUVEAU_DP_MST,
+};
+
int nouveau_dp_detect(struct nouveau_encoder *);
struct nouveau_connector *
nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
+int nv50_mstm_detect(struct nv50_mstm *, u8 dpcd[8], int allow);
+void nv50_mstm_remove(struct nv50_mstm *);
+void nv50_mstm_service(struct nv50_mstm *);
#endif /* __NOUVEAU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 9f5692726c16..2f2a3dcd4ad7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -58,7 +58,7 @@ static void
nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
struct nvif_device *device = &drm->device;
int ret;
@@ -90,7 +90,7 @@ static void
nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
{
struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
struct nvif_device *device = &drm->device;
int ret;
@@ -122,7 +122,7 @@ static void
nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
struct nvif_device *device = &drm->device;
int ret;
@@ -154,7 +154,7 @@ static int
nouveau_fbcon_sync(struct fb_info *info)
{
struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
struct nouveau_channel *chan = drm->channel;
int ret;
@@ -181,7 +181,7 @@ static int
nouveau_fbcon_open(struct fb_info *info, int user)
{
struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
int ret = pm_runtime_get_sync(drm->dev->dev);
if (ret < 0 && ret != -EACCES)
return ret;
@@ -192,42 +192,30 @@ static int
nouveau_fbcon_release(struct fb_info *info, int user)
{
struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
pm_runtime_put(drm->dev->dev);
return 0;
}
static struct fb_ops nouveau_fbcon_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_open = nouveau_fbcon_open,
.fb_release = nouveau_fbcon_release,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = nouveau_fbcon_fillrect,
.fb_copyarea = nouveau_fbcon_copyarea,
.fb_imageblit = nouveau_fbcon_imageblit,
.fb_sync = nouveau_fbcon_sync,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
static struct fb_ops nouveau_fbcon_sw_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_open = nouveau_fbcon_open,
.fb_release = nouveau_fbcon_release,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
void
@@ -333,16 +321,15 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
{
struct nouveau_fbdev *fbcon =
container_of(helper, struct nouveau_fbdev, helper);
- struct drm_device *dev = fbcon->dev;
+ struct drm_device *dev = fbcon->helper.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->device;
struct fb_info *info;
- struct drm_framebuffer *fb;
- struct nouveau_framebuffer *nouveau_fb;
+ struct nouveau_framebuffer *fb;
struct nouveau_channel *chan;
struct nouveau_bo *nvbo;
struct drm_mode_fb_cmd2 mode_cmd;
- int size, ret;
+ int ret;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
@@ -353,16 +340,17 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
- size = mode_cmd.pitches[0] * mode_cmd.height;
- size = roundup(size, PAGE_SIZE);
-
- ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
- 0, 0x0000, &nvbo);
+ ret = nouveau_gem_new(dev, mode_cmd.pitches[0] * mode_cmd.height,
+ 0, NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, &nvbo);
if (ret) {
NV_ERROR(drm, "failed to allocate framebuffer\n");
goto out;
}
+ ret = nouveau_framebuffer_new(dev, &mode_cmd, nvbo, &fb);
+ if (ret)
+ goto out_unref;
+
ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
if (ret) {
NV_ERROR(drm, "failed to pin fb: %d\n", ret);
@@ -377,8 +365,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
chan = nouveau_nofbaccel ? NULL : drm->channel;
if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
- ret = nouveau_bo_vma_add(nvbo, drm->client.vm,
- &fbcon->nouveau_fb.vma);
+ ret = nouveau_bo_vma_add(nvbo, drm->client.vm, &fb->vma);
if (ret) {
NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
chan = NULL;
@@ -394,13 +381,8 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
info->par = fbcon;
- nouveau_framebuffer_init(dev, &fbcon->nouveau_fb, &mode_cmd, nvbo);
-
- nouveau_fb = &fbcon->nouveau_fb;
- fb = &nouveau_fb->base;
-
/* setup helper */
- fbcon->helper.fb = fb;
+ fbcon->helper.fb = &fb->base;
strcpy(info->fix.id, "nouveaufb");
if (!chan)
@@ -411,14 +393,14 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
FBINFO_HWACCEL_IMAGEBLIT;
info->flags |= FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &nouveau_fbcon_sw_ops;
- info->fix.smem_start = nvbo->bo.mem.bus.base +
- nvbo->bo.mem.bus.offset;
- info->fix.smem_len = size;
+ info->fix.smem_start = fb->nvbo->bo.mem.bus.base +
+ fb->nvbo->bo.mem.bus.offset;
+ info->fix.smem_len = fb->nvbo->bo.mem.num_pages << PAGE_SHIFT;
- info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
- info->screen_size = size;
+ info->screen_base = nvbo_kmap_obj_iovirtual(fb->nvbo);
+ info->screen_size = fb->nvbo->bo.mem.num_pages << PAGE_SHIFT;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->base.pitches[0], fb->base.depth);
drm_fb_helper_fill_var(info, &fbcon->helper, sizes->fb_width, sizes->fb_height);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -429,20 +411,19 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
/* To allow resizing without swapping buffers */
NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n",
- nouveau_fb->base.width, nouveau_fb->base.height,
- nvbo->bo.offset, nvbo);
+ fb->base.width, fb->base.height, fb->nvbo->bo.offset, nvbo);
vga_switcheroo_client_fb_set(dev->pdev, info);
return 0;
out_unlock:
if (chan)
- nouveau_bo_vma_del(nvbo, &fbcon->nouveau_fb.vma);
- nouveau_bo_unmap(nvbo);
+ nouveau_bo_vma_del(fb->nvbo, &fb->vma);
+ nouveau_bo_unmap(fb->nvbo);
out_unpin:
- nouveau_bo_unpin(nvbo);
+ nouveau_bo_unpin(fb->nvbo);
out_unref:
- nouveau_bo_ref(NULL, &nvbo);
+ nouveau_bo_ref(NULL, &fb->nvbo);
out:
return ret;
}
@@ -458,28 +439,26 @@ nouveau_fbcon_output_poll_changed(struct drm_device *dev)
static int
nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
{
- struct nouveau_framebuffer *nouveau_fb = &fbcon->nouveau_fb;
+ struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fbcon->helper.fb);
drm_fb_helper_unregister_fbi(&fbcon->helper);
drm_fb_helper_release_fbi(&fbcon->helper);
+ drm_fb_helper_fini(&fbcon->helper);
if (nouveau_fb->nvbo) {
- nouveau_bo_unmap(nouveau_fb->nvbo);
nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma);
+ nouveau_bo_unmap(nouveau_fb->nvbo);
nouveau_bo_unpin(nouveau_fb->nvbo);
- drm_gem_object_unreference_unlocked(&nouveau_fb->nvbo->gem);
- nouveau_fb->nvbo = NULL;
+ drm_framebuffer_unreference(&nouveau_fb->base);
}
- drm_fb_helper_fini(&fbcon->helper);
- drm_framebuffer_unregister_private(&nouveau_fb->base);
- drm_framebuffer_cleanup(&nouveau_fb->base);
+
return 0;
}
void nouveau_fbcon_gpu_lockup(struct fb_info *info)
{
struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
NV_ERROR(drm, "GPU lockup - switching to software fbcon\n");
info->flags |= FBINFO_HWACCEL_DISABLED;
@@ -522,7 +501,6 @@ nouveau_fbcon_init(struct drm_device *dev)
if (!fbcon)
return -ENOMEM;
- fbcon->dev = dev;
drm->fbcon = fbcon;
drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
@@ -545,7 +523,8 @@ nouveau_fbcon_init(struct drm_device *dev)
preferred_bpp = 32;
/* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(dev);
+ if (!dev->mode_config.funcs->atomic_commit)
+ drm_helper_disable_unused_functions(dev);
ret = drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index ca77ad001978..e2bca729721e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -33,8 +33,6 @@
struct nouveau_fbdev {
struct drm_fb_helper helper;
- struct nouveau_framebuffer nouveau_fb;
- struct drm_device *dev;
unsigned int saved_flags;
struct nvif_object surf2d;
struct nvif_object clip;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index e9529ee6bc23..f2f348f0160c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -586,5 +586,5 @@ static const struct dma_fence_ops nouveau_fence_ops_uevent = {
.enable_signaling = nouveau_fence_enable_signaling,
.signaled = nouveau_fence_is_signaled,
.wait = dma_fence_default_wait,
- .release = NULL
+ .release = nouveau_fence_release
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 41f3c019e534..ccdce1b4eec4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -92,7 +92,6 @@ struct nv84_fence_chan {
struct nouveau_fence_chan base;
struct nvkm_vma vma;
struct nvkm_vma vma_gart;
- struct nvkm_vma dispc_vma[4];
};
struct nv84_fence_priv {
@@ -102,7 +101,6 @@ struct nv84_fence_priv {
u32 *suspend;
};
-u64 nv84_fence_crtc(struct nouveau_channel *, int);
int nv84_fence_context_new(struct nouveau_channel *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 7f083c95f422..201b52b750dd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -369,7 +369,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
int trycnt = 0;
- int ret, i;
+ int ret = -EINVAL, i;
struct nouveau_bo *res_bo = NULL;
LIST_HEAD(gart_list);
LIST_HEAD(vram_list);
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 1825dbc33192..a6dbe8258040 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -398,6 +398,9 @@ nouveau_ttm_init(struct nouveau_drm *drm)
/* VRAM init */
drm->gem.vram_available = drm->device.info.ram_user;
+ arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
+ device->func->resource_size(device, 1));
+
ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
drm->gem.vram_available >> PAGE_SHIFT);
if (ret) {
@@ -430,6 +433,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
+ struct nvkm_device *device = nvxx_device(&drm->device);
+
ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
@@ -439,4 +444,7 @@ nouveau_ttm_fini(struct nouveau_drm *drm)
arch_phys_wc_del(drm->ttm.mtrr);
drm->ttm.mtrr = 0;
+ arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
+ device->func->resource_size(device, 1));
+
}
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index da8fd5ff9d0f..6a2b187e3c3b 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -30,7 +30,7 @@ int
nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
int ret;
@@ -50,7 +50,7 @@ int
nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
int ret;
@@ -77,7 +77,7 @@ int
nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
uint32_t fg;
uint32_t bg;
@@ -133,7 +133,7 @@ int
nv04_fbcon_accel_init(struct fb_info *info)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct drm_device *dev = nfbdev->dev;
+ struct drm_device *dev = nfbdev->helper.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
struct nvif_device *device = &drm->device;
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index f99fcf56928a..2998bde29211 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -57,16 +57,13 @@ void
nv10_fence_context_del(struct nouveau_channel *chan)
{
struct nv10_fence_chan *fctx = chan->fence;
- int i;
nouveau_fence_context_del(&fctx->base);
- for (i = 0; i < ARRAY_SIZE(fctx->head); i++)
- nvif_object_fini(&fctx->head[i]);
nvif_object_fini(&fctx->sema);
chan->fence = NULL;
nouveau_fence_context_free(&fctx->base);
}
-int
+static int
nv10_fence_context_new(struct nouveau_channel *chan)
{
struct nv10_fence_chan *fctx;
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.h b/drivers/gpu/drm/nouveau/nv10_fence.h
index a87259f3983a..b7a508585304 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.h
+++ b/drivers/gpu/drm/nouveau/nv10_fence.h
@@ -7,7 +7,6 @@
struct nv10_fence_chan {
struct nouveau_fence_chan base;
struct nvif_object sema;
- struct nvif_object head[4];
};
struct nv10_fence_priv {
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 7d0edcbcfca7..a9855a4ec532 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -25,10 +25,12 @@
#include <linux/dma-mapping.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_plane_helper.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
@@ -38,6 +40,7 @@
#include <nvif/cl507c.h>
#include <nvif/cl507d.h>
#include <nvif/cl507e.h>
+#include <nvif/event.h>
#include "nouveau_drv.h"
#include "nouveau_dma.h"
@@ -46,6 +49,7 @@
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
#include "nouveau_fence.h"
+#include "nouveau_fbcon.h"
#include "nv50_display.h"
#define EVO_DMA_NR 9
@@ -61,6 +65,227 @@
#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
#define EVO_FLIP_SEM0(c) EVO_SYNC((c) + 1, 0x00)
#define EVO_FLIP_SEM1(c) EVO_SYNC((c) + 1, 0x10)
+#define EVO_FLIP_NTFY0(c) EVO_SYNC((c) + 1, 0x20)
+#define EVO_FLIP_NTFY1(c) EVO_SYNC((c) + 1, 0x30)
+
+/******************************************************************************
+ * Atomic state
+ *****************************************************************************/
+#define nv50_atom(p) container_of((p), struct nv50_atom, state)
+
+struct nv50_atom {
+ struct drm_atomic_state state;
+
+ struct list_head outp;
+ bool lock_core;
+ bool flush_disable;
+};
+
+struct nv50_outp_atom {
+ struct list_head head;
+
+ struct drm_encoder *encoder;
+ bool flush_disable;
+
+ union {
+ struct {
+ bool ctrl:1;
+ };
+ u8 mask;
+ } clr;
+
+ union {
+ struct {
+ bool ctrl:1;
+ };
+ u8 mask;
+ } set;
+};
+
+#define nv50_head_atom(p) container_of((p), struct nv50_head_atom, state)
+
+struct nv50_head_atom {
+ struct drm_crtc_state state;
+
+ struct {
+ u16 iW;
+ u16 iH;
+ u16 oW;
+ u16 oH;
+ } view;
+
+ struct nv50_head_mode {
+ bool interlace;
+ u32 clock;
+ struct {
+ u16 active;
+ u16 synce;
+ u16 blanke;
+ u16 blanks;
+ } h;
+ struct {
+ u32 active;
+ u16 synce;
+ u16 blanke;
+ u16 blanks;
+ u16 blank2s;
+ u16 blank2e;
+ u16 blankus;
+ } v;
+ } mode;
+
+ struct {
+ u32 handle;
+ u64 offset:40;
+ } lut;
+
+ struct {
+ bool visible;
+ u32 handle;
+ u64 offset:40;
+ u8 format;
+ u8 kind:7;
+ u8 layout:1;
+ u8 block:4;
+ u32 pitch:20;
+ u16 x;
+ u16 y;
+ u16 w;
+ u16 h;
+ } core;
+
+ struct {
+ bool visible;
+ u32 handle;
+ u64 offset:40;
+ u8 layout:1;
+ u8 format:1;
+ } curs;
+
+ struct {
+ u8 depth;
+ u8 cpp;
+ u16 x;
+ u16 y;
+ u16 w;
+ u16 h;
+ } base;
+
+ struct {
+ u8 cpp;
+ } ovly;
+
+ struct {
+ bool enable:1;
+ u8 bits:2;
+ u8 mode:4;
+ } dither;
+
+ struct {
+ struct {
+ u16 cos:12;
+ u16 sin:12;
+ } sat;
+ } procamp;
+
+ union {
+ struct {
+ bool core:1;
+ bool curs:1;
+ };
+ u8 mask;
+ } clr;
+
+ union {
+ struct {
+ bool core:1;
+ bool curs:1;
+ bool view:1;
+ bool mode:1;
+ bool base:1;
+ bool ovly:1;
+ bool dither:1;
+ bool procamp:1;
+ };
+ u16 mask;
+ } set;
+};
+
+static inline struct nv50_head_atom *
+nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc)
+{
+ struct drm_crtc_state *statec = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(statec))
+ return (void *)statec;
+ return nv50_head_atom(statec);
+}
+
+#define nv50_wndw_atom(p) container_of((p), struct nv50_wndw_atom, state)
+
+struct nv50_wndw_atom {
+ struct drm_plane_state state;
+ u8 interval;
+
+ struct drm_rect clip;
+
+ struct {
+ u32 handle;
+ u16 offset:12;
+ bool awaken:1;
+ } ntfy;
+
+ struct {
+ u32 handle;
+ u16 offset:12;
+ u32 acquire;
+ u32 release;
+ } sema;
+
+ struct {
+ u8 enable:2;
+ } lut;
+
+ struct {
+ u8 mode:2;
+ u8 interval:4;
+
+ u8 format;
+ u8 kind:7;
+ u8 layout:1;
+ u8 block:4;
+ u32 pitch:20;
+ u16 w;
+ u16 h;
+
+ u32 handle;
+ u64 offset;
+ } image;
+
+ struct {
+ u16 x;
+ u16 y;
+ } point;
+
+ union {
+ struct {
+ bool ntfy:1;
+ bool sema:1;
+ bool image:1;
+ };
+ u8 mask;
+ } clr;
+
+ union {
+ struct {
+ bool ntfy:1;
+ bool sema:1;
+ bool image:1;
+ bool lut:1;
+ bool point:1;
+ };
+ u8 mask;
+ } set;
+};
/******************************************************************************
* EVO channel
@@ -133,34 +358,6 @@ nv50_pioc_create(struct nvif_device *device, struct nvif_object *disp,
}
/******************************************************************************
- * Cursor Immediate
- *****************************************************************************/
-
-struct nv50_curs {
- struct nv50_pioc base;
-};
-
-static int
-nv50_curs_create(struct nvif_device *device, struct nvif_object *disp,
- int head, struct nv50_curs *curs)
-{
- struct nv50_disp_cursor_v0 args = {
- .head = head,
- };
- static const s32 oclass[] = {
- GK104_DISP_CURSOR,
- GF110_DISP_CURSOR,
- GT214_DISP_CURSOR,
- G82_DISP_CURSOR,
- NV50_DISP_CURSOR,
- 0
- };
-
- return nv50_pioc_create(device, disp, oclass, head, &args, sizeof(args),
- &curs->base);
-}
-
-/******************************************************************************
* Overlay Immediate
*****************************************************************************/
@@ -192,6 +389,11 @@ nv50_oimm_create(struct nvif_device *device, struct nvif_object *disp,
* DMA EVO channel
*****************************************************************************/
+struct nv50_dmac_ctxdma {
+ struct list_head head;
+ struct nvif_object object;
+};
+
struct nv50_dmac {
struct nv50_chan base;
dma_addr_t handle;
@@ -199,6 +401,7 @@ struct nv50_dmac {
struct nvif_object sync;
struct nvif_object vram;
+ struct list_head ctxdma;
/* Protects against concurrent pushbuf access to this channel, lock is
* grabbed by evo_wait (if the pushbuf reservation is successful) and
@@ -207,9 +410,82 @@ struct nv50_dmac {
};
static void
+nv50_dmac_ctxdma_del(struct nv50_dmac_ctxdma *ctxdma)
+{
+ nvif_object_fini(&ctxdma->object);
+ list_del(&ctxdma->head);
+ kfree(ctxdma);
+}
+
+static struct nv50_dmac_ctxdma *
+nv50_dmac_ctxdma_new(struct nv50_dmac *dmac, struct nouveau_framebuffer *fb)
+{
+ struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
+ struct nv50_dmac_ctxdma *ctxdma;
+ const u8 kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
+ const u32 handle = 0xfb000000 | kind;
+ struct {
+ struct nv_dma_v0 base;
+ union {
+ struct nv50_dma_v0 nv50;
+ struct gf100_dma_v0 gf100;
+ struct gf119_dma_v0 gf119;
+ };
+ } args = {};
+ u32 argc = sizeof(args.base);
+ int ret;
+
+ list_for_each_entry(ctxdma, &dmac->ctxdma, head) {
+ if (ctxdma->object.handle == handle)
+ return ctxdma;
+ }
+
+ if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL)))
+ return ERR_PTR(-ENOMEM);
+ list_add(&ctxdma->head, &dmac->ctxdma);
+
+ args.base.target = NV_DMA_V0_TARGET_VRAM;
+ args.base.access = NV_DMA_V0_ACCESS_RDWR;
+ args.base.start = 0;
+ args.base.limit = drm->device.info.ram_user - 1;
+
+ if (drm->device.info.chipset < 0x80) {
+ args.nv50.part = NV50_DMA_V0_PART_256;
+ argc += sizeof(args.nv50);
+ } else
+ if (drm->device.info.chipset < 0xc0) {
+ args.nv50.part = NV50_DMA_V0_PART_256;
+ args.nv50.kind = kind;
+ argc += sizeof(args.nv50);
+ } else
+ if (drm->device.info.chipset < 0xd0) {
+ args.gf100.kind = kind;
+ argc += sizeof(args.gf100);
+ } else {
+ args.gf119.page = GF119_DMA_V0_PAGE_LP;
+ args.gf119.kind = kind;
+ argc += sizeof(args.gf119);
+ }
+
+ ret = nvif_object_init(&dmac->base.user, handle, NV_DMA_IN_MEMORY,
+ &args, argc, &ctxdma->object);
+ if (ret) {
+ nv50_dmac_ctxdma_del(ctxdma);
+ return ERR_PTR(ret);
+ }
+
+ return ctxdma;
+}
+
+static void
nv50_dmac_destroy(struct nv50_dmac *dmac, struct nvif_object *disp)
{
struct nvif_device *device = dmac->base.device;
+ struct nv50_dmac_ctxdma *ctxdma, *ctxtmp;
+
+ list_for_each_entry_safe(ctxdma, ctxtmp, &dmac->ctxdma, head) {
+ nv50_dmac_ctxdma_del(ctxdma);
+ }
nvif_object_fini(&dmac->vram);
nvif_object_fini(&dmac->sync);
@@ -278,6 +554,7 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
if (ret)
return ret;
+ INIT_LIST_HEAD(&dmac->ctxdma);
return ret;
}
@@ -381,34 +658,23 @@ nv50_ovly_create(struct nvif_device *device, struct nvif_object *disp,
struct nv50_head {
struct nouveau_crtc base;
- struct nouveau_bo *image;
- struct nv50_curs curs;
- struct nv50_sync sync;
struct nv50_ovly ovly;
struct nv50_oimm oimm;
};
#define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c))
-#define nv50_curs(c) (&nv50_head(c)->curs)
-#define nv50_sync(c) (&nv50_head(c)->sync)
#define nv50_ovly(c) (&nv50_head(c)->ovly)
#define nv50_oimm(c) (&nv50_head(c)->oimm)
#define nv50_chan(c) (&(c)->base.base)
#define nv50_vers(c) nv50_chan(c)->user.oclass
-struct nv50_fbdma {
- struct list_head head;
- struct nvif_object core;
- struct nvif_object base[4];
-};
-
struct nv50_disp {
struct nvif_object *disp;
struct nv50_mast mast;
- struct list_head fbdma;
-
struct nouveau_bo *sync;
+
+ struct mutex mutex;
};
static struct nv50_disp *
@@ -419,12 +685,6 @@ nv50_disp(struct drm_device *dev)
#define nv50_mast(d) (&nv50_disp(d)->mast)
-static struct drm_crtc *
-nv50_display_crtc_get(struct drm_encoder *encoder)
-{
- return nouveau_encoder(encoder)->crtc;
-}
-
/******************************************************************************
* EVO channel helpers
*****************************************************************************/
@@ -463,812 +723,1460 @@ evo_kick(u32 *push, void *evoc)
mutex_unlock(&dmac->lock);
}
-#if 1
-#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
-#define evo_data(p,d) *((p)++) = (d)
-#else
#define evo_mthd(p,m,s) do { \
const u32 _m = (m), _s = (s); \
- printk(KERN_ERR "%04x %d %s\n", _m, _s, __func__); \
+ if (drm_debug & DRM_UT_KMS) \
+ printk(KERN_ERR "%04x %d %s\n", _m, _s, __func__); \
*((p)++) = ((_s << 18) | _m); \
} while(0)
+
#define evo_data(p,d) do { \
const u32 _d = (d); \
- printk(KERN_ERR "\t%08x\n", _d); \
+ if (drm_debug & DRM_UT_KMS) \
+ printk(KERN_ERR "\t%08x\n", _d); \
*((p)++) = _d; \
} while(0)
-#endif
-static bool
-evo_sync_wait(void *data)
+/******************************************************************************
+ * Plane
+ *****************************************************************************/
+#define nv50_wndw(p) container_of((p), struct nv50_wndw, plane)
+
+struct nv50_wndw {
+ const struct nv50_wndw_func *func;
+ struct nv50_dmac *dmac;
+
+ struct drm_plane plane;
+
+ struct nvif_notify notify;
+ u16 ntfy;
+ u16 sema;
+ u32 data;
+};
+
+struct nv50_wndw_func {
+ void *(*dtor)(struct nv50_wndw *);
+ int (*acquire)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh);
+ void (*release)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh);
+ void (*prepare)(struct nv50_wndw *, struct nv50_head_atom *asyh,
+ struct nv50_wndw_atom *asyw);
+
+ void (*sema_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ void (*sema_clr)(struct nv50_wndw *);
+ void (*ntfy_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ void (*ntfy_clr)(struct nv50_wndw *);
+ int (*ntfy_wait_begun)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ void (*image_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ void (*image_clr)(struct nv50_wndw *);
+ void (*lut)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ void (*point)(struct nv50_wndw *, struct nv50_wndw_atom *);
+
+ u32 (*update)(struct nv50_wndw *, u32 interlock);
+};
+
+static int
+nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- if (nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000)
- return true;
- usleep_range(1, 2);
- return false;
+ if (asyw->set.ntfy)
+ return wndw->func->ntfy_wait_begun(wndw, asyw);
+ return 0;
}
-static int
-evo_sync(struct drm_device *dev)
+static u32
+nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 interlock, bool flush,
+ struct nv50_wndw_atom *asyw)
{
- struct nvif_device *device = &nouveau_drm(dev)->device;
- struct nv50_disp *disp = nv50_disp(dev);
- struct nv50_mast *mast = nv50_mast(dev);
- u32 *push = evo_wait(mast, 8);
- if (push) {
- nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x80000000 | EVO_MAST_NTFY);
- evo_mthd(push, 0x0080, 2);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_kick(push, mast);
- if (nvif_msec(device, 2000,
- if (evo_sync_wait(disp->sync))
- break;
- ) >= 0)
- return 0;
+ if (asyw->clr.sema && (!asyw->set.sema || flush))
+ wndw->func->sema_clr(wndw);
+ if (asyw->clr.ntfy && (!asyw->set.ntfy || flush))
+ wndw->func->ntfy_clr(wndw);
+ if (asyw->clr.image && (!asyw->set.image || flush))
+ wndw->func->image_clr(wndw);
+
+ return flush ? wndw->func->update(wndw, interlock) : 0;
+}
+
+static u32
+nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 interlock,
+ struct nv50_wndw_atom *asyw)
+{
+ if (interlock) {
+ asyw->image.mode = 0;
+ asyw->image.interval = 1;
}
- return -EBUSY;
+ if (asyw->set.sema ) wndw->func->sema_set (wndw, asyw);
+ if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw);
+ if (asyw->set.image) wndw->func->image_set(wndw, asyw);
+ if (asyw->set.lut ) wndw->func->lut (wndw, asyw);
+ if (asyw->set.point) wndw->func->point (wndw, asyw);
+
+ return wndw->func->update(wndw, interlock);
}
-/******************************************************************************
- * Page flipping channel
- *****************************************************************************/
-struct nouveau_bo *
-nv50_display_crtc_sema(struct drm_device *dev, int crtc)
+static void
+nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
+ struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
{
- return nv50_disp(dev)->sync;
+ struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
+ NV_ATOMIC(drm, "%s release\n", wndw->plane.name);
+ wndw->func->release(wndw, asyw, asyh);
+ asyw->ntfy.handle = 0;
+ asyw->sema.handle = 0;
}
-struct nv50_display_flip {
- struct nv50_disp *disp;
- struct nv50_sync *chan;
-};
-
-static bool
-nv50_display_flip_wait(void *data)
+static int
+nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
+ struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
{
- struct nv50_display_flip *flip = data;
- if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) ==
- flip->chan->data)
- return true;
- usleep_range(1, 2);
- return false;
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
+ struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
+ int ret;
+
+ NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);
+ asyw->clip.x1 = 0;
+ asyw->clip.y1 = 0;
+ asyw->clip.x2 = asyh->state.mode.hdisplay;
+ asyw->clip.y2 = asyh->state.mode.vdisplay;
+
+ asyw->image.w = fb->base.width;
+ asyw->image.h = fb->base.height;
+ asyw->image.kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
+ if (asyw->image.kind) {
+ asyw->image.layout = 0;
+ if (drm->device.info.chipset >= 0xc0)
+ asyw->image.block = fb->nvbo->tile_mode >> 4;
+ else
+ asyw->image.block = fb->nvbo->tile_mode;
+ asyw->image.pitch = (fb->base.pitches[0] / 4) << 4;
+ } else {
+ asyw->image.layout = 1;
+ asyw->image.block = 0;
+ asyw->image.pitch = fb->base.pitches[0];
+ }
+
+ ret = wndw->func->acquire(wndw, asyw, asyh);
+ if (ret)
+ return ret;
+
+ if (asyw->set.image) {
+ if (!(asyw->image.mode = asyw->interval ? 0 : 1))
+ asyw->image.interval = asyw->interval;
+ else
+ asyw->image.interval = 0;
+ }
+
+ return 0;
}
-void
-nv50_display_flip_stop(struct drm_crtc *crtc)
+static int
+nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
{
- struct nvif_device *device = &nouveau_drm(crtc->dev)->device;
- struct nv50_display_flip flip = {
- .disp = nv50_disp(crtc->dev),
- .chan = nv50_sync(crtc),
- };
- u32 *push;
+ struct nouveau_drm *drm = nouveau_drm(plane->dev);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state);
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
+ struct nv50_head_atom *harm = NULL, *asyh = NULL;
+ bool varm = false, asyv = false, asym = false;
+ int ret;
- push = evo_wait(flip.chan, 8);
- if (push) {
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0094, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x00c0, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, flip.chan);
+ NV_ATOMIC(drm, "%s atomic_check\n", plane->name);
+ if (asyw->state.crtc) {
+ asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
+ if (IS_ERR(asyh))
+ return PTR_ERR(asyh);
+ asym = drm_atomic_crtc_needs_modeset(&asyh->state);
+ asyv = asyh->state.active;
}
- nvif_msec(device, 2000,
- if (nv50_display_flip_wait(&flip))
- break;
- );
+ if (armw->state.crtc) {
+ harm = nv50_head_atom_get(asyw->state.state, armw->state.crtc);
+ if (IS_ERR(harm))
+ return PTR_ERR(harm);
+ varm = harm->state.crtc->state->active;
+ }
+
+ if (asyv) {
+ asyw->point.x = asyw->state.crtc_x;
+ asyw->point.y = asyw->state.crtc_y;
+ if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
+ asyw->set.point = true;
+
+ if (!varm || asym || armw->state.fb != asyw->state.fb) {
+ ret = nv50_wndw_atomic_check_acquire(wndw, asyw, asyh);
+ if (ret)
+ return ret;
+ }
+ } else
+ if (varm) {
+ nv50_wndw_atomic_check_release(wndw, asyw, harm);
+ } else {
+ return 0;
+ }
+
+ if (!asyv || asym) {
+ asyw->clr.ntfy = armw->ntfy.handle != 0;
+ asyw->clr.sema = armw->sema.handle != 0;
+ if (wndw->func->image_clr)
+ asyw->clr.image = armw->image.handle != 0;
+ asyw->set.lut = wndw->func->lut && asyv;
+ }
+
+ return 0;
}
-int
-nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- struct nouveau_channel *chan, u32 swap_interval)
+static void
+nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
{
- struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nv50_head *head = nv50_head(crtc);
- struct nv50_sync *sync = nv50_sync(crtc);
- u32 *push;
- int ret;
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb);
+ struct nouveau_drm *drm = nouveau_drm(plane->dev);
- if (crtc->primary->fb->width != fb->width ||
- crtc->primary->fb->height != fb->height)
- return -EINVAL;
+ NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
+ if (!old_state->fb)
+ return;
+
+ nouveau_bo_unpin(fb->nvbo);
+}
- swap_interval <<= 4;
- if (swap_interval == 0)
- swap_interval |= 0x100;
- if (chan == NULL)
- evo_sync(crtc->dev);
+static int
+nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
+{
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
+ struct nouveau_drm *drm = nouveau_drm(plane->dev);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
+ struct nv50_head_atom *asyh;
+ struct nv50_dmac_ctxdma *ctxdma;
+ int ret;
- push = evo_wait(sync, 128);
- if (unlikely(push == NULL))
- return -EBUSY;
+ NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
+ if (!asyw->state.fb)
+ return 0;
- if (chan && chan->user.oclass < G82_CHANNEL_GPFIFO) {
- ret = RING_SPACE(chan, 8);
- if (ret)
- return ret;
+ ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);
+ if (ret)
+ return ret;
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
- OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
- OUT_RING (chan, sync->addr ^ 0x10);
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
- OUT_RING (chan, sync->data + 1);
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
- OUT_RING (chan, sync->addr);
- OUT_RING (chan, sync->data);
- } else
- if (chan && chan->user.oclass < FERMI_CHANNEL_GPFIFO) {
- u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
- ret = RING_SPACE(chan, 12);
- if (ret)
- return ret;
+ ctxdma = nv50_dmac_ctxdma_new(wndw->dmac, fb);
+ if (IS_ERR(ctxdma)) {
+ nouveau_bo_unpin(fb->nvbo);
+ return PTR_ERR(ctxdma);
+ }
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- OUT_RING (chan, chan->vram.handle);
- BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(addr ^ 0x10));
- OUT_RING (chan, lower_32_bits(addr ^ 0x10));
- OUT_RING (chan, sync->data + 1);
- OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
- BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(addr));
- OUT_RING (chan, lower_32_bits(addr));
- OUT_RING (chan, sync->data);
- OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL);
- } else
- if (chan) {
- u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
- ret = RING_SPACE(chan, 10);
- if (ret)
- return ret;
+ asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
+ asyw->image.handle = ctxdma->object.handle;
+ asyw->image.offset = fb->nvbo->bo.offset;
- BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(addr ^ 0x10));
- OUT_RING (chan, lower_32_bits(addr ^ 0x10));
- OUT_RING (chan, sync->data + 1);
- OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG |
- NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
- BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(addr));
- OUT_RING (chan, lower_32_bits(addr));
- OUT_RING (chan, sync->data);
- OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL |
- NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
- }
-
- if (chan) {
- sync->addr ^= 0x10;
- sync->data++;
- FIRE_RING (chan);
- }
-
- /* queue the flip */
- evo_mthd(push, 0x0100, 1);
- evo_data(push, 0xfffe0000);
- evo_mthd(push, 0x0084, 1);
- evo_data(push, swap_interval);
- if (!(swap_interval & 0x00000100)) {
- evo_mthd(push, 0x00e0, 1);
- evo_data(push, 0x40000000);
- }
- evo_mthd(push, 0x0088, 4);
- evo_data(push, sync->addr);
- evo_data(push, sync->data++);
- evo_data(push, sync->data);
- evo_data(push, sync->base.sync.handle);
- evo_mthd(push, 0x00a0, 2);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x00c0, 1);
- evo_data(push, nv_fb->r_handle);
- evo_mthd(push, 0x0110, 2);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- if (nv50_vers(sync) < GF110_DISP_BASE_CHANNEL_DMA) {
- evo_mthd(push, 0x0800, 5);
- evo_data(push, nv_fb->nvbo->bo.offset >> 8);
- evo_data(push, 0);
- evo_data(push, (fb->height << 16) | fb->width);
- evo_data(push, nv_fb->r_pitch);
- evo_data(push, nv_fb->r_format);
- } else {
- evo_mthd(push, 0x0400, 5);
- evo_data(push, nv_fb->nvbo->bo.offset >> 8);
- evo_data(push, 0);
- evo_data(push, (fb->height << 16) | fb->width);
- evo_data(push, nv_fb->r_pitch);
- evo_data(push, nv_fb->r_format);
+ if (wndw->func->prepare) {
+ asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
+ if (IS_ERR(asyh))
+ return PTR_ERR(asyh);
+
+ wndw->func->prepare(wndw, asyh, asyw);
}
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, sync);
- nouveau_bo_ref(nv_fb->nvbo, &head->image);
+ return 0;
+}
+
+static const struct drm_plane_helper_funcs
+nv50_wndw_helper = {
+ .prepare_fb = nv50_wndw_prepare_fb,
+ .cleanup_fb = nv50_wndw_cleanup_fb,
+ .atomic_check = nv50_wndw_atomic_check,
+};
+
+static void
+nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
+ __drm_atomic_helper_plane_destroy_state(&asyw->state);
+ dma_fence_put(asyw->state.fence);
+ kfree(asyw);
+}
+
+static struct drm_plane_state *
+nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
+{
+ struct nv50_wndw_atom *armw = nv50_wndw_atom(plane->state);
+ struct nv50_wndw_atom *asyw;
+ if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
+ return NULL;
+ __drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
+ asyw->state.fence = NULL;
+ asyw->interval = 1;
+ asyw->sema = armw->sema;
+ asyw->ntfy = armw->ntfy;
+ asyw->image = armw->image;
+ asyw->point = armw->point;
+ asyw->lut = armw->lut;
+ asyw->clr.mask = 0;
+ asyw->set.mask = 0;
+ return &asyw->state;
+}
+
+static void
+nv50_wndw_reset(struct drm_plane *plane)
+{
+ struct nv50_wndw_atom *asyw;
+
+ if (WARN_ON(!(asyw = kzalloc(sizeof(*asyw), GFP_KERNEL))))
+ return;
+
+ if (plane->state)
+ plane->funcs->atomic_destroy_state(plane, plane->state);
+ plane->state = &asyw->state;
+ plane->state->plane = plane;
+ plane->state->rotation = DRM_ROTATE_0;
+}
+
+static void
+nv50_wndw_destroy(struct drm_plane *plane)
+{
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ void *data;
+ nvif_notify_fini(&wndw->notify);
+ data = wndw->func->dtor(wndw);
+ drm_plane_cleanup(&wndw->plane);
+ kfree(data);
+}
+
+static const struct drm_plane_funcs
+nv50_wndw = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = nv50_wndw_destroy,
+ .reset = nv50_wndw_reset,
+ .set_property = drm_atomic_helper_plane_set_property,
+ .atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
+ .atomic_destroy_state = nv50_wndw_atomic_destroy_state,
+};
+
+static void
+nv50_wndw_fini(struct nv50_wndw *wndw)
+{
+ nvif_notify_put(&wndw->notify);
+}
+
+static void
+nv50_wndw_init(struct nv50_wndw *wndw)
+{
+ nvif_notify_get(&wndw->notify);
+}
+
+static int
+nv50_wndw_ctor(const struct nv50_wndw_func *func, struct drm_device *dev,
+ enum drm_plane_type type, const char *name, int index,
+ struct nv50_dmac *dmac, const u32 *format, int nformat,
+ struct nv50_wndw *wndw)
+{
+ int ret;
+
+ wndw->func = func;
+ wndw->dmac = dmac;
+
+ ret = drm_universal_plane_init(dev, &wndw->plane, 0, &nv50_wndw, format,
+ nformat, type, "%s-%d", name, index);
+ if (ret)
+ return ret;
+
+ drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);
return 0;
}
/******************************************************************************
- * CRTC
+ * Cursor plane
*****************************************************************************/
+#define nv50_curs(p) container_of((p), struct nv50_curs, wndw)
+
+struct nv50_curs {
+ struct nv50_wndw wndw;
+ struct nvif_object chan;
+};
+
+static u32
+nv50_curs_update(struct nv50_wndw *wndw, u32 interlock)
+{
+ struct nv50_curs *curs = nv50_curs(wndw);
+ nvif_wr32(&curs->chan, 0x0080, 0x00000000);
+ return 0;
+}
+
+static void
+nv50_curs_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nv50_curs *curs = nv50_curs(wndw);
+ nvif_wr32(&curs->chan, 0x0084, (asyw->point.y << 16) | asyw->point.x);
+}
+
+static void
+nv50_curs_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh,
+ struct nv50_wndw_atom *asyw)
+{
+ asyh->curs.handle = nv50_disp(wndw->plane.dev)->mast.base.vram.handle;
+ asyh->curs.offset = asyw->image.offset;
+ asyh->set.curs = asyh->curs.visible;
+}
+
+static void
+nv50_curs_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
+{
+ asyh->curs.visible = false;
+}
+
static int
-nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
+nv50_curs_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
{
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
- struct nouveau_connector *nv_connector;
- struct drm_connector *connector;
- u32 *push, mode = 0x00;
+ int ret;
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- connector = &nv_connector->base;
- if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
- if (nv_crtc->base.primary->fb->depth > connector->display_info.bpc * 3)
- mode = DITHERING_MODE_DYNAMIC2X2;
- } else {
- mode = nv_connector->dithering_mode;
- }
+ ret = drm_plane_helper_check_state(&asyw->state, &asyw->clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
+ asyh->curs.visible = asyw->state.visible;
+ if (ret || !asyh->curs.visible)
+ return ret;
- if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
- if (connector->display_info.bpc >= 8)
- mode |= DITHERING_DEPTH_8BPC;
- } else {
- mode |= nv_connector->dithering_depth;
+ switch (asyw->state.fb->width) {
+ case 32: asyh->curs.layout = 0; break;
+ case 64: asyh->curs.layout = 1; break;
+ default:
+ return -EINVAL;
}
- push = evo_wait(mast, 4);
- if (push) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x08a0 + (nv_crtc->index * 0x0400), 1);
- evo_data(push, mode);
- } else
- if (nv50_vers(mast) < GK104_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0490 + (nv_crtc->index * 0x0300), 1);
- evo_data(push, mode);
- } else {
- evo_mthd(push, 0x04a0 + (nv_crtc->index * 0x0300), 1);
- evo_data(push, mode);
- }
+ if (asyw->state.fb->width != asyw->state.fb->height)
+ return -EINVAL;
- if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- }
- evo_kick(push, mast);
+ switch (asyw->state.fb->pixel_format) {
+ case DRM_FORMAT_ARGB8888: asyh->curs.format = 1; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
}
return 0;
}
+static void *
+nv50_curs_dtor(struct nv50_wndw *wndw)
+{
+ struct nv50_curs *curs = nv50_curs(wndw);
+ nvif_object_fini(&curs->chan);
+ return curs;
+}
+
+static const u32
+nv50_curs_format[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
+static const struct nv50_wndw_func
+nv50_curs = {
+ .dtor = nv50_curs_dtor,
+ .acquire = nv50_curs_acquire,
+ .release = nv50_curs_release,
+ .prepare = nv50_curs_prepare,
+ .point = nv50_curs_point,
+ .update = nv50_curs_update,
+};
+
static int
-nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
+nv50_curs_new(struct nouveau_drm *drm, struct nv50_head *head,
+ struct nv50_curs **pcurs)
{
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
- struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
- struct drm_crtc *crtc = &nv_crtc->base;
- struct nouveau_connector *nv_connector;
- int mode = DRM_MODE_SCALE_NONE;
- u32 oX, oY, *push;
+ static const struct nvif_mclass curses[] = {
+ { GK104_DISP_CURSOR, 0 },
+ { GF110_DISP_CURSOR, 0 },
+ { GT214_DISP_CURSOR, 0 },
+ { G82_DISP_CURSOR, 0 },
+ { NV50_DISP_CURSOR, 0 },
+ {}
+ };
+ struct nv50_disp_cursor_v0 args = {
+ .head = head->base.index,
+ };
+ struct nv50_disp *disp = nv50_disp(drm->dev);
+ struct nv50_curs *curs;
+ int cid, ret;
+
+ cid = nvif_mclass(disp->disp, curses);
+ if (cid < 0) {
+ NV_ERROR(drm, "No supported cursor immediate class\n");
+ return cid;
+ }
- /* start off at the resolution we programmed the crtc for, this
- * effectively handles NONE/FULL scaling
- */
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- if (nv_connector && nv_connector->native_mode) {
- mode = nv_connector->scaling_mode;
- if (nv_connector->scaling_full) /* non-EDID LVDS/eDP mode */
- mode = DRM_MODE_SCALE_FULLSCREEN;
+ if (!(curs = *pcurs = kzalloc(sizeof(*curs), GFP_KERNEL)))
+ return -ENOMEM;
+
+ ret = nv50_wndw_ctor(&nv50_curs, drm->dev, DRM_PLANE_TYPE_CURSOR,
+ "curs", head->base.index, &disp->mast.base,
+ nv50_curs_format, ARRAY_SIZE(nv50_curs_format),
+ &curs->wndw);
+ if (ret) {
+ kfree(curs);
+ return ret;
}
- if (mode != DRM_MODE_SCALE_NONE)
- omode = nv_connector->native_mode;
- else
- omode = umode;
+ ret = nvif_object_init(disp->disp, 0, curses[cid].oclass, &args,
+ sizeof(args), &curs->chan);
+ if (ret) {
+ NV_ERROR(drm, "curs%04x allocation failed: %d\n",
+ curses[cid].oclass, ret);
+ return ret;
+ }
- oX = omode->hdisplay;
- oY = omode->vdisplay;
- if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
- oY *= 2;
+ return 0;
+}
- /* add overscan compensation if necessary, will keep the aspect
- * ratio the same as the backend mode unless overridden by the
- * user setting both hborder and vborder properties.
- */
- if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
- (nv_connector->underscan == UNDERSCAN_AUTO &&
- drm_detect_hdmi_monitor(nv_connector->edid)))) {
- u32 bX = nv_connector->underscan_hborder;
- u32 bY = nv_connector->underscan_vborder;
- u32 aspect = (oY << 19) / oX;
+/******************************************************************************
+ * Primary plane
+ *****************************************************************************/
+#define nv50_base(p) container_of((p), struct nv50_base, wndw)
- if (bX) {
- oX -= (bX * 2);
- if (bY) oY -= (bY * 2);
- else oY = ((oX * aspect) + (aspect / 2)) >> 19;
- } else {
- oX -= (oX >> 4) + 32;
- if (bY) oY -= (bY * 2);
- else oY = ((oX * aspect) + (aspect / 2)) >> 19;
- }
+struct nv50_base {
+ struct nv50_wndw wndw;
+ struct nv50_sync chan;
+ int id;
+};
+
+static int
+nv50_base_notify(struct nvif_notify *notify)
+{
+ return NVIF_NOTIFY_KEEP;
+}
+
+static void
+nv50_base_lut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nv50_base *base = nv50_base(wndw);
+ u32 *push;
+ if ((push = evo_wait(&base->chan, 2))) {
+ evo_mthd(push, 0x00e0, 1);
+ evo_data(push, asyw->lut.enable << 30);
+ evo_kick(push, &base->chan);
}
+}
- /* handle CENTER/ASPECT scaling, taking into account the areas
- * removed already for overscan compensation
- */
- switch (mode) {
- case DRM_MODE_SCALE_CENTER:
- oX = min((u32)umode->hdisplay, oX);
- oY = min((u32)umode->vdisplay, oY);
- /* fall-through */
- case DRM_MODE_SCALE_ASPECT:
- if (oY < oX) {
- u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
- oX = ((oY * aspect) + (aspect / 2)) >> 19;
- } else {
- u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
- oY = ((oX * aspect) + (aspect / 2)) >> 19;
- }
- break;
- default:
- break;
+static void
+nv50_base_image_clr(struct nv50_wndw *wndw)
+{
+ struct nv50_base *base = nv50_base(wndw);
+ u32 *push;
+ if ((push = evo_wait(&base->chan, 4))) {
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x00c0, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, &base->chan);
}
+}
- push = evo_wait(mast, 8);
- if (push) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- /*XXX: SCALE_CTRL_ACTIVE??? */
- evo_mthd(push, 0x08d8 + (nv_crtc->index * 0x400), 2);
- evo_data(push, (oY << 16) | oX);
- evo_data(push, (oY << 16) | oX);
- evo_mthd(push, 0x08a4 + (nv_crtc->index * 0x400), 1);
+static void
+nv50_base_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nv50_base *base = nv50_base(wndw);
+ const s32 oclass = base->chan.base.base.user.oclass;
+ u32 *push;
+ if ((push = evo_wait(&base->chan, 10))) {
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, (asyw->image.mode << 8) |
+ (asyw->image.interval << 4));
+ evo_mthd(push, 0x00c0, 1);
+ evo_data(push, asyw->image.handle);
+ if (oclass < G82_DISP_BASE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0800, 5);
+ evo_data(push, asyw->image.offset >> 8);
+ evo_data(push, 0x00000000);
+ evo_data(push, (asyw->image.h << 16) | asyw->image.w);
+ evo_data(push, (asyw->image.layout << 20) |
+ asyw->image.pitch |
+ asyw->image.block);
+ evo_data(push, (asyw->image.kind << 16) |
+ (asyw->image.format << 8));
+ } else
+ if (oclass < GF110_DISP_BASE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0800, 5);
+ evo_data(push, asyw->image.offset >> 8);
evo_data(push, 0x00000000);
- evo_mthd(push, 0x08c8 + (nv_crtc->index * 0x400), 1);
- evo_data(push, umode->vdisplay << 16 | umode->hdisplay);
+ evo_data(push, (asyw->image.h << 16) | asyw->image.w);
+ evo_data(push, (asyw->image.layout << 20) |
+ asyw->image.pitch |
+ asyw->image.block);
+ evo_data(push, asyw->image.format << 8);
} else {
- evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
- evo_data(push, (oY << 16) | oX);
- evo_data(push, (oY << 16) | oX);
- evo_data(push, (oY << 16) | oX);
- evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
+ evo_mthd(push, 0x0400, 5);
+ evo_data(push, asyw->image.offset >> 8);
evo_data(push, 0x00000000);
- evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
- evo_data(push, umode->vdisplay << 16 | umode->hdisplay);
+ evo_data(push, (asyw->image.h << 16) | asyw->image.w);
+ evo_data(push, (asyw->image.layout << 24) |
+ asyw->image.pitch |
+ asyw->image.block);
+ evo_data(push, asyw->image.format << 8);
}
+ evo_kick(push, &base->chan);
+ }
+}
- evo_kick(push, mast);
+static void
+nv50_base_ntfy_clr(struct nv50_wndw *wndw)
+{
+ struct nv50_base *base = nv50_base(wndw);
+ u32 *push;
+ if ((push = evo_wait(&base->chan, 2))) {
+ evo_mthd(push, 0x00a4, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, &base->chan);
+ }
+}
- if (update) {
- nv50_display_flip_stop(crtc);
- nv50_display_flip_next(crtc, crtc->primary->fb,
- NULL, 1);
- }
+static void
+nv50_base_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nv50_base *base = nv50_base(wndw);
+ u32 *push;
+ if ((push = evo_wait(&base->chan, 3))) {
+ evo_mthd(push, 0x00a0, 2);
+ evo_data(push, (asyw->ntfy.awaken << 30) | asyw->ntfy.offset);
+ evo_data(push, asyw->ntfy.handle);
+ evo_kick(push, &base->chan);
}
+}
- return 0;
+static void
+nv50_base_sema_clr(struct nv50_wndw *wndw)
+{
+ struct nv50_base *base = nv50_base(wndw);
+ u32 *push;
+ if ((push = evo_wait(&base->chan, 2))) {
+ evo_mthd(push, 0x0094, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, &base->chan);
+ }
}
-static int
-nv50_crtc_set_raster_vblank_dmi(struct nouveau_crtc *nv_crtc, u32 usec)
+static void
+nv50_base_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ struct nv50_base *base = nv50_base(wndw);
u32 *push;
+ if ((push = evo_wait(&base->chan, 5))) {
+ evo_mthd(push, 0x0088, 4);
+ evo_data(push, asyw->sema.offset);
+ evo_data(push, asyw->sema.acquire);
+ evo_data(push, asyw->sema.release);
+ evo_data(push, asyw->sema.handle);
+ evo_kick(push, &base->chan);
+ }
+}
- push = evo_wait(mast, 8);
- if (!push)
- return -ENOMEM;
+static u32
+nv50_base_update(struct nv50_wndw *wndw, u32 interlock)
+{
+ struct nv50_base *base = nv50_base(wndw);
+ u32 *push;
- evo_mthd(push, 0x0828 + (nv_crtc->index * 0x400), 1);
- evo_data(push, usec);
- evo_kick(push, mast);
+ if (!(push = evo_wait(&base->chan, 2)))
+ return 0;
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, interlock);
+ evo_kick(push, &base->chan);
+
+ if (base->chan.base.base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA)
+ return interlock ? 2 << (base->id * 8) : 0;
+ return interlock ? 2 << (base->id * 4) : 0;
+}
+
+static int
+nv50_base_ntfy_wait_begun(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
+ struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
+ if (nvif_msec(&drm->device, 2000ULL,
+ u32 data = nouveau_bo_rd32(disp->sync, asyw->ntfy.offset / 4);
+ if ((data & 0xc0000000) == 0x40000000)
+ break;
+ usleep_range(1, 2);
+ ) < 0)
+ return -ETIMEDOUT;
return 0;
}
+static void
+nv50_base_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
+{
+ asyh->base.cpp = 0;
+}
+
static int
-nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
+nv50_base_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
{
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
- u32 *push, hue, vib;
- int adj;
+ const u32 format = asyw->state.fb->pixel_format;
+ const struct drm_format_info *info;
+ int ret;
- adj = (nv_crtc->color_vibrance > 0) ? 50 : 0;
- vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff;
- hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
+ info = drm_format_info(format);
+ if (!info || !info->depth)
+ return -EINVAL;
- push = evo_wait(mast, 16);
- if (push) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x08a8 + (nv_crtc->index * 0x400), 1);
- evo_data(push, (hue << 20) | (vib << 8));
- } else {
- evo_mthd(push, 0x0498 + (nv_crtc->index * 0x300), 1);
- evo_data(push, (hue << 20) | (vib << 8));
- }
+ ret = drm_plane_helper_check_state(&asyw->state, &asyw->clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
+ if (ret)
+ return ret;
- if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- }
- evo_kick(push, mast);
+ asyh->base.depth = info->depth;
+ asyh->base.cpp = info->cpp[0];
+ asyh->base.x = asyw->state.src.x1 >> 16;
+ asyh->base.y = asyw->state.src.y1 >> 16;
+ asyh->base.w = asyw->state.fb->width;
+ asyh->base.h = asyw->state.fb->height;
+
+ switch (format) {
+ case DRM_FORMAT_C8 : asyw->image.format = 0x1e; break;
+ case DRM_FORMAT_RGB565 : asyw->image.format = 0xe8; break;
+ case DRM_FORMAT_XRGB1555 :
+ case DRM_FORMAT_ARGB1555 : asyw->image.format = 0xe9; break;
+ case DRM_FORMAT_XRGB8888 :
+ case DRM_FORMAT_ARGB8888 : asyw->image.format = 0xcf; break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010: asyw->image.format = 0xd1; break;
+ case DRM_FORMAT_XBGR8888 :
+ case DRM_FORMAT_ABGR8888 : asyw->image.format = 0xd5; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
}
+ asyw->lut.enable = 1;
+ asyw->set.image = true;
return 0;
}
+static void *
+nv50_base_dtor(struct nv50_wndw *wndw)
+{
+ struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
+ struct nv50_base *base = nv50_base(wndw);
+ nv50_dmac_destroy(&base->chan.base, disp->disp);
+ return base;
+}
+
+static const u32
+nv50_base_format[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
+};
+
+static const struct nv50_wndw_func
+nv50_base = {
+ .dtor = nv50_base_dtor,
+ .acquire = nv50_base_acquire,
+ .release = nv50_base_release,
+ .sema_set = nv50_base_sema_set,
+ .sema_clr = nv50_base_sema_clr,
+ .ntfy_set = nv50_base_ntfy_set,
+ .ntfy_clr = nv50_base_ntfy_clr,
+ .ntfy_wait_begun = nv50_base_ntfy_wait_begun,
+ .image_set = nv50_base_image_set,
+ .image_clr = nv50_base_image_clr,
+ .lut = nv50_base_lut,
+ .update = nv50_base_update,
+};
+
static int
-nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
- int x, int y, bool update)
+nv50_base_new(struct nouveau_drm *drm, struct nv50_head *head,
+ struct nv50_base **pbase)
+{
+ struct nv50_disp *disp = nv50_disp(drm->dev);
+ struct nv50_base *base;
+ int ret;
+
+ if (!(base = *pbase = kzalloc(sizeof(*base), GFP_KERNEL)))
+ return -ENOMEM;
+ base->id = head->base.index;
+ base->wndw.ntfy = EVO_FLIP_NTFY0(base->id);
+ base->wndw.sema = EVO_FLIP_SEM0(base->id);
+ base->wndw.data = 0x00000000;
+
+ ret = nv50_wndw_ctor(&nv50_base, drm->dev, DRM_PLANE_TYPE_PRIMARY,
+ "base", base->id, &base->chan.base,
+ nv50_base_format, ARRAY_SIZE(nv50_base_format),
+ &base->wndw);
+ if (ret) {
+ kfree(base);
+ return ret;
+ }
+
+ ret = nv50_base_create(&drm->device, disp->disp, base->id,
+ disp->sync->bo.offset, &base->chan);
+ if (ret)
+ return ret;
+
+ return nvif_notify_init(&base->chan.base.base.user, nv50_base_notify,
+ false,
+ NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT,
+ &(struct nvif_notify_uevent_req) {},
+ sizeof(struct nvif_notify_uevent_req),
+ sizeof(struct nvif_notify_uevent_rep),
+ &base->wndw.notify);
+}
+
+/******************************************************************************
+ * Head
+ *****************************************************************************/
+static void
+nv50_head_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
u32 *push;
+ if ((push = evo_wait(core, 2))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
+ evo_mthd(push, 0x08a8 + (head->base.index * 0x400), 1);
+ else
+ evo_mthd(push, 0x0498 + (head->base.index * 0x300), 1);
+ evo_data(push, (asyh->procamp.sat.sin << 20) |
+ (asyh->procamp.sat.cos << 8));
+ evo_kick(push, core);
+ }
+}
- push = evo_wait(mast, 16);
- if (push) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0860 + (nv_crtc->index * 0x400), 1);
- evo_data(push, nvfb->nvbo->bo.offset >> 8);
- evo_mthd(push, 0x0868 + (nv_crtc->index * 0x400), 3);
- evo_data(push, (fb->height << 16) | fb->width);
- evo_data(push, nvfb->r_pitch);
- evo_data(push, nvfb->r_format);
- evo_mthd(push, 0x08c0 + (nv_crtc->index * 0x400), 1);
- evo_data(push, (y << 16) | x);
- if (nv50_vers(mast) > NV50_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
- evo_data(push, nvfb->r_handle);
- }
- } else {
- evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
- evo_data(push, nvfb->nvbo->bo.offset >> 8);
- evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4);
- evo_data(push, (fb->height << 16) | fb->width);
- evo_data(push, nvfb->r_pitch);
- evo_data(push, nvfb->r_format);
- evo_data(push, nvfb->r_handle);
- evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
- evo_data(push, (y << 16) | x);
- }
+static void
+nv50_head_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 *push;
+ if ((push = evo_wait(core, 2))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
+ evo_mthd(push, 0x08a0 + (head->base.index * 0x0400), 1);
+ else
+ if (core->base.user.oclass < GK104_DISP_CORE_CHANNEL_DMA)
+ evo_mthd(push, 0x0490 + (head->base.index * 0x0300), 1);
+ else
+ evo_mthd(push, 0x04a0 + (head->base.index * 0x0300), 1);
+ evo_data(push, (asyh->dither.mode << 3) |
+ (asyh->dither.bits << 1) |
+ asyh->dither.enable);
+ evo_kick(push, core);
+ }
+}
- if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
+static void
+nv50_head_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 bounds = 0;
+ u32 *push;
+
+ if (asyh->base.cpp) {
+ switch (asyh->base.cpp) {
+ case 8: bounds |= 0x00000500; break;
+ case 4: bounds |= 0x00000300; break;
+ case 2: bounds |= 0x00000100; break;
+ default:
+ WARN_ON(1);
+ break;
}
- evo_kick(push, mast);
+ bounds |= 0x00000001;
}
- nv_crtc->fb.handle = nvfb->r_handle;
- return 0;
+ if ((push = evo_wait(core, 2))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
+ evo_mthd(push, 0x0904 + head->base.index * 0x400, 1);
+ else
+ evo_mthd(push, 0x04d4 + head->base.index * 0x300, 1);
+ evo_data(push, bounds);
+ evo_kick(push, core);
+ }
}
static void
-nv50_crtc_cursor_show(struct nouveau_crtc *nv_crtc)
+nv50_head_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
- u32 *push = evo_wait(mast, 16);
- if (push) {
- if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
- evo_data(push, 0x85000000);
- evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
- } else
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
- evo_data(push, 0x85000000);
- evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
- evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
- evo_data(push, mast->base.vram.handle);
- } else {
- evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
- evo_data(push, 0x85000000);
- evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
- evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
- evo_data(push, mast->base.vram.handle);
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 bounds = 0;
+ u32 *push;
+
+ if (asyh->base.cpp) {
+ switch (asyh->base.cpp) {
+ case 8: bounds |= 0x00000500; break;
+ case 4: bounds |= 0x00000300; break;
+ case 2: bounds |= 0x00000100; break;
+ case 1: bounds |= 0x00000000; break;
+ default:
+ WARN_ON(1);
+ break;
}
- evo_kick(push, mast);
+ bounds |= 0x00000001;
+ }
+
+ if ((push = evo_wait(core, 2))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
+ evo_mthd(push, 0x0900 + head->base.index * 0x400, 1);
+ else
+ evo_mthd(push, 0x04d0 + head->base.index * 0x300, 1);
+ evo_data(push, bounds);
+ evo_kick(push, core);
}
- nv_crtc->cursor.visible = true;
}
static void
-nv50_crtc_cursor_hide(struct nouveau_crtc *nv_crtc)
+nv50_head_curs_clr(struct nv50_head *head)
{
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
- u32 *push = evo_wait(mast, 16);
- if (push) {
- if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 *push;
+ if ((push = evo_wait(core, 4))) {
+ if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
evo_data(push, 0x05000000);
} else
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
evo_data(push, 0x05000000);
- evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
+ evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
evo_data(push, 0x00000000);
} else {
- evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1);
+ evo_mthd(push, 0x0480 + head->base.index * 0x300, 1);
evo_data(push, 0x05000000);
- evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
+ evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
evo_data(push, 0x00000000);
}
- evo_kick(push, mast);
+ evo_kick(push, core);
}
- nv_crtc->cursor.visible = false;
}
static void
-nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
+nv50_head_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
-
- if (show && nv_crtc->cursor.nvbo && nv_crtc->base.enabled)
- nv50_crtc_cursor_show(nv_crtc);
- else
- nv50_crtc_cursor_hide(nv_crtc);
-
- if (update) {
- u32 *push = evo_wait(mast, 2);
- if (push) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, mast);
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 *push;
+ if ((push = evo_wait(core, 5))) {
+ if (core->base.user.oclass < G82_DISP_BASE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
+ evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
+ (asyh->curs.format << 24));
+ evo_data(push, asyh->curs.offset >> 8);
+ } else
+ if (core->base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
+ evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
+ (asyh->curs.format << 24));
+ evo_data(push, asyh->curs.offset >> 8);
+ evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
+ evo_data(push, asyh->curs.handle);
+ } else {
+ evo_mthd(push, 0x0480 + head->base.index * 0x300, 2);
+ evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
+ (asyh->curs.format << 24));
+ evo_data(push, asyh->curs.offset >> 8);
+ evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
+ evo_data(push, asyh->curs.handle);
}
+ evo_kick(push, core);
}
}
static void
-nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
+nv50_head_core_clr(struct nv50_head *head)
{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 *push;
+ if ((push = evo_wait(core, 2))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
+ evo_mthd(push, 0x0874 + head->base.index * 0x400, 1);
+ else
+ evo_mthd(push, 0x0474 + head->base.index * 0x300, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, core);
+ }
}
static void
-nv50_crtc_prepare(struct drm_crtc *crtc)
+nv50_head_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nv50_mast *mast = nv50_mast(crtc->dev);
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
u32 *push;
+ if ((push = evo_wait(core, 9))) {
+ if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
+ evo_data(push, asyh->core.offset >> 8);
+ evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
+ evo_data(push, (asyh->core.h << 16) | asyh->core.w);
+ evo_data(push, asyh->core.layout << 20 |
+ (asyh->core.pitch >> 8) << 8 |
+ asyh->core.block);
+ evo_data(push, asyh->core.kind << 16 |
+ asyh->core.format << 8);
+ evo_data(push, asyh->core.handle);
+ evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
+ evo_data(push, (asyh->core.y << 16) | asyh->core.x);
+ } else
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
+ evo_data(push, asyh->core.offset >> 8);
+ evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
+ evo_data(push, (asyh->core.h << 16) | asyh->core.w);
+ evo_data(push, asyh->core.layout << 20 |
+ (asyh->core.pitch >> 8) << 8 |
+ asyh->core.block);
+ evo_data(push, asyh->core.format << 8);
+ evo_data(push, asyh->core.handle);
+ evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
+ evo_data(push, (asyh->core.y << 16) | asyh->core.x);
+ } else {
+ evo_mthd(push, 0x0460 + head->base.index * 0x300, 1);
+ evo_data(push, asyh->core.offset >> 8);
+ evo_mthd(push, 0x0468 + head->base.index * 0x300, 4);
+ evo_data(push, (asyh->core.h << 16) | asyh->core.w);
+ evo_data(push, asyh->core.layout << 24 |
+ (asyh->core.pitch >> 8) << 8 |
+ asyh->core.block);
+ evo_data(push, asyh->core.format << 8);
+ evo_data(push, asyh->core.handle);
+ evo_mthd(push, 0x04b0 + head->base.index * 0x300, 1);
+ evo_data(push, (asyh->core.y << 16) | asyh->core.x);
+ }
+ evo_kick(push, core);
+ }
+}
- nv50_display_flip_stop(crtc);
-
- push = evo_wait(mast, 6);
- if (push) {
- if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
+static void
+nv50_head_lut_clr(struct nv50_head *head)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 *push;
+ if ((push = evo_wait(core, 4))) {
+ if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
evo_data(push, 0x40000000);
} else
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
evo_data(push, 0x40000000);
- evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
+ evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
evo_data(push, 0x00000000);
} else {
- evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
+ evo_mthd(push, 0x0440 + (head->base.index * 0x300), 1);
evo_data(push, 0x03000000);
- evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
+ evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
evo_data(push, 0x00000000);
}
-
- evo_kick(push, mast);
+ evo_kick(push, core);
}
-
- nv50_crtc_cursor_show_hide(nv_crtc, false, false);
}
static void
-nv50_crtc_commit(struct drm_crtc *crtc)
+nv50_head_lut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nv50_mast *mast = nv50_mast(crtc->dev);
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
u32 *push;
-
- push = evo_wait(mast, 32);
- if (push) {
- if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
- evo_data(push, nv_crtc->fb.handle);
- evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
+ if ((push = evo_wait(core, 7))) {
+ if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
evo_data(push, 0xc0000000);
- evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+ evo_data(push, asyh->lut.offset >> 8);
} else
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
- evo_data(push, nv_crtc->fb.handle);
- evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
evo_data(push, 0xc0000000);
- evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
- evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
- evo_data(push, mast->base.vram.handle);
+ evo_data(push, asyh->lut.offset >> 8);
+ evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
+ evo_data(push, asyh->lut.handle);
} else {
- evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
- evo_data(push, nv_crtc->fb.handle);
- evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
+ evo_mthd(push, 0x0440 + (head->base.index * 0x300), 4);
evo_data(push, 0x83000000);
- evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+ evo_data(push, asyh->lut.offset >> 8);
evo_data(push, 0x00000000);
evo_data(push, 0x00000000);
- evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
- evo_data(push, mast->base.vram.handle);
- evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
+ evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
+ evo_data(push, asyh->lut.handle);
+ }
+ evo_kick(push, core);
+ }
+}
+
+static void
+nv50_head_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ struct nv50_head_mode *m = &asyh->mode;
+ u32 *push;
+ if ((push = evo_wait(core, 14))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0804 + (head->base.index * 0x400), 2);
+ evo_data(push, 0x00800000 | m->clock);
+ evo_data(push, m->interlace ? 0x00000002 : 0x00000000);
+ evo_mthd(push, 0x0810 + (head->base.index * 0x400), 7);
+ evo_data(push, 0x00000000);
+ evo_data(push, (m->v.active << 16) | m->h.active );
+ evo_data(push, (m->v.synce << 16) | m->h.synce );
+ evo_data(push, (m->v.blanke << 16) | m->h.blanke );
+ evo_data(push, (m->v.blanks << 16) | m->h.blanks );
+ evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
+ evo_data(push, asyh->mode.v.blankus);
+ evo_mthd(push, 0x082c + (head->base.index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ } else {
+ evo_mthd(push, 0x0410 + (head->base.index * 0x300), 6);
+ evo_data(push, 0x00000000);
+ evo_data(push, (m->v.active << 16) | m->h.active );
+ evo_data(push, (m->v.synce << 16) | m->h.synce );
+ evo_data(push, (m->v.blanke << 16) | m->h.blanke );
+ evo_data(push, (m->v.blanks << 16) | m->h.blanks );
+ evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
+ evo_mthd(push, 0x042c + (head->base.index * 0x300), 2);
+ evo_data(push, 0x00000000); /* ??? */
evo_data(push, 0xffffff00);
+ evo_mthd(push, 0x0450 + (head->base.index * 0x300), 3);
+ evo_data(push, m->clock * 1000);
+ evo_data(push, 0x00200000); /* ??? */
+ evo_data(push, m->clock * 1000);
}
+ evo_kick(push, core);
+ }
+}
- evo_kick(push, mast);
+static void
+nv50_head_view(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 *push;
+ if ((push = evo_wait(core, 10))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x08a4 + (head->base.index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x08c8 + (head->base.index * 0x400), 1);
+ evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
+ evo_mthd(push, 0x08d8 + (head->base.index * 0x400), 2);
+ evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
+ evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
+ } else {
+ evo_mthd(push, 0x0494 + (head->base.index * 0x300), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x04b8 + (head->base.index * 0x300), 1);
+ evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
+ evo_mthd(push, 0x04c0 + (head->base.index * 0x300), 3);
+ evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
+ evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
+ evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
+ }
+ evo_kick(push, core);
}
+}
- nv50_crtc_cursor_show_hide(nv_crtc, true, true);
- nv50_display_flip_next(crtc, crtc->primary->fb, NULL, 1);
+static void
+nv50_head_flush_clr(struct nv50_head *head, struct nv50_head_atom *asyh, bool y)
+{
+ if (asyh->clr.core && (!asyh->set.core || y))
+ nv50_head_lut_clr(head);
+ if (asyh->clr.core && (!asyh->set.core || y))
+ nv50_head_core_clr(head);
+ if (asyh->clr.curs && (!asyh->set.curs || y))
+ nv50_head_curs_clr(head);
}
-static bool
-nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void
+nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
- return true;
+ if (asyh->set.view ) nv50_head_view (head, asyh);
+ if (asyh->set.mode ) nv50_head_mode (head, asyh);
+ if (asyh->set.core ) nv50_head_lut_set (head, asyh);
+ if (asyh->set.core ) nv50_head_core_set(head, asyh);
+ if (asyh->set.curs ) nv50_head_curs_set(head, asyh);
+ if (asyh->set.base ) nv50_head_base (head, asyh);
+ if (asyh->set.ovly ) nv50_head_ovly (head, asyh);
+ if (asyh->set.dither ) nv50_head_dither (head, asyh);
+ if (asyh->set.procamp) nv50_head_procamp (head, asyh);
}
-static int
-nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
+static void
+nv50_head_atomic_check_procamp(struct nv50_head_atom *armh,
+ struct nv50_head_atom *asyh,
+ struct nouveau_conn_atom *asyc)
{
- struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->primary->fb);
- struct nv50_head *head = nv50_head(crtc);
- int ret;
+ const int vib = asyc->procamp.color_vibrance - 100;
+ const int hue = asyc->procamp.vibrant_hue - 90;
+ const int adj = (vib > 0) ? 50 : 0;
+ asyh->procamp.sat.cos = ((vib * 2047 + adj) / 100) & 0xfff;
+ asyh->procamp.sat.sin = ((hue * 2047) / 100) & 0xfff;
+ asyh->set.procamp = true;
+}
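nv50_head_atomic_check_procamp() above converts the legacy colour-vibrance and vibrant-hue connector properties (apparently centred at 100 and 90, given the offsets in the code) into signed 12-bit saturation cosine/sine values. A minimal standalone sketch of that fixed-point conversion follows; the struct and helper names are illustrative assumptions, not part of the patch.

#include <stdio.h>

struct procamp_sat { unsigned cos, sin; };

static struct procamp_sat
procamp_from_props(int color_vibrance, int vibrant_hue)
{
	const int vib = color_vibrance - 100;	/* signed percentage */
	const int hue = vibrant_hue - 90;
	const int adj = (vib > 0) ? 50 : 0;	/* round positive values up */
	struct procamp_sat sat;

	/* Scale the percentage into a 12-bit two's-complement value. */
	sat.cos = ((vib * 2047 + adj) / 100) & 0xfff;
	sat.sin = ((hue * 2047) / 100) & 0xfff;
	return sat;
}

int main(void)
{
	/* +50% vibrance, neutral hue: expect cos=0x400, sin=0x000. */
	struct procamp_sat sat = procamp_from_props(150, 90);
	printf("cos=0x%03x sin=0x%03x\n", sat.cos, sat.sin);
	return 0;
}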
- ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM, true);
- if (ret == 0) {
- if (head->image)
- nouveau_bo_unpin(head->image);
- nouveau_bo_ref(nvfb->nvbo, &head->image);
+static void
+nv50_head_atomic_check_dither(struct nv50_head_atom *armh,
+ struct nv50_head_atom *asyh,
+ struct nouveau_conn_atom *asyc)
+{
+ struct drm_connector *connector = asyc->state.connector;
+ u32 mode = 0x00;
+
+ if (asyc->dither.mode == DITHERING_MODE_AUTO) {
+ if (asyh->base.depth > connector->display_info.bpc * 3)
+ mode = DITHERING_MODE_DYNAMIC2X2;
+ } else {
+ mode = asyc->dither.mode;
}
- return ret;
+ if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
+ if (connector->display_info.bpc >= 8)
+ mode |= DITHERING_DEPTH_8BPC;
+ } else {
+ mode |= asyc->dither.depth;
+ }
+
+ asyh->dither.enable = mode;
+ asyh->dither.bits = mode >> 1;
+ asyh->dither.mode = mode >> 3;
+ asyh->set.dither = true;
}
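The dither check above first folds the connector's dithering properties into a single value and then unpacks it into the head state, which suggests the enable bit lives in bit 0, the target depth in bits 2:1 and the pattern in the bits above. A small sketch of that unpacking follows; the bit-field widths are assumptions read off the shifts in the code, not part of the patch.

#include <stdio.h>

struct dither_state {
	unsigned enable : 1;	/* bit 0: dithering on/off */
	unsigned bits   : 2;	/* bits 2:1: target depth */
	unsigned mode   : 4;	/* bits 3 and up: dither pattern */
};

static struct dither_state
dither_unpack(unsigned mode)
{
	struct dither_state d;
	d.enable = mode;
	d.bits   = mode >> 1;
	d.mode   = mode >> 3;
	return d;
}

int main(void)
{
	struct dither_state d = dither_unpack(0x0b);	/* example value */
	printf("enable=%u bits=%u mode=%u\n", d.enable, d.bits, d.mode);
	return 0;
}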
-static int
-nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
- struct drm_display_mode *mode, int x, int y,
- struct drm_framebuffer *old_fb)
+static void
+nv50_head_atomic_check_view(struct nv50_head_atom *armh,
+ struct nv50_head_atom *asyh,
+ struct nouveau_conn_atom *asyc)
{
- struct nv50_mast *mast = nv50_mast(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nouveau_connector *nv_connector;
- u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
- u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
- u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
- u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
- u32 vblan2e = 0, vblan2s = 1, vblankus = 0;
- u32 *push;
- int ret;
-
- hactive = mode->htotal;
- hsynce = mode->hsync_end - mode->hsync_start - 1;
- hbackp = mode->htotal - mode->hsync_end;
- hblanke = hsynce + hbackp;
- hfrontp = mode->hsync_start - mode->hdisplay;
- hblanks = mode->htotal - hfrontp - 1;
-
- vactive = mode->vtotal * vscan / ilace;
- vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
- vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
- vblanke = vsynce + vbackp;
- vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
- vblanks = vactive - vfrontp - 1;
- /* XXX: Safe underestimate, even "0" works */
- vblankus = (vactive - mode->vdisplay - 2) * hactive;
- vblankus *= 1000;
- vblankus /= mode->clock;
+ struct drm_connector *connector = asyc->state.connector;
+ struct drm_display_mode *omode = &asyh->state.adjusted_mode;
+ struct drm_display_mode *umode = &asyh->state.mode;
+ int mode = asyc->scaler.mode;
+ struct edid *edid;
+
+ if (connector->edid_blob_ptr)
+ edid = (struct edid *)connector->edid_blob_ptr->data;
+ else
+ edid = NULL;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
- vblan2e = vactive + vsynce + vbackp;
- vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
- vactive = (vactive * 2) + 1;
+ if (!asyc->scaler.full) {
+ if (mode == DRM_MODE_SCALE_NONE)
+ omode = umode;
+ } else {
+ /* Non-EDID LVDS/eDP mode. */
+ mode = DRM_MODE_SCALE_FULLSCREEN;
}
- ret = nv50_crtc_swap_fbs(crtc, old_fb);
- if (ret)
- return ret;
+ asyh->view.iW = umode->hdisplay;
+ asyh->view.iH = umode->vdisplay;
+ asyh->view.oW = omode->hdisplay;
+ asyh->view.oH = omode->vdisplay;
+ if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
+ asyh->view.oH *= 2;
- push = evo_wait(mast, 64);
- if (push) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2);
- evo_data(push, 0x00800000 | mode->clock);
- evo_data(push, (ilace == 2) ? 2 : 0);
- evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 6);
- evo_data(push, 0x00000000);
- evo_data(push, (vactive << 16) | hactive);
- evo_data(push, ( vsynce << 16) | hsynce);
- evo_data(push, (vblanke << 16) | hblanke);
- evo_data(push, (vblanks << 16) | hblanks);
- evo_data(push, (vblan2e << 16) | vblan2s);
- evo_mthd(push, 0x082c + (nv_crtc->index * 0x400), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0900 + (nv_crtc->index * 0x400), 2);
- evo_data(push, 0x00000311);
- evo_data(push, 0x00000100);
+ /* Add overscan compensation if necessary; this will keep the aspect
+ * ratio the same as the backend mode unless overridden by the user
+ * setting both the hborder and vborder properties.
+ */
+ if ((asyc->scaler.underscan.mode == UNDERSCAN_ON ||
+ (asyc->scaler.underscan.mode == UNDERSCAN_AUTO &&
+ drm_detect_hdmi_monitor(edid)))) {
+ u32 bX = asyc->scaler.underscan.hborder;
+ u32 bY = asyc->scaler.underscan.vborder;
+ u32 r = (asyh->view.oH << 19) / asyh->view.oW;
+
+ if (bX) {
+ asyh->view.oW -= (bX * 2);
+ if (bY) asyh->view.oH -= (bY * 2);
+ else asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
} else {
- evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
- evo_data(push, 0x00000000);
- evo_data(push, (vactive << 16) | hactive);
- evo_data(push, ( vsynce << 16) | hsynce);
- evo_data(push, (vblanke << 16) | hblanke);
- evo_data(push, (vblanks << 16) | hblanks);
- evo_data(push, (vblan2e << 16) | vblan2s);
- evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000); /* ??? */
- evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
- evo_data(push, mode->clock * 1000);
- evo_data(push, 0x00200000); /* ??? */
- evo_data(push, mode->clock * 1000);
- evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
- evo_data(push, 0x00000311);
- evo_data(push, 0x00000100);
+ asyh->view.oW -= (asyh->view.oW >> 4) + 32;
+ if (bY) asyh->view.oH -= (bY * 2);
+ else asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
}
+ }
- evo_kick(push, mast);
+ /* Handle CENTER/ASPECT scaling, taking into account the areas
+ * removed already for overscan compensation.
+ */
+ switch (mode) {
+ case DRM_MODE_SCALE_CENTER:
+ asyh->view.oW = min((u16)umode->hdisplay, asyh->view.oW);
+ asyh->view.oH = min((u16)umode->vdisplay, asyh->view.oH);
+ /* fall-through */
+ case DRM_MODE_SCALE_ASPECT:
+ if (asyh->view.oH < asyh->view.oW) {
+ u32 r = (asyh->view.iW << 19) / asyh->view.iH;
+ asyh->view.oW = ((asyh->view.oH * r) + (r / 2)) >> 19;
+ } else {
+ u32 r = (asyh->view.iH << 19) / asyh->view.iW;
+ asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
+ }
+ break;
+ default:
+ break;
}
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- nv50_crtc_set_dither(nv_crtc, false);
- nv50_crtc_set_scale(nv_crtc, false);
+ asyh->set.view = true;
+}
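The view check above does its overscan and aspect-ratio scaling in 19-bit fixed point: a ratio r is computed as (dividend << 19) / divisor and applied with round-to-nearest as ((x * r) + (r / 2)) >> 19. A standalone worked example of the ASPECT case follows; the input and output sizes are made up for illustration and the code is not part of the patch.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Input (source) and output (viewport) sizes. */
	uint32_t iW = 1280, iH = 1024;	/* 5:4 source mode */
	uint32_t oW = 1920, oH = 1080;	/* 16:9 native panel mode */

	if (oH < oW) {
		/* Width is the free axis: derive it from the source ratio,
		 * rounding to nearest with the +r/2 term.
		 */
		uint32_t r = (iW << 19) / iH;
		oW = ((oH * r) + (r / 2)) >> 19;
	} else {
		uint32_t r = (iH << 19) / iW;
		oH = ((oW * r) + (r / 2)) >> 19;
	}

	printf("scaled viewport: %ux%u\n", oW, oH);	/* expect 1350x1080 */
	return 0;
}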
+
+static void
+nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct drm_display_mode *mode = &asyh->state.adjusted_mode;
+ u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
+ u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
+ u32 hbackp = mode->htotal - mode->hsync_end;
+ u32 vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
+ u32 hfrontp = mode->hsync_start - mode->hdisplay;
+ u32 vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+ struct nv50_head_mode *m = &asyh->mode;
+
+ m->h.active = mode->htotal;
+ m->h.synce = mode->hsync_end - mode->hsync_start - 1;
+ m->h.blanke = m->h.synce + hbackp;
+ m->h.blanks = mode->htotal - hfrontp - 1;
+
+ m->v.active = mode->vtotal * vscan / ilace;
+ m->v.synce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
+ m->v.blanke = m->v.synce + vbackp;
+ m->v.blanks = m->v.active - vfrontp - 1;
+
+ /*XXX: Safe underestimate, even "0" works */
+ m->v.blankus = (m->v.active - mode->vdisplay - 2) * m->h.active;
+ m->v.blankus *= 1000;
+ m->v.blankus /= mode->clock;
- /* G94 only accepts this after setting scale */
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA)
- nv50_crtc_set_raster_vblank_dmi(nv_crtc, vblankus);
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ m->v.blank2e = m->v.active + m->v.synce + vbackp;
+ m->v.blank2s = m->v.blank2e + (mode->vdisplay * vscan / ilace);
+ m->v.active = (m->v.active * 2) + 1;
+ m->interlace = true;
+ } else {
+ m->v.blank2e = 0;
+ m->v.blank2s = 1;
+ m->interlace = false;
+ }
+ m->clock = mode->clock;
- nv50_crtc_set_color_vibrance(nv_crtc, false);
- nv50_crtc_set_image(nv_crtc, crtc->primary->fb, x, y, false);
- return 0;
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ asyh->set.mode = true;
}
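nv50_head_atomic_check_mode() above derives the hardware blanking positions from the adjusted DRM mode and computes a deliberately underestimated vertical-blank duration in microseconds. A standalone sketch for a plain progressive mode (no interlace or doublescan, so ilace = vscan = 1) follows; the 1920x1080 timing numbers are examples, not part of the patch.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A 148.5 MHz progressive 1920x1080 mode. */
	uint32_t clock = 148500;	/* kHz */
	uint32_t hdisplay = 1920, hsync_start = 2008, hsync_end = 2052, htotal = 2200;
	uint32_t vdisplay = 1080, vsync_start = 1084, vsync_end = 1089, vtotal = 1125;

	uint32_t hbackp  = htotal - hsync_end;
	uint32_t hfrontp = hsync_start - hdisplay;
	uint32_t vbackp  = vtotal - vsync_end;
	uint32_t vfrontp = vsync_start - vdisplay;

	uint32_t h_active = htotal;
	uint32_t h_synce  = hsync_end - hsync_start - 1;
	uint32_t h_blanke = h_synce + hbackp;
	uint32_t h_blanks = htotal - hfrontp - 1;

	uint32_t v_active = vtotal;
	uint32_t v_synce  = vsync_end - vsync_start - 1;
	uint32_t v_blanke = v_synce + vbackp;
	uint32_t v_blanks = v_active - vfrontp - 1;

	/* "Safe underestimate" of the vertical blanking time in us. */
	uint64_t blankus = (uint64_t)(v_active - vdisplay - 2) * h_active;
	blankus = blankus * 1000 / clock;

	printf("h: active %u synce %u blanke %u blanks %u\n",
	       h_active, h_synce, h_blanke, h_blanks);
	printf("v: active %u synce %u blanke %u blanks %u blankus %lluus\n",
	       v_active, v_synce, v_blanke, v_blanks,
	       (unsigned long long)blankus);
	return 0;
}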
static int
-nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
struct nouveau_drm *drm = nouveau_drm(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- int ret;
+ struct nv50_disp *disp = nv50_disp(crtc->dev);
+ struct nv50_head *head = nv50_head(crtc);
+ struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
+ struct nv50_head_atom *asyh = nv50_head_atom(state);
+ struct nouveau_conn_atom *asyc = NULL;
+ struct drm_connector_state *conns;
+ struct drm_connector *conn;
+ int i;
- if (!crtc->primary->fb) {
- NV_DEBUG(drm, "No FB bound\n");
- return 0;
+ NV_ATOMIC(drm, "%s atomic_check %d\n", crtc->name, asyh->state.active);
+ if (asyh->state.active) {
+ for_each_connector_in_state(asyh->state.state, conn, conns, i) {
+ if (conns->crtc == crtc) {
+ asyc = nouveau_conn_atom(conns);
+ break;
+ }
+ }
+
+ if (armh->state.active) {
+ if (asyc) {
+ if (asyh->state.mode_changed)
+ asyc->set.scaler = true;
+ if (armh->base.depth != asyh->base.depth)
+ asyc->set.dither = true;
+ }
+ } else {
+ asyc->set.mask = ~0;
+ asyh->set.mask = ~0;
+ }
+
+ if (asyh->state.mode_changed)
+ nv50_head_atomic_check_mode(head, asyh);
+
+ if (asyc) {
+ if (asyc->set.scaler)
+ nv50_head_atomic_check_view(armh, asyh, asyc);
+ if (asyc->set.dither)
+ nv50_head_atomic_check_dither(armh, asyh, asyc);
+ if (asyc->set.procamp)
+ nv50_head_atomic_check_procamp(armh, asyh, asyc);
+ }
+
+ if ((asyh->core.visible = (asyh->base.cpp != 0))) {
+ asyh->core.x = asyh->base.x;
+ asyh->core.y = asyh->base.y;
+ asyh->core.w = asyh->base.w;
+ asyh->core.h = asyh->base.h;
+ } else
+ if ((asyh->core.visible = asyh->curs.visible)) {
+ /*XXX: We need to either find some way of having the
+ * primary base layer appear black, while still
+ * being able to display the other layers, or we
+ * need to allocate a dummy black surface here.
+ */
+ asyh->core.x = 0;
+ asyh->core.y = 0;
+ asyh->core.w = asyh->state.mode.hdisplay;
+ asyh->core.h = asyh->state.mode.vdisplay;
+ }
+ asyh->core.handle = disp->mast.base.vram.handle;
+ asyh->core.offset = 0;
+ asyh->core.format = 0xcf;
+ asyh->core.kind = 0;
+ asyh->core.layout = 1;
+ asyh->core.block = 0;
+ asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4;
+ asyh->lut.handle = disp->mast.base.vram.handle;
+ asyh->lut.offset = head->base.lut.nvbo->bo.offset;
+ asyh->set.base = armh->base.cpp != asyh->base.cpp;
+ asyh->set.ovly = armh->ovly.cpp != asyh->ovly.cpp;
+ } else {
+ asyh->core.visible = false;
+ asyh->curs.visible = false;
+ asyh->base.cpp = 0;
+ asyh->ovly.cpp = 0;
}
- ret = nv50_crtc_swap_fbs(crtc, old_fb);
- if (ret)
- return ret;
+ if (!drm_atomic_crtc_needs_modeset(&asyh->state)) {
+ if (asyh->core.visible) {
+ if (memcmp(&armh->core, &asyh->core, sizeof(asyh->core)))
+ asyh->set.core = true;
+ } else
+ if (armh->core.visible) {
+ asyh->clr.core = true;
+ }
- nv50_display_flip_stop(crtc);
- nv50_crtc_set_image(nv_crtc, crtc->primary->fb, x, y, true);
- nv50_display_flip_next(crtc, crtc->primary->fb, NULL, 1);
- return 0;
-}
+ if (asyh->curs.visible) {
+ if (memcmp(&armh->curs, &asyh->curs, sizeof(asyh->curs)))
+ asyh->set.curs = true;
+ } else
+ if (armh->curs.visible) {
+ asyh->clr.curs = true;
+ }
+ } else {
+ asyh->clr.core = armh->core.visible;
+ asyh->clr.curs = armh->curs.visible;
+ asyh->set.core = asyh->core.visible;
+ asyh->set.curs = asyh->curs.visible;
+ }
-static int
-nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int x, int y,
- enum mode_set_atomic state)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- nv50_display_flip_stop(crtc);
- nv50_crtc_set_image(nv_crtc, fb, x, y, true);
+ if (asyh->clr.mask || asyh->set.mask)
+ nv50_atom(asyh->state.state)->lock_core = true;
return 0;
}
static void
-nv50_crtc_lut_load(struct drm_crtc *crtc)
+nv50_head_lut_load(struct drm_crtc *crtc)
{
struct nv50_disp *disp = nv50_disp(crtc->dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
@@ -1292,64 +2200,95 @@ nv50_crtc_lut_load(struct drm_crtc *crtc)
}
}
-static void
-nv50_crtc_disable(struct drm_crtc *crtc)
+static int
+nv50_head_mode_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int x, int y,
+ enum mode_set_atomic state)
{
- struct nv50_head *head = nv50_head(crtc);
- evo_sync(crtc->dev);
- if (head->image)
- nouveau_bo_unpin(head->image);
- nouveau_bo_ref(NULL, &head->image);
+ WARN_ON(1);
+ return 0;
}
+static const struct drm_crtc_helper_funcs
+nv50_head_help = {
+ .mode_set_base_atomic = nv50_head_mode_set_base_atomic,
+ .load_lut = nv50_head_lut_load,
+ .atomic_check = nv50_head_atomic_check,
+};
+
+/* This is identical to the version in the atomic helpers, except that
+ * it supports non-vblanked ("async") page flips.
+ */
static int
-nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
- uint32_t handle, uint32_t width, uint32_t height)
+nv50_head_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event, u32 flags)
{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct drm_gem_object *gem = NULL;
- struct nouveau_bo *nvbo = NULL;
+ struct drm_plane *plane = crtc->primary;
+ struct drm_atomic_state *state;
+ struct drm_plane_state *plane_state;
+ struct drm_crtc_state *crtc_state;
int ret = 0;
- if (handle) {
- if (width != 64 || height != 64)
- return -EINVAL;
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state)
+ return -ENOMEM;
- gem = drm_gem_object_lookup(file_priv, handle);
- if (unlikely(!gem))
- return -ENOENT;
- nvbo = nouveau_gem_object(gem);
+ state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+retry:
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto fail;
+ }
+ crtc_state->event = event;
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto fail;
}
- if (ret == 0) {
- if (nv_crtc->cursor.nvbo)
- nouveau_bo_unpin(nv_crtc->cursor.nvbo);
- nouveau_bo_ref(nvbo, &nv_crtc->cursor.nvbo);
+ ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+ if (ret != 0)
+ goto fail;
+ drm_atomic_set_fb_for_plane(plane_state, fb);
+
+ /* Make sure we don't accidentally do a full modeset. */
+ state->allow_modeset = false;
+ if (!crtc_state->active) {
+ DRM_DEBUG_ATOMIC("[CRTC:%d] disabled, rejecting legacy flip\n",
+ crtc->base.id);
+ ret = -EINVAL;
+ goto fail;
}
- drm_gem_object_unreference_unlocked(gem);
- nv50_crtc_cursor_show_hide(nv_crtc, true, true);
+ if (flags & DRM_MODE_PAGE_FLIP_ASYNC)
+ nv50_wndw_atom(plane_state)->interval = 0;
+
+ ret = drm_atomic_nonblocking_commit(state);
+fail:
+ if (ret == -EDEADLK)
+ goto backoff;
+
+ drm_atomic_state_put(state);
return ret;
-}
-static int
-nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nv50_curs *curs = nv50_curs(crtc);
- struct nv50_chan *chan = nv50_chan(curs);
- nvif_wr32(&chan->user, 0x0084, (y << 16) | (x & 0xffff));
- nvif_wr32(&chan->user, 0x0080, 0x00000000);
+backoff:
+ drm_atomic_state_clear(state);
+ drm_atomic_legacy_backoff(state);
- nv_crtc->cursor_saved_x = x;
- nv_crtc->cursor_saved_y = y;
- return 0;
+ /*
+ * Someone might have exchanged the framebuffer while we dropped locks
+ * in the backoff code. We need to fix up the fb refcount tracking the
+ * core does for us.
+ */
+ plane->old_fb = plane->fb;
+
+ goto retry;
}
static int
-nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+nv50_head_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
uint32_t size)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
@@ -1361,47 +2300,71 @@ nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
nv_crtc->lut.b[i] = b[i];
}
- nv50_crtc_lut_load(crtc);
-
+ nv50_head_lut_load(crtc);
return 0;
}
static void
-nv50_crtc_cursor_restore(struct nouveau_crtc *nv_crtc, int x, int y)
+nv50_head_atomic_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
{
- nv50_crtc_cursor_move(&nv_crtc->base, x, y);
+ struct nv50_head_atom *asyh = nv50_head_atom(state);
+ __drm_atomic_helper_crtc_destroy_state(&asyh->state);
+ kfree(asyh);
+}
- nv50_crtc_cursor_show_hide(nv_crtc, true, true);
+static struct drm_crtc_state *
+nv50_head_atomic_duplicate_state(struct drm_crtc *crtc)
+{
+ struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
+ struct nv50_head_atom *asyh;
+ if (!(asyh = kmalloc(sizeof(*asyh), GFP_KERNEL)))
+ return NULL;
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &asyh->state);
+ asyh->view = armh->view;
+ asyh->mode = armh->mode;
+ asyh->lut = armh->lut;
+ asyh->core = armh->core;
+ asyh->curs = armh->curs;
+ asyh->base = armh->base;
+ asyh->ovly = armh->ovly;
+ asyh->dither = armh->dither;
+ asyh->procamp = armh->procamp;
+ asyh->clr.mask = 0;
+ asyh->set.mask = 0;
+ return &asyh->state;
+}
+
+static void
+__drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ if (crtc->state)
+ crtc->funcs->atomic_destroy_state(crtc, crtc->state);
+ crtc->state = state;
+ crtc->state->crtc = crtc;
}
static void
-nv50_crtc_destroy(struct drm_crtc *crtc)
+nv50_head_reset(struct drm_crtc *crtc)
+{
+ struct nv50_head_atom *asyh;
+
+ if (WARN_ON(!(asyh = kzalloc(sizeof(*asyh), GFP_KERNEL))))
+ return;
+
+ __drm_atomic_helper_crtc_reset(crtc, &asyh->state);
+}
+
+static void
+nv50_head_destroy(struct drm_crtc *crtc)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv50_disp *disp = nv50_disp(crtc->dev);
struct nv50_head *head = nv50_head(crtc);
- struct nv50_fbdma *fbdma;
-
- list_for_each_entry(fbdma, &disp->fbdma, head) {
- nvif_object_fini(&fbdma->base[nv_crtc->index]);
- }
nv50_dmac_destroy(&head->ovly.base, disp->disp);
nv50_pioc_destroy(&head->oimm.base);
- nv50_dmac_destroy(&head->sync.base, disp->disp);
- nv50_pioc_destroy(&head->curs.base);
-
- /*XXX: this shouldn't be necessary, but the core doesn't call
- * disconnect() during the cleanup paths
- */
- if (head->image)
- nouveau_bo_unpin(head->image);
- nouveau_bo_ref(NULL, &head->image);
-
- /*XXX: ditto */
- if (nv_crtc->cursor.nvbo)
- nouveau_bo_unpin(nv_crtc->cursor.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
nouveau_bo_unmap(nv_crtc->lut.nvbo);
if (nv_crtc->lut.nvbo)
@@ -1412,34 +2375,27 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
kfree(crtc);
}
-static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = {
- .dpms = nv50_crtc_dpms,
- .prepare = nv50_crtc_prepare,
- .commit = nv50_crtc_commit,
- .mode_fixup = nv50_crtc_mode_fixup,
- .mode_set = nv50_crtc_mode_set,
- .mode_set_base = nv50_crtc_mode_set_base,
- .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
- .load_lut = nv50_crtc_lut_load,
- .disable = nv50_crtc_disable,
-};
-
-static const struct drm_crtc_funcs nv50_crtc_func = {
- .cursor_set = nv50_crtc_cursor_set,
- .cursor_move = nv50_crtc_cursor_move,
- .gamma_set = nv50_crtc_gamma_set,
- .set_config = nouveau_crtc_set_config,
- .destroy = nv50_crtc_destroy,
- .page_flip = nouveau_crtc_page_flip,
+static const struct drm_crtc_funcs
+nv50_head_func = {
+ .reset = nv50_head_reset,
+ .gamma_set = nv50_head_gamma_set,
+ .destroy = nv50_head_destroy,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = nv50_head_page_flip,
+ .set_property = drm_atomic_helper_crtc_set_property,
+ .atomic_duplicate_state = nv50_head_atomic_duplicate_state,
+ .atomic_destroy_state = nv50_head_atomic_destroy_state,
};
static int
-nv50_crtc_create(struct drm_device *dev, int index)
+nv50_head_create(struct drm_device *dev, int index)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->device;
struct nv50_disp *disp = nv50_disp(dev);
struct nv50_head *head;
+ struct nv50_base *base;
+ struct nv50_curs *curs;
struct drm_crtc *crtc;
int ret, i;
@@ -1448,21 +2404,25 @@ nv50_crtc_create(struct drm_device *dev, int index)
return -ENOMEM;
head->base.index = index;
- head->base.set_dither = nv50_crtc_set_dither;
- head->base.set_scale = nv50_crtc_set_scale;
- head->base.set_color_vibrance = nv50_crtc_set_color_vibrance;
- head->base.color_vibrance = 50;
- head->base.vibrant_hue = 0;
- head->base.cursor.set_pos = nv50_crtc_cursor_restore;
for (i = 0; i < 256; i++) {
head->base.lut.r[i] = i << 8;
head->base.lut.g[i] = i << 8;
head->base.lut.b[i] = i << 8;
}
+ ret = nv50_base_new(drm, head, &base);
+ if (ret == 0)
+ ret = nv50_curs_new(drm, head, &curs);
+ if (ret) {
+ kfree(head);
+ return ret;
+ }
+
crtc = &head->base.base;
- drm_crtc_init(dev, crtc, &nv50_crtc_func);
- drm_crtc_helper_add(crtc, &nv50_crtc_hfunc);
+ drm_crtc_init_with_planes(dev, crtc, &base->wndw.plane,
+ &curs->wndw.plane, &nv50_head_func,
+ "head-%d", head->base.index);
+ drm_crtc_helper_add(crtc, &nv50_head_help);
drm_mode_crtc_set_gamma_size(crtc, 256);
ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
@@ -1481,20 +2441,6 @@ nv50_crtc_create(struct drm_device *dev, int index)
if (ret)
goto out;
- /* allocate cursor resources */
- ret = nv50_curs_create(device, disp->disp, index, &head->curs);
- if (ret)
- goto out;
-
- /* allocate page flip / sync resources */
- ret = nv50_base_create(device, disp->disp, index, disp->sync->bo.offset,
- &head->sync);
- if (ret)
- goto out;
-
- head->sync.addr = EVO_FLIP_SEM0(index);
- head->sync.data = 0x00000000;
-
/* allocate overlay resources */
ret = nv50_oimm_create(device, disp->disp, index, &head->oimm);
if (ret)
@@ -1507,43 +2453,64 @@ nv50_crtc_create(struct drm_device *dev, int index)
out:
if (ret)
- nv50_crtc_destroy(crtc);
+ nv50_head_destroy(crtc);
return ret;
}
/******************************************************************************
- * Encoder helpers
+ * Output path helpers
*****************************************************************************/
-static bool
-nv50_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static int
+nv50_outp_atomic_check_view(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct drm_display_mode *native_mode)
{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ struct drm_display_mode *mode = &crtc_state->mode;
+ struct drm_connector *connector = conn_state->connector;
+ struct nouveau_conn_atom *asyc = nouveau_conn_atom(conn_state);
+ struct nouveau_drm *drm = nouveau_drm(encoder->dev);
+
+ NV_ATOMIC(drm, "%s atomic_check\n", encoder->name);
+ asyc->scaler.full = false;
+ if (!native_mode)
+ return 0;
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (nv_connector && nv_connector->native_mode) {
- nv_connector->scaling_full = false;
- if (nv_connector->scaling_mode == DRM_MODE_SCALE_NONE) {
- switch (nv_connector->type) {
- case DCB_CONNECTOR_LVDS:
- case DCB_CONNECTOR_LVDS_SPWG:
- case DCB_CONNECTOR_eDP:
- /* force use of scaler for non-edid modes */
- if (adjusted_mode->type & DRM_MODE_TYPE_DRIVER)
- return true;
- nv_connector->scaling_full = true;
+ if (asyc->scaler.mode == DRM_MODE_SCALE_NONE) {
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_eDP:
+ /* Force use of scaler for non-EDID modes. */
+ if (adjusted_mode->type & DRM_MODE_TYPE_DRIVER)
break;
- default:
- return true;
- }
+ mode = native_mode;
+ asyc->scaler.full = true;
+ break;
+ default:
+ break;
}
+ } else {
+ mode = native_mode;
+ }
- drm_mode_copy(adjusted_mode, nv_connector->native_mode);
+ if (!drm_mode_equal(adjusted_mode, mode)) {
+ drm_mode_copy(adjusted_mode, mode);
+ crtc_state->mode_changed = true;
}
- return true;
+ return 0;
+}
+
+static int
+nv50_outp_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct nouveau_connector *nv_connector =
+ nouveau_connector(conn_state->connector);
+ return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
+ nv_connector->native_mode);
}
/******************************************************************************
@@ -1574,21 +2541,39 @@ nv50_dac_dpms(struct drm_encoder *encoder, int mode)
}
static void
-nv50_dac_commit(struct drm_encoder *encoder)
+nv50_dac_disable(struct drm_encoder *encoder)
{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ const int or = nv_encoder->or;
+ u32 *push;
+
+ if (nv_encoder->crtc) {
+ push = evo_wait(mast, 4);
+ if (push) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0400 + (or * 0x080), 1);
+ evo_data(push, 0x00000000);
+ } else {
+ evo_mthd(push, 0x0180 + (or * 0x020), 1);
+ evo_data(push, 0x00000000);
+ }
+ evo_kick(push, mast);
+ }
+ }
+
+ nv_encoder->crtc = NULL;
}
static void
-nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+nv50_dac_enable(struct drm_encoder *encoder)
{
struct nv50_mast *mast = nv50_mast(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
u32 *push;
- nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
-
push = evo_wait(mast, 8);
if (push) {
if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
@@ -1627,33 +2612,6 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
nv_encoder->crtc = encoder->crtc;
}
-static void
-nv50_dac_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nv50_mast *mast = nv50_mast(encoder->dev);
- const int or = nv_encoder->or;
- u32 *push;
-
- if (nv_encoder->crtc) {
- nv50_crtc_prepare(nv_encoder->crtc);
-
- push = evo_wait(mast, 4);
- if (push) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0400 + (or * 0x080), 1);
- evo_data(push, 0x00000000);
- } else {
- evo_mthd(push, 0x0180 + (or * 0x020), 1);
- evo_data(push, 0x00000000);
- }
- evo_kick(push, mast);
- }
- }
-
- nv_encoder->crtc = NULL;
-}
-
static enum drm_connector_status
nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
@@ -1681,6 +2639,15 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
return connector_status_connected;
}
+static const struct drm_encoder_helper_funcs
+nv50_dac_help = {
+ .dpms = nv50_dac_dpms,
+ .atomic_check = nv50_outp_atomic_check,
+ .enable = nv50_dac_enable,
+ .disable = nv50_dac_disable,
+ .detect = nv50_dac_detect
+};
+
static void
nv50_dac_destroy(struct drm_encoder *encoder)
{
@@ -1688,18 +2655,8 @@ nv50_dac_destroy(struct drm_encoder *encoder)
kfree(encoder);
}
-static const struct drm_encoder_helper_funcs nv50_dac_hfunc = {
- .dpms = nv50_dac_dpms,
- .mode_fixup = nv50_encoder_mode_fixup,
- .prepare = nv50_dac_disconnect,
- .commit = nv50_dac_commit,
- .mode_set = nv50_dac_mode_set,
- .disable = nv50_dac_disconnect,
- .get_crtc = nv50_display_crtc_get,
- .detect = nv50_dac_detect
-};
-
-static const struct drm_encoder_funcs nv50_dac_func = {
+static const struct drm_encoder_funcs
+nv50_dac_func = {
.destroy = nv50_dac_destroy,
};
@@ -1726,8 +2683,9 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
- drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type, NULL);
- drm_encoder_helper_add(encoder, &nv50_dac_hfunc);
+ drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type,
+ "dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
+ drm_encoder_helper_add(encoder, &nv50_dac_help);
drm_mode_connector_attach_encoder(connector, encoder);
return 0;
@@ -1737,7 +2695,26 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
* Audio
*****************************************************************************/
static void
-nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_sor_hda_eld_v0 eld;
+ } args = {
+ .base.version = 1,
+ .base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
+ .base.hasht = nv_encoder->dcb->hasht,
+ .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
+ (0x0100 << nv_crtc->index),
+ };
+
+ nvif_mthd(disp->disp, 0, &args, sizeof(args));
+}
+
+static void
+nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
@@ -1768,30 +2745,30 @@ nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
sizeof(args.base) + drm_eld_size(args.data));
}
+/******************************************************************************
+ * HDMI
+ *****************************************************************************/
static void
-nv50_audio_disconnect(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
+nv50_hdmi_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct {
struct nv50_disp_mthd_v1 base;
- struct nv50_disp_sor_hda_eld_v0 eld;
+ struct nv50_disp_sor_hdmi_pwr_v0 pwr;
} args = {
.base.version = 1,
- .base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
- .base.hasht = nv_encoder->dcb->hasht,
- .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
- (0x0100 << nv_crtc->index),
+ .base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
+ .base.hasht = nv_encoder->dcb->hasht,
+ .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
+ (0x0100 << nv_crtc->index),
};
nvif_mthd(disp->disp, 0, &args, sizeof(args));
}
-/******************************************************************************
- * HDMI
- *****************************************************************************/
static void
-nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
@@ -1821,26 +2798,632 @@ nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
args.pwr.max_ac_packet = max_ac_packet / 32;
nvif_mthd(disp->disp, 0, &args, sizeof(args));
- nv50_audio_mode_set(encoder, mode);
+ nv50_audio_enable(encoder, mode);
+}
+
+/******************************************************************************
+ * MST
+ *****************************************************************************/
+#define nv50_mstm(p) container_of((p), struct nv50_mstm, mgr)
+#define nv50_mstc(p) container_of((p), struct nv50_mstc, connector)
+#define nv50_msto(p) container_of((p), struct nv50_msto, encoder)
+
+struct nv50_mstm {
+ struct nouveau_encoder *outp;
+
+ struct drm_dp_mst_topology_mgr mgr;
+ struct nv50_msto *msto[4];
+
+ bool modified;
+};
+
+struct nv50_mstc {
+ struct nv50_mstm *mstm;
+ struct drm_dp_mst_port *port;
+ struct drm_connector connector;
+
+ struct drm_display_mode *native;
+ struct edid *edid;
+
+ int pbn;
+};
+
+struct nv50_msto {
+ struct drm_encoder encoder;
+
+ struct nv50_head *head;
+ struct nv50_mstc *mstc;
+ bool disabled;
+};
+
+static struct drm_dp_payload *
+nv50_msto_payload(struct nv50_msto *msto)
+{
+ struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
+ struct nv50_mstc *mstc = msto->mstc;
+ struct nv50_mstm *mstm = mstc->mstm;
+ int vcpi = mstc->port->vcpi.vcpi, i;
+
+ NV_ATOMIC(drm, "%s: vcpi %d\n", msto->encoder.name, vcpi);
+ for (i = 0; i < mstm->mgr.max_payloads; i++) {
+ struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
+ NV_ATOMIC(drm, "%s: %d: vcpi %d start 0x%02x slots 0x%02x\n",
+ mstm->outp->base.base.name, i, payload->vcpi,
+ payload->start_slot, payload->num_slots);
+ }
+
+ for (i = 0; i < mstm->mgr.max_payloads; i++) {
+ struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
+ if (payload->vcpi == vcpi)
+ return payload;
+ }
+
+ return NULL;
}
static void
-nv50_hdmi_disconnect(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
+nv50_msto_cleanup(struct nv50_msto *msto)
{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nv50_disp *disp = nv50_disp(encoder->dev);
+ struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
+ struct nv50_mstc *mstc = msto->mstc;
+ struct nv50_mstm *mstm = mstc->mstm;
+
+ NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);
+ if (mstc->port && mstc->port->vcpi.vcpi > 0 && !nv50_msto_payload(msto))
+ drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);
+ if (msto->disabled) {
+ msto->mstc = NULL;
+ msto->head = NULL;
+ msto->disabled = false;
+ }
+}
+
+static void
+nv50_msto_prepare(struct nv50_msto *msto)
+{
+ struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
+ struct nv50_mstc *mstc = msto->mstc;
+ struct nv50_mstm *mstm = mstc->mstm;
struct {
struct nv50_disp_mthd_v1 base;
- struct nv50_disp_sor_hdmi_pwr_v0 pwr;
+ struct nv50_disp_sor_dp_mst_vcpi_v0 vcpi;
} args = {
.base.version = 1,
- .base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
- .base.hasht = nv_encoder->dcb->hasht,
- .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
- (0x0100 << nv_crtc->index),
+ .base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_VCPI,
+ .base.hasht = mstm->outp->dcb->hasht,
+ .base.hashm = (0xf0ff & mstm->outp->dcb->hashm) |
+ (0x0100 << msto->head->base.index),
};
- nvif_mthd(disp->disp, 0, &args, sizeof(args));
+ NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
+ if (mstc->port && mstc->port->vcpi.vcpi > 0) {
+ struct drm_dp_payload *payload = nv50_msto_payload(msto);
+ if (payload) {
+ args.vcpi.start_slot = payload->start_slot;
+ args.vcpi.num_slots = payload->num_slots;
+ args.vcpi.pbn = mstc->port->vcpi.pbn;
+ args.vcpi.aligned_pbn = mstc->port->vcpi.aligned_pbn;
+ }
+ }
+
+ NV_ATOMIC(drm, "%s: %s: %02x %02x %04x %04x\n",
+ msto->encoder.name, msto->head->base.base.name,
+ args.vcpi.start_slot, args.vcpi.num_slots,
+ args.vcpi.pbn, args.vcpi.aligned_pbn);
+ nvif_mthd(&drm->display->disp, 0, &args, sizeof(args));
+}
+
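+/* Convert the adjusted mode and connector bpc into a PBN requirement and
+ * fail early if the topology cannot provide enough timeslots.
+ */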
+static int
+nv50_msto_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct nv50_mstc *mstc = nv50_mstc(conn_state->connector);
+ struct nv50_mstm *mstm = mstc->mstm;
+ int bpp = conn_state->connector->display_info.bpc * 3;
+ int slots;
+
+ mstc->pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, bpp);
+
+ slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
+ if (slots < 0)
+ return slots;
+
+ return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
+ mstc->native);
+}
+
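+/* Allocate a VCPI for the stream, derive the SOR protocol and colour depth
+ * from the link configuration and bpc, then attach the output to the head.
+ */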
+static void
+nv50_msto_enable(struct drm_encoder *encoder)
+{
+ struct nv50_head *head = nv50_head(encoder->crtc);
+ struct nv50_msto *msto = nv50_msto(encoder);
+ struct nv50_mstc *mstc = NULL;
+ struct nv50_mstm *mstm = NULL;
+ struct drm_connector *connector;
+ u8 proto, depth;
+ int slots;
+ bool r;
+
+ drm_for_each_connector(connector, encoder->dev) {
+ if (connector->state->best_encoder == &msto->encoder) {
+ mstc = nv50_mstc(connector);
+ mstm = mstc->mstm;
+ break;
+ }
+ }
+
+ if (WARN_ON(!mstc))
+ return;
+
+ r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, mstc->pbn, &slots);
+ WARN_ON(!r);
+
+ if (mstm->outp->dcb->sorconf.link & 1)
+ proto = 0x8;
+ else
+ proto = 0x9;
+
+ switch (mstc->connector.display_info.bpc) {
+ case 6: depth = 0x2; break;
+ case 8: depth = 0x5; break;
+ case 10:
+ default: depth = 0x6; break;
+ }
+
+ mstm->outp->update(mstm->outp, head->base.index,
+ &head->base.base.state->adjusted_mode, proto, depth);
+
+ msto->head = head;
+ msto->mstc = mstc;
+ mstm->modified = true;
+}
+
+static void
+nv50_msto_disable(struct drm_encoder *encoder)
+{
+ struct nv50_msto *msto = nv50_msto(encoder);
+ struct nv50_mstc *mstc = msto->mstc;
+ struct nv50_mstm *mstm = mstc->mstm;
+
+ if (mstc->port)
+ drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port);
+
+ mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
+ mstm->modified = true;
+ msto->disabled = true;
+}
+
+static const struct drm_encoder_helper_funcs
+nv50_msto_help = {
+ .disable = nv50_msto_disable,
+ .enable = nv50_msto_enable,
+ .atomic_check = nv50_msto_atomic_check,
+};
+
+static void
+nv50_msto_destroy(struct drm_encoder *encoder)
+{
+ struct nv50_msto *msto = nv50_msto(encoder);
+ drm_encoder_cleanup(&msto->encoder);
+ kfree(msto);
+}
+
+static const struct drm_encoder_funcs
+nv50_msto = {
+ .destroy = nv50_msto_destroy,
+};
+
+static int
+nv50_msto_new(struct drm_device *dev, u32 heads, const char *name, int id,
+ struct nv50_msto **pmsto)
+{
+ struct nv50_msto *msto;
+ int ret;
+
+ if (!(msto = *pmsto = kzalloc(sizeof(*msto), GFP_KERNEL)))
+ return -ENOMEM;
+
+ ret = drm_encoder_init(dev, &msto->encoder, &nv50_msto,
+ DRM_MODE_ENCODER_DPMST, "%s-mst-%d", name, id);
+ if (ret) {
+ kfree(*pmsto);
+ *pmsto = NULL;
+ return ret;
+ }
+
+ drm_encoder_helper_add(&msto->encoder, &nv50_msto_help);
+ msto->encoder.possible_crtcs = heads;
+ return 0;
+}
+
+static struct drm_encoder *
+nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
+ struct drm_connector_state *connector_state)
+{
+ struct nv50_head *head = nv50_head(connector_state->crtc);
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ if (mstc->port) {
+ struct nv50_mstm *mstm = mstc->mstm;
+ return &mstm->msto[head->base.index]->encoder;
+ }
+ return NULL;
+}
+
+static struct drm_encoder *
+nv50_mstc_best_encoder(struct drm_connector *connector)
+{
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ if (mstc->port) {
+ struct nv50_mstm *mstm = mstc->mstm;
+ return &mstm->msto[0]->encoder;
+ }
+ return NULL;
+}
+
+static enum drm_mode_status
+nv50_mstc_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static int
+nv50_mstc_get_modes(struct drm_connector *connector)
+{
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ int ret = 0;
+
+ mstc->edid = drm_dp_mst_get_edid(&mstc->connector, mstc->port->mgr, mstc->port);
+ drm_mode_connector_update_edid_property(&mstc->connector, mstc->edid);
+ if (mstc->edid) {
+ ret = drm_add_edid_modes(&mstc->connector, mstc->edid);
+ drm_edid_to_eld(&mstc->connector, mstc->edid);
+ }
+
+ if (!mstc->connector.display_info.bpc)
+ mstc->connector.display_info.bpc = 8;
+
+ if (mstc->native)
+ drm_mode_destroy(mstc->connector.dev, mstc->native);
+ mstc->native = nouveau_conn_native_mode(&mstc->connector);
+ return ret;
+}
+
+static const struct drm_connector_helper_funcs
+nv50_mstc_help = {
+ .get_modes = nv50_mstc_get_modes,
+ .mode_valid = nv50_mstc_mode_valid,
+ .best_encoder = nv50_mstc_best_encoder,
+ .atomic_best_encoder = nv50_mstc_atomic_best_encoder,
+};
+
+static enum drm_connector_status
+nv50_mstc_detect(struct drm_connector *connector, bool force)
+{
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ if (!mstc->port)
+ return connector_status_disconnected;
+ return drm_dp_mst_detect_port(connector, mstc->port->mgr, mstc->port);
+}
+
+static void
+nv50_mstc_destroy(struct drm_connector *connector)
+{
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ drm_connector_cleanup(&mstc->connector);
+ kfree(mstc);
+}
+
+static const struct drm_connector_funcs
+nv50_mstc = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .reset = nouveau_conn_reset,
+ .detect = nv50_mstc_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = drm_atomic_helper_connector_set_property,
+ .destroy = nv50_mstc_destroy,
+ .atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
+ .atomic_destroy_state = nouveau_conn_atomic_destroy_state,
+ .atomic_set_property = nouveau_conn_atomic_set_property,
+ .atomic_get_property = nouveau_conn_atomic_get_property,
+};
+
+static int
+nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
+ const char *path, struct nv50_mstc **pmstc)
+{
+ struct drm_device *dev = mstm->outp->base.base.dev;
+ struct nv50_mstc *mstc;
+ int ret, i;
+
+ if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL)))
+ return -ENOMEM;
+ mstc->mstm = mstm;
+ mstc->port = port;
+
+ ret = drm_connector_init(dev, &mstc->connector, &nv50_mstc,
+ DRM_MODE_CONNECTOR_DisplayPort);
+ if (ret) {
+ kfree(*pmstc);
+ *pmstc = NULL;
+ return ret;
+ }
+
+ drm_connector_helper_add(&mstc->connector, &nv50_mstc_help);
+
+ mstc->connector.funcs->reset(&mstc->connector);
+ nouveau_conn_attach_properties(&mstc->connector);
+
+ for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++)
+ drm_mode_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);
+
+ drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
+ drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
+ drm_mode_connector_set_path_property(&mstc->connector, path);
+ return 0;
+}
+
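+/* After the hardware update has landed: wait for ACT, send the part-2
+ * payload update, then finish off any streams that were torn down.
+ */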
+static void
+nv50_mstm_cleanup(struct nv50_mstm *mstm)
+{
+ struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
+ struct drm_encoder *encoder;
+ int ret;
+
+ NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name);
+ ret = drm_dp_check_act_status(&mstm->mgr);
+
+ ret = drm_dp_update_payload_part2(&mstm->mgr);
+
+ drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
+ struct nv50_msto *msto = nv50_msto(encoder);
+ struct nv50_mstc *mstc = msto->mstc;
+ if (mstc && mstc->mstm == mstm)
+ nv50_msto_cleanup(msto);
+ }
+ }
+
+ mstm->modified = false;
+}
+
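+/* Send the part-1 payload-table update to the sink, then program each
+ * affected stream's timeslot assignment into the hardware.
+ */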
+static void
+nv50_mstm_prepare(struct nv50_mstm *mstm)
+{
+ struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
+ struct drm_encoder *encoder;
+ int ret;
+
+ NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name);
+ ret = drm_dp_update_payload_part1(&mstm->mgr);
+
+ drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
+ struct nv50_msto *msto = nv50_msto(encoder);
+ struct nv50_mstc *mstc = msto->mstc;
+ if (mstc && mstc->mstm == mstm)
+ nv50_msto_prepare(msto);
+ }
+ }
+}
+
+static void
+nv50_mstm_hotplug(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct nv50_mstm *mstm = nv50_mstm(mgr);
+ drm_kms_helper_hotplug_event(mstm->outp->base.base.dev);
+}
+
+static void
+nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_connector *connector)
+{
+ struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+
+ drm_connector_unregister(&mstc->connector);
+
+ drm_modeset_lock_all(drm->dev);
+ drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector);
+ mstc->port = NULL;
+ drm_modeset_unlock_all(drm->dev);
+
+ drm_connector_unreference(&mstc->connector);
+}
+
+static void
+nv50_mstm_register_connector(struct drm_connector *connector)
+{
+ struct nouveau_drm *drm = nouveau_drm(connector->dev);
+
+ drm_modeset_lock_all(drm->dev);
+ drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector);
+ drm_modeset_unlock_all(drm->dev);
+
+ drm_connector_register(connector);
+}
+
+static struct drm_connector *
+nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port, const char *path)
+{
+ struct nv50_mstm *mstm = nv50_mstm(mgr);
+ struct nv50_mstc *mstc;
+ int ret;
+
+ ret = nv50_mstc_new(mstm, port, path, &mstc);
+ if (ret) {
+ if (mstc)
+ mstc->connector.funcs->destroy(&mstc->connector);
+ return NULL;
+ }
+
+ return &mstc->connector;
+}
+
+static const struct drm_dp_mst_topology_cbs
+nv50_mstm = {
+ .add_connector = nv50_mstm_add_connector,
+ .register_connector = nv50_mstm_register_connector,
+ .destroy_connector = nv50_mstm_destroy_connector,
+ .hotplug = nv50_mstm_hotplug,
+};
+
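+/* Service an MST sink interrupt: read the ESI registers, let the helper
+ * process the IRQ, and acknowledge what was handled; a failed read tears
+ * the topology down.
+ */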
+void
+nv50_mstm_service(struct nv50_mstm *mstm)
+{
+ struct drm_dp_aux *aux = mstm->mgr.aux;
+ bool handled = true;
+ int ret;
+ u8 esi[8] = {};
+
+ while (handled) {
+ ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
+ if (ret != 8) {
+ drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
+ return;
+ }
+
+ drm_dp_mst_hpd_irq(&mstm->mgr, esi, &handled);
+ if (!handled)
+ break;
+
+ drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
+ }
+}
+
+void
+nv50_mstm_remove(struct nv50_mstm *mstm)
+{
+ if (mstm)
+ drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
+}
+
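+/* For DPCD 1.2+ sinks, set or clear DP_MST_EN in the sink's MSTM_CTRL
+ * register, then tell the display engine to (dis)engage MST on this SOR.
+ */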
+static int
+nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
+{
+ struct nouveau_encoder *outp = mstm->outp;
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_sor_dp_mst_link_v0 mst;
+ } args = {
+ .base.version = 1,
+ .base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_LINK,
+ .base.hasht = outp->dcb->hasht,
+ .base.hashm = outp->dcb->hashm,
+ .mst.state = state,
+ };
+ struct nouveau_drm *drm = nouveau_drm(outp->base.base.dev);
+ struct nvif_object *disp = &drm->display->disp;
+ int ret;
+
+ if (dpcd >= 0x12) {
+ ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CTRL, &dpcd);
+ if (ret < 0)
+ return ret;
+
+ dpcd &= ~DP_MST_EN;
+ if (state)
+ dpcd |= DP_MST_EN;
+
+ ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, dpcd);
+ if (ret < 0)
+ return ret;
+ }
+
+ return nvif_mthd(disp, 0, &args, sizeof(args));
+}
+
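+/* Choose between MST and SST for a detected sink: MST is used only when
+ * allowed by the caller and the sink reports DPCD 1.2+ with DP_MST_CAP set.
+ */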
+int
+nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
+{
+ int ret, state = 0;
+
+ if (!mstm)
+ return 0;
+
+ if (dpcd[0] >= 0x12 && allow) {
+ ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]);
+ if (ret < 0)
+ return ret;
+
+ state = dpcd[1] & DP_MST_CAP;
+ }
+
+ ret = nv50_mstm_enable(mstm, dpcd[0], state);
+ if (ret)
+ return ret;
+
+ ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, state);
+ if (ret)
+ return nv50_mstm_enable(mstm, dpcd[0], 0);
+
+ return mstm->mgr.mst_state;
+}
+
+static void
+nv50_mstm_fini(struct nv50_mstm *mstm)
+{
+ if (mstm && mstm->mgr.mst_state)
+ drm_dp_mst_topology_mgr_suspend(&mstm->mgr);
+}
+
+static void
+nv50_mstm_init(struct nv50_mstm *mstm)
+{
+ if (mstm && mstm->mgr.mst_state)
+ drm_dp_mst_topology_mgr_resume(&mstm->mgr);
+}
+
+static void
+nv50_mstm_del(struct nv50_mstm **pmstm)
+{
+ struct nv50_mstm *mstm = *pmstm;
+ if (mstm) {
+ kfree(*pmstm);
+ *pmstm = NULL;
+ }
+}
+
+static int
+nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
+ int conn_base_id, struct nv50_mstm **pmstm)
+{
+ const int max_payloads = hweight8(outp->dcb->heads);
+ struct drm_device *dev = outp->base.base.dev;
+ struct nv50_mstm *mstm;
+ int ret, i;
+ u8 dpcd;
+
+ /* This is a workaround for some monitors not functioning
+ * correctly in MST mode on initial module load. I think
+ * some bad interaction with the VBIOS may be responsible.
+ *
+ * A good ol' off and on again seems to work here ;)
+ */
+ ret = drm_dp_dpcd_readb(aux, DP_DPCD_REV, &dpcd);
+ if (ret >= 0 && dpcd >= 0x12)
+ drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, 0);
+
+ if (!(mstm = *pmstm = kzalloc(sizeof(*mstm), GFP_KERNEL)))
+ return -ENOMEM;
+ mstm->outp = outp;
+ mstm->mgr.cbs = &nv50_mstm;
+
+ ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev->dev, aux, aux_max,
+ max_payloads, conn_base_id);
+ if (ret)
+ return ret;
+
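+ /* Create one MST stream encoder (MSTO) per possible head. */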
+ for (i = 0; i < max_payloads; i++) {
+ ret = nv50_msto_new(dev, outp->dcb->heads, outp->base.base.name,
+ i, &mstm->msto[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
/******************************************************************************
@@ -1861,89 +3444,91 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
.base.hashm = nv_encoder->dcb->hashm,
.pwr.state = mode == DRM_MODE_DPMS_ON,
};
- struct {
- struct nv50_disp_mthd_v1 base;
- struct nv50_disp_sor_dp_pwr_v0 pwr;
- } link = {
- .base.version = 1,
- .base.method = NV50_DISP_MTHD_V1_SOR_DP_PWR,
- .base.hasht = nv_encoder->dcb->hasht,
- .base.hashm = nv_encoder->dcb->hashm,
- .pwr.state = mode == DRM_MODE_DPMS_ON,
- };
- struct drm_device *dev = encoder->dev;
- struct drm_encoder *partner;
- nv_encoder->last_dpms = mode;
-
- list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nv_partner = nouveau_encoder(partner);
-
- if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
- continue;
+ nvif_mthd(disp->disp, 0, &args, sizeof(args));
+}
- if (nv_partner != nv_encoder &&
- nv_partner->dcb->or == nv_encoder->dcb->or) {
- if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
- return;
- break;
- }
- }
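+/* Single point of control for the SOR: nv_encoder->ctrl tracks which heads
+ * drive the output, and the control method is written with the layout
+ * appropriate to the core channel class in use.
+ */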
+static void
+nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
+ struct drm_display_mode *mode, u8 proto, u8 depth)
+{
+ struct nv50_dmac *core = &nv50_mast(nv_encoder->base.base.dev)->base;
+ u32 *push;
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- args.pwr.state = 1;
- nvif_mthd(disp->disp, 0, &args, sizeof(args));
- nvif_mthd(disp->disp, 0, &link, sizeof(link));
+ if (!mode) {
+ nv_encoder->ctrl &= ~BIT(head);
+ if (!(nv_encoder->ctrl & 0x0000000f))
+ nv_encoder->ctrl = 0;
} else {
- nvif_mthd(disp->disp, 0, &args, sizeof(args));
+ nv_encoder->ctrl |= proto << 8;
+ nv_encoder->ctrl |= BIT(head);
}
-}
-static void
-nv50_sor_ctrl(struct nouveau_encoder *nv_encoder, u32 mask, u32 data)
-{
- struct nv50_mast *mast = nv50_mast(nv_encoder->base.base.dev);
- u32 temp = (nv_encoder->ctrl & ~mask) | (data & mask), *push;
- if (temp != nv_encoder->ctrl && (push = evo_wait(mast, 2))) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
+ if ((push = evo_wait(core, 6))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+ if (mode) {
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ nv_encoder->ctrl |= 0x00001000;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ nv_encoder->ctrl |= 0x00002000;
+ nv_encoder->ctrl |= depth << 16;
+ }
evo_mthd(push, 0x0600 + (nv_encoder->or * 0x40), 1);
- evo_data(push, (nv_encoder->ctrl = temp));
} else {
+ if (mode) {
+ u32 magic = 0x31ec6000 | (head << 25);
+ u32 syncs = 0x00000001;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ syncs |= 0x00000008;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ syncs |= 0x00000010;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ magic |= 0x00000001;
+
+ evo_mthd(push, 0x0404 + (head * 0x300), 2);
+ evo_data(push, syncs | (depth << 6));
+ evo_data(push, magic);
+ }
evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
- evo_data(push, (nv_encoder->ctrl = temp));
}
- evo_kick(push, mast);
+ evo_data(push, nv_encoder->ctrl);
+ evo_kick(push, core);
}
}
static void
-nv50_sor_disconnect(struct drm_encoder *encoder)
+nv50_sor_disable(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
- nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
nv_encoder->crtc = NULL;
if (nv_crtc) {
- nv50_crtc_prepare(&nv_crtc->base);
- nv50_sor_ctrl(nv_encoder, 1 << nv_crtc->index, 0);
- nv50_audio_disconnect(encoder, nv_crtc);
- nv50_hdmi_disconnect(&nv_encoder->base.base, nv_crtc);
- }
-}
+ struct nvkm_i2c_aux *aux = nv_encoder->aux;
+ u8 pwr;
-static void
-nv50_sor_commit(struct drm_encoder *encoder)
-{
+ if (aux) {
+ int ret = nvkm_rdaux(aux, DP_SET_POWER, &pwr, 1);
+ if (ret == 0) {
+ pwr &= ~DP_SET_POWER_MASK;
+ pwr |= DP_SET_POWER_D3;
+ nvkm_wraux(aux, DP_SET_POWER, &pwr, 1);
+ }
+ }
+
+ nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
+ nv50_audio_disable(encoder, nv_crtc);
+ nv50_hdmi_disable(&nv_encoder->base.base, nv_crtc);
+ }
}
static void
-nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
- struct drm_display_mode *mode)
+nv50_sor_enable(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
struct {
struct nv50_disp_mthd_v1 base;
struct nv50_disp_sor_lvds_script_v0 lvds;
@@ -1954,13 +3539,10 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
.base.hashm = nv_encoder->dcb->hashm,
};
struct nv50_disp *disp = nv50_disp(encoder->dev);
- struct nv50_mast *mast = nv50_mast(encoder->dev);
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_connector *nv_connector;
struct nvbios *bios = &drm->vbios;
- u32 mask, ctrl;
- u8 owner = 1 << nv_crtc->index;
u8 proto = 0xf;
u8 depth = 0x0;
@@ -1985,7 +3567,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
proto = 0x2;
}
- nv50_hdmi_mode_set(&nv_encoder->base.base, mode);
+ nv50_hdmi_enable(&nv_encoder->base.base, mode);
break;
case DCB_OUTPUT_LVDS:
proto = 0x0;
@@ -2019,94 +3601,60 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
nvif_mthd(disp->disp, 0, &lvds, sizeof(lvds));
break;
case DCB_OUTPUT_DP:
- if (nv_connector->base.display_info.bpc == 6) {
- nv_encoder->dp.datarate = mode->clock * 18 / 8;
+ if (nv_connector->base.display_info.bpc == 6)
depth = 0x2;
- } else
- if (nv_connector->base.display_info.bpc == 8) {
- nv_encoder->dp.datarate = mode->clock * 24 / 8;
+ else
+ if (nv_connector->base.display_info.bpc == 8)
depth = 0x5;
- } else {
- nv_encoder->dp.datarate = mode->clock * 30 / 8;
+ else
depth = 0x6;
- }
if (nv_encoder->dcb->sorconf.link & 1)
proto = 0x8;
else
proto = 0x9;
- nv50_audio_mode_set(encoder, mode);
+
+ nv50_audio_enable(encoder, mode);
break;
default:
BUG_ON(1);
break;
}
- nv50_sor_dpms(&nv_encoder->base.base, DRM_MODE_DPMS_ON);
-
- if (nv50_vers(mast) >= GF110_DISP) {
- u32 *push = evo_wait(mast, 3);
- if (push) {
- u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
- u32 syncs = 0x00000001;
-
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- syncs |= 0x00000008;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- syncs |= 0x00000010;
-
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- magic |= 0x00000001;
-
- evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
- evo_data(push, syncs | (depth << 6));
- evo_data(push, magic);
- evo_kick(push, mast);
- }
-
- ctrl = proto << 8;
- mask = 0x00000f00;
- } else {
- ctrl = (depth << 16) | (proto << 8);
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- ctrl |= 0x00001000;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- ctrl |= 0x00002000;
- mask = 0x000f3f00;
- }
-
- nv50_sor_ctrl(nv_encoder, mask | owner, ctrl | owner);
+ nv_encoder->update(nv_encoder, nv_crtc->index, mode, proto, depth);
}
+static const struct drm_encoder_helper_funcs
+nv50_sor_help = {
+ .dpms = nv50_sor_dpms,
+ .atomic_check = nv50_outp_atomic_check,
+ .enable = nv50_sor_enable,
+ .disable = nv50_sor_disable,
+};
+
static void
nv50_sor_destroy(struct drm_encoder *encoder)
{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ nv50_mstm_del(&nv_encoder->dp.mstm);
drm_encoder_cleanup(encoder);
kfree(encoder);
}
-static const struct drm_encoder_helper_funcs nv50_sor_hfunc = {
- .dpms = nv50_sor_dpms,
- .mode_fixup = nv50_encoder_mode_fixup,
- .prepare = nv50_sor_disconnect,
- .commit = nv50_sor_commit,
- .mode_set = nv50_sor_mode_set,
- .disable = nv50_sor_disconnect,
- .get_crtc = nv50_display_crtc_get,
-};
-
-static const struct drm_encoder_funcs nv50_sor_func = {
+static const struct drm_encoder_funcs
+nv50_sor_func = {
.destroy = nv50_sor_destroy,
};
static int
nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
- int type;
+ int type, ret;
switch (dcbe->type) {
case DCB_OUTPUT_LVDS: type = DRM_MODE_ENCODER_LVDS; break;
@@ -2122,7 +3670,16 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
return -ENOMEM;
nv_encoder->dcb = dcbe;
nv_encoder->or = ffs(dcbe->or) - 1;
- nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
+ nv_encoder->update = nv50_sor_update;
+
+ encoder = to_drm_encoder(nv_encoder);
+ encoder->possible_crtcs = dcbe->heads;
+ encoder->possible_clones = 0;
+ drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type,
+ "sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
+ drm_encoder_helper_add(encoder, &nv50_sor_help);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
if (dcbe->type == DCB_OUTPUT_DP) {
struct nvkm_i2c_aux *aux =
@@ -2131,6 +3688,15 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
nv_encoder->i2c = &aux->i2c;
nv_encoder->aux = aux;
}
+
+ /*TODO: Use DP Info Table to check for support. */
+ if (nv50_disp(encoder->dev)->disp->oclass >= GF110_DISP) {
+ ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
+ nv_connector->base.base.id,
+ &nv_encoder->dp.mstm);
+ if (ret)
+ return ret;
+ }
} else {
struct nvkm_i2c_bus *bus =
nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
@@ -2138,20 +3704,12 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
nv_encoder->i2c = &bus->i2c;
}
- encoder = to_drm_encoder(nv_encoder);
- encoder->possible_crtcs = dcbe->heads;
- encoder->possible_clones = 0;
- drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type, NULL);
- drm_encoder_helper_add(encoder, &nv50_sor_hfunc);
-
- drm_mode_connector_attach_encoder(connector, encoder);
return 0;
}
/******************************************************************************
* PIOR
*****************************************************************************/
-
static void
nv50_pior_dpms(struct drm_encoder *encoder, int mode)
{
@@ -2172,30 +3730,48 @@ nv50_pior_dpms(struct drm_encoder *encoder, int mode)
nvif_mthd(disp->disp, 0, &args, sizeof(args));
}
-static bool
-nv50_pior_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static int
+nv50_pior_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
- if (!nv50_encoder_mode_fixup(encoder, mode, adjusted_mode))
- return false;
- adjusted_mode->clock *= 2;
- return true;
+ int ret = nv50_outp_atomic_check(encoder, crtc_state, conn_state);
+ if (ret)
+ return ret;
+ crtc_state->adjusted_mode.clock *= 2;
+ return 0;
}
static void
-nv50_pior_commit(struct drm_encoder *encoder)
+nv50_pior_disable(struct drm_encoder *encoder)
{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ const int or = nv_encoder->or;
+ u32 *push;
+
+ if (nv_encoder->crtc) {
+ push = evo_wait(mast, 4);
+ if (push) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0700 + (or * 0x040), 1);
+ evo_data(push, 0x00000000);
+ }
+ evo_kick(push, mast);
+ }
+ }
+
+ nv_encoder->crtc = NULL;
}
static void
-nv50_pior_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+nv50_pior_enable(struct drm_encoder *encoder)
{
struct nv50_mast *mast = nv50_mast(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nouveau_connector *nv_connector;
+ struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
u8 owner = 1 << nv_crtc->index;
u8 proto, depth;
u32 *push;
@@ -2218,8 +3794,6 @@ nv50_pior_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
break;
}
- nv50_pior_dpms(encoder, DRM_MODE_DPMS_ON);
-
push = evo_wait(mast, 8);
if (push) {
if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
@@ -2238,29 +3812,13 @@ nv50_pior_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
nv_encoder->crtc = encoder->crtc;
}
-static void
-nv50_pior_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nv50_mast *mast = nv50_mast(encoder->dev);
- const int or = nv_encoder->or;
- u32 *push;
-
- if (nv_encoder->crtc) {
- nv50_crtc_prepare(nv_encoder->crtc);
-
- push = evo_wait(mast, 4);
- if (push) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0700 + (or * 0x040), 1);
- evo_data(push, 0x00000000);
- }
- evo_kick(push, mast);
- }
- }
-
- nv_encoder->crtc = NULL;
-}
+static const struct drm_encoder_helper_funcs
+nv50_pior_help = {
+ .dpms = nv50_pior_dpms,
+ .atomic_check = nv50_pior_atomic_check,
+ .enable = nv50_pior_enable,
+ .disable = nv50_pior_disable,
+};
static void
nv50_pior_destroy(struct drm_encoder *encoder)
@@ -2269,17 +3827,8 @@ nv50_pior_destroy(struct drm_encoder *encoder)
kfree(encoder);
}
-static const struct drm_encoder_helper_funcs nv50_pior_hfunc = {
- .dpms = nv50_pior_dpms,
- .mode_fixup = nv50_pior_mode_fixup,
- .prepare = nv50_pior_disconnect,
- .commit = nv50_pior_commit,
- .mode_set = nv50_pior_mode_set,
- .disable = nv50_pior_disconnect,
- .get_crtc = nv50_display_crtc_get,
-};
-
-static const struct drm_encoder_funcs nv50_pior_func = {
+static const struct drm_encoder_funcs
+nv50_pior_func = {
.destroy = nv50_pior_destroy,
};
@@ -2321,149 +3870,462 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
- drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type, NULL);
- drm_encoder_helper_add(encoder, &nv50_pior_hfunc);
+ drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type,
+ "pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
+ drm_encoder_helper_add(encoder, &nv50_pior_help);
drm_mode_connector_attach_encoder(connector, encoder);
return 0;
}
/******************************************************************************
- * Framebuffer
+ * Atomic
*****************************************************************************/
static void
-nv50_fbdma_fini(struct nv50_fbdma *fbdma)
+nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 interlock)
{
- int i;
- for (i = 0; i < ARRAY_SIZE(fbdma->base); i++)
- nvif_object_fini(&fbdma->base[i]);
- nvif_object_fini(&fbdma->core);
- list_del(&fbdma->head);
- kfree(fbdma);
+ struct nv50_disp *disp = nv50_disp(drm->dev);
+ struct nv50_dmac *core = &disp->mast.base;
+ struct nv50_mstm *mstm;
+ struct drm_encoder *encoder;
+ u32 *push;
+
+ NV_ATOMIC(drm, "commit core %08x\n", interlock);
+
+ drm_for_each_encoder(encoder, drm->dev) {
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
+ mstm = nouveau_encoder(encoder)->dp.mstm;
+ if (mstm && mstm->modified)
+ nv50_mstm_prepare(mstm);
+ }
+ }
+
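+ /* Submit the core-channel update with the supplied interlock mask, then
+ * poll the notifier in the shared sync buffer until the hardware signals
+ * completion (2 second timeout).
+ */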
+ if ((push = evo_wait(core, 5))) {
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, 0x80000000);
+ evo_mthd(push, 0x0080, 2);
+ evo_data(push, interlock);
+ evo_data(push, 0x00000000);
+ nouveau_bo_wr32(disp->sync, 0, 0x00000000);
+ evo_kick(push, core);
+ if (nvif_msec(&drm->device, 2000ULL,
+ if (nouveau_bo_rd32(disp->sync, 0))
+ break;
+ usleep_range(1, 2);
+ ) < 0)
+ NV_ERROR(drm, "EVO timeout\n");
+ }
+
+ drm_for_each_encoder(encoder, drm->dev) {
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
+ mstm = nouveau_encoder(encoder)->dp.mstm;
+ if (mstm && mstm->modified)
+ nv50_mstm_cleanup(mstm);
+ }
+ }
}
-static int
-nv50_fbdma_init(struct drm_device *dev, u32 name, u64 offset, u64 length, u8 kind)
+static void
+nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
{
+ struct drm_device *dev = state->dev;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ struct drm_plane_state *plane_state;
+ struct drm_plane *plane;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv50_disp *disp = nv50_disp(dev);
- struct nv50_mast *mast = nv50_mast(dev);
- struct __attribute__ ((packed)) {
- struct nv_dma_v0 base;
- union {
- struct nv50_dma_v0 nv50;
- struct gf100_dma_v0 gf100;
- struct gf119_dma_v0 gf119;
- };
- } args = {};
- struct nv50_fbdma *fbdma;
- struct drm_crtc *crtc;
- u32 size = sizeof(args.base);
- int ret;
+ struct nv50_atom *atom = nv50_atom(state);
+ struct nv50_outp_atom *outp, *outt;
+ u32 interlock_core = 0;
+ u32 interlock_chan = 0;
+ int i;
+
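+ /* Disables (heads, planes, output paths) are programmed before enables;
+ * everything is flushed in a single core update unless an output path
+ * requires its disable to be flushed separately (flush_disable).
+ */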
+ NV_ATOMIC(drm, "commit %d %d\n", atom->lock_core, atom->flush_disable);
+ drm_atomic_helper_wait_for_fences(dev, state, false);
+ drm_atomic_helper_wait_for_dependencies(state);
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
- list_for_each_entry(fbdma, &disp->fbdma, head) {
- if (fbdma->core.handle == name)
- return 0;
+ if (atom->lock_core)
+ mutex_lock(&disp->mutex);
+
+ /* Disable head(s). */
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ struct nv50_head_atom *asyh = nv50_head_atom(crtc->state);
+ struct nv50_head *head = nv50_head(crtc);
+
+ NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
+ asyh->clr.mask, asyh->set.mask);
+
+ if (asyh->clr.mask) {
+ nv50_head_flush_clr(head, asyh, atom->flush_disable);
+ interlock_core |= 1;
+ }
}
- fbdma = kzalloc(sizeof(*fbdma), GFP_KERNEL);
- if (!fbdma)
- return -ENOMEM;
- list_add(&fbdma->head, &disp->fbdma);
+ /* Disable plane(s). */
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
- args.base.target = NV_DMA_V0_TARGET_VRAM;
- args.base.access = NV_DMA_V0_ACCESS_RDWR;
- args.base.start = offset;
- args.base.limit = offset + length - 1;
+ NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", plane->name,
+ asyw->clr.mask, asyw->set.mask);
+ if (!asyw->clr.mask)
+ continue;
- if (drm->device.info.chipset < 0x80) {
- args.nv50.part = NV50_DMA_V0_PART_256;
- size += sizeof(args.nv50);
- } else
- if (drm->device.info.chipset < 0xc0) {
- args.nv50.part = NV50_DMA_V0_PART_256;
- args.nv50.kind = kind;
- size += sizeof(args.nv50);
- } else
- if (drm->device.info.chipset < 0xd0) {
- args.gf100.kind = kind;
- size += sizeof(args.gf100);
- } else {
- args.gf119.page = GF119_DMA_V0_PAGE_LP;
- args.gf119.kind = kind;
- size += sizeof(args.gf119);
+ interlock_chan |= nv50_wndw_flush_clr(wndw, interlock_core,
+ atom->flush_disable,
+ asyw);
+ }
+
+ /* Disable output path(s). */
+ list_for_each_entry(outp, &atom->outp, head) {
+ const struct drm_encoder_helper_funcs *help;
+ struct drm_encoder *encoder;
+
+ encoder = outp->encoder;
+ help = encoder->helper_private;
+
+ NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", encoder->name,
+ outp->clr.mask, outp->set.mask);
+
+ if (outp->clr.mask) {
+ help->disable(encoder);
+ interlock_core |= 1;
+ if (outp->flush_disable) {
+ nv50_disp_atomic_commit_core(drm, interlock_chan);
+ interlock_core = 0;
+ interlock_chan = 0;
+ }
+ }
+ }
+
+ /* Flush disable. */
+ if (interlock_core) {
+ if (atom->flush_disable) {
+ nv50_disp_atomic_commit_core(drm, interlock_chan);
+ interlock_core = 0;
+ interlock_chan = 0;
+ }
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ /* Update output path(s). */
+ list_for_each_entry_safe(outp, outt, &atom->outp, head) {
+ const struct drm_encoder_helper_funcs *help;
+ struct drm_encoder *encoder;
+
+ encoder = outp->encoder;
+ help = encoder->helper_private;
+
+ NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", encoder->name,
+ outp->set.mask, outp->clr.mask);
+
+ if (outp->set.mask) {
+ help->enable(encoder);
+ interlock_core = 1;
+ }
+
+ list_del(&outp->head);
+ kfree(outp);
+ }
+
+ /* Update head(s). */
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ struct nv50_head_atom *asyh = nv50_head_atom(crtc->state);
struct nv50_head *head = nv50_head(crtc);
- int ret = nvif_object_init(&head->sync.base.base.user, name,
- NV_DMA_IN_MEMORY, &args, size,
- &fbdma->base[head->base.index]);
- if (ret) {
- nv50_fbdma_fini(fbdma);
- return ret;
+
+ NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
+ asyh->set.mask, asyh->clr.mask);
+
+ if (asyh->set.mask) {
+ nv50_head_flush_set(head, asyh);
+ interlock_core = 1;
}
}
- ret = nvif_object_init(&mast->base.base.user, name, NV_DMA_IN_MEMORY,
- &args, size, &fbdma->core);
- if (ret) {
- nv50_fbdma_fini(fbdma);
+ /* Update plane(s). */
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+
+ NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", plane->name,
+ asyw->set.mask, asyw->clr.mask);
+ if ( !asyw->set.mask &&
+ (!asyw->clr.mask || atom->flush_disable))
+ continue;
+
+ interlock_chan |= nv50_wndw_flush_set(wndw, interlock_core, asyw);
+ }
+
+ /* Flush update. */
+ if (interlock_core) {
+ if (!interlock_chan && atom->state.legacy_cursor_update) {
+ u32 *push = evo_wait(&disp->mast, 2);
+ if (push) {
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, &disp->mast);
+ }
+ } else {
+ nv50_disp_atomic_commit_core(drm, interlock_chan);
+ }
+ }
+
+ if (atom->lock_core)
+ mutex_unlock(&disp->mutex);
+
+ /* Wait for HW to signal completion. */
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ int ret = nv50_wndw_wait_armed(wndw, asyw);
+ if (ret)
+ NV_ERROR(drm, "%s: timeout\n", plane->name);
+ }
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ if (crtc->state->event) {
+ unsigned long flags;
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ crtc->state->event = NULL;
+ }
+ }
+
+ drm_atomic_helper_commit_hw_done(state);
+ drm_atomic_helper_cleanup_planes(dev, state);
+ drm_atomic_helper_commit_cleanup_done(state);
+ drm_atomic_state_put(state);
+}
+
+static void
+nv50_disp_atomic_commit_work(struct work_struct *work)
+{
+ struct drm_atomic_state *state =
+ container_of(work, typeof(*state), commit_work);
+ nv50_disp_atomic_commit_tail(state);
+}
+
+static int
+nv50_disp_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state, bool nonblock)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nv50_disp *disp = nv50_disp(dev);
+ struct drm_plane_state *plane_state;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ bool active = false;
+ int ret, i;
+
+ ret = pm_runtime_get_sync(dev->dev);
+ if (ret < 0 && ret != -EACCES)
return ret;
+
+ ret = drm_atomic_helper_setup_commit(state, nonblock);
+ if (ret)
+ goto done;
+
+ INIT_WORK(&state->commit_work, nv50_disp_atomic_commit_work);
+
+ ret = drm_atomic_helper_prepare_planes(dev, state);
+ if (ret)
+ goto done;
+
+ if (!nonblock) {
+ ret = drm_atomic_helper_wait_for_fences(dev, state, true);
+ if (ret)
+ goto done;
+ }
+
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane_state);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ if (asyw->set.image) {
+ asyw->ntfy.handle = wndw->dmac->sync.handle;
+ asyw->ntfy.offset = wndw->ntfy;
+ asyw->ntfy.awaken = false;
+ asyw->set.ntfy = true;
+ nouveau_bo_wr32(disp->sync, wndw->ntfy / 4, 0x00000000);
+ wndw->ntfy ^= 0x10;
+ }
+ }
+
+ drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
+
+ if (nonblock)
+ queue_work(system_unbound_wq, &state->commit_work);
+ else
+ nv50_disp_atomic_commit_tail(state);
+
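+ /* Hold a runtime-PM reference while any head is enabled; the early return
+ * deliberately skips the put in the done: path so the reference is kept.
+ */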
+ drm_for_each_crtc(crtc, dev) {
+ if (crtc->state->enable) {
+ if (!drm->have_disp_power_ref) {
+ drm->have_disp_power_ref = true;
+ return ret;
+ }
+ active = true;
+ break;
+ }
+ }
+
+ if (!active && drm->have_disp_power_ref) {
+ pm_runtime_put_autosuspend(dev->dev);
+ drm->have_disp_power_ref = false;
+ }
+
+done:
+ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
+}
+
+static struct nv50_outp_atom *
+nv50_disp_outp_atomic_add(struct nv50_atom *atom, struct drm_encoder *encoder)
+{
+ struct nv50_outp_atom *outp;
+
+ list_for_each_entry(outp, &atom->outp, head) {
+ if (outp->encoder == encoder)
+ return outp;
+ }
+
+ outp = kzalloc(sizeof(*outp), GFP_KERNEL);
+ if (!outp)
+ return ERR_PTR(-ENOMEM);
+
+ list_add(&outp->head, &atom->outp);
+ outp->encoder = encoder;
+ return outp;
+}
+
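+/* If the connector's currently-bound CRTC is active and about to undergo a
+ * full modeset, record that its encoder's control state must be cleared;
+ * MST encoders additionally force the disable to be flushed first.
+ */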
+static int
+nv50_disp_outp_atomic_check_clr(struct nv50_atom *atom,
+ struct drm_connector *connector)
+{
+ struct drm_encoder *encoder = connector->state->best_encoder;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ struct nv50_outp_atom *outp;
+
+ if (!(crtc = connector->state->crtc))
+ return 0;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(&atom->state, crtc);
+ if (crtc->state->active && drm_atomic_crtc_needs_modeset(crtc_state)) {
+ outp = nv50_disp_outp_atomic_add(atom, encoder);
+ if (IS_ERR(outp))
+ return PTR_ERR(outp);
+
+ if (outp->encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
+ outp->flush_disable = true;
+ atom->flush_disable = true;
+ }
+ outp->clr.ctrl = true;
+ atom->lock_core = true;
}
return 0;
}
-static void
-nv50_fb_dtor(struct drm_framebuffer *fb)
+static int
+nv50_disp_outp_atomic_check_set(struct nv50_atom *atom,
+ struct drm_connector_state *connector_state)
{
+ struct drm_encoder *encoder = connector_state->best_encoder;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ struct nv50_outp_atom *outp;
+
+ if (!(crtc = connector_state->crtc))
+ return 0;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(&atom->state, crtc);
+ if (crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state)) {
+ outp = nv50_disp_outp_atomic_add(atom, encoder);
+ if (IS_ERR(outp))
+ return PTR_ERR(outp);
+
+ outp->set.ctrl = true;
+ atom->lock_core = true;
+ }
+
+ return 0;
}
static int
-nv50_fb_ctor(struct drm_framebuffer *fb)
-{
- struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
- struct nouveau_drm *drm = nouveau_drm(fb->dev);
- struct nouveau_bo *nvbo = nv_fb->nvbo;
- struct nv50_disp *disp = nv50_disp(fb->dev);
- u8 kind = nouveau_bo_tile_layout(nvbo) >> 8;
- u8 tile = nvbo->tile_mode;
-
- if (drm->device.info.chipset >= 0xc0)
- tile >>= 4; /* yep.. */
-
- switch (fb->depth) {
- case 8: nv_fb->r_format = 0x1e00; break;
- case 15: nv_fb->r_format = 0xe900; break;
- case 16: nv_fb->r_format = 0xe800; break;
- case 24:
- case 32: nv_fb->r_format = 0xcf00; break;
- case 30: nv_fb->r_format = 0xd100; break;
- default:
- NV_ERROR(drm, "unknown depth %d\n", fb->depth);
- return -EINVAL;
+nv50_disp_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ struct nv50_atom *atom = nv50_atom(state);
+ struct drm_connector_state *connector_state;
+ struct drm_connector *connector;
+ int ret, i;
+
+ ret = drm_atomic_helper_check(dev, state);
+ if (ret)
+ return ret;
+
+ for_each_connector_in_state(state, connector, connector_state, i) {
+ ret = nv50_disp_outp_atomic_check_clr(atom, connector);
+ if (ret)
+ return ret;
+
+ ret = nv50_disp_outp_atomic_check_set(atom, connector_state);
+ if (ret)
+ return ret;
}
- if (disp->disp->oclass < G82_DISP) {
- nv_fb->r_pitch = kind ? (((fb->pitches[0] / 4) << 4) | tile) :
- (fb->pitches[0] | 0x00100000);
- nv_fb->r_format |= kind << 16;
- } else
- if (disp->disp->oclass < GF110_DISP) {
- nv_fb->r_pitch = kind ? (((fb->pitches[0] / 4) << 4) | tile) :
- (fb->pitches[0] | 0x00100000);
- } else {
- nv_fb->r_pitch = kind ? (((fb->pitches[0] / 4) << 4) | tile) :
- (fb->pitches[0] | 0x01000000);
+ return 0;
+}
+
+static void
+nv50_disp_atomic_state_clear(struct drm_atomic_state *state)
+{
+ struct nv50_atom *atom = nv50_atom(state);
+ struct nv50_outp_atom *outp, *outt;
+
+ list_for_each_entry_safe(outp, outt, &atom->outp, head) {
+ list_del(&outp->head);
+ kfree(outp);
}
- nv_fb->r_handle = 0xffff0000 | kind;
- return nv50_fbdma_init(fb->dev, nv_fb->r_handle, 0,
- drm->device.info.ram_user, kind);
+ drm_atomic_state_default_clear(state);
+}
+
+static void
+nv50_disp_atomic_state_free(struct drm_atomic_state *state)
+{
+ struct nv50_atom *atom = nv50_atom(state);
+ drm_atomic_state_default_release(&atom->state);
+ kfree(atom);
}
+static struct drm_atomic_state *
+nv50_disp_atomic_state_alloc(struct drm_device *dev)
+{
+ struct nv50_atom *atom;
+ if (!(atom = kzalloc(sizeof(*atom), GFP_KERNEL)) ||
+ drm_atomic_state_init(dev, &atom->state) < 0) {
+ kfree(atom);
+ return NULL;
+ }
+ INIT_LIST_HEAD(&atom->outp);
+ return &atom->state;
+}
+
+static const struct drm_mode_config_funcs
+nv50_disp_func = {
+ .fb_create = nouveau_user_framebuffer_create,
+ .output_poll_changed = nouveau_fbcon_output_poll_changed,
+ .atomic_check = nv50_disp_atomic_check,
+ .atomic_commit = nv50_disp_atomic_commit,
+ .atomic_state_alloc = nv50_disp_atomic_state_alloc,
+ .atomic_state_clear = nv50_disp_atomic_state_clear,
+ .atomic_state_free = nv50_disp_atomic_state_free,
+};
+
/******************************************************************************
* Init
*****************************************************************************/
@@ -2471,12 +4333,30 @@ nv50_fb_ctor(struct drm_framebuffer *fb)
void
nv50_display_fini(struct drm_device *dev)
{
+ struct nouveau_encoder *nv_encoder;
+ struct drm_encoder *encoder;
+ struct drm_plane *plane;
+
+ drm_for_each_plane(plane, dev) {
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ if (plane->funcs != &nv50_wndw)
+ continue;
+ nv50_wndw_fini(wndw);
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
+ nv_encoder = nouveau_encoder(encoder);
+ nv50_mstm_fini(nv_encoder->dp.mstm);
+ }
+ }
}
int
nv50_display_init(struct drm_device *dev)
{
- struct nv50_disp *disp = nv50_disp(dev);
+ struct drm_encoder *encoder;
+ struct drm_plane *plane;
struct drm_crtc *crtc;
u32 *push;
@@ -2484,16 +4364,35 @@ nv50_display_init(struct drm_device *dev)
if (!push)
return -EBUSY;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct nv50_sync *sync = nv50_sync(crtc);
-
- nv50_crtc_lut_load(crtc);
- nouveau_bo_wr32(disp->sync, sync->addr / 4, sync->data);
- }
-
evo_mthd(push, 0x0088, 1);
evo_data(push, nv50_mast(dev)->base.sync.handle);
evo_kick(push, nv50_mast(dev));
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
+ const struct drm_encoder_helper_funcs *help;
+ struct nouveau_encoder *nv_encoder;
+
+ nv_encoder = nouveau_encoder(encoder);
+ help = encoder->helper_private;
+ if (help && help->dpms)
+ help->dpms(encoder, DRM_MODE_DPMS_ON);
+
+ nv50_mstm_init(nv_encoder->dp.mstm);
+ }
+ }
+
+ drm_for_each_crtc(crtc, dev) {
+ nv50_head_lut_load(crtc);
+ }
+
+ drm_for_each_plane(plane, dev) {
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ if (plane->funcs != &nv50_wndw)
+ continue;
+ nv50_wndw_init(wndw);
+ }
+
return 0;
}
@@ -2501,11 +4400,6 @@ void
nv50_display_destroy(struct drm_device *dev)
{
struct nv50_disp *disp = nv50_disp(dev);
- struct nv50_fbdma *fbdma, *fbtmp;
-
- list_for_each_entry_safe(fbdma, fbtmp, &disp->fbdma, head) {
- nv50_fbdma_fini(fbdma);
- }
nv50_dmac_destroy(&disp->mast.base, disp->disp);
@@ -2518,6 +4412,10 @@ nv50_display_destroy(struct drm_device *dev)
kfree(disp);
}
+MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
+static int nouveau_atomic = 0;
+module_param_named(atomic, nouveau_atomic, int, 0400);
+
int
nv50_display_create(struct drm_device *dev)
{
@@ -2532,15 +4430,17 @@ nv50_display_create(struct drm_device *dev)
disp = kzalloc(sizeof(*disp), GFP_KERNEL);
if (!disp)
return -ENOMEM;
- INIT_LIST_HEAD(&disp->fbdma);
+
+ mutex_init(&disp->mutex);
nouveau_display(dev)->priv = disp;
nouveau_display(dev)->dtor = nv50_display_destroy;
nouveau_display(dev)->init = nv50_display_init;
nouveau_display(dev)->fini = nv50_display_fini;
- nouveau_display(dev)->fb_ctor = nv50_fb_ctor;
- nouveau_display(dev)->fb_dtor = nv50_fb_dtor;
disp->disp = &nouveau_display(dev)->disp;
+ dev->mode_config.funcs = &nv50_disp_func;
+ if (nouveau_atomic)
+ dev->driver->driver_features |= DRIVER_ATOMIC;
/* small shared memory area we use for notifiers and semaphores */
ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
@@ -2572,7 +4472,7 @@ nv50_display_create(struct drm_device *dev)
crtcs = 2;
for (i = 0; i < crtcs; i++) {
- ret = nv50_crtc_create(dev, i);
+ ret = nv50_head_create(dev, i);
if (ret)
goto out;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 70da347aa8c5..918187cee84b 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -35,11 +35,4 @@ int nv50_display_create(struct drm_device *);
void nv50_display_destroy(struct drm_device *);
int nv50_display_init(struct drm_device *);
void nv50_display_fini(struct drm_device *);
-
-void nv50_display_flip_stop(struct drm_crtc *);
-int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
- struct nouveau_channel *, u32 swap_interval);
-
-struct nouveau_bo *nv50_display_crtc_sema(struct drm_device *, int head);
-
#endif /* __NV50_DISPLAY_H__ */
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index af3d3c49411a..327dcd7901ed 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -30,7 +30,7 @@ int
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
int ret;
@@ -65,7 +65,7 @@ int
nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
int ret;
@@ -93,7 +93,7 @@ int
nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
uint32_t dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
@@ -148,8 +148,8 @@ int
nv50_fbcon_accel_init(struct fb_info *info)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
- struct drm_device *dev = nfbdev->dev;
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(nfbdev->helper.fb);
+ struct drm_device *dev = nfbdev->helper.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
int ret, format;
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 8c5295414578..f68c7054fd53 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -35,13 +35,12 @@
static int
nv50_fence_context_new(struct nouveau_channel *chan)
{
- struct drm_device *dev = chan->drm->dev;
struct nv10_fence_priv *priv = chan->drm->fence;
struct nv10_fence_chan *fctx;
struct ttm_mem_reg *mem = &priv->bo->bo.mem;
u32 start = mem->start * PAGE_SIZE;
u32 limit = start + mem->size - 1;
- int ret, i;
+ int ret;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
@@ -60,23 +59,6 @@ nv50_fence_context_new(struct nouveau_channel *chan)
.limit = limit,
}, sizeof(struct nv_dma_v0),
&fctx->sema);
-
- /* dma objects for display sync channel semaphore blocks */
- for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
- u32 start = bo->bo.mem.start * PAGE_SIZE;
- u32 limit = start + bo->bo.mem.size - 1;
-
- ret = nvif_object_init(&chan->user, NvEvoSema0 + i,
- NV_DMA_IN_MEMORY, &(struct nv_dma_v0) {
- .target = NV_DMA_V0_TARGET_VRAM,
- .access = NV_DMA_V0_ACCESS_RDWR,
- .start = start,
- .limit = limit,
- }, sizeof(struct nv_dma_v0),
- &fctx->head[i]);
- }
-
if (ret)
nv10_fence_context_del(chan);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 23ef04b4e0b2..52b87ae83e7b 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -28,13 +28,6 @@
#include "nv50_display.h"
-u64
-nv84_fence_crtc(struct nouveau_channel *chan, int crtc)
-{
- struct nv84_fence_chan *fctx = chan->fence;
- return fctx->dispc_vma[crtc].offset;
-}
-
static int
nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
@@ -110,15 +103,8 @@ nv84_fence_read(struct nouveau_channel *chan)
static void
nv84_fence_context_del(struct nouveau_channel *chan)
{
- struct drm_device *dev = chan->drm->dev;
struct nv84_fence_priv *priv = chan->drm->fence;
struct nv84_fence_chan *fctx = chan->fence;
- int i;
-
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
- nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
- }
nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
@@ -134,7 +120,7 @@ nv84_fence_context_new(struct nouveau_channel *chan)
struct nouveau_cli *cli = (void *)chan->user.client;
struct nv84_fence_priv *priv = chan->drm->fence;
struct nv84_fence_chan *fctx;
- int ret, i;
+ int ret;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
@@ -154,12 +140,6 @@ nv84_fence_context_new(struct nouveau_channel *chan)
&fctx->vma_gart);
}
- /* map display semaphore buffers into channel's vm */
- for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
- ret = nouveau_bo_vma_add(bo, cli->vm, &fctx->dispc_vma[i]);
- }
-
if (ret)
nv84_fence_context_del(chan);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index 054b6a056d99..90f27bfa381f 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -30,7 +30,7 @@ int
nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
int ret;
@@ -65,7 +65,7 @@ int
nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
int ret;
@@ -93,7 +93,7 @@ int
nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
uint32_t dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
@@ -148,8 +148,8 @@ int
nvc0_fbcon_accel_init(struct fb_info *info)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct drm_device *dev = nfbdev->dev;
- struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
+ struct drm_device *dev = nfbdev->helper.dev;
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(nfbdev->helper.fb);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
int ret, format;
diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c
index 1ee9294eca2e..29c20dfd894d 100644
--- a/drivers/gpu/drm/nouveau/nvif/client.c
+++ b/drivers/gpu/drm/nouveau/nvif/client.c
@@ -55,7 +55,7 @@ nvif_client_fini(struct nvif_client *client)
}
}
-const struct nvif_driver *
+static const struct nvif_driver *
nvif_drivers[] = {
#ifdef __KERNEL__
&nvif_driver_nvkm,
diff --git a/drivers/gpu/drm/nouveau/nvif/notify.c b/drivers/gpu/drm/nouveau/nvif/notify.c
index b0787ff833ef..278b3933dc96 100644
--- a/drivers/gpu/drm/nouveau/nvif/notify.c
+++ b/drivers/gpu/drm/nouveau/nvif/notify.c
@@ -155,10 +155,8 @@ nvif_notify_fini(struct nvif_notify *notify)
int ret = nvif_notify_put(notify);
if (ret >= 0 && object) {
ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
- if (ret == 0) {
- notify->object = NULL;
- kfree((void *)notify->data);
- }
+ notify->object = NULL;
+ kfree((void *)notify->data);
}
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
index 05bb65608dfe..d9ca9636a3e3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gf100_ce_data[] = {
+static uint32_t gf100_ce_data[] = {
/* 0x0000: ctx_object */
0x00000000,
/* 0x0004: ctx_query_address_high */
@@ -171,7 +171,7 @@ uint32_t gf100_ce_data[] = {
0x00000800,
};
-uint32_t gf100_ce_code[] = {
+static uint32_t gf100_ce_code[] = {
/* 0x0000: main */
0x04fe04bd,
0x3517f000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
index 972281d10f38..f0a1cf31c7ca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gt215_ce_data[] = {
+static uint32_t gt215_ce_data[] = {
/* 0x0000: ctx_object */
0x00000000,
/* 0x0004: ctx_dma */
@@ -183,7 +183,7 @@ uint32_t gt215_ce_data[] = {
0x00000800,
};
-uint32_t gt215_ce_code[] = {
+static uint32_t gt215_ce_code[] = {
/* 0x0000: main */
0x04fe04bd,
0x3517f000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 53d171729353..bd22526edb0b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -1852,7 +1852,7 @@ nvf1_chipset = {
.fb = gk104_fb_new,
.fuse = gf100_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gf119_i2c_new,
+ .i2c = gk104_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -1966,7 +1966,7 @@ nv117_chipset = {
.fb = gm107_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gf119_i2c_new,
+ .i2c = gk104_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -2000,7 +2000,7 @@ nv118_chipset = {
.fb = gm107_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gf119_i2c_new,
+ .i2c = gk104_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -2131,7 +2131,7 @@ nv12b_chipset = {
.bar = gk20a_bar_new,
.bus = gf100_bus_new,
.clk = gm20b_clk_new,
- .fb = gk20a_fb_new,
+ .fb = gm20b_fb_new,
.fuse = gm107_fuse_new,
.ibus = gk20a_ibus_new,
.imem = gk20a_instmem_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 9b638bd905ff..f2bc0b7d9b93 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -102,7 +102,7 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
if (iommu_present(&platform_bus_type)) {
tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
- if (IS_ERR(tdev->iommu.domain))
+ if (!tdev->iommu.domain)
goto error;
/*
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 79a8f71cf788..513ee6b79553 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -326,7 +326,7 @@ nvkm_udevice = {
.sclass = nvkm_udevice_child_get,
};
-int
+static int
nvkm_udevice_new(const struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index dd2953bc9264..26990d44ae75 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -153,7 +153,7 @@ nv50_disp_chan_uevent = {
.fini = nv50_disp_chan_uevent_fini,
};
-int
+static int
nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
{
struct nv50_disp_chan *chan = nv50_disp_chan(object);
@@ -163,7 +163,7 @@ nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
return 0;
}
-int
+static int
nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
struct nv50_disp_chan *chan = nv50_disp_chan(object);
@@ -173,7 +173,7 @@ nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
return 0;
}
-int
+static int
nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
struct nvkm_event **pevent)
{
@@ -189,7 +189,7 @@ nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
return -EINVAL;
}
-int
+static int
nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
{
struct nv50_disp_chan *chan = nv50_disp_chan(object);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
index 019379a3a01c..c65c9f3ff69f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
@@ -26,7 +26,7 @@
#include <nvif/class.h>
-const struct nv50_disp_mthd_list
+static const struct nv50_disp_mthd_list
g94_disp_core_mthd_sor = {
.mthd = 0x0040,
.addr = 0x000008,
@@ -43,8 +43,8 @@ g94_disp_core_chan_mthd = {
.prev = 0x000004,
.data = {
{ "Global", 1, &nv50_disp_core_mthd_base },
- { "DAC", 3, &g84_disp_core_mthd_dac },
- { "SOR", 4, &g94_disp_core_mthd_sor },
+ { "DAC", 3, &g84_disp_core_mthd_dac },
+ { "SOR", 4, &g94_disp_core_mthd_sor },
{ "PIOR", 3, &nv50_disp_core_mthd_pior },
{ "HEAD", 2, &g84_disp_core_mthd_head },
{}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c
index 6922f4007b61..e356f87fbe60 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c
@@ -59,7 +59,7 @@ gp104_disp_core_init(struct nv50_disp_dmac *chan)
return 0;
}
-const struct nv50_disp_dmac_func
+static const struct nv50_disp_dmac_func
gp104_disp_core_func = {
.init = gp104_disp_core_init,
.fini = gf119_disp_core_fini,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
index 9688970eca47..4a93ceb850ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
@@ -319,9 +319,8 @@ static const struct dp_rates {
};
void
-nvkm_dp_train(struct work_struct *w)
+nvkm_dp_train(struct nvkm_output_dp *outp)
{
- struct nvkm_output_dp *outp = container_of(w, typeof(*outp), lt.work);
struct nv50_disp *disp = nv50_disp(outp->base.disp);
const struct dp_rates *cfg = nvkm_dp_rates;
struct dp_state _dp = {
@@ -353,9 +352,6 @@ nvkm_dp_train(struct work_struct *w)
}
cfg--;
- /* disable link interrupt handling during link training */
- nvkm_notify_put(&outp->irq);
-
/* ensure sink is not in a low-power state */
if (!nvkm_rdaux(outp->aux, DPCD_SC00, &pwr, 1)) {
if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
@@ -400,9 +396,6 @@ nvkm_dp_train(struct work_struct *w)
dp_link_train_fini(dp);
- /* signal completion and enable link interrupt handling */
OUTP_DBG(&outp->base, "training complete");
atomic_set(&outp->lt.done, 1);
- wake_up(&outp->lt.wait);
- nvkm_notify_get(&outp->irq);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
index 6e10c5e0ef11..baf1dd9ff975 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
@@ -1,6 +1,6 @@
#ifndef __NVKM_DISP_DPORT_H__
#define __NVKM_DISP_DPORT_H__
-#include <core/os.h>
+struct nvkm_output_dp;
/* DPCD Receiver Capabilities */
#define DPCD_RC00_DPCD_REV 0x00000
@@ -77,5 +77,5 @@
#define DPCD_SC00_SET_POWER_D0 0x01
#define DPCD_SC00_SET_POWER_D3 0x03
-void nvkm_dp_train(struct work_struct *);
+void nvkm_dp_train(struct nvkm_output_dp *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
index 29e84b241cca..7b346ccc38b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -203,17 +203,20 @@ gf119_disp_intr_unk2_0(struct nv50_disp *disp, int head)
/* see note in nv50_disp_intr_unk20_0() */
if (outp && outp->info.type == DCB_OUTPUT_DP) {
struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
- struct nvbios_init init = {
- .subdev = subdev,
- .bios = subdev->device->bios,
- .outp = &outp->info,
- .crtc = head,
- .offset = outpdp->info.script[4],
- .execute = 1,
- };
+ if (!outpdp->lt.mst) {
+ struct nvbios_init init = {
+ .subdev = subdev,
+ .bios = subdev->device->bios,
+ .outp = &outp->info,
+ .crtc = head,
+ .offset = outpdp->info.script[4],
+ .execute = 1,
+ };
- nvbios_exec(&init);
- atomic_set(&outpdp->lt.done, 0);
+ nvkm_notify_put(&outpdp->irq);
+ nvbios_exec(&init);
+ atomic_set(&outpdp->lt.done, 0);
+ }
}
}
@@ -314,7 +317,7 @@ gf119_disp_intr_unk2_2(struct nv50_disp *disp, int head)
break;
}
- if (nvkm_output_dp_train(outp, pclk, true))
+ if (nvkm_output_dp_train(outp, pclk))
OUTP_ERR(outp, "link not trained before attach");
} else {
if (disp->func->sor.magic)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index fbb8c7dc18fd..567466f93cd5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -590,6 +590,7 @@ nv50_disp_intr_unk20_0(struct nv50_disp *disp, int head)
.execute = 1,
};
+ nvkm_notify_put(&outpdp->irq);
nvbios_exec(&init);
atomic_set(&outpdp->lt.done, 0);
}
@@ -779,7 +780,7 @@ nv50_disp_intr_unk20_2(struct nv50_disp *disp, int head)
break;
}
- if (nvkm_output_dp_train(outp, datarate / soff, true))
+ if (nvkm_output_dp_train(outp, datarate / soff))
OUTP_ERR(outp, "link not trained before attach");
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
index 3b7a9e7a1ea8..de36f73b14dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
@@ -31,7 +31,7 @@
#include <nvif/event.h>
int
-nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
+nvkm_output_dp_train(struct nvkm_output *base, u32 datarate)
{
struct nvkm_output_dp *outp = nvkm_output_dp(base);
bool retrain = true;
@@ -39,6 +39,8 @@ nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
u32 linkrate;
int ret, i;
+ mutex_lock(&outp->mutex);
+
/* check that the link is trained at a high enough rate */
ret = nvkm_rdaux(outp->aux, DPCD_LC00_LINK_BW_SET, link, 2);
if (ret) {
@@ -88,19 +90,10 @@ done:
outp->dpcd[DPCD_RC02] =
outp->base.info.dpconf.link_nr;
}
- atomic_set(&outp->lt.done, 0);
- schedule_work(&outp->lt.work);
- } else {
- nvkm_notify_get(&outp->irq);
- }
-
- if (wait) {
- if (!wait_event_timeout(outp->lt.wait,
- atomic_read(&outp->lt.done),
- msecs_to_jiffies(2000)))
- ret = -ETIMEDOUT;
+ nvkm_dp_train(outp);
}
+ mutex_unlock(&outp->mutex);
return ret;
}
@@ -118,7 +111,7 @@ nvkm_output_dp_enable(struct nvkm_output_dp *outp, bool enable)
if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dpcd,
sizeof(outp->dpcd))) {
- nvkm_output_dp_train(&outp->base, 0, true);
+ nvkm_output_dp_train(&outp->base, 0);
return;
}
}
@@ -165,10 +158,10 @@ nvkm_output_dp_irq(struct nvkm_notify *notify)
};
OUTP_DBG(&outp->base, "IRQ: %d", line->mask);
- nvkm_output_dp_train(&outp->base, 0, true);
+ nvkm_output_dp_train(&outp->base, 0);
nvkm_event_send(&disp->hpd, rep.mask, conn->index, &rep, sizeof(rep));
- return NVKM_NOTIFY_DROP;
+ return NVKM_NOTIFY_KEEP;
}
static void
@@ -177,7 +170,6 @@ nvkm_output_dp_fini(struct nvkm_output *base)
struct nvkm_output_dp *outp = nvkm_output_dp(base);
nvkm_notify_put(&outp->hpd);
nvkm_notify_put(&outp->irq);
- flush_work(&outp->lt.work);
nvkm_output_dp_enable(outp, false);
}
@@ -187,6 +179,7 @@ nvkm_output_dp_init(struct nvkm_output *base)
struct nvkm_output_dp *outp = nvkm_output_dp(base);
nvkm_notify_put(&outp->base.conn->hpd);
nvkm_output_dp_enable(outp, true);
+ nvkm_notify_get(&outp->irq);
nvkm_notify_get(&outp->hpd);
}
@@ -238,11 +231,6 @@ nvkm_output_dp_ctor(const struct nvkm_output_dp_func *func,
OUTP_DBG(&outp->base, "bios dp %02x %02x %02x %02x",
outp->version, hdr, cnt, len);
- /* link training */
- INIT_WORK(&outp->lt.work, nvkm_dp_train);
- init_waitqueue_head(&outp->lt.wait);
- atomic_set(&outp->lt.done, 0);
-
/* link maintenance */
ret = nvkm_notify_init(NULL, &i2c->event, nvkm_output_dp_irq, true,
&(struct nvkm_i2c_ntfy_req) {
@@ -257,6 +245,9 @@ nvkm_output_dp_ctor(const struct nvkm_output_dp_func *func,
return ret;
}
+ mutex_init(&outp->mutex);
+ atomic_set(&outp->lt.done, 0);
+
/* hotplug detect, replaces gpio-based mechanism with aux events */
ret = nvkm_notify_init(NULL, &i2c->event, nvkm_output_dp_hpd, true,
&(struct nvkm_i2c_ntfy_req) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
index 4e983f6d7032..3c83a561cd88 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
@@ -29,10 +29,10 @@ struct nvkm_output_dp {
bool present;
u8 dpcd[16];
+ struct mutex mutex;
struct {
- struct work_struct work;
- wait_queue_head_t wait;
atomic_t done;
+ bool mst;
} lt;
};
@@ -41,9 +41,11 @@ struct nvkm_output_dp_func {
int (*lnk_pwr)(struct nvkm_output_dp *, int nr);
int (*lnk_ctl)(struct nvkm_output_dp *, int nr, int bw, bool ef);
int (*drv_ctl)(struct nvkm_output_dp *, int ln, int vs, int pe, int pc);
+ void (*vcpi)(struct nvkm_output_dp *, int head, u8 start_slot,
+ u8 num_slots, u16 pbn, u16 aligned_pbn);
};
-int nvkm_output_dp_train(struct nvkm_output *, u32 rate, bool wait);
+int nvkm_output_dp_train(struct nvkm_output *, u32 rate);
int nvkm_output_dp_ctor(const struct nvkm_output_dp_func *, struct nvkm_disp *,
int index, struct dcb_output *, struct nvkm_i2c_aux *,
@@ -63,6 +65,7 @@ int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
struct nvkm_output **);
int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
int gf119_sor_dp_drv_ctl(struct nvkm_output_dp *, int, int, int, int);
+void gf119_sor_dp_vcpi(struct nvkm_output_dp *, int, u8, u8, u16, u16);
int gm107_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
struct nvkm_output **);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
index 2f9cecd81d04..c1158b22a721 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -66,7 +66,7 @@ nv50_disp_root_scanoutpos(NV50_DISP_MTHD_V0)
return 0;
}
-int
+static int
nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
union {
@@ -173,13 +173,56 @@ nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
return 0;
} else
if (args->v0.state != 0) {
- nvkm_output_dp_train(&outpdp->base, 0, true);
+ nvkm_output_dp_train(&outpdp->base, 0);
return 0;
}
} else
return ret;
}
break;
+ case NV50_DISP_MTHD_V1_SOR_DP_MST_LINK: {
+ struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
+ union {
+ struct nv50_disp_sor_dp_mst_link_v0 v0;
+ } *args = data;
+ int ret = -ENOSYS;
+ nvif_ioctl(object, "disp sor dp mst link size %d\n", size);
+ if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+ nvif_ioctl(object, "disp sor dp mst link vers %d state %d\n",
+ args->v0.version, args->v0.state);
+ if (outpdp->lt.mst != !!args->v0.state) {
+ outpdp->lt.mst = !!args->v0.state;
+ atomic_set(&outpdp->lt.done, 0);
+ nvkm_output_dp_train(&outpdp->base, 0);
+ }
+ return 0;
+ } else
+ return ret;
+ }
+ break;
+ case NV50_DISP_MTHD_V1_SOR_DP_MST_VCPI: {
+ struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
+ union {
+ struct nv50_disp_sor_dp_mst_vcpi_v0 v0;
+ } *args = data;
+ int ret = -ENOSYS;
+ nvif_ioctl(object, "disp sor dp mst vcpi size %d\n", size);
+ if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+ nvif_ioctl(object, "disp sor dp mst vcpi vers %d "
+ "slot %02x/%02x pbn %04x/%04x\n",
+ args->v0.version, args->v0.start_slot,
+ args->v0.num_slots, args->v0.pbn,
+ args->v0.aligned_pbn);
+ if (!outpdp->func->vcpi)
+ return -ENODEV;
+ outpdp->func->vcpi(outpdp, head, args->v0.start_slot,
+ args->v0.num_slots, args->v0.pbn,
+ args->v0.aligned_pbn);
+ return 0;
+ } else
+ return ret;
+ }
+ break;
case NV50_DISP_MTHD_V1_PIOR_PWR:
if (!func->pior.power)
return -ENODEV;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index 49bd5da194e1..6ffdaa65aa77 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -56,11 +56,13 @@ gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
clksor |= bw << 18;
dpctrl |= ((1 << nr) - 1) << 16;
+ if (outp->lt.mst)
+ dpctrl |= 0x40000000;
if (ef)
dpctrl |= 0x00004000;
nvkm_mask(device, 0x612300 + soff, 0x007c0000, clksor);
- nvkm_mask(device, 0x61c10c + loff, 0x001f4000, dpctrl);
+ nvkm_mask(device, 0x61c10c + loff, 0x401f4000, dpctrl);
return 0;
}
@@ -101,12 +103,24 @@ gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
return 0;
}
+void
+gf119_sor_dp_vcpi(struct nvkm_output_dp *outp, int head, u8 slot,
+ u8 slot_nr, u16 pbn, u16 aligned)
+{
+ struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+ const u32 hoff = head * 0x800;
+
+ nvkm_mask(device, 0x616588 + hoff, 0x00003f3f, (slot_nr << 8) | slot);
+ nvkm_mask(device, 0x61658c + hoff, 0xffffffff, (aligned << 16) | pbn);
+}
+
static const struct nvkm_output_dp_func
gf119_sor_dp_func = {
.pattern = gf119_sor_dp_pattern,
.lnk_pwr = g94_sor_dp_lnk_pwr,
.lnk_ctl = gf119_sor_dp_lnk_ctl,
.drv_ctl = gf119_sor_dp_drv_ctl,
+ .vcpi = gf119_sor_dp_vcpi,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
index 37790b2617c5..4cf8ad4d18ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
@@ -43,6 +43,7 @@ gm107_sor_dp_func = {
.lnk_pwr = g94_sor_dp_lnk_pwr,
.lnk_ctl = gf119_sor_dp_lnk_ctl,
.drv_ctl = gf119_sor_dp_drv_ctl,
+ .vcpi = gf119_sor_dp_vcpi,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
index c44fa7ea672a..81b788fa61be 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
@@ -120,6 +120,7 @@ gm200_sor_dp_func = {
.lnk_pwr = gm200_sor_dp_lnk_pwr,
.lnk_ctl = gf119_sor_dp_lnk_ctl,
.drv_ctl = gm200_sor_dp_drv_ctl,
+ .vcpi = gf119_sor_dp_vcpi,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
index aeb3387a3fb0..15a992b3580a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
@@ -129,7 +129,7 @@ g84_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
}
-int
+static int
g84_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
@@ -170,7 +170,7 @@ g84_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
}
-int
+static int
g84_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
struct nvkm_object *object)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
index cbc67f262322..12d964260a29 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
@@ -60,6 +60,7 @@ gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
struct nvkm_gpuobj *inst = chan->base.inst;
int ret = 0;
+ mutex_lock(&subdev->mutex);
nvkm_wr32(device, 0x002634, chan->base.chid);
if (nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x002634) == chan->base.chid)
@@ -67,10 +68,12 @@ gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
) < 0) {
nvkm_error(subdev, "channel %d [%s] kick timeout\n",
chan->base.chid, chan->base.object.client->name);
- ret = -EBUSY;
- if (suspend)
- return ret;
+ ret = -ETIMEDOUT;
}
+ mutex_unlock(&subdev->mutex);
+
+ if (ret && suspend)
+ return ret;
if (offset) {
nvkm_kmap(inst);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index ed4351032ed6..a2df4f3e7763 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -40,7 +40,9 @@ gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_client *client = chan->base.object.client;
+ int ret = 0;
+ mutex_lock(&subdev->mutex);
nvkm_wr32(device, 0x002634, chan->base.chid);
if (nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
@@ -48,10 +50,10 @@ gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
) < 0) {
nvkm_error(subdev, "channel %d [%s] kick timeout\n",
chan->base.chid, client->name);
- return -EBUSY;
+ ret = -ETIMEDOUT;
}
-
- return 0;
+ mutex_unlock(&subdev->mutex);
+ return ret;
}
static u32
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index c925ade5880e..74a64e3fd59a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -218,7 +218,7 @@ gf117_grctx_generate_attrib(struct gf100_grctx *info)
}
}
-void
+static void
gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index 6d3c5011e18c..4c4b5ab6e46d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -933,7 +933,7 @@ gm107_grctx_generate_attrib(struct gf100_grctx *info)
}
}
-void
+static void
gm107_grctx_generate_tpcid(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
index 1e13278cf306..c8bb9191f9a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
@@ -106,6 +106,7 @@
#define CP_SEEK_2 0x00c800ff
#include "ctxnv40.h"
+#include "nv50.h"
#include <subdev/fb.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
index 8cb240b65ec2..12a703fe355d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gf100_grgpc_data[] = {
+static uint32_t gf100_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x00000064,
/* 0x0004: gpc_mmio_list_tail */
@@ -36,7 +36,7 @@ uint32_t gf100_grgpc_data[] = {
0x00000000,
};
-uint32_t gf100_grgpc_code[] = {
+static uint32_t gf100_grgpc_code[] = {
0x03a10ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
index 550d6ba0933b..ffbfc51200f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gf117_grgpc_data[] = {
+static uint32_t gf117_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
/* 0x0004: gpc_mmio_list_tail */
@@ -40,7 +40,7 @@ uint32_t gf117_grgpc_data[] = {
0x00000000,
};
-uint32_t gf117_grgpc_code[] = {
+static uint32_t gf117_grgpc_code[] = {
0x03a10ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
index 271b59d365e5..357f662de571 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gk104_grgpc_data[] = {
+static uint32_t gk104_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
/* 0x0004: gpc_mmio_list_tail */
@@ -40,7 +40,7 @@ uint32_t gk104_grgpc_data[] = {
0x00000000,
};
-uint32_t gk104_grgpc_code[] = {
+static uint32_t gk104_grgpc_code[] = {
0x03a10ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
index 73b4a32c5d29..4ffc8212a85c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gk110_grgpc_data[] = {
+static uint32_t gk110_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
/* 0x0004: gpc_mmio_list_tail */
@@ -40,7 +40,7 @@ uint32_t gk110_grgpc_data[] = {
0x00000000,
};
-uint32_t gk110_grgpc_code[] = {
+static uint32_t gk110_grgpc_code[] = {
0x03a10ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
index 018169818317..09196206c9bc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
@@ -1,4 +1,4 @@
-uint32_t gk208_grgpc_data[] = {
+static uint32_t gk208_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
/* 0x0004: gpc_mmio_list_tail */
@@ -40,7 +40,7 @@ uint32_t gk208_grgpc_data[] = {
0x00000000,
};
-uint32_t gk208_grgpc_code[] = {
+static uint32_t gk208_grgpc_code[] = {
0x03140ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
index eca007f03fa9..6d7d004363d9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
@@ -1,4 +1,4 @@
-uint32_t gm107_grgpc_data[] = {
+static uint32_t gm107_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
/* 0x0004: gpc_mmio_list_tail */
@@ -40,7 +40,7 @@ uint32_t gm107_grgpc_data[] = {
0x00000000,
};
-uint32_t gm107_grgpc_code[] = {
+static uint32_t gm107_grgpc_code[] = {
0x03410ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
index 8015b40a61d6..7538404b8b13 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gf100_grhub_data[] = {
+static uint32_t gf100_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t gf100_grhub_data[] = {
0x0417e91c,
};
-uint32_t gf100_grhub_code[] = {
+static uint32_t gf100_grhub_code[] = {
0x039b0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
index 2af90ec6852a..ce000a47ec6d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gf117_grhub_data[] = {
+static uint32_t gf117_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t gf117_grhub_data[] = {
0x0417e91c,
};
-uint32_t gf117_grhub_code[] = {
+static uint32_t gf117_grhub_code[] = {
0x039b0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
index e8b8c1c94700..1f26cb6a233c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gk104_grhub_data[] = {
+static uint32_t gk104_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t gk104_grhub_data[] = {
0x0417e91c,
};
-uint32_t gk104_grhub_code[] = {
+static uint32_t gk104_grhub_code[] = {
0x039b0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
index f4ed2fb6f714..70436d93efe3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gk110_grhub_data[] = {
+static uint32_t gk110_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t gk110_grhub_data[] = {
0x0417e91c,
};
-uint32_t gk110_grhub_code[] = {
+static uint32_t gk110_grhub_code[] = {
0x039b0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
index ed488973c117..e0933a07426a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
@@ -1,4 +1,4 @@
-uint32_t gk208_grhub_data[] = {
+static uint32_t gk208_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t gk208_grhub_data[] = {
0x0417e91c,
};
-uint32_t gk208_grhub_code[] = {
+static uint32_t gk208_grhub_code[] = {
0x030e0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
index 5c9051839557..9b432823bcbe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
@@ -1,4 +1,4 @@
-uint32_t gm107_grhub_data[] = {
+static uint32_t gm107_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t gm107_grhub_data[] = {
0x0417e91c,
};
-uint32_t gm107_grhub_code[] = {
+static uint32_t gm107_grhub_code[] = {
0x030e0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 157919c788e6..60a1b5c8214b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1384,7 +1384,7 @@ gf100_gr_intr(struct nvkm_gr *base)
nvkm_fifo_chan_put(device->fifo, flags, &chan);
}
-void
+static void
gf100_gr_init_fw(struct gf100_gr *gr, u32 fuc_base,
struct gf100_gr_fuc *code, struct gf100_gr_fuc *data)
{
@@ -1701,7 +1701,7 @@ gf100_gr_oneinit(struct nvkm_gr *base)
return 0;
}
-int
+static int
gf100_gr_init_(struct nvkm_gr *base)
{
struct gf100_gr *gr = gf100_gr(base);
@@ -1756,6 +1756,50 @@ gf100_gr_ = {
};
int
+gf100_gr_ctor_fw_legacy(struct gf100_gr *gr, const char *fwname,
+ struct gf100_gr_fuc *fuc, int ret)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const struct firmware *fw;
+ char f[32];
+
+ /* see if this firmware has a legacy path */
+ if (!strcmp(fwname, "fecs_inst"))
+ fwname = "fuc409c";
+ else if (!strcmp(fwname, "fecs_data"))
+ fwname = "fuc409d";
+ else if (!strcmp(fwname, "gpccs_inst"))
+ fwname = "fuc41ac";
+ else if (!strcmp(fwname, "gpccs_data"))
+ fwname = "fuc41ad";
+ else {
+ /* nope, let's just return the error we got */
+ nvkm_error(subdev, "failed to load %s\n", fwname);
+ return ret;
+ }
+
+ /* yes, try to load from the legacy path */
+ nvkm_debug(subdev, "%s: falling back to legacy path\n", fwname);
+
+ snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname);
+ ret = request_firmware(&fw, f, device->dev);
+ if (ret) {
+ snprintf(f, sizeof(f), "nouveau/%s", fwname);
+ ret = request_firmware(&fw, f, device->dev);
+ if (ret) {
+ nvkm_error(subdev, "failed to load %s\n", fwname);
+ return ret;
+ }
+ }
+
+ fuc->size = fw->size;
+ fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
+ release_firmware(fw);
+ return (fuc->data != NULL) ? 0 : -ENOMEM;
+}
+
+int
gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
struct gf100_gr_fuc *fuc)
{
@@ -1765,10 +1809,8 @@ gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
int ret;
ret = nvkm_firmware_get(device, fwname, &fw);
- if (ret) {
- nvkm_error(subdev, "failed to load %s\n", fwname);
- return ret;
- }
+ if (ret)
+ return gf100_gr_ctor_fw_legacy(gr, fwname, fuc, ret);
fuc->size = fw->size;
fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index 70335f65c51e..0124e468086e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -102,7 +102,7 @@ gf117_gr_pack_mmio[] = {
#include "fuc/hubgf117.fuc3.h"
-struct gf100_gr_ucode
+static struct gf100_gr_ucode
gf117_gr_fecs_ucode = {
.code.data = gf117_grhub_code,
.code.size = sizeof(gf117_grhub_code),
@@ -112,7 +112,7 @@ gf117_gr_fecs_ucode = {
#include "fuc/gpcgf117.fuc3.h"
-struct gf100_gr_ucode
+static struct gf100_gr_ucode
gf117_gr_gpccs_ucode = {
.code.data = gf117_grgpc_code,
.code.size = sizeof(gf117_grgpc_code),
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index 45f965f608a7..2c67fac576d1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -308,7 +308,7 @@ gm107_gr_init_bios(struct gf100_gr *gr)
}
}
-int
+static int
gm107_gr_init(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index 8616636ad7b4..dde89a4a0f5b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -71,7 +71,7 @@ nvkm_perfdom_find(struct nvkm_pm *pm, int di)
return NULL;
}
-struct nvkm_perfsig *
+static struct nvkm_perfsig *
nvkm_perfsig_find(struct nvkm_pm *pm, u8 di, u8 si, struct nvkm_perfdom **pdom)
{
struct nvkm_perfdom *dom = *pdom;
@@ -699,7 +699,7 @@ nvkm_pm_oclass_get(struct nvkm_oclass *oclass, int index,
return 1;
}
-int
+static int
nvkm_perfsrc_new(struct nvkm_pm *pm, struct nvkm_perfsig *sig,
const struct nvkm_specsrc *spec)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
index d2901e9a7808..fe2532ee4145 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
@@ -102,7 +102,7 @@ gf100_pm_gpc[] = {
{}
};
-const struct nvkm_specdom
+static const struct nvkm_specdom
gf100_pm_part[] = {
{ 0xe0, (const struct nvkm_specsig[]) {
{ 0x0f, "part00_pbfb_00", gf100_pbfb_sources },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
index eca62221f299..4b57f8814560 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
@@ -1,4 +1,4 @@
-uint32_t g98_sec_data[] = {
+static uint32_t g98_sec_data[] = {
/* 0x0000: ctx_dma */
/* 0x0000: ctx_dma_query */
0x00000000,
@@ -150,7 +150,7 @@ uint32_t g98_sec_data[] = {
0x00000000,
};
-uint32_t g98_sec_code[] = {
+static uint32_t g98_sec_code[] = {
0x17f004bd,
0x0010fe35,
0xf10004fe,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
index 80fed7e78dcb..e2905815049b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
@@ -25,7 +25,7 @@
#include <subdev/bios/bit.h>
#include <subdev/bios/fan.h>
-u16
+static u16
nvbios_fan_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
struct bit_entry bit_P;
@@ -52,7 +52,7 @@ nvbios_fan_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
return 0x0000;
}
-u16
+static u16
nvbios_fan_entry(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
u8 *cnt, u8 *len)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
index 212800ecdce9..7d1d3c6b4b72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
@@ -12,6 +12,7 @@ struct nvbios_source {
bool rw;
bool ignore_checksum;
bool no_pcir;
+ bool require_checksum;
};
int nvbios_extend(struct nvkm_bios *, u32 length);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
index b2557e87afdd..7deb81b6dbac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
@@ -86,9 +86,12 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
nvbios_checksum(&bios->data[image.base], image.size)) {
nvkm_debug(subdev, "%08x: checksum failed\n",
image.base);
- if (mthd->func->rw)
+ if (!mthd->func->require_checksum) {
+ if (mthd->func->rw)
+ score += 1;
score += 1;
- score += 1;
+ } else
+ return 0;
} else {
score += 3;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
index 8fecb5ff22a0..06572f8ce914 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
@@ -99,6 +99,7 @@ nvbios_acpi_fast = {
.init = acpi_init,
.read = acpi_read_fast,
.rw = false,
+ .require_checksum = true,
};
const struct nvbios_source
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
index 056702ef69aa..96e0941c8edd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
@@ -180,7 +180,7 @@ gt215_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
return 0;
}
-int
+static int
gt215_clk_info(struct nvkm_clk *base, int idx, u32 khz,
struct gt215_clk_info *info)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index edcc157e6ac8..ef47d57fcb87 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -24,6 +24,7 @@ nvkm-y += nvkm/subdev/fb/gk104.o
nvkm-y += nvkm/subdev/fb/gk20a.o
nvkm-y += nvkm/subdev/fb/gm107.o
nvkm-y += nvkm/subdev/fb/gm200.o
+nvkm-y += nvkm/subdev/fb/gm20b.o
nvkm-y += nvkm/subdev/fb/gp100.o
nvkm-y += nvkm/subdev/fb/gp104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
index 449f431644b3..412eb89834e8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
@@ -16,4 +16,8 @@ void gf100_fb_init(struct nvkm_fb *);
void gf100_fb_intr(struct nvkm_fb *);
void gp100_fb_init(struct nvkm_fb *);
+
+void gm200_fb_init_page(struct nvkm_fb *fb);
+void gm200_fb_init(struct nvkm_fb *base);
+
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
index f815fe2bbf08..5d34d6136616 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -20,27 +20,21 @@
* DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
+#include "gf100.h"
-#include <core/memory.h>
-
-static void
-gk20a_fb_init(struct nvkm_fb *fb)
-{
- struct nvkm_device *device = fb->subdev.device;
- nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->mmu_wr) >> 8);
- nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->mmu_rd) >> 8);
-}
-
+/* GK20A's FB is similar to GF100's, but without the ability to allocate VRAM */
static const struct nvkm_fb_func
gk20a_fb = {
+ .dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
- .init = gk20a_fb_init,
+ .init = gf100_fb_init,
.init_page = gf100_fb_init_page,
+ .intr = gf100_fb_intr,
.memtype_valid = gf100_fb_memtype_valid,
};
int
gk20a_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&gk20a_fb, device, index, pfb);
+ return gf100_fb_new_(&gk20a_fb, device, index, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
index 62f653240be3..fe5886013ac0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
@@ -44,7 +44,7 @@ gm200_fb_init_page(struct nvkm_fb *fb)
}
}
-static void
+void
gm200_fb_init(struct nvkm_fb *base)
{
struct gf100_fb *fb = gf100_fb(base);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c
new file mode 100644
index 000000000000..b87c233bcd6d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "gf100.h"
+
+/* GM20B's FB is similar to GM200, but without the ability to allocate VRAM */
+static const struct nvkm_fb_func
+gm20b_fb = {
+ .dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
+ .init = gm200_fb_init,
+ .init_page = gm200_fb_init_page,
+ .intr = gf100_fb_intr,
+ .memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gm20b_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return gf100_fb_new_(&gm20b_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
index 772425ca5a9e..093223d1df4f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
@@ -420,8 +420,6 @@ gf100_ram_tidy(struct nvkm_ram *base)
ram_exec(&ram->fuc, false);
}
-extern const u8 gf100_pte_storage_type_map[256];
-
void
gf100_ram_put(struct nvkm_ram *ram, struct nvkm_mem **pmem)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
index d15ea886df27..f10664372161 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
@@ -95,7 +95,7 @@ struct gt215_ram {
struct gt215_ltrain ltrain;
};
-void
+static void
gt215_link_train_calc(u32 *vals, struct gt215_ltrain *train)
{
int i, lo, hi;
@@ -149,7 +149,7 @@ gt215_link_train_calc(u32 *vals, struct gt215_ltrain *train)
/*
* Link training for (at least) DDR3
*/
-int
+static int
gt215_link_train(struct gt215_ram *ram)
{
struct gt215_ltrain *train = &ram->ltrain;
@@ -267,7 +267,7 @@ out:
return ret;
}
-int
+static int
gt215_link_train_init(struct gt215_ram *ram)
{
static const u32 pattern[16] = {
@@ -333,7 +333,7 @@ gt215_link_train_init(struct gt215_ram *ram)
return 0;
}
-void
+static void
gt215_link_train_fini(struct gt215_ram *ram)
{
if (ram->ltrain.mem)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
index b9f1ffdfc602..4dcd8742f2da 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
@@ -23,6 +23,7 @@
* Ben Skeggs
*/
#include "priv.h"
+#include "ram.h"
struct ramxlat {
int id;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
index 26900333b1d6..eca8a445eab3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
@@ -23,6 +23,7 @@
* Roy Spliet <rspliet@eclipso.eu>
*/
#include "priv.h"
+#include "ram.h"
struct ramxlat {
int id;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
index 3f45afd17d5a..2ead515b8530 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
@@ -37,7 +37,7 @@ gk104_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
nvkm_wr32(device, 0x00dc80, intr1);
}
-void
+static void
gk104_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
{
struct nvkm_device *device = gpio->subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index f0851d57df2f..01d5c5a56e2e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -74,7 +74,7 @@ nvkm_i2c_aux_i2c_func(struct i2c_adapter *adap)
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
-const struct i2c_algorithm
+static const struct i2c_algorithm
nvkm_i2c_aux_i2c_algo = {
.master_xfer = nvkm_i2c_aux_i2c_xfer,
.functionality = nvkm_i2c_aux_i2c_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
index 954f5b76bfcf..b80236a4eeac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
@@ -79,7 +79,7 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
struct g94_i2c_aux *aux = g94_i2c_aux(obj);
struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
const u32 base = aux->ch * 0x50;
- u32 ctrl, stat, timeout, retries;
+ u32 ctrl, stat, timeout, retries = 0;
u32 xbuf[4] = {};
int ret, i;
@@ -111,7 +111,7 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
nvkm_wr32(device, 0x00e4e0 + base, addr);
/* (maybe) retry transaction a number of times on failure... */
- for (retries = 0; !ret && retries < 32; retries++) {
+ do {
/* reset, and delay a while if this is a retry */
nvkm_wr32(device, 0x00e4e4 + base, 0x80000000 | ctrl);
nvkm_wr32(device, 0x00e4e4 + base, 0x00000000 | ctrl);
@@ -131,20 +131,20 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
goto out;
}
} while (ctrl & 0x00010000);
- ret = 1;
+ ret = 0;
/* read status, and check if transaction completed ok */
stat = nvkm_mask(device, 0x00e4e8 + base, 0, 0);
if ((stat & 0x000f0000) == 0x00080000 ||
(stat & 0x000f0000) == 0x00020000)
- ret = retry ? 0 : 1;
+ ret = 1;
if ((stat & 0x00000100))
ret = -ETIMEDOUT;
if ((stat & 0x00000e00))
ret = -EIO;
AUX_TRACE(&aux->base, "%02d %08x %08x", retries, ctrl, stat);
- }
+ } while (ret && retry && retries++ < 32);
if (type & 1) {
for (i = 0; i < 16; i += 4) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
index 61d729b82c69..ed458c7f056b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
@@ -79,7 +79,7 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
struct gm200_i2c_aux *aux = gm200_i2c_aux(obj);
struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
const u32 base = aux->ch * 0x50;
- u32 ctrl, stat, timeout, retries;
+ u32 ctrl, stat, timeout, retries = 0;
u32 xbuf[4] = {};
int ret, i;
@@ -111,7 +111,7 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
nvkm_wr32(device, 0x00d950 + base, addr);
/* (maybe) retry transaction a number of times on failure... */
- for (retries = 0; !ret && retries < 32; retries++) {
+ do {
/* reset, and delay a while if this is a retry */
nvkm_wr32(device, 0x00d954 + base, 0x80000000 | ctrl);
nvkm_wr32(device, 0x00d954 + base, 0x00000000 | ctrl);
@@ -131,20 +131,20 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
goto out;
}
} while (ctrl & 0x00010000);
- ret = 1;
+ ret = 0;
/* read status, and check if transaction completed ok */
stat = nvkm_mask(device, 0x00d958 + base, 0, 0);
if ((stat & 0x000f0000) == 0x00080000 ||
(stat & 0x000f0000) == 0x00020000)
- ret = retry ? 0 : 1;
+ ret = 1;
if ((stat & 0x00000100))
ret = -ETIMEDOUT;
if ((stat & 0x00000e00))
ret = -EIO;
AUX_TRACE(&aux->base, "%02d %08x %08x", retries, ctrl, stat);
- }
+ } while (ret && retry && retries++ < 32);
if (type & 1) {
for (i = 0; i < 16; i += 4) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
index 658355fc9354..f0af2a381eea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
@@ -288,7 +288,8 @@ nvkm_iccsense_init(struct nvkm_subdev *subdev)
return 0;
}
-struct nvkm_subdev_func iccsense_func = {
+static const struct nvkm_subdev_func
+iccsense_func = {
.oneinit = nvkm_iccsense_oneinit,
.init = nvkm_iccsense_init,
.dtor = nvkm_iccsense_dtor,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index 8ed8f65ff664..10c987a654ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -104,7 +104,7 @@ nvkm_instobj_dtor(struct nvkm_memory *memory)
return iobj;
}
-const struct nvkm_memory_func
+static const struct nvkm_memory_func
nvkm_instobj_func = {
.dtor = nvkm_instobj_dtor,
.target = nvkm_instobj_target,
@@ -156,7 +156,7 @@ nvkm_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
return nvkm_wo32(iobj->parent, offset, data);
}
-const struct nvkm_memory_func
+static const struct nvkm_memory_func
nvkm_instobj_func_slow = {
.dtor = nvkm_instobj_dtor,
.target = nvkm_instobj_target,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
index c3d66ef5dc12..430a61c3df44 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
@@ -34,7 +34,7 @@ g84_mc_reset[] = {
{}
};
-const struct nvkm_mc_map
+static const struct nvkm_mc_map
g84_mc_intr[] = {
{ 0x04000000, NVKM_ENGINE_DISP },
{ 0x00020000, NVKM_ENGINE_VP },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
index 21b65ee254e4..e3e2f5e83815 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
@@ -250,6 +250,10 @@ nvkm_mxm_new_(struct nvkm_device *device, int index, struct nvkm_mxm **pmxm)
}
nvkm_info(&mxm->subdev, "BIOS version %d.%d\n", ver >> 4, ver & 0x0f);
+ nvkm_debug(&mxm->subdev, "module flags: %02x\n",
+ nvbios_rd08(bios, data + 0x01));
+ nvkm_debug(&mxm->subdev, "config flags: %02x\n",
+ nvbios_rd08(bios, data + 0x02));
if (mxm_shadow(mxm, ver)) {
nvkm_warn(&mxm->subdev, "failed to locate valid SIS\n");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
index e2faccffee6f..0bcf0b307a61 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gf100_pmu_data[] = {
+static uint32_t gf100_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
0x00000000,
@@ -916,7 +916,7 @@ uint32_t gf100_pmu_data[] = {
0x00000000,
};
-uint32_t gf100_pmu_code[] = {
+static uint32_t gf100_pmu_code[] = {
0x03920ef5,
/* 0x0004: rd32 */
0x07a007f1,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
index 2d5bdc539697..fe8905666c67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
@@ -1,4 +1,4 @@
-uint32_t gf119_pmu_data[] = {
+static uint32_t gf119_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
0x00000000,
@@ -915,7 +915,7 @@ uint32_t gf119_pmu_data[] = {
0x00000000,
};
-uint32_t gf119_pmu_code[] = {
+static uint32_t gf119_pmu_code[] = {
0x03410ef5,
/* 0x0004: rd32 */
0x07a007f1,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
index 3c731ff12871..9cf4e6fc724e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
@@ -1,4 +1,4 @@
-uint32_t gk208_pmu_data[] = {
+static uint32_t gk208_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
0x00000000,
@@ -915,7 +915,7 @@ uint32_t gk208_pmu_data[] = {
0x00000000,
};
-uint32_t gk208_pmu_code[] = {
+static uint32_t gk208_pmu_code[] = {
0x02f90ef5,
/* 0x0004: rd32 */
0xf607a040,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
index e83341815ec6..5d692425b190 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gt215_pmu_data[] = {
+static uint32_t gt215_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
0x00000000,
@@ -916,7 +916,7 @@ uint32_t gt215_pmu_data[] = {
0x00000000,
};
-uint32_t gt215_pmu_code[] = {
+static uint32_t gt215_pmu_code[] = {
0x03920ef5,
/* 0x0004: rd32 */
0x07a007f1,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
index 8ba7fa4ca75b..dcf9eaf274aa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
@@ -24,17 +24,8 @@
#include "priv.h"
#include "fuc/gt215.fuc3.h"
-static void
-gt215_pmu_reset(struct nvkm_pmu *pmu)
-{
- struct nvkm_device *device = pmu->subdev.device;
- nvkm_mask(device, 0x022210, 0x00000001, 0x00000000);
- nvkm_mask(device, 0x022210, 0x00000001, 0x00000001);
-}
-
static const struct nvkm_pmu_func
gt215_pmu = {
- .reset = gt215_pmu_reset,
.code.data = gt215_pmu_code,
.code.size = sizeof(gt215_pmu_code),
.data.data = gt215_pmu_data,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index f38c88fae3d6..73b811ccc2d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -8,8 +8,6 @@ int nvkm_pmu_new_(const struct nvkm_pmu_func *, struct nvkm_device *,
int index, struct nvkm_pmu **);
struct nvkm_pmu_func {
- void (*reset)(struct nvkm_pmu *);
-
struct {
u32 *data;
u32 size;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
index f1e2dc914366..ec48e4ace37a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
@@ -1364,7 +1364,7 @@ gm200_secboot_init(struct nvkm_secboot *sb)
return 0;
}
-int
+static int
gm200_secboot_fini(struct nvkm_secboot *sb, bool suspend)
{
struct gm200_secboot *gsb = gm200_secboot(sb);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
index b2c5d1166a13..1c744e029454 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
@@ -35,7 +35,7 @@ struct gk104_volt {
struct nvbios_volt bios;
};
-int
+static int
gk104_volt_get(struct nvkm_volt *base)
{
struct nvbios_volt *bios = &gk104_volt(base)->bios;
@@ -48,7 +48,7 @@ gk104_volt_get(struct nvkm_volt *base)
return bios->base + bios->pwm_range * duty / div;
}
-int
+static int
gk104_volt_set(struct nvkm_volt *base, u32 uv)
{
struct nvbios_volt *bios = &gk104_volt(base)->bios;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
index 74db4d28930f..2925b9cae681 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
@@ -25,7 +25,7 @@
#include <core/tegra.h>
-const struct cvb_coef gm20b_cvb_coef[] = {
+static const struct cvb_coef gm20b_cvb_coef[] = {
/* KHz, c0, c1, c2 */
/* 76800 */ { 1786666, -85625, 1632 },
/* 153600 */ { 1846729, -87525, 1632 },
@@ -58,7 +58,7 @@ static const struct cvb_coef gm20b_na_cvb_coef[] = {
/* 998400 */ { 1316991, 8144, -940, 808, -21583, 226 },
};
-const u32 speedo_to_vmin[] = {
+static const u32 speedo_to_vmin[] = {
/* 0, 1, 2, 3, 4, */
950000, 840000, 818750, 840000, 810000,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
index d2bac1d77819..443c031b966b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
@@ -25,6 +25,7 @@
#include <subdev/bios.h>
#include <subdev/bios/gpio.h>
#include <subdev/gpio.h>
+#include "priv.h"
static const u8 tags[] = {
DCB_GPIO_VID0, DCB_GPIO_VID1, DCB_GPIO_VID2, DCB_GPIO_VID3,
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
index 3485d1ecd655..aaa8a58390f1 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
@@ -24,23 +24,24 @@ struct panel_drv_data {
struct device *dev;
- struct omap_video_timings timings;
+ struct videomode vm;
bool invert_polarity;
};
-static const struct omap_video_timings tvc_pal_timings = {
- .x_res = 720,
- .y_res = 574,
+static const struct videomode tvc_pal_vm = {
+ .hactive = 720,
+ .vactive = 574,
.pixelclock = 13500000,
- .hsw = 64,
- .hfp = 12,
- .hbp = 68,
- .vsw = 5,
- .vfp = 5,
- .vbp = 41,
-
- .interlace = true,
+ .hsync_len = 64,
+ .hfront_porch = 12,
+ .hback_porch = 68,
+ .vsync_len = 5,
+ .vfront_porch = 5,
+ .vback_porch = 41,
+
+ .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW |
+ DISPLAY_FLAGS_VSYNC_LOW,
};
static const struct of_device_id tvc_of_match[];
@@ -92,7 +93,7 @@ static int tvc_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.atv->set_timings(in, &ddata->timings);
+ in->ops.atv->set_timings(in, &ddata->vm);
if (!ddata->dev->of_node) {
in->ops.atv->set_type(in, OMAP_DSS_VENC_TYPE_COMPOSITE);
@@ -126,32 +127,32 @@ static void tvc_disable(struct omap_dss_device *dssdev)
}
static void tvc_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.atv->set_timings(in, timings);
+ in->ops.atv->set_timings(in, vm);
}
static void tvc_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int tvc_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.atv->check_timings(in, timings);
+ return in->ops.atv->check_timings(in, vm);
}
static u32 tvc_get_wss(struct omap_dss_device *dssdev)
@@ -253,14 +254,14 @@ static int tvc_probe(struct platform_device *pdev)
return -ENODEV;
}
- ddata->timings = tvc_pal_timings;
+ ddata->vm = tvc_pal_vm;
dssdev = &ddata->dssdev;
dssdev->driver = &tvc_driver;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_VENC;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = tvc_pal_timings;
+ dssdev->panel.vm = tvc_pal_vm;
r = omapdss_register_display(dssdev);
if (r) {
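
Annotation: the omapdrm connector and panel drivers in this series all follow the same mechanical conversion — the omapdss-private struct omap_video_timings is replaced by the generic struct videomode, with geometry fields renamed (x_res/y_res to hactive/vactive, hsw/hfp/hbp to hsync_len/hfront_porch/hback_porch, vsw/vfp/vbp to vsync_len/vfront_porch/vback_porch) and the separate polarity/edge enums folded into the flags bitmask. A minimal sketch of the mapping; the values are copied from the tvc_pal_vm hunk above, and the variable name example_pal_vm is illustrative only:

        /* Old omap_video_timings fields -> new struct videomode fields:
         *   .x_res/.y_res        -> .hactive/.vactive
         *   .hsw/.hfp/.hbp       -> .hsync_len/.hfront_porch/.hback_porch
         *   .vsw/.vfp/.vbp       -> .vsync_len/.vfront_porch/.vback_porch
         *   .interlace = true    -> .flags |= DISPLAY_FLAGS_INTERLACED
         *   sync levels/edges    -> DISPLAY_FLAGS_* bits
         */
        #include <video/videomode.h>
        #include <video/display_timing.h>

        static const struct videomode example_pal_vm = {
                .hactive = 720, .vactive = 574,
                .pixelclock = 13500000,
                .hsync_len = 64, .hfront_porch = 12, .hback_porch = 68,
                .vsync_len = 5, .vfront_porch = 5, .vback_porch = 41,
                .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW |
                         DISPLAY_FLAGS_VSYNC_LOW,
        };
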
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
index 684b7aeda411..d6875d9fcefa 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
@@ -19,32 +19,30 @@
#include "../dss/omapdss.h"
-static const struct omap_video_timings dvic_default_timings = {
- .x_res = 640,
- .y_res = 480,
+static const struct videomode dvic_default_vm = {
+ .hactive = 640,
+ .vactive = 480,
.pixelclock = 23500000,
- .hfp = 48,
- .hsw = 32,
- .hbp = 80,
+ .hfront_porch = 48,
+ .hsync_len = 32,
+ .hback_porch = 80,
- .vfp = 3,
- .vsw = 4,
- .vbp = 7,
+ .vfront_porch = 3,
+ .vsync_len = 4,
+ .vback_porch = 7,
- .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .flags = DISPLAY_FLAGS_HSYNC_HIGH | DISPLAY_FLAGS_VSYNC_HIGH |
+ DISPLAY_FLAGS_SYNC_NEGEDGE | DISPLAY_FLAGS_DE_HIGH |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
};
struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
- struct omap_video_timings timings;
+ struct videomode vm;
struct i2c_adapter *i2c_adapter;
};
@@ -90,7 +88,7 @@ static int dvic_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.dvi->set_timings(in, &ddata->timings);
+ in->ops.dvi->set_timings(in, &ddata->vm);
r = in->ops.dvi->enable(in);
if (r)
@@ -115,32 +113,32 @@ static void dvic_disable(struct omap_dss_device *dssdev)
}
static void dvic_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dvi->set_timings(in, timings);
+ in->ops.dvi->set_timings(in, vm);
}
static void dvic_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int dvic_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dvi->check_timings(in, timings);
+ return in->ops.dvi->check_timings(in, vm);
}
static int dvic_ddc_read(struct i2c_adapter *adapter,
@@ -287,14 +285,14 @@ static int dvic_probe(struct platform_device *pdev)
if (r)
return r;
- ddata->timings = dvic_default_timings;
+ ddata->vm = dvic_default_vm;
dssdev = &ddata->dssdev;
dssdev->driver = &dvic_driver;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_DVI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = dvic_default_timings;
+ dssdev->panel.vm = dvic_default_vm;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
index 7bdf83af9797..1ef130641bae 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
@@ -21,21 +21,18 @@
#include "../dss/omapdss.h"
-static const struct omap_video_timings hdmic_default_timings = {
- .x_res = 640,
- .y_res = 480,
+static const struct videomode hdmic_default_vm = {
+ .hactive = 640,
+ .vactive = 480,
.pixelclock = 25175000,
- .hsw = 96,
- .hfp = 16,
- .hbp = 48,
- .vsw = 2,
- .vfp = 11,
- .vbp = 31,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
-
- .interlace = false,
+ .hsync_len = 96,
+ .hfront_porch = 16,
+ .hback_porch = 48,
+ .vsync_len = 2,
+ .vfront_porch = 11,
+ .vback_porch = 31,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
};
struct panel_drv_data {
@@ -44,7 +41,7 @@ struct panel_drv_data {
struct device *dev;
- struct omap_video_timings timings;
+ struct videomode vm;
int hpd_gpio;
};
@@ -96,7 +93,7 @@ static int hdmic_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.hdmi->set_timings(in, &ddata->timings);
+ in->ops.hdmi->set_timings(in, &ddata->vm);
r = in->ops.hdmi->enable(in);
if (r)
@@ -123,32 +120,32 @@ static void hdmic_disable(struct omap_dss_device *dssdev)
}
static void hdmic_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.hdmi->set_timings(in, timings);
+ in->ops.hdmi->set_timings(in, vm);
}
static void hdmic_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int hdmic_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.hdmi->check_timings(in, timings);
+ return in->ops.hdmi->check_timings(in, vm);
}
static int hdmic_read_edid(struct omap_dss_device *dssdev,
@@ -259,14 +256,14 @@ static int hdmic_probe(struct platform_device *pdev)
goto err_reg;
}
- ddata->timings = hdmic_default_timings;
+ ddata->vm = hdmic_default_vm;
dssdev = &ddata->dssdev;
dssdev->driver = &hdmic_driver;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = hdmic_default_timings;
+ dssdev->panel.vm = hdmic_default_vm;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
index fe4e7ec3bab0..f7a5731492d0 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
@@ -27,7 +27,7 @@ struct panel_drv_data {
struct gpio_desc *enable_gpio;
- struct omap_video_timings timings;
+ struct videomode vm;
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
@@ -90,7 +90,7 @@ static int opa362_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.atv->set_timings(in, &ddata->timings);
+ in->ops.atv->set_timings(in, &ddata->vm);
r = in->ops.atv->enable(in);
if (r)
@@ -123,38 +123,38 @@ static void opa362_disable(struct omap_dss_device *dssdev)
}
static void opa362_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
dev_dbg(dssdev->dev, "set_timings\n");
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.atv->set_timings(in, timings);
+ in->ops.atv->set_timings(in, vm);
}
static void opa362_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
dev_dbg(dssdev->dev, "get_timings\n");
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int opa362_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
dev_dbg(dssdev->dev, "check_timings\n");
- return in->ops.atv->check_timings(in, timings);
+ return in->ops.atv->check_timings(in, vm);
}
static void opa362_set_type(struct omap_dss_device *dssdev,
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
index d768217cefe0..13e32d02c884 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
@@ -24,7 +24,7 @@ struct panel_drv_data {
int pd_gpio;
int data_lines;
- struct omap_video_timings timings;
+ struct videomode vm;
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
@@ -81,7 +81,7 @@ static int tfp410_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.dpi->set_timings(in, &ddata->timings);
+ in->ops.dpi->set_timings(in, &ddata->vm);
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
@@ -113,44 +113,43 @@ static void tfp410_disable(struct omap_dss_device *dssdev)
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void tfp410_fix_timings(struct omap_video_timings *timings)
+static void tfp410_fix_timings(struct videomode *vm)
{
- timings->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- timings->de_level = OMAPDSS_SIG_ACTIVE_HIGH;
+ vm->flags |= DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_POSEDGE;
}
static void tfp410_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- tfp410_fix_timings(timings);
+ tfp410_fix_timings(vm);
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void tfp410_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int tfp410_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- tfp410_fix_timings(timings);
+ tfp410_fix_timings(vm);
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static const struct omapdss_dvi_ops tfp410_dvi_ops = {
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
index 46855c8f5cbf..6d8f79b29af6 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
@@ -26,7 +26,7 @@ struct panel_drv_data {
struct gpio_desc *ls_oe_gpio;
struct gpio_desc *hpd_gpio;
- struct omap_video_timings timings;
+ struct videomode vm;
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
@@ -80,7 +80,7 @@ static int tpd_enable(struct omap_dss_device *dssdev)
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
return 0;
- in->ops.hdmi->set_timings(in, &ddata->timings);
+ in->ops.hdmi->set_timings(in, &ddata->vm);
r = in->ops.hdmi->enable(in);
if (r)
@@ -105,33 +105,33 @@ static void tpd_disable(struct omap_dss_device *dssdev)
}
static void tpd_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.hdmi->set_timings(in, timings);
+ in->ops.hdmi->set_timings(in, vm);
}
static void tpd_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int tpd_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
int r;
- r = in->ops.hdmi->check_timings(in, timings);
+ r = in->ops.hdmi->check_timings(in, vm);
return r;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
index 7f16f985ab22..38003208d9ca 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
@@ -28,7 +28,7 @@ struct panel_drv_data {
int data_lines;
- struct omap_video_timings videomode;
+ struct videomode vm;
/* used for non-DT boot, to be removed */
int backlight_gpio;
@@ -80,7 +80,7 @@ static int panel_dpi_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
if (r)
@@ -122,32 +122,32 @@ static void panel_dpi_disable(struct omap_dss_device *dssdev)
}
static void panel_dpi_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void panel_dpi_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int panel_dpi_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver panel_dpi_ops = {
@@ -169,7 +169,6 @@ static int panel_dpi_probe_pdata(struct platform_device *pdev)
const struct panel_dpi_platform_data *pdata;
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev, *in;
- struct videomode vm;
int r;
pdata = dev_get_platdata(&pdev->dev);
@@ -185,8 +184,7 @@ static int panel_dpi_probe_pdata(struct platform_device *pdev)
ddata->data_lines = pdata->data_lines;
- videomode_from_timing(pdata->display_timing, &vm);
- videomode_to_omap_video_timings(&vm, &ddata->videomode);
+ videomode_from_timing(pdata->display_timing, &ddata->vm);
dssdev = &ddata->dssdev;
dssdev->name = pdata->name;
@@ -214,7 +212,6 @@ static int panel_dpi_probe_of(struct platform_device *pdev)
struct omap_dss_device *in;
int r;
struct display_timing timing;
- struct videomode vm;
struct gpio_desc *gpio;
gpio = devm_gpiod_get_optional(&pdev->dev, "enable", GPIOD_OUT_LOW);
@@ -245,8 +242,7 @@ static int panel_dpi_probe_of(struct platform_device *pdev)
return r;
}
- videomode_from_timing(&timing, &vm);
- videomode_to_omap_video_timings(&vm, &ddata->videomode);
+ videomode_from_timing(&timing, &ddata->vm);
in = omapdss_of_find_source_for_first_ep(node);
if (IS_ERR(in)) {
@@ -295,7 +291,7 @@ static int panel_dpi_probe(struct platform_device *pdev)
dssdev->driver = &panel_dpi_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
dssdev->phy.dpi.data_lines = ddata->data_lines;
r = omapdss_register_display(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index b1f3b818edf4..dc026a843712 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -42,7 +42,7 @@ struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
- struct omap_video_timings timings;
+ struct videomode vm;
struct platform_device *pdev;
@@ -382,8 +382,8 @@ static const struct backlight_ops dsicm_bl_ops = {
static void dsicm_get_resolution(struct omap_dss_device *dssdev,
u16 *xres, u16 *yres)
{
- *xres = dssdev->panel.timings.x_res;
- *yres = dssdev->panel.timings.y_res;
+ *xres = dssdev->panel.vm.hactive;
+ *yres = dssdev->panel.vm.vactive;
}
static ssize_t dsicm_num_errors_show(struct device *dev,
@@ -589,7 +589,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
struct omap_dss_dsi_config dsi_config = {
.mode = OMAP_DSS_DSI_CMD_MODE,
.pixel_format = OMAP_DSS_DSI_FMT_RGB888,
- .timings = &ddata->timings,
+ .vm = &ddata->vm,
.hs_clk_min = 150000000,
.hs_clk_max = 300000000,
.lp_clk_min = 7000000,
@@ -892,8 +892,8 @@ static int dsicm_update(struct omap_dss_device *dssdev,
/* XXX no need to send this every frame, but dsi break if not done */
r = dsicm_set_update_window(ddata, 0, 0,
- dssdev->panel.timings.x_res,
- dssdev->panel.timings.y_res);
+ dssdev->panel.vm.hactive,
+ dssdev->panel.vm.vactive);
if (r)
goto err;
@@ -1023,9 +1023,8 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev,
goto err1;
}
- size = min(w * h * 3,
- dssdev->panel.timings.x_res *
- dssdev->panel.timings.y_res * 3);
+ size = min((u32)w * h * 3,
+ dssdev->panel.vm.hactive * dssdev->panel.vm.vactive * 3);
in->ops.dsi->bus_lock(in);
@@ -1186,14 +1185,14 @@ static int dsicm_probe(struct platform_device *pdev)
if (r)
return r;
- ddata->timings.x_res = 864;
- ddata->timings.y_res = 480;
- ddata->timings.pixelclock = 864 * 480 * 60;
+ ddata->vm.hactive = 864;
+ ddata->vm.vactive = 480;
+ ddata->vm.pixelclock = 864 * 480 * 60;
dssdev = &ddata->dssdev;
dssdev->dev = dev;
dssdev->driver = &dsicm_ops;
- dssdev->panel.timings = ddata->timings;
+ dssdev->panel.vm = ddata->vm;
dssdev->type = OMAP_DISPLAY_TYPE_DSI;
dssdev->owner = THIS_MODULE;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
index 6dfb96cea293..43d21edb51f5 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
@@ -19,25 +19,28 @@
#include "../dss/omapdss.h"
-static struct omap_video_timings lb035q02_timings = {
- .x_res = 320,
- .y_res = 240,
+static struct videomode lb035q02_vm = {
+ .hactive = 320,
+ .vactive = 240,
.pixelclock = 6500000,
- .hsw = 2,
- .hfp = 20,
- .hbp = 68,
-
- .vsw = 2,
- .vfp = 4,
- .vbp = 18,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .hsync_len = 2,
+ .hfront_porch = 20,
+ .hback_porch = 68,
+
+ .vsync_len = 2,
+ .vfront_porch = 4,
+ .vback_porch = 18,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
+ /*
+ * Note: According to the panel documentation:
+ * DE is active LOW
+ * DATA needs to be driven on the FALLING edge
+ */
};
struct panel_drv_data {
@@ -48,7 +51,7 @@ struct panel_drv_data {
int data_lines;
- struct omap_video_timings videomode;
+ struct videomode vm;
struct gpio_desc *enable_gpio;
};
@@ -158,7 +161,7 @@ static int lb035q02_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
if (r)
@@ -189,32 +192,32 @@ static void lb035q02_disable(struct omap_dss_device *dssdev)
}
static void lb035q02_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void lb035q02_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int lb035q02_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver lb035q02_ops = {
@@ -278,14 +281,14 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
if (r)
return r;
- ddata->videomode = lb035q02_timings;
+ ddata->vm = lb035q02_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
dssdev->driver = &lb035q02_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
dssdev->phy.dpi.data_lines = ddata->data_lines;
r = omapdss_register_display(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
index 9f3d6f48f3e1..2de27ba01552 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
@@ -23,7 +23,7 @@ struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
- struct omap_video_timings videomode;
+ struct videomode vm;
int data_lines;
@@ -65,22 +65,20 @@ static const struct {
{ 156, 0x00 }, { 157, 0x00 }, { 2, 0x00 },
};
-static const struct omap_video_timings nec_8048_panel_timings = {
- .x_res = LCD_XRES,
- .y_res = LCD_YRES,
+static const struct videomode nec_8048_panel_vm = {
+ .hactive = LCD_XRES,
+ .vactive = LCD_YRES,
.pixelclock = LCD_PIXEL_CLOCK,
- .hfp = 6,
- .hsw = 1,
- .hbp = 4,
- .vfp = 3,
- .vsw = 1,
- .vbp = 4,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .hfront_porch = 6,
+ .hsync_len = 1,
+ .hback_porch = 4,
+ .vfront_porch = 3,
+ .vsync_len = 1,
+ .vback_porch = 4,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -157,7 +155,7 @@ static int nec_8048_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
if (r)
@@ -188,32 +186,32 @@ static void nec_8048_disable(struct omap_dss_device *dssdev)
}
static void nec_8048_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void nec_8048_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int nec_8048_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver nec_8048_ops = {
@@ -306,14 +304,14 @@ static int nec_8048_probe(struct spi_device *spi)
goto err_gpio;
}
- ddata->videomode = nec_8048_panel_timings;
+ ddata->vm = nec_8048_panel_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
dssdev->driver = &nec_8048_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
index 3d3efc561ea9..04fe235b7cac 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
@@ -26,7 +26,7 @@ struct panel_drv_data {
int data_lines;
- struct omap_video_timings videomode;
+ struct videomode vm;
struct gpio_desc *resb_gpio; /* low = reset active min 20 us */
struct gpio_desc *ini_gpio; /* high = power on */
@@ -35,25 +35,27 @@ struct panel_drv_data {
struct gpio_desc *ud_gpio; /* high = conventional vertical scanning */
};
-static const struct omap_video_timings sharp_ls_timings = {
- .x_res = 480,
- .y_res = 640,
+static const struct videomode sharp_ls_vm = {
+ .hactive = 480,
+ .vactive = 640,
.pixelclock = 19200000,
- .hsw = 2,
- .hfp = 1,
- .hbp = 28,
-
- .vsw = 1,
- .vfp = 1,
- .vbp = 1,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .hsync_len = 2,
+ .hfront_porch = 1,
+ .hback_porch = 28,
+
+ .vsync_len = 1,
+ .vfront_porch = 1,
+ .vback_porch = 1,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
+ /*
+ * Note: According to the panel documentation:
+ * DATA needs to be driven on the FALLING edge
+ */
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -99,7 +101,7 @@ static int sharp_ls_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
if (ddata->vcc) {
r = regulator_enable(ddata->vcc);
@@ -154,32 +156,32 @@ static void sharp_ls_disable(struct omap_dss_device *dssdev)
}
static void sharp_ls_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void sharp_ls_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int sharp_ls_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver sharp_ls_ops = {
@@ -279,14 +281,14 @@ static int sharp_ls_probe(struct platform_device *pdev)
if (r)
return r;
- ddata->videomode = sharp_ls_timings;
+ ddata->vm = sharp_ls_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &pdev->dev;
dssdev->driver = &sharp_ls_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
dssdev->phy.dpi.data_lines = ddata->data_lines;
r = omapdss_register_display(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
index 3557a4c7dd7b..746cb8d9cba1 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
@@ -71,7 +71,7 @@ struct panel_drv_data {
int reset_gpio;
int datapairs;
- struct omap_video_timings videomode;
+ struct videomode vm;
char *name;
int enabled;
@@ -92,23 +92,20 @@ struct panel_drv_data {
struct backlight_device *bl_dev;
};
-static const struct omap_video_timings acx565akm_panel_timings = {
- .x_res = 800,
- .y_res = 480,
+static const struct videomode acx565akm_panel_vm = {
+ .hactive = 800,
+ .vactive = 480,
.pixelclock = 24000000,
- .hfp = 28,
- .hsw = 4,
- .hbp = 24,
- .vfp = 3,
- .vsw = 3,
- .vbp = 4,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
-
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .hfront_porch = 28,
+ .hsync_len = 4,
+ .hback_porch = 24,
+ .vfront_porch = 3,
+ .vsync_len = 3,
+ .vback_porch = 4,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -548,7 +545,7 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
dev_dbg(&ddata->spi->dev, "%s\n", __func__);
- in->ops.sdi->set_timings(in, &ddata->videomode);
+ in->ops.sdi->set_timings(in, &ddata->vm);
if (ddata->datapairs > 0)
in->ops.sdi->set_datapairs(in, ddata->datapairs);
@@ -662,32 +659,32 @@ static void acx565akm_disable(struct omap_dss_device *dssdev)
}
static void acx565akm_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.sdi->set_timings(in, timings);
+ in->ops.sdi->set_timings(in, vm);
}
static void acx565akm_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int acx565akm_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.sdi->check_timings(in, timings);
+ return in->ops.sdi->check_timings(in, vm);
}
static struct omap_dss_driver acx565akm_ops = {
@@ -845,14 +842,14 @@ static int acx565akm_probe(struct spi_device *spi)
acx565akm_bl_update_status(bldev);
- ddata->videomode = acx565akm_panel_timings;
+ ddata->vm = acx565akm_panel_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
dssdev->driver = &acx565akm_ops;
dssdev->type = OMAP_DISPLAY_TYPE_SDI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
index e859b3f893f7..f313dbfcbacb 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
@@ -37,28 +37,29 @@ struct panel_drv_data {
int data_lines;
- struct omap_video_timings videomode;
+ struct videomode vm;
struct spi_device *spi_dev;
};
-static struct omap_video_timings td028ttec1_panel_timings = {
- .x_res = 480,
- .y_res = 640,
+static struct videomode td028ttec1_panel_vm = {
+ .hactive = 480,
+ .vactive = 640,
.pixelclock = 22153000,
- .hfp = 24,
- .hsw = 8,
- .hbp = 8,
- .vfp = 4,
- .vsw = 2,
- .vbp = 2,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
-
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .hfront_porch = 24,
+ .hsync_len = 8,
+ .hback_porch = 8,
+ .vfront_porch = 4,
+ .vsync_len = 2,
+ .vback_porch = 2,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_NEGEDGE,
+ /*
+ * Note: According to the panel documentation:
+ * SYNC needs to be driven on the FALLING edge
+ */
};
#define JBT_COMMAND 0x000
@@ -208,7 +209,7 @@ static int td028ttec1_panel_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
if (r)
@@ -325,32 +326,32 @@ static void td028ttec1_panel_disable(struct omap_dss_device *dssdev)
}
static void td028ttec1_panel_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void td028ttec1_panel_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int td028ttec1_panel_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver td028ttec1_ops = {
@@ -414,14 +415,14 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
if (r)
return r;
- ddata->videomode = td028ttec1_panel_timings;
+ ddata->vm = td028ttec1_panel_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
dssdev->driver = &td028ttec1_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
dssdev->phy.dpi.data_lines = ddata->data_lines;
r = omapdss_register_display(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
index 66c6bbe6472b..0787dba44faa 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
@@ -56,7 +56,7 @@ struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
- struct omap_video_timings videomode;
+ struct videomode vm;
int data_lines;
@@ -72,25 +72,27 @@ struct panel_drv_data {
u32 power_on_resume:1;
};
-static const struct omap_video_timings tpo_td043_timings = {
- .x_res = 800,
- .y_res = 480,
+static const struct videomode tpo_td043_vm = {
+ .hactive = 800,
+ .vactive = 480,
.pixelclock = 36000000,
- .hsw = 1,
- .hfp = 68,
- .hbp = 214,
+ .hsync_len = 1,
+ .hfront_porch = 68,
+ .hback_porch = 214,
- .vsw = 1,
- .vfp = 39,
- .vbp = 34,
+ .vsync_len = 1,
+ .vfront_porch = 39,
+ .vback_porch = 34,
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_NEGEDGE,
+ /*
+ * Note: According to the panel documentation:
+ * SYNC needs to be driven on the FALLING edge
+ */
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -378,7 +380,7 @@ static int tpo_td043_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
if (r)
@@ -418,32 +420,32 @@ static void tpo_td043_disable(struct omap_dss_device *dssdev)
}
static void tpo_td043_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void tpo_td043_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int tpo_td043_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver tpo_td043_ops = {
@@ -546,14 +548,14 @@ static int tpo_td043_probe(struct spi_device *spi)
goto err_sysfs;
}
- ddata->videomode = tpo_td043_timings;
+ ddata->vm = tpo_td043_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
dssdev->driver = &tpo_td043_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 535240fba671..c839f6456db2 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -75,7 +75,7 @@ struct dispc_features {
unsigned long max_lcd_pclk;
unsigned long max_tv_pclk;
int (*calc_scaling) (unsigned long pclk, unsigned long lclk,
- const struct omap_video_timings *mgr_timings,
+ const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
@@ -1679,7 +1679,7 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
{
int scale_x = out_width != orig_width;
int scale_y = out_height != orig_height;
- bool chroma_upscale = plane != OMAP_DSS_WB ? true : false;
+ bool chroma_upscale = plane != OMAP_DSS_WB;
if (!dss_has_feature(FEAT_HANDLE_UV_SEPARATE))
return;
@@ -2179,7 +2179,7 @@ static void calc_tiler_rotation_offset(u16 screen_width, u16 width,
* undocumented horizontal position and timing related limitations.
*/
static int check_horiz_timing_omap3(unsigned long pclk, unsigned long lclk,
- const struct omap_video_timings *t, u16 pos_x,
+ const struct videomode *vm, u16 pos_x,
u16 width, u16 height, u16 out_width, u16 out_height,
bool five_taps)
{
@@ -2189,14 +2189,16 @@ static int check_horiz_timing_omap3(unsigned long pclk, unsigned long lclk,
u64 val, blank;
int i;
- nonactive = t->x_res + t->hfp + t->hsw + t->hbp - out_width;
+ nonactive = vm->hactive + vm->hfront_porch + vm->hsync_len +
+ vm->hback_porch - out_width;
i = 0;
if (out_height < height)
i++;
if (out_width < width)
i++;
- blank = div_u64((u64)(t->hbp + t->hsw + t->hfp) * lclk, pclk);
+ blank = div_u64((u64)(vm->hback_porch + vm->hsync_len + vm->hfront_porch) *
+ lclk, pclk);
DSSDBG("blanking period + ppl = %llu (limit = %u)\n", blank, limits[i]);
if (blank <= limits[i])
return -EINVAL;
@@ -2231,7 +2233,7 @@ static int check_horiz_timing_omap3(unsigned long pclk, unsigned long lclk,
}
static unsigned long calc_core_clk_five_taps(unsigned long pclk,
- const struct omap_video_timings *mgr_timings, u16 width,
+ const struct videomode *vm, u16 width,
u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode)
{
@@ -2242,7 +2244,7 @@ static unsigned long calc_core_clk_five_taps(unsigned long pclk,
return (unsigned long) pclk;
if (height > out_height) {
- unsigned int ppl = mgr_timings->x_res;
+ unsigned int ppl = vm->hactive;
tmp = (u64)pclk * height * out_width;
do_div(tmp, 2 * out_height * ppl);
@@ -2324,7 +2326,7 @@ static unsigned long calc_core_clk_44xx(unsigned long pclk, u16 width,
}
static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk,
- const struct omap_video_timings *mgr_timings,
+ const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
@@ -2370,7 +2372,7 @@ static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk,
}
static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk,
- const struct omap_video_timings *mgr_timings,
+ const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
@@ -2392,7 +2394,7 @@ static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk,
*five_taps = false;
again:
if (*five_taps)
- *core_clk = calc_core_clk_five_taps(pclk, mgr_timings,
+ *core_clk = calc_core_clk_five_taps(pclk, vm,
in_width, in_height, out_width,
out_height, color_mode);
else
@@ -2400,7 +2402,7 @@ again:
in_height, out_width, out_height,
mem_to_mem);
- error = check_horiz_timing_omap3(pclk, lclk, mgr_timings,
+ error = check_horiz_timing_omap3(pclk, lclk, vm,
pos_x, in_width, in_height, out_width,
out_height, *five_taps);
if (error && *five_taps) {
@@ -2435,7 +2437,7 @@ again:
return -EINVAL;
}
- if (check_horiz_timing_omap3(pclk, lclk, mgr_timings, pos_x, in_width,
+ if (check_horiz_timing_omap3(pclk, lclk, vm, pos_x, in_width,
in_height, out_width, out_height, *five_taps)) {
DSSERR("horizontal timing too tight\n");
return -EINVAL;
@@ -2455,7 +2457,7 @@ again:
}
static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
- const struct omap_video_timings *mgr_timings,
+ const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
@@ -2501,7 +2503,7 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
enum omap_overlay_caps caps,
- const struct omap_video_timings *mgr_timings,
+ const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
int *x_predecim, int *y_predecim, u16 pos_x,
@@ -2515,7 +2517,7 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
if (width == out_width && height == out_height)
return 0;
- if (!mem_to_mem && (pclk == 0 || mgr_timings->pixelclock == 0)) {
+ if (!mem_to_mem && (pclk == 0 || vm->pixelclock == 0)) {
DSSERR("cannot calculate scaling settings: pclk is zero\n");
return -EINVAL;
}
@@ -2551,7 +2553,7 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
if (decim_y > *y_predecim || out_height > height * 8)
return -EINVAL;
- ret = dispc.feat->calc_scaling(pclk, lclk, mgr_timings, width, height,
+ ret = dispc.feat->calc_scaling(pclk, lclk, vm, width, height,
out_width, out_height, color_mode, five_taps,
x_predecim, y_predecim, &decim_x, &decim_y, pos_x, &core_clk,
mem_to_mem);
@@ -2591,7 +2593,7 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
u16 out_width, u16 out_height, enum omap_color_mode color_mode,
u8 rotation, bool mirror, u8 zorder, u8 pre_mult_alpha,
u8 global_alpha, enum omap_dss_rotation_type rotation_type,
- bool replication, const struct omap_video_timings *mgr_timings,
+ bool replication, const struct videomode *vm,
bool mem_to_mem)
{
bool five_taps = true;
@@ -2605,7 +2607,7 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
u16 in_height = height;
u16 in_width = width;
int x_predecim = 1, y_predecim = 1;
- bool ilace = mgr_timings->interlace;
+ bool ilace = !!(vm->flags & DISPLAY_FLAGS_INTERLACED);
unsigned long pclk = dispc_plane_pclk_rate(plane);
unsigned long lclk = dispc_plane_lclk_rate(plane);
@@ -2647,7 +2649,7 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
if (!dss_feat_color_mode_supported(plane, color_mode))
return -EINVAL;
- r = dispc_ovl_calc_scaling(pclk, lclk, caps, mgr_timings, in_width,
+ r = dispc_ovl_calc_scaling(pclk, lclk, caps, vm, in_width,
in_height, out_width, out_height, color_mode,
&five_taps, &x_predecim, &y_predecim, pos_x,
rotation_type, mem_to_mem);
@@ -2784,7 +2786,7 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
}
int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
- bool replication, const struct omap_video_timings *mgr_timings,
+ bool replication, const struct videomode *vm,
bool mem_to_mem)
{
int r;
@@ -2803,14 +2805,14 @@ int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
oi->screen_width, oi->pos_x, oi->pos_y, oi->width, oi->height,
oi->out_width, oi->out_height, oi->color_mode, oi->rotation,
oi->mirror, oi->zorder, oi->pre_mult_alpha, oi->global_alpha,
- oi->rotation_type, replication, mgr_timings, mem_to_mem);
+ oi->rotation_type, replication, vm, mem_to_mem);
return r;
}
EXPORT_SYMBOL(dispc_ovl_setup);
int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
- bool mem_to_mem, const struct omap_video_timings *mgr_timings)
+ bool mem_to_mem, const struct videomode *vm)
{
int r;
u32 l;
@@ -2819,8 +2821,8 @@ int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
const u8 zorder = 0, global_alpha = 0;
const bool replication = false;
bool truncation;
- int in_width = mgr_timings->x_res;
- int in_height = mgr_timings->y_res;
+ int in_width = vm->hactive;
+ int in_height = vm->vactive;
enum omap_overlay_caps caps =
OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA;
@@ -2833,7 +2835,7 @@ int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
wi->buf_width, pos_x, pos_y, in_width, in_height, wi->width,
wi->height, wi->color_mode, wi->rotation, wi->mirror, zorder,
wi->pre_mult_alpha, global_alpha, wi->rotation_type,
- replication, mgr_timings, mem_to_mem);
+ replication, vm, mem_to_mem);
switch (wi->color_mode) {
case OMAP_DSS_COLOR_RGB16:
@@ -2867,8 +2869,8 @@ int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
} else {
int wbdelay;
- wbdelay = min(mgr_timings->vfp + mgr_timings->vsw +
- mgr_timings->vbp, 255);
+ wbdelay = min(vm->vfront_porch +
+ vm->vsync_len + vm->vback_porch, (u32)255);
/* WBDELAYCOUNT */
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), wbdelay, 7, 0);
@@ -3093,10 +3095,10 @@ static bool _dispc_mgr_size_ok(u16 width, u16 height)
height <= dispc.feat->mgr_height_max;
}
-static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
+static bool _dispc_lcd_timings_ok(int hsync_len, int hfp, int hbp,
int vsw, int vfp, int vbp)
{
- if (hsw < 1 || hsw > dispc.feat->sw_max ||
+ if (hsync_len < 1 || hsync_len > dispc.feat->sw_max ||
hfp < 1 || hfp > dispc.feat->hp_max ||
hbp < 1 || hbp > dispc.feat->hp_max ||
vsw < 1 || vsw > dispc.feat->sw_max ||
@@ -3110,113 +3112,77 @@ static bool _dispc_mgr_pclk_ok(enum omap_channel channel,
unsigned long pclk)
{
if (dss_mgr_is_lcd(channel))
- return pclk <= dispc.feat->max_lcd_pclk ? true : false;
+ return pclk <= dispc.feat->max_lcd_pclk;
else
- return pclk <= dispc.feat->max_tv_pclk ? true : false;
+ return pclk <= dispc.feat->max_tv_pclk;
}
-bool dispc_mgr_timings_ok(enum omap_channel channel,
- const struct omap_video_timings *timings)
+bool dispc_mgr_timings_ok(enum omap_channel channel, const struct videomode *vm)
{
- if (!_dispc_mgr_size_ok(timings->x_res, timings->y_res))
+ if (!_dispc_mgr_size_ok(vm->hactive, vm->vactive))
return false;
- if (!_dispc_mgr_pclk_ok(channel, timings->pixelclock))
+ if (!_dispc_mgr_pclk_ok(channel, vm->pixelclock))
return false;
if (dss_mgr_is_lcd(channel)) {
/* TODO: OMAP4+ supports interlace for LCD outputs */
- if (timings->interlace)
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED)
return false;
- if (!_dispc_lcd_timings_ok(timings->hsw, timings->hfp,
- timings->hbp, timings->vsw, timings->vfp,
- timings->vbp))
+ if (!_dispc_lcd_timings_ok(vm->hsync_len,
+ vm->hfront_porch, vm->hback_porch,
+ vm->vsync_len, vm->vfront_porch,
+ vm->vback_porch))
return false;
}
return true;
}
-static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
- int hfp, int hbp, int vsw, int vfp, int vbp,
- enum omap_dss_signal_level vsync_level,
- enum omap_dss_signal_level hsync_level,
- enum omap_dss_signal_edge data_pclk_edge,
- enum omap_dss_signal_level de_level,
- enum omap_dss_signal_edge sync_pclk_edge)
-
+static void _dispc_mgr_set_lcd_timings(enum omap_channel channel,
+ const struct videomode *vm)
{
u32 timing_h, timing_v, l;
bool onoff, rf, ipc, vs, hs, de;
- timing_h = FLD_VAL(hsw-1, dispc.feat->sw_start, 0) |
- FLD_VAL(hfp-1, dispc.feat->fp_start, 8) |
- FLD_VAL(hbp-1, dispc.feat->bp_start, 20);
- timing_v = FLD_VAL(vsw-1, dispc.feat->sw_start, 0) |
- FLD_VAL(vfp, dispc.feat->fp_start, 8) |
- FLD_VAL(vbp, dispc.feat->bp_start, 20);
+ timing_h = FLD_VAL(vm->hsync_len - 1, dispc.feat->sw_start, 0) |
+ FLD_VAL(vm->hfront_porch - 1, dispc.feat->fp_start, 8) |
+ FLD_VAL(vm->hback_porch - 1, dispc.feat->bp_start, 20);
+ timing_v = FLD_VAL(vm->vsync_len - 1, dispc.feat->sw_start, 0) |
+ FLD_VAL(vm->vfront_porch, dispc.feat->fp_start, 8) |
+ FLD_VAL(vm->vback_porch, dispc.feat->bp_start, 20);
dispc_write_reg(DISPC_TIMING_H(channel), timing_h);
dispc_write_reg(DISPC_TIMING_V(channel), timing_v);
- switch (vsync_level) {
- case OMAPDSS_SIG_ACTIVE_LOW:
- vs = true;
- break;
- case OMAPDSS_SIG_ACTIVE_HIGH:
+ if (vm->flags & DISPLAY_FLAGS_VSYNC_HIGH)
vs = false;
- break;
- default:
- BUG();
- }
+ else
+ vs = true;
- switch (hsync_level) {
- case OMAPDSS_SIG_ACTIVE_LOW:
- hs = true;
- break;
- case OMAPDSS_SIG_ACTIVE_HIGH:
+ if (vm->flags & DISPLAY_FLAGS_HSYNC_HIGH)
hs = false;
- break;
- default:
- BUG();
- }
+ else
+ hs = true;
- switch (de_level) {
- case OMAPDSS_SIG_ACTIVE_LOW:
- de = true;
- break;
- case OMAPDSS_SIG_ACTIVE_HIGH:
+ if (vm->flags & DISPLAY_FLAGS_DE_HIGH)
de = false;
- break;
- default:
- BUG();
- }
+ else
+ de = true;
- switch (data_pclk_edge) {
- case OMAPDSS_DRIVE_SIG_RISING_EDGE:
+ if (vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE)
ipc = false;
- break;
- case OMAPDSS_DRIVE_SIG_FALLING_EDGE:
+ else
ipc = true;
- break;
- default:
- BUG();
- }
/* always use the 'rf' setting */
onoff = true;
- switch (sync_pclk_edge) {
- case OMAPDSS_DRIVE_SIG_FALLING_EDGE:
- rf = false;
- break;
- case OMAPDSS_DRIVE_SIG_RISING_EDGE:
+ if (vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE)
rf = true;
- break;
- default:
- BUG();
- }
+ else
+ rf = false;
l = FLD_VAL(onoff, 17, 17) |
FLD_VAL(rf, 16, 16) |
@@ -3253,13 +3219,13 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
/* change name to mode? */
void dispc_mgr_set_timings(enum omap_channel channel,
- const struct omap_video_timings *timings)
+ const struct videomode *vm)
{
unsigned xtot, ytot;
unsigned long ht, vt;
- struct omap_video_timings t = *timings;
+ struct videomode t = *vm;
- DSSDBG("channel %d xres %u yres %u\n", channel, t.x_res, t.y_res);
+ DSSDBG("channel %d xres %u yres %u\n", channel, t.hactive, t.vactive);
if (!dispc_mgr_timings_ok(channel, &t)) {
BUG();
@@ -3267,34 +3233,37 @@ void dispc_mgr_set_timings(enum omap_channel channel,
}
if (dss_mgr_is_lcd(channel)) {
- _dispc_mgr_set_lcd_timings(channel, t.hsw, t.hfp, t.hbp, t.vsw,
- t.vfp, t.vbp, t.vsync_level, t.hsync_level,
- t.data_pclk_edge, t.de_level, t.sync_pclk_edge);
+ _dispc_mgr_set_lcd_timings(channel, &t);
- xtot = t.x_res + t.hfp + t.hsw + t.hbp;
- ytot = t.y_res + t.vfp + t.vsw + t.vbp;
+ xtot = t.hactive + t.hfront_porch + t.hsync_len + t.hback_porch;
+ ytot = t.vactive + t.vfront_porch + t.vsync_len + t.vback_porch;
- ht = timings->pixelclock / xtot;
- vt = timings->pixelclock / xtot / ytot;
+ ht = vm->pixelclock / xtot;
+ vt = vm->pixelclock / xtot / ytot;
- DSSDBG("pck %u\n", timings->pixelclock);
- DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
- t.hsw, t.hfp, t.hbp, t.vsw, t.vfp, t.vbp);
+ DSSDBG("pck %lu\n", vm->pixelclock);
+ DSSDBG("hsync_len %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
+ t.hsync_len, t.hfront_porch, t.hback_porch,
+ t.vsync_len, t.vfront_porch, t.vback_porch);
DSSDBG("vsync_level %d hsync_level %d data_pclk_edge %d de_level %d sync_pclk_edge %d\n",
- t.vsync_level, t.hsync_level, t.data_pclk_edge,
- t.de_level, t.sync_pclk_edge);
+ !!(t.flags & DISPLAY_FLAGS_VSYNC_HIGH),
+ !!(t.flags & DISPLAY_FLAGS_HSYNC_HIGH),
+ !!(t.flags & DISPLAY_FLAGS_PIXDATA_POSEDGE),
+ !!(t.flags & DISPLAY_FLAGS_DE_HIGH),
+ !!(t.flags & DISPLAY_FLAGS_SYNC_POSEDGE));
DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt);
} else {
- if (t.interlace)
- t.y_res /= 2;
+ if (t.flags & DISPLAY_FLAGS_INTERLACED)
+ t.vactive /= 2;
if (dispc.feat->supports_double_pixel)
- REG_FLD_MOD(DISPC_CONTROL, t.double_pixel ? 1 : 0,
- 19, 17);
+ REG_FLD_MOD(DISPC_CONTROL,
+ !!(t.flags & DISPLAY_FLAGS_DOUBLECLK),
+ 19, 17);
}
- dispc_mgr_set_size(channel, t.x_res, t.y_res);
+ dispc_mgr_set_size(channel, t.hactive, t.vactive);
}
EXPORT_SYMBOL(dispc_mgr_set_timings);
@@ -4214,23 +4183,20 @@ EXPORT_SYMBOL(dispc_free_irq);
*/
static const struct dispc_errata_i734_data {
- struct omap_video_timings timings;
+ struct videomode vm;
struct omap_overlay_info ovli;
struct omap_overlay_manager_info mgri;
struct dss_lcd_mgr_config lcd_conf;
} i734 = {
- .timings = {
- .x_res = 8, .y_res = 1,
+ .vm = {
+ .hactive = 8, .vactive = 1,
.pixelclock = 16000000,
- .hsw = 8, .hfp = 4, .hbp = 4,
- .vsw = 1, .vfp = 1, .vbp = 1,
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .interlace = false,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .double_pixel = false,
+ .hsync_len = 8, .hfront_porch = 4, .hback_porch = 4,
+ .vsync_len = 1, .vfront_porch = 1, .vback_porch = 1,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
},
.ovli = {
.screen_width = 1,
@@ -4320,7 +4286,7 @@ static void dispc_errata_i734_wa(void)
/* Setup and enable GFX plane */
dispc_ovl_set_channel_out(OMAP_DSS_GFX, OMAP_DSS_CHANNEL_LCD);
- dispc_ovl_setup(OMAP_DSS_GFX, &ovli, false, &i734.timings, false);
+ dispc_ovl_setup(OMAP_DSS_GFX, &ovli, false, &i734.vm, false);
dispc_ovl_enable(OMAP_DSS_GFX, true);
/* Set up and enable display manager for LCD1 */
@@ -4328,7 +4294,7 @@ static void dispc_errata_i734_wa(void)
dispc_calc_clock_rates(dss_get_dispc_clk_rate(),
&lcd_conf.clock_info);
dispc_mgr_set_lcd_config(OMAP_DSS_CHANNEL_LCD, &lcd_conf);
- dispc_mgr_set_timings(OMAP_DSS_CHANNEL_LCD, &i734.timings);
+ dispc_mgr_set_timings(OMAP_DSS_CHANNEL_LCD, &i734.vm);
dispc_clear_irqstatus(framedone_irq);
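
Annotation: in dispc.c the per-signal switch statements (which would BUG() on an unexpected enum value) collapse into simple tests of vm->flags. The register bits have inverted sense — a set bit means active-low sync/DE or inverted pixel clock — hence the negations in the new code. A condensed sketch of the decode done in _dispc_mgr_set_lcd_timings(); the helper name example_decode_polarities is hypothetical:

        /* Sketch of the flags-based polarity decode. The booleans feed the
         * DISPC polarity/frequency register: vs/hs/de set = active low,
         * ipc set = invert pixel clock, rf = drive syncs on the rising edge.
         * The 'onoff' bit is unconditionally set in the real code. */
        static void example_decode_polarities(const struct videomode *vm,
                                              bool *vs, bool *hs, bool *de,
                                              bool *ipc, bool *rf)
        {
                *vs  = !(vm->flags & DISPLAY_FLAGS_VSYNC_HIGH);
                *hs  = !(vm->flags & DISPLAY_FLAGS_HSYNC_HIGH);
                *de  = !(vm->flags & DISPLAY_FLAGS_DE_HIGH);
                *ipc = !(vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE);
                *rf  = !!(vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE);
        }
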
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index 8dcdd7cf9937..425a5a8dff8b 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -35,8 +35,8 @@
void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
u16 *xres, u16 *yres)
{
- *xres = dssdev->panel.timings.x_res;
- *yres = dssdev->panel.timings.y_res;
+ *xres = dssdev->panel.vm.hactive;
+ *yres = dssdev->panel.vm.vactive;
}
EXPORT_SYMBOL(omapdss_default_get_resolution);
@@ -72,9 +72,9 @@ int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev)
EXPORT_SYMBOL(omapdss_default_get_recommended_bpp);
void omapdss_default_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- *timings = dssdev->panel.timings;
+ *vm = dssdev->panel.vm;
}
EXPORT_SYMBOL(omapdss_default_get_timings);
@@ -217,73 +217,3 @@ struct omap_dss_device *omap_dss_find_device(void *data,
return NULL;
}
EXPORT_SYMBOL(omap_dss_find_device);
-
-void videomode_to_omap_video_timings(const struct videomode *vm,
- struct omap_video_timings *ovt)
-{
- memset(ovt, 0, sizeof(*ovt));
-
- ovt->pixelclock = vm->pixelclock;
- ovt->x_res = vm->hactive;
- ovt->hbp = vm->hback_porch;
- ovt->hfp = vm->hfront_porch;
- ovt->hsw = vm->hsync_len;
- ovt->y_res = vm->vactive;
- ovt->vbp = vm->vback_porch;
- ovt->vfp = vm->vfront_porch;
- ovt->vsw = vm->vsync_len;
-
- ovt->vsync_level = vm->flags & DISPLAY_FLAGS_VSYNC_HIGH ?
- OMAPDSS_SIG_ACTIVE_HIGH :
- OMAPDSS_SIG_ACTIVE_LOW;
- ovt->hsync_level = vm->flags & DISPLAY_FLAGS_HSYNC_HIGH ?
- OMAPDSS_SIG_ACTIVE_HIGH :
- OMAPDSS_SIG_ACTIVE_LOW;
- ovt->de_level = vm->flags & DISPLAY_FLAGS_DE_HIGH ?
- OMAPDSS_SIG_ACTIVE_HIGH :
- OMAPDSS_SIG_ACTIVE_LOW;
- ovt->data_pclk_edge = vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE ?
- OMAPDSS_DRIVE_SIG_RISING_EDGE :
- OMAPDSS_DRIVE_SIG_FALLING_EDGE;
-
- ovt->sync_pclk_edge = ovt->data_pclk_edge;
-}
-EXPORT_SYMBOL(videomode_to_omap_video_timings);
-
-void omap_video_timings_to_videomode(const struct omap_video_timings *ovt,
- struct videomode *vm)
-{
- memset(vm, 0, sizeof(*vm));
-
- vm->pixelclock = ovt->pixelclock;
-
- vm->hactive = ovt->x_res;
- vm->hback_porch = ovt->hbp;
- vm->hfront_porch = ovt->hfp;
- vm->hsync_len = ovt->hsw;
- vm->vactive = ovt->y_res;
- vm->vback_porch = ovt->vbp;
- vm->vfront_porch = ovt->vfp;
- vm->vsync_len = ovt->vsw;
-
- if (ovt->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
- vm->flags |= DISPLAY_FLAGS_HSYNC_HIGH;
- else
- vm->flags |= DISPLAY_FLAGS_HSYNC_LOW;
-
- if (ovt->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
- vm->flags |= DISPLAY_FLAGS_VSYNC_HIGH;
- else
- vm->flags |= DISPLAY_FLAGS_VSYNC_LOW;
-
- if (ovt->de_level == OMAPDSS_SIG_ACTIVE_HIGH)
- vm->flags |= DISPLAY_FLAGS_DE_HIGH;
- else
- vm->flags |= DISPLAY_FLAGS_DE_LOW;
-
- if (ovt->data_pclk_edge == OMAPDSS_DRIVE_SIG_RISING_EDGE)
- vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
- else
- vm->flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE;
-}
-EXPORT_SYMBOL(omap_video_timings_to_videomode);
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index b268295b76cf..e75162d26ac0 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -47,7 +47,7 @@ struct dpi_data {
struct mutex lock;
- struct omap_video_timings timings;
+ struct videomode vm;
struct dss_lcd_mgr_config mgr_config;
int data_lines;
@@ -333,31 +333,31 @@ static int dpi_set_mode(struct dpi_data *dpi)
{
struct omap_dss_device *out = &dpi->output;
enum omap_channel channel = out->dispc_channel;
- struct omap_video_timings *t = &dpi->timings;
+ struct videomode *vm = &dpi->vm;
int lck_div = 0, pck_div = 0;
unsigned long fck = 0;
unsigned long pck;
int r = 0;
if (dpi->pll)
- r = dpi_set_pll_clk(dpi, channel, t->pixelclock, &fck,
+ r = dpi_set_pll_clk(dpi, channel, vm->pixelclock, &fck,
&lck_div, &pck_div);
else
- r = dpi_set_dispc_clk(dpi, t->pixelclock, &fck,
+ r = dpi_set_dispc_clk(dpi, vm->pixelclock, &fck,
&lck_div, &pck_div);
if (r)
return r;
pck = fck / lck_div / pck_div;
- if (pck != t->pixelclock) {
- DSSWARN("Could not find exact pixel clock. Requested %d Hz, got %lu Hz\n",
- t->pixelclock, pck);
+ if (pck != vm->pixelclock) {
+ DSSWARN("Could not find exact pixel clock. Requested %lu Hz, got %lu Hz\n",
+ vm->pixelclock, pck);
- t->pixelclock = pck;
+ vm->pixelclock = pck;
}
- dss_mgr_set_timings(channel, t);
+ dss_mgr_set_timings(channel, vm);
return 0;
}
@@ -476,7 +476,7 @@ static void dpi_display_disable(struct omap_dss_device *dssdev)
}
static void dpi_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
@@ -484,25 +484,25 @@ static void dpi_set_timings(struct omap_dss_device *dssdev,
mutex_lock(&dpi->lock);
- dpi->timings = *timings;
+ dpi->vm = *vm;
mutex_unlock(&dpi->lock);
}
static void dpi_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
mutex_lock(&dpi->lock);
- *timings = dpi->timings;
+ *vm = dpi->vm;
mutex_unlock(&dpi->lock);
}
static int dpi_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
enum omap_channel channel = dpi->output.dispc_channel;
@@ -512,23 +512,23 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
struct dpi_clk_calc_ctx ctx;
bool ok;
- if (timings->x_res % 8 != 0)
+ if (vm->hactive % 8 != 0)
return -EINVAL;
- if (!dispc_mgr_timings_ok(channel, timings))
+ if (!dispc_mgr_timings_ok(channel, vm))
return -EINVAL;
- if (timings->pixelclock == 0)
+ if (vm->pixelclock == 0)
return -EINVAL;
if (dpi->pll) {
- ok = dpi_pll_clk_calc(dpi, timings->pixelclock, &ctx);
+ ok = dpi_pll_clk_calc(dpi, vm->pixelclock, &ctx);
if (!ok)
return -EINVAL;
fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
} else {
- ok = dpi_dss_clk_calc(timings->pixelclock, &ctx);
+ ok = dpi_dss_clk_calc(vm->pixelclock, &ctx);
if (!ok)
return -EINVAL;
@@ -540,7 +540,7 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
pck = fck / lck_div / pck_div;
- timings->pixelclock = pck;
+ vm->pixelclock = pck;
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index e1be5e795cd8..f060bda31235 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -289,7 +289,7 @@ struct dsi_clk_calc_ctx {
struct dss_pll_clock_info dsi_cinfo;
struct dispc_clock_info dispc_cinfo;
- struct omap_video_timings dispc_vm;
+ struct videomode vm;
struct omap_dss_dsi_videomode_timings dsi_vm;
};
@@ -383,7 +383,7 @@ struct dsi_data {
unsigned scp_clk_refcount;
struct dss_lcd_mgr_config mgr_config;
- struct omap_video_timings timings;
+ struct videomode vm;
enum omap_dss_dsi_pixel_format pix_fmt;
enum omap_dss_dsi_mode mode;
struct omap_dss_dsi_videomode_timings vm_timings;
@@ -3321,12 +3321,12 @@ static void dsi_config_vp_num_line_buffers(struct platform_device *dsidev)
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
- struct omap_video_timings *timings = &dsi->timings;
+ struct videomode *vm = &dsi->vm;
/*
* Don't use line buffers if width is greater than the video
* port's line buffer size
*/
- if (dsi->line_buffer_size <= timings->x_res * bpp / 8)
+ if (dsi->line_buffer_size <= vm->hactive * bpp / 8)
num_line_buffers = 0;
else
num_line_buffers = 2;
@@ -3453,7 +3453,7 @@ static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
int ddr_clk_pre, ddr_clk_post, enter_hs_mode_lat, exit_hs_mode_lat;
int tclk_trail, ths_exit, exiths_clk;
bool ddr_alwon;
- struct omap_video_timings *timings = &dsi->timings;
+ struct videomode *vm = &dsi->vm;
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
int ndl = dsi->num_lanes_used - 1;
int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.mX[HSDIV_DSI] + 1;
@@ -3494,7 +3494,7 @@ static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
exiths_clk = ths_exit + tclk_trail;
- width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8);
+ width_bytes = DIV_ROUND_UP(vm->hactive * bpp, 8);
bllp = hbp + hfp + hsa + DIV_ROUND_UP(width_bytes + 6, ndl);
if (!hsa_blanking_mode) {
@@ -3705,7 +3705,7 @@ static void dsi_proto_timings(struct platform_device *dsidev)
int vbp = dsi->vm_timings.vbp;
int window_sync = dsi->vm_timings.window_sync;
bool hsync_end;
- struct omap_video_timings *timings = &dsi->timings;
+ struct videomode *vm = &dsi->vm;
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
int tl, t_he, width_bytes;
@@ -3713,7 +3713,7 @@ static void dsi_proto_timings(struct platform_device *dsidev)
t_he = hsync_end ?
((hsa == 0 && ndl == 3) ? 1 : DIV_ROUND_UP(4, ndl)) : 0;
- width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8);
+ width_bytes = DIV_ROUND_UP(vm->hactive * bpp, 8);
/* TL = t_HS + HSA + t_HE + HFP + ceil((WC + 6) / NDL) + HBP */
tl = DIV_ROUND_UP(4, ndl) + (hsync_end ? hsa : 0) + t_he + hfp +
@@ -3722,7 +3722,7 @@ static void dsi_proto_timings(struct platform_device *dsidev)
DSSDBG("HBP: %d, HFP: %d, HSA: %d, TL: %d TXBYTECLKHS\n", hbp,
hfp, hsync_end ? hsa : 0, tl);
DSSDBG("VBP: %d, VFP: %d, VSA: %d, VACT: %d lines\n", vbp, vfp,
- vsa, timings->y_res);
+ vsa, vm->vactive);
r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
r = FLD_MOD(r, hbp, 11, 0); /* HBP */
@@ -3738,7 +3738,7 @@ static void dsi_proto_timings(struct platform_device *dsidev)
dsi_write_reg(dsidev, DSI_VM_TIMING2, r);
r = dsi_read_reg(dsidev, DSI_VM_TIMING3);
- r = FLD_MOD(r, timings->y_res, 14, 0); /* VACT */
+ r = FLD_MOD(r, vm->vactive, 14, 0); /* VACT */
r = FLD_MOD(r, tl, 31, 16); /* TL */
dsi_write_reg(dsidev, DSI_VM_TIMING3, r);
}
@@ -3856,7 +3856,7 @@ static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
/* MODE, 1 = video mode */
REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 4, 4);
- word_count = DIV_ROUND_UP(dsi->timings.x_res * bpp, 8);
+ word_count = DIV_ROUND_UP(dsi->vm.hactive * bpp, 8);
dsi_vc_write_long_header(dsidev, channel, data_type,
word_count, 0);
@@ -3918,8 +3918,8 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
int r;
const unsigned channel = dsi->update_channel;
const unsigned line_buf_size = dsi->line_buffer_size;
- u16 w = dsi->timings.x_res;
- u16 h = dsi->timings.y_res;
+ u16 w = dsi->vm.hactive;
+ u16 h = dsi->vm.vactive;
DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h);
@@ -3969,7 +3969,7 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
msecs_to_jiffies(250));
BUG_ON(r == 0);
- dss_mgr_set_timings(dispc_channel, &dsi->timings);
+ dss_mgr_set_timings(dispc_channel, &dsi->vm);
dss_mgr_start_update(dispc_channel);
@@ -4056,8 +4056,8 @@ static int dsi_update(struct omap_dss_device *dssdev, int channel,
dsi->framedone_callback = callback;
dsi->framedone_data = data;
- dw = dsi->timings.x_res;
- dh = dsi->timings.y_res;
+ dw = dsi->vm.hactive;
+ dh = dsi->vm.vactive;
#ifdef DSI_PERF_MEASURE
dsi->update_bytes = dw * dh *
@@ -4120,16 +4120,21 @@ static int dsi_display_init_dispc(struct platform_device *dsidev,
/*
* override interlace, logic level and edge related parameters in
- * omap_video_timings with default values
+ * videomode with default values
*/
- dsi->timings.interlace = false;
- dsi->timings.hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- dsi->timings.vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- dsi->timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- dsi->timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH;
- dsi->timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE;
-
- dss_mgr_set_timings(channel, &dsi->timings);
+ dsi->vm.flags &= ~DISPLAY_FLAGS_INTERLACED;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_HSYNC_LOW;
+ dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW;
+ dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE;
+ dsi->vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_DE_LOW;
+ dsi->vm.flags |= DISPLAY_FLAGS_DE_HIGH;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE;
+ dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
+
+ dss_mgr_set_timings(channel, &dsi->vm);
r = dsi_configure_dispc_clocks(dsidev);
if (r)
@@ -4331,7 +4336,7 @@ static void print_dsi_vm(const char *str,
wc = DIV_ROUND_UP(t->hact * t->bitspp, 8);
pps = DIV_ROUND_UP(wc + 6, t->ndl); /* pixel packet size */
- bl = t->hss + t->hsa + t->hse + t->hbp + t->hfp;
+ bl = t->hss + t->hsa + t->hse + t->hbp + t->hfront_porch;
tot = bl + pps;
#define TO_DSI_T(x) ((u32)div64_u64((u64)x * 1000000000llu, byteclk))
@@ -4340,14 +4345,14 @@ static void print_dsi_vm(const char *str,
"%u/%u/%u/%u/%u/%u = %u + %u = %u\n",
str,
byteclk,
- t->hss, t->hsa, t->hse, t->hbp, pps, t->hfp,
+ t->hss, t->hsa, t->hse, t->hbp, pps, t->hfront_porch,
bl, pps, tot,
TO_DSI_T(t->hss),
TO_DSI_T(t->hsa),
TO_DSI_T(t->hse),
TO_DSI_T(t->hbp),
TO_DSI_T(pps),
- TO_DSI_T(t->hfp),
+ TO_DSI_T(t->hfront_porch),
TO_DSI_T(bl),
TO_DSI_T(pps),
@@ -4356,13 +4361,13 @@ static void print_dsi_vm(const char *str,
#undef TO_DSI_T
}
-static void print_dispc_vm(const char *str, const struct omap_video_timings *t)
+static void print_dispc_vm(const char *str, const struct videomode *vm)
{
- unsigned long pck = t->pixelclock;
+ unsigned long pck = vm->pixelclock;
int hact, bl, tot;
- hact = t->x_res;
- bl = t->hsw + t->hbp + t->hfp;
+ hact = vm->hactive;
+ bl = vm->hsync_len + vm->hback_porch + vm->hfront_porch;
tot = hact + bl;
#define TO_DISPC_T(x) ((u32)div64_u64((u64)x * 1000000000llu, pck))
@@ -4371,12 +4376,12 @@ static void print_dispc_vm(const char *str, const struct omap_video_timings *t)
"%u/%u/%u/%u = %u + %u = %u\n",
str,
pck,
- t->hsw, t->hbp, hact, t->hfp,
+ vm->hsync_len, vm->hback_porch, hact, vm->hfront_porch,
bl, hact, tot,
- TO_DISPC_T(t->hsw),
- TO_DISPC_T(t->hbp),
+ TO_DISPC_T(vm->hsync_len),
+ TO_DISPC_T(vm->hback_porch),
TO_DISPC_T(hact),
- TO_DISPC_T(t->hfp),
+ TO_DISPC_T(vm->hfront_porch),
TO_DISPC_T(bl),
TO_DISPC_T(hact),
TO_DISPC_T(tot));
@@ -4387,7 +4392,7 @@ static void print_dispc_vm(const char *str, const struct omap_video_timings *t)
static void print_dsi_dispc_vm(const char *str,
const struct omap_dss_dsi_videomode_timings *t)
{
- struct omap_video_timings vm = { 0 };
+ struct videomode vm = { 0 };
unsigned long byteclk = t->hsclk / 4;
unsigned long pck;
u64 dsi_tput;
@@ -4396,13 +4401,13 @@ static void print_dsi_dispc_vm(const char *str,
dsi_tput = (u64)byteclk * t->ndl * 8;
pck = (u32)div64_u64(dsi_tput, t->bitspp);
dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(t->hact * t->bitspp, 8) + 6, t->ndl);
- dsi_htot = t->hss + t->hsa + t->hse + t->hbp + dsi_hact + t->hfp;
+ dsi_htot = t->hss + t->hsa + t->hse + t->hbp + dsi_hact + t->hfront_porch;
vm.pixelclock = pck;
- vm.hsw = div64_u64((u64)(t->hsa + t->hse) * pck, byteclk);
+ vm.hsync_len = div64_u64((u64)(t->hsa + t->hse) * pck, byteclk);
vm.hback_porch = div64_u64((u64)t->hbp * pck, byteclk);
- vm.hfp = div64_u64((u64)t->hfp * pck, byteclk);
- vm.x_res = t->hact;
+ vm.hfront_porch = div64_u64((u64)t->hfp * pck, byteclk);
+ vm.hactive = t->hact;
print_dispc_vm(str, &vm);
}
@@ -4412,19 +4417,19 @@ static bool dsi_cm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
unsigned long pck, void *data)
{
struct dsi_clk_calc_ctx *ctx = data;
- struct omap_video_timings *t = &ctx->dispc_vm;
+ struct videomode *vm = &ctx->vm;
ctx->dispc_cinfo.lck_div = lckd;
ctx->dispc_cinfo.pck_div = pckd;
ctx->dispc_cinfo.lck = lck;
ctx->dispc_cinfo.pck = pck;
- *t = *ctx->config->timings;
- t->pixelclock = pck;
- t->x_res = ctx->config->timings->x_res;
- t->y_res = ctx->config->timings->y_res;
- t->hsw = t->hfp = t->hbp = t->vsw = 1;
- t->vfp = t->vbp = 0;
+ *vm = *ctx->config->vm;
+ vm->pixelclock = pck;
+ vm->hactive = ctx->config->vm->hactive;
+ vm->vactive = ctx->config->vm->vactive;
+ vm->hsync_len = vm->hfront_porch = vm->hback_porch = vm->vsync_len = 1;
+ vm->vfront_porch = vm->vback_porch = 0;
return true;
}
@@ -4475,7 +4480,7 @@ static bool dsi_cm_calc(struct dsi_data *dsi,
* especially as we go to LP between each pixel packet due to HW
* "feature". So let's just estimate very roughly and multiply by 1.5.
*/
- pck = cfg->timings->pixelclock;
+ pck = cfg->vm->pixelclock;
pck = pck * 3 / 2;
txbyteclk = pck * bitspp / 8 / ndl;
@@ -4510,14 +4515,14 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
int dispc_htot, dispc_hbl; /* pixels */
int dsi_htot, dsi_hact, dsi_hbl, hss, hse; /* byteclks */
int hfp, hsa, hbp;
- const struct omap_video_timings *req_vm;
- struct omap_video_timings *dispc_vm;
+ const struct videomode *req_vm;
+ struct videomode *dispc_vm;
struct omap_dss_dsi_videomode_timings *dsi_vm;
u64 dsi_tput, dispc_tput;
dsi_tput = (u64)byteclk * ndl * 8;
- req_vm = cfg->timings;
+ req_vm = cfg->vm;
req_pck_min = ctx->req_pck_min;
req_pck_max = ctx->req_pck_max;
req_pck_nom = ctx->req_pck_nom;
@@ -4525,9 +4530,10 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
dispc_pck = ctx->dispc_cinfo.pck;
dispc_tput = (u64)dispc_pck * bitspp;
- xres = req_vm->x_res;
+ xres = req_vm->hactive;
- panel_hbl = req_vm->hfp + req_vm->hbp + req_vm->hsw;
+ panel_hbl = req_vm->hfront_porch + req_vm->hback_porch +
+ req_vm->hsync_len;
panel_htot = xres + panel_hbl;
dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(xres * bitspp, 8) + 6, ndl);
@@ -4557,7 +4563,7 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
hss = DIV_ROUND_UP(4, ndl);
if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) {
- if (ndl == 3 && req_vm->hsw == 0)
+ if (ndl == 3 && req_vm->hsync_len == 0)
hse = 1;
else
hse = DIV_ROUND_UP(4, ndl);
@@ -4596,14 +4602,14 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
if (cfg->trans_mode != OMAP_DSS_DSI_PULSE_MODE) {
hsa = 0;
- } else if (ndl == 3 && req_vm->hsw == 0) {
+ } else if (ndl == 3 && req_vm->hsync_len == 0) {
hsa = 0;
} else {
- hsa = div64_u64((u64)req_vm->hsw * byteclk, req_pck_nom);
+ hsa = div64_u64((u64)req_vm->hsync_len * byteclk, req_pck_nom);
hsa = max(hsa - hse, 1);
}
- hbp = div64_u64((u64)req_vm->hbp * byteclk, req_pck_nom);
+ hbp = div64_u64((u64)req_vm->hback_porch * byteclk, req_pck_nom);
hbp = max(hbp, 1);
hfp = dsi_hbl - (hss + hsa + hse + hbp);
@@ -4633,10 +4639,10 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
dsi_vm->hact = xres;
dsi_vm->hfp = hfp;
- dsi_vm->vsa = req_vm->vsw;
- dsi_vm->vbp = req_vm->vbp;
- dsi_vm->vact = req_vm->y_res;
- dsi_vm->vfp = req_vm->vfp;
+ dsi_vm->vsa = req_vm->vsync_len;
+ dsi_vm->vbp = req_vm->vback_porch;
+ dsi_vm->vact = req_vm->vactive;
+ dsi_vm->vfp = req_vm->vfront_porch;
dsi_vm->trans_mode = cfg->trans_mode;
@@ -4650,19 +4656,19 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
/* setup DISPC videomode */
- dispc_vm = &ctx->dispc_vm;
+ dispc_vm = &ctx->vm;
*dispc_vm = *req_vm;
dispc_vm->pixelclock = dispc_pck;
if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) {
- hsa = div64_u64((u64)req_vm->hsw * dispc_pck,
+ hsa = div64_u64((u64)req_vm->hsync_len * dispc_pck,
req_pck_nom);
hsa = max(hsa, 1);
} else {
hsa = 1;
}
- hbp = div64_u64((u64)req_vm->hbp * dispc_pck, req_pck_nom);
+ hbp = div64_u64((u64)req_vm->hback_porch * dispc_pck, req_pck_nom);
hbp = max(hbp, 1);
hfp = dispc_hbl - hsa - hbp;
@@ -4685,9 +4691,9 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
if (hfp < 1)
return false;
- dispc_vm->hfp = hfp;
- dispc_vm->hsw = hsa;
- dispc_vm->hbp = hbp;
+ dispc_vm->hfront_porch = hfp;
+ dispc_vm->hsync_len = hsa;
+ dispc_vm->hback_porch = hbp;
return true;
}
@@ -4707,9 +4713,9 @@ static bool dsi_vm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
return false;
#ifdef PRINT_VERBOSE_VM_TIMINGS
- print_dispc_vm("dispc", &ctx->dispc_vm);
+ print_dispc_vm("dispc", &ctx->vm);
print_dsi_vm("dsi ", &ctx->dsi_vm);
- print_dispc_vm("req ", ctx->config->timings);
+ print_dispc_vm("req ", ctx->config->vm);
print_dsi_dispc_vm("act ", &ctx->dsi_vm);
#endif
@@ -4758,7 +4764,7 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
const struct omap_dss_dsi_config *cfg,
struct dsi_clk_calc_ctx *ctx)
{
- const struct omap_video_timings *t = cfg->timings;
+ const struct videomode *vm = cfg->vm;
unsigned long clkin;
unsigned long pll_min;
unsigned long pll_max;
@@ -4774,9 +4780,9 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
ctx->config = cfg;
/* these limits should come from the panel driver */
- ctx->req_pck_min = t->pixelclock - 1000;
- ctx->req_pck_nom = t->pixelclock;
- ctx->req_pck_max = t->pixelclock + 1000;
+ ctx->req_pck_min = vm->pixelclock - 1000;
+ ctx->req_pck_nom = vm->pixelclock;
+ ctx->req_pck_max = vm->pixelclock + 1000;
byteclk_min = div64_u64((u64)ctx->req_pck_min * bitspp, ndl * 8);
pll_min = max(cfg->hs_clk_min * 4, byteclk_min * 4 * 4);
@@ -4833,7 +4839,7 @@ static int dsi_set_config(struct omap_dss_device *dssdev,
dsi->user_dsi_cinfo = ctx.dsi_cinfo;
dsi->user_dispc_cinfo = ctx.dispc_cinfo;
- dsi->timings = ctx.dispc_vm;
+ dsi->vm = ctx.vm;
dsi->vm_timings = ctx.dsi_vm;
mutex_unlock(&dsi->lock);
@@ -5342,7 +5348,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
dsi->phy_base = devm_ioremap(&dsidev->dev, res->start,
resource_size(res));
- if (!dsi->proto_base) {
+ if (!dsi->phy_base) {
DSSERR("can't ioremap DSI PHY\n");
return -ENOMEM;
}
@@ -5362,7 +5368,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
dsi->pll_base = devm_ioremap(&dsidev->dev, res->start,
resource_size(res));
- if (!dsi->proto_base) {
+ if (!dsi->pll_base) {
DSSERR("can't ioremap DSI PLL\n");
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index 4fd06dc41cb3..56493b290731 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -366,8 +366,7 @@ bool dispc_div_calc(unsigned long dispc,
unsigned long pck_min, unsigned long pck_max,
dispc_div_calc_func func, void *data);
-bool dispc_mgr_timings_ok(enum omap_channel channel,
- const struct omap_video_timings *timings);
+bool dispc_mgr_timings_ok(enum omap_channel channel, const struct videomode *vm);
int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
struct dispc_clock_info *cinfo);
@@ -390,7 +389,7 @@ void dispc_wb_enable(bool enable);
bool dispc_wb_is_enabled(void);
void dispc_wb_set_channel_in(enum dss_writeback_channel channel);
int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
- bool mem_to_mem, const struct omap_video_timings *timings);
+ bool mem_to_mem, const struct videomode *vm);
/* VENC */
int venc_init_platform_driver(void) __init;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h
index 63e711545865..fb6cccd02374 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h
@@ -181,7 +181,7 @@ struct hdmi_video_format {
};
struct hdmi_config {
- struct omap_video_timings timings;
+ struct videomode vm;
struct hdmi_avi_infoframe infoframe;
enum hdmi_core_hdmi_dvi hdmi_dvi_mode;
};
@@ -298,11 +298,11 @@ int hdmi_wp_set_pll_pwr(struct hdmi_wp_data *wp, enum hdmi_pll_pwr val);
void hdmi_wp_video_config_format(struct hdmi_wp_data *wp,
struct hdmi_video_format *video_fmt);
void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
- struct omap_video_timings *timings, struct hdmi_config *param);
+ struct videomode *vm, struct hdmi_config *param);
int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp);
phys_addr_t hdmi_wp_get_audio_dma_addr(struct hdmi_wp_data *wp);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index cbd28dfdb86a..e7162c16de2e 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -155,7 +155,7 @@ static void hdmi_power_off_core(struct omap_dss_device *dssdev)
static int hdmi_power_on_full(struct omap_dss_device *dssdev)
{
int r;
- struct omap_video_timings *p;
+ struct videomode *vm;
enum omap_channel channel = dssdev->dispc_channel;
struct hdmi_wp_data *wp = &hdmi.wp;
struct dss_pll_clock_info hdmi_cinfo = { 0 };
@@ -169,12 +169,13 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
hdmi_wp_clear_irqenable(wp, 0xffffffff);
hdmi_wp_set_irqstatus(wp, 0xffffffff);
- p = &hdmi.cfg.timings;
+ vm = &hdmi.cfg.vm;
- DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
+ DSSDBG("hdmi_power_on hactive= %d vactive = %d\n", vm->hactive,
+ vm->vactive);
- pc = p->pixelclock;
- if (p->double_pixel)
+ pc = vm->pixelclock;
+ if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
pc *= 2;
/* DSS_HDMI_TCLK is bitclk / 10 */
@@ -209,7 +210,7 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
hdmi4_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
/* tv size */
- dss_mgr_set_timings(channel, p);
+ dss_mgr_set_timings(channel, vm);
r = dss_mgr_enable(channel);
if (r)
@@ -255,30 +256,30 @@ static void hdmi_power_off_full(struct omap_dss_device *dssdev)
}
static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- if (!dispc_mgr_timings_ok(dssdev->dispc_channel, timings))
+ if (!dispc_mgr_timings_ok(dssdev->dispc_channel, vm))
return -EINVAL;
return 0;
}
static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
mutex_lock(&hdmi.lock);
- hdmi.cfg.timings = *timings;
+ hdmi.cfg.vm = *vm;
- dispc_set_tv_pclk(timings->pixelclock);
+ dispc_set_tv_pclk(vm->pixelclock);
mutex_unlock(&hdmi.lock);
}
static void hdmi_display_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- *timings = hdmi.cfg.timings;
+ *vm = hdmi.cfg.vm;
}
static void hdmi_dump_regs(struct seq_file *s)
@@ -352,7 +353,7 @@ static int hdmi_display_enable(struct omap_dss_device *dssdev)
if (hdmi.audio_configured) {
r = hdmi4_audio_config(&hdmi.core, &hdmi.wp, &hdmi.audio_config,
- hdmi.cfg.timings.pixelclock);
+ hdmi.cfg.vm.pixelclock);
if (r) {
DSSERR("Error restoring audio configuration: %d", r);
hdmi.audio_abort_cb(&hdmi.pdev->dev);
@@ -643,7 +644,7 @@ static int hdmi_audio_config(struct device *dev,
}
ret = hdmi4_audio_config(&hd->core, &hd->wp, dss_audio,
- hd->cfg.timings.pixelclock);
+ hd->cfg.vm.pixelclock);
if (!ret) {
hd->audio_configured = true;
hd->audio_config = *dss_audio;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index ef3afe99e487..e05b7ac4f7dd 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -310,7 +310,7 @@ void hdmi4_configure(struct hdmi_core_data *core,
struct hdmi_wp_data *wp, struct hdmi_config *cfg)
{
/* HDMI */
- struct omap_video_timings video_timing;
+ struct videomode vm;
struct hdmi_video_format video_format;
/* HDMI core */
struct hdmi_core_video_config v_core_cfg;
@@ -318,16 +318,16 @@ void hdmi4_configure(struct hdmi_core_data *core,
hdmi_core_init(&v_core_cfg);
- hdmi_wp_init_vid_fmt_timings(&video_format, &video_timing, cfg);
+ hdmi_wp_init_vid_fmt_timings(&video_format, &vm, cfg);
- hdmi_wp_video_config_timing(wp, &video_timing);
+ hdmi_wp_video_config_timing(wp, &vm);
/* video config */
video_format.packing_mode = HDMI_PACK_24b_RGB_YUV444_YUV422;
hdmi_wp_video_config_format(wp, &video_format);
- hdmi_wp_video_config_interface(wp, &video_timing);
+ hdmi_wp_video_config_interface(wp, &vm);
/*
* configure core video part
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index 0c0a5139a301..678dfb02764a 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -172,7 +172,7 @@ static void hdmi_power_off_core(struct omap_dss_device *dssdev)
static int hdmi_power_on_full(struct omap_dss_device *dssdev)
{
int r;
- struct omap_video_timings *p;
+ struct videomode *vm;
enum omap_channel channel = dssdev->dispc_channel;
struct dss_pll_clock_info hdmi_cinfo = { 0 };
unsigned pc;
@@ -181,12 +181,13 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
if (r)
return r;
- p = &hdmi.cfg.timings;
+ vm = &hdmi.cfg.vm;
- DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
+ DSSDBG("hdmi_power_on hactive= %d vactive = %d\n", vm->hactive,
+ vm->vactive);
- pc = p->pixelclock;
- if (p->double_pixel)
+ pc = vm->pixelclock;
+ if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
pc *= 2;
/* DSS_HDMI_TCLK is bitclk / 10 */
@@ -226,7 +227,7 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
hdmi5_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
/* tv size */
- dss_mgr_set_timings(channel, p);
+ dss_mgr_set_timings(channel, vm);
r = dss_mgr_enable(channel);
if (r)
@@ -272,30 +273,30 @@ static void hdmi_power_off_full(struct omap_dss_device *dssdev)
}
static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- if (!dispc_mgr_timings_ok(dssdev->dispc_channel, timings))
+ if (!dispc_mgr_timings_ok(dssdev->dispc_channel, vm))
return -EINVAL;
return 0;
}
static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
mutex_lock(&hdmi.lock);
- hdmi.cfg.timings = *timings;
+ hdmi.cfg.vm = *vm;
- dispc_set_tv_pclk(timings->pixelclock);
+ dispc_set_tv_pclk(vm->pixelclock);
mutex_unlock(&hdmi.lock);
}
static void hdmi_display_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- *timings = hdmi.cfg.timings;
+ *vm = hdmi.cfg.vm;
}
static void hdmi_dump_regs(struct seq_file *s)
@@ -378,7 +379,7 @@ static int hdmi_display_enable(struct omap_dss_device *dssdev)
if (hdmi.audio_configured) {
r = hdmi5_audio_config(&hdmi.core, &hdmi.wp, &hdmi.audio_config,
- hdmi.cfg.timings.pixelclock);
+ hdmi.cfg.vm.pixelclock);
if (r) {
DSSERR("Error restoring audio configuration: %d", r);
hdmi.audio_abort_cb(&hdmi.pdev->dev);
@@ -669,7 +670,7 @@ static int hdmi_audio_config(struct device *dev,
}
ret = hdmi5_audio_config(&hd->core, &hd->wp, dss_audio,
- hd->cfg.timings.pixelclock);
+ hd->cfg.vm.pixelclock);
if (!ret) {
hd->audio_configured = true;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index 8ab2093daa12..8de1d7b2ae55 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -292,35 +292,35 @@ static void hdmi_core_init(struct hdmi_core_vid_config *video_cfg,
{
DSSDBG("hdmi_core_init\n");
- video_cfg->v_fc_config.timings = cfg->timings;
+ video_cfg->v_fc_config.vm = cfg->vm;
/* video core */
video_cfg->data_enable_pol = 1; /* It is always 1*/
- video_cfg->hblank = cfg->timings.hfp +
- cfg->timings.hbp + cfg->timings.hsw;
+ video_cfg->hblank = cfg->vm.hfront_porch +
+ cfg->vm.hback_porch + cfg->vm.hsync_len;
video_cfg->vblank_osc = 0;
- video_cfg->vblank = cfg->timings.vsw +
- cfg->timings.vfp + cfg->timings.vbp;
+ video_cfg->vblank = cfg->vm.vsync_len + cfg->vm.vfront_porch +
+ cfg->vm.vback_porch;
video_cfg->v_fc_config.hdmi_dvi_mode = cfg->hdmi_dvi_mode;
- if (cfg->timings.interlace) {
+ if (cfg->vm.flags & DISPLAY_FLAGS_INTERLACED) {
/* set vblank_osc if vblank is fractional */
if (video_cfg->vblank % 2 != 0)
video_cfg->vblank_osc = 1;
- video_cfg->v_fc_config.timings.y_res /= 2;
+ video_cfg->v_fc_config.vm.vactive /= 2;
video_cfg->vblank /= 2;
- video_cfg->v_fc_config.timings.vfp /= 2;
- video_cfg->v_fc_config.timings.vsw /= 2;
- video_cfg->v_fc_config.timings.vbp /= 2;
+ video_cfg->v_fc_config.vm.vfront_porch /= 2;
+ video_cfg->v_fc_config.vm.vsync_len /= 2;
+ video_cfg->v_fc_config.vm.vback_porch /= 2;
}
- if (cfg->timings.double_pixel) {
- video_cfg->v_fc_config.timings.x_res *= 2;
+ if (cfg->vm.flags & DISPLAY_FLAGS_DOUBLECLK) {
+ video_cfg->v_fc_config.vm.hactive *= 2;
video_cfg->hblank *= 2;
- video_cfg->v_fc_config.timings.hfp *= 2;
- video_cfg->v_fc_config.timings.hsw *= 2;
- video_cfg->v_fc_config.timings.hbp *= 2;
+ video_cfg->v_fc_config.vm.hfront_porch *= 2;
+ video_cfg->v_fc_config.vm.hsync_len *= 2;
+ video_cfg->v_fc_config.vm.hback_porch *= 2;
}
}
@@ -329,13 +329,12 @@ static void hdmi_core_video_config(struct hdmi_core_data *core,
struct hdmi_core_vid_config *cfg)
{
void __iomem *base = core->base;
+ struct videomode *vm = &cfg->v_fc_config.vm;
unsigned char r = 0;
bool vsync_pol, hsync_pol;
- vsync_pol =
- cfg->v_fc_config.timings.vsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
- hsync_pol =
- cfg->v_fc_config.timings.hsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
+ vsync_pol = !!(vm->flags & DISPLAY_FLAGS_VSYNC_HIGH);
+ hsync_pol = !!(vm->flags & DISPLAY_FLAGS_HSYNC_HIGH);
/* Set hsync, vsync and data-enable polarity */
r = hdmi_read_reg(base, HDMI_CORE_FC_INVIDCONF);
@@ -343,20 +342,16 @@ static void hdmi_core_video_config(struct hdmi_core_data *core,
r = FLD_MOD(r, hsync_pol, 5, 5);
r = FLD_MOD(r, cfg->data_enable_pol, 4, 4);
r = FLD_MOD(r, cfg->vblank_osc, 1, 1);
- r = FLD_MOD(r, cfg->v_fc_config.timings.interlace, 0, 0);
+ r = FLD_MOD(r, !!(vm->flags & DISPLAY_FLAGS_INTERLACED), 0, 0);
hdmi_write_reg(base, HDMI_CORE_FC_INVIDCONF, r);
/* set x resolution */
- REG_FLD_MOD(base, HDMI_CORE_FC_INHACTIV1,
- cfg->v_fc_config.timings.x_res >> 8, 4, 0);
- REG_FLD_MOD(base, HDMI_CORE_FC_INHACTIV0,
- cfg->v_fc_config.timings.x_res & 0xFF, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_INHACTIV1, vm->hactive >> 8, 4, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_INHACTIV0, vm->hactive & 0xFF, 7, 0);
/* set y resolution */
- REG_FLD_MOD(base, HDMI_CORE_FC_INVACTIV1,
- cfg->v_fc_config.timings.y_res >> 8, 4, 0);
- REG_FLD_MOD(base, HDMI_CORE_FC_INVACTIV0,
- cfg->v_fc_config.timings.y_res & 0xFF, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_INVACTIV1, vm->vactive >> 8, 4, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_INVACTIV0, vm->vactive & 0xFF, 7, 0);
/* set horizontal blanking pixels */
REG_FLD_MOD(base, HDMI_CORE_FC_INHBLANK1, cfg->hblank >> 8, 4, 0);
@@ -366,30 +361,28 @@ static void hdmi_core_video_config(struct hdmi_core_data *core,
REG_FLD_MOD(base, HDMI_CORE_FC_INVBLANK, cfg->vblank, 7, 0);
/* set horizontal sync offset */
- REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINDELAY1,
- cfg->v_fc_config.timings.hfp >> 8, 4, 0);
- REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINDELAY0,
- cfg->v_fc_config.timings.hfp & 0xFF, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINDELAY1, vm->hfront_porch >> 8,
+ 4, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINDELAY0, vm->hfront_porch & 0xFF,
+ 7, 0);
/* set vertical sync offset */
- REG_FLD_MOD(base, HDMI_CORE_FC_VSYNCINDELAY,
- cfg->v_fc_config.timings.vfp, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_VSYNCINDELAY, vm->vfront_porch, 7, 0);
/* set horizontal sync pulse width */
- REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINWIDTH1,
- (cfg->v_fc_config.timings.hsw >> 8), 1, 0);
- REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINWIDTH0,
- cfg->v_fc_config.timings.hsw & 0xFF, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINWIDTH1, (vm->hsync_len >> 8),
+ 1, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINWIDTH0, vm->hsync_len & 0xFF,
+ 7, 0);
/* set vertical sync pulse width */
- REG_FLD_MOD(base, HDMI_CORE_FC_VSYNCINWIDTH,
- cfg->v_fc_config.timings.vsw, 5, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_VSYNCINWIDTH, vm->vsync_len, 5, 0);
/* select DVI mode */
REG_FLD_MOD(base, HDMI_CORE_FC_INVIDCONF,
- cfg->v_fc_config.hdmi_dvi_mode, 3, 3);
+ cfg->v_fc_config.hdmi_dvi_mode, 3, 3);
- if (cfg->v_fc_config.timings.double_pixel)
+ if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, 2, 7, 4);
else
REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, 1, 7, 4);
@@ -616,7 +609,7 @@ int hdmi5_core_handle_irqs(struct hdmi_core_data *core)
void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_config *cfg)
{
- struct omap_video_timings video_timing;
+ struct videomode vm;
struct hdmi_video_format video_format;
struct hdmi_core_vid_config v_core_cfg;
@@ -624,16 +617,16 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
hdmi_core_init(&v_core_cfg, cfg);
- hdmi_wp_init_vid_fmt_timings(&video_format, &video_timing, cfg);
+ hdmi_wp_init_vid_fmt_timings(&video_format, &vm, cfg);
- hdmi_wp_video_config_timing(wp, &video_timing);
+ hdmi_wp_video_config_timing(wp, &vm);
/* video config */
video_format.packing_mode = HDMI_PACK_24b_RGB_YUV444_YUV422;
hdmi_wp_video_config_format(wp, &video_format);
- hdmi_wp_video_config_interface(wp, &video_timing);
+ hdmi_wp_video_config_interface(wp, &vm);
/* support limited range with 24 bit color depth for now */
hdmi_core_configure_range(core);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
index 203694a52d18..b783d5a0750e 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
@@ -144,87 +144,84 @@ void hdmi_wp_video_config_format(struct hdmi_wp_data *wp,
}
void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
u32 r;
bool vsync_pol, hsync_pol;
DSSDBG("Enter hdmi_wp_video_config_interface\n");
- vsync_pol = timings->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
- hsync_pol = timings->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
+ vsync_pol = !!(vm->flags & DISPLAY_FLAGS_VSYNC_HIGH);
+ hsync_pol = !!(vm->flags & DISPLAY_FLAGS_HSYNC_HIGH);
r = hdmi_read_reg(wp->base, HDMI_WP_VIDEO_CFG);
r = FLD_MOD(r, vsync_pol, 7, 7);
r = FLD_MOD(r, hsync_pol, 6, 6);
- r = FLD_MOD(r, timings->interlace, 3, 3);
+ r = FLD_MOD(r, !!(vm->flags & DISPLAY_FLAGS_INTERLACED), 3, 3);
r = FLD_MOD(r, 1, 1, 0); /* HDMI_TIMING_MASTER_24BIT */
hdmi_write_reg(wp->base, HDMI_WP_VIDEO_CFG, r);
}
void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
u32 timing_h = 0;
u32 timing_v = 0;
- unsigned hsw_offset = 1;
+ unsigned hsync_len_offset = 1;
DSSDBG("Enter hdmi_wp_video_config_timing\n");
/*
* On OMAP4 and OMAP5 ES1 the HSW field is programmed as is. On OMAP5
- * ES2+ (including DRA7/AM5 SoCs) HSW field is programmed to hsw-1.
+ * ES2+ (including DRA7/AM5 SoCs) HSW field is programmed to hsync_len-1.
* However, we don't support OMAP5 ES1 at all, so we can just check for
* OMAP4 here.
*/
if (omapdss_get_version() == OMAPDSS_VER_OMAP4430_ES1 ||
omapdss_get_version() == OMAPDSS_VER_OMAP4430_ES2 ||
omapdss_get_version() == OMAPDSS_VER_OMAP4)
- hsw_offset = 0;
+ hsync_len_offset = 0;
- timing_h |= FLD_VAL(timings->hbp, 31, 20);
- timing_h |= FLD_VAL(timings->hfp, 19, 8);
- timing_h |= FLD_VAL(timings->hsw - hsw_offset, 7, 0);
+ timing_h |= FLD_VAL(vm->hback_porch, 31, 20);
+ timing_h |= FLD_VAL(vm->hfront_porch, 19, 8);
+ timing_h |= FLD_VAL(vm->hsync_len - hsync_len_offset, 7, 0);
hdmi_write_reg(wp->base, HDMI_WP_VIDEO_TIMING_H, timing_h);
- timing_v |= FLD_VAL(timings->vbp, 31, 20);
- timing_v |= FLD_VAL(timings->vfp, 19, 8);
- timing_v |= FLD_VAL(timings->vsw, 7, 0);
+ timing_v |= FLD_VAL(vm->vback_porch, 31, 20);
+ timing_v |= FLD_VAL(vm->vfront_porch, 19, 8);
+ timing_v |= FLD_VAL(vm->vsync_len, 7, 0);
hdmi_write_reg(wp->base, HDMI_WP_VIDEO_TIMING_V, timing_v);
}
void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
- struct omap_video_timings *timings, struct hdmi_config *param)
+ struct videomode *vm, struct hdmi_config *param)
{
DSSDBG("Enter hdmi_wp_video_init_format\n");
video_fmt->packing_mode = HDMI_PACK_10b_RGB_YUV444;
- video_fmt->y_res = param->timings.y_res;
- video_fmt->x_res = param->timings.x_res;
-
- timings->hbp = param->timings.hbp;
- timings->hfp = param->timings.hfp;
- timings->hsw = param->timings.hsw;
- timings->vbp = param->timings.vbp;
- timings->vfp = param->timings.vfp;
- timings->vsw = param->timings.vsw;
-
- timings->vsync_level = param->timings.vsync_level;
- timings->hsync_level = param->timings.hsync_level;
- timings->interlace = param->timings.interlace;
- timings->double_pixel = param->timings.double_pixel;
-
- if (param->timings.interlace) {
+ video_fmt->y_res = param->vm.vactive;
+ video_fmt->x_res = param->vm.hactive;
+
+ vm->hback_porch = param->vm.hback_porch;
+ vm->hfront_porch = param->vm.hfront_porch;
+ vm->hsync_len = param->vm.hsync_len;
+ vm->vback_porch = param->vm.vback_porch;
+ vm->vfront_porch = param->vm.vfront_porch;
+ vm->vsync_len = param->vm.vsync_len;
+
+ vm->flags = param->vm.flags;
+
+ if (param->vm.flags & DISPLAY_FLAGS_INTERLACED) {
video_fmt->y_res /= 2;
- timings->vbp /= 2;
- timings->vfp /= 2;
- timings->vsw /= 2;
+ vm->vback_porch /= 2;
+ vm->vfront_porch /= 2;
+ vm->vsync_len /= 2;
}
- if (param->timings.double_pixel) {
+ if (param->vm.flags & DISPLAY_FLAGS_DOUBLECLK) {
video_fmt->x_res *= 2;
- timings->hfp *= 2;
- timings->hsw *= 2;
- timings->hbp *= 2;
+ vm->hfront_porch *= 2;
+ vm->hsync_len *= 2;
+ vm->hback_porch *= 2;
}
}
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index 6eaf1adbd606..b420dde8c0fb 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -290,7 +290,7 @@ struct omap_dss_dsi_videomode_timings {
struct omap_dss_dsi_config {
enum omap_dss_dsi_mode mode;
enum omap_dss_dsi_pixel_format pixel_format;
- const struct omap_video_timings *timings;
+ const struct videomode *vm;
unsigned long hs_clk_min, hs_clk_max;
unsigned long lp_clk_min, lp_clk_max;
@@ -299,48 +299,12 @@ struct omap_dss_dsi_config {
enum omap_dss_dsi_trans_mode trans_mode;
};
-struct omap_video_timings {
- /* Unit: pixels */
- u16 x_res;
- /* Unit: pixels */
- u16 y_res;
- /* Unit: Hz */
- u32 pixelclock;
- /* Unit: pixel clocks */
- u16 hsw; /* Horizontal synchronization pulse width */
- /* Unit: pixel clocks */
- u16 hfp; /* Horizontal front porch */
- /* Unit: pixel clocks */
- u16 hbp; /* Horizontal back porch */
- /* Unit: line clocks */
- u16 vsw; /* Vertical synchronization pulse width */
- /* Unit: line clocks */
- u16 vfp; /* Vertical front porch */
- /* Unit: line clocks */
- u16 vbp; /* Vertical back porch */
-
- /* Vsync logic level */
- enum omap_dss_signal_level vsync_level;
- /* Hsync logic level */
- enum omap_dss_signal_level hsync_level;
- /* Interlaced or Progressive timings */
- bool interlace;
- /* Pixel clock edge to drive LCD data */
- enum omap_dss_signal_edge data_pclk_edge;
- /* Data enable logic level */
- enum omap_dss_signal_level de_level;
- /* Pixel clock edges to drive HSYNC and VSYNC signals */
- enum omap_dss_signal_edge sync_pclk_edge;
-
- bool double_pixel;
-};
-
-/* Hardcoded timings for tv modes. Venc only uses these to
+/* Hardcoded videomodes for tv. Venc only uses these to
* identify the mode, and does not actually use the configs
* itself. However, the configs should be something that
* a normal monitor can also show */
-extern const struct omap_video_timings omap_dss_pal_timings;
-extern const struct omap_video_timings omap_dss_ntsc_timings;
+extern const struct videomode omap_dss_pal_vm;
+extern const struct videomode omap_dss_ntsc_vm;
struct omap_dss_cpr_coefs {
s16 rr, rg, rb;
@@ -502,11 +466,11 @@ struct omapdss_dpi_ops {
void (*disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_data_lines)(struct omap_dss_device *dssdev, int data_lines);
};
@@ -521,11 +485,11 @@ struct omapdss_sdi_ops {
void (*disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_datapairs)(struct omap_dss_device *dssdev, int datapairs);
};
@@ -540,11 +504,11 @@ struct omapdss_dvi_ops {
void (*disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
};
struct omapdss_atv_ops {
@@ -557,11 +521,11 @@ struct omapdss_atv_ops {
void (*disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_type)(struct omap_dss_device *dssdev,
enum omap_dss_venc_type type);
@@ -582,11 +546,11 @@ struct omapdss_hdmi_ops {
void (*disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
bool (*detect)(struct omap_dss_device *dssdev);
@@ -692,7 +656,7 @@ struct omap_dss_device {
} phy;
struct {
- struct omap_video_timings timings;
+ struct videomode vm;
enum omap_dss_dsi_pixel_format dsi_pix_fmt;
enum omap_dss_dsi_mode dsi_mode;
@@ -785,11 +749,11 @@ struct omap_dss_driver {
int (*get_recommended_bpp)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
u32 (*get_wss)(struct omap_dss_device *dssdev);
@@ -819,11 +783,6 @@ struct omap_dss_device *omap_dss_find_device(void *data,
int (*match)(struct omap_dss_device *dssdev, void *data));
const char *omapdss_get_default_display_name(void);
-void videomode_to_omap_video_timings(const struct videomode *vm,
- struct omap_video_timings *ovt);
-void omap_video_timings_to_videomode(const struct omap_video_timings *ovt,
- struct videomode *vm);
-
int dss_feat_get_num_mgrs(void);
int dss_feat_get_num_ovls(void);
enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane);
@@ -852,7 +811,7 @@ void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
u16 *xres, u16 *yres);
int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev);
void omapdss_default_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
@@ -906,7 +865,7 @@ void dispc_mgr_go(enum omap_channel channel);
void dispc_mgr_set_lcd_config(enum omap_channel channel,
const struct dss_lcd_mgr_config *config);
void dispc_mgr_set_timings(enum omap_channel channel,
- const struct omap_video_timings *timings);
+ const struct videomode *vm);
void dispc_mgr_setup(enum omap_channel channel,
const struct omap_overlay_manager_info *info);
u32 dispc_mgr_gamma_size(enum omap_channel channel);
@@ -919,8 +878,7 @@ bool dispc_ovl_enabled(enum omap_plane plane);
void dispc_ovl_set_channel_out(enum omap_plane plane,
enum omap_channel channel);
int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
- bool replication, const struct omap_video_timings *mgr_timings,
- bool mem_to_mem);
+ bool replication, const struct videomode *vm, bool mem_to_mem);
enum omap_dss_output_id dispc_mgr_get_supported_outputs(enum omap_channel channel);
@@ -934,7 +892,7 @@ struct dss_mgr_ops {
int (*enable)(enum omap_channel channel);
void (*disable)(enum omap_channel channel);
void (*set_timings)(enum omap_channel channel,
- const struct omap_video_timings *timings);
+ const struct videomode *vm);
void (*set_lcd_config)(enum omap_channel channel,
const struct dss_lcd_mgr_config *config);
int (*register_framedone_handler)(enum omap_channel channel,
@@ -951,7 +909,7 @@ int dss_mgr_connect(enum omap_channel channel,
void dss_mgr_disconnect(enum omap_channel channel,
struct omap_dss_device *dst);
void dss_mgr_set_timings(enum omap_channel channel,
- const struct omap_video_timings *timings);
+ const struct videomode *vm);
void dss_mgr_set_lcd_config(enum omap_channel channel,
const struct dss_lcd_mgr_config *config);
int dss_mgr_enable(enum omap_channel channel);
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 24f859488201..a901af5a9bc3 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -201,10 +201,9 @@ void dss_mgr_disconnect(enum omap_channel channel,
}
EXPORT_SYMBOL(dss_mgr_disconnect);
-void dss_mgr_set_timings(enum omap_channel channel,
- const struct omap_video_timings *timings)
+void dss_mgr_set_timings(enum omap_channel channel, const struct videomode *vm)
{
- dss_mgr_ops->set_timings(channel, timings);
+ dss_mgr_ops->set_timings(channel, vm);
}
EXPORT_SYMBOL(dss_mgr_set_timings);
diff --git a/drivers/gpu/drm/omapdrm/dss/rfbi.c b/drivers/gpu/drm/omapdrm/dss/rfbi.c
index cd53566d75eb..09724757366a 100644
--- a/drivers/gpu/drm/omapdrm/dss/rfbi.c
+++ b/drivers/gpu/drm/omapdrm/dss/rfbi.c
@@ -113,7 +113,7 @@ static struct {
struct semaphore bus_lock;
- struct omap_video_timings timings;
+ struct videomode vm;
int pixel_size;
int data_lines;
struct rfbi_timings intf_timings;
@@ -308,15 +308,15 @@ static int rfbi_transfer_area(struct omap_dss_device *dssdev,
u32 l;
int r;
struct omap_overlay_manager *mgr = rfbi.output.manager;
- u16 width = rfbi.timings.x_res;
- u16 height = rfbi.timings.y_res;
+ u16 width = rfbi.vm.hactive;
+ u16 height = rfbi.vm.vactive;
/*BUG_ON(callback == 0);*/
BUG_ON(rfbi.framedone_callback != NULL);
DSSDBG("rfbi_transfer_area %dx%d\n", width, height);
- dss_mgr_set_timings(mgr, &rfbi.timings);
+ dss_mgr_set_timings(mgr, &rfbi.vm);
r = dss_mgr_enable(mgr);
if (r)
@@ -777,8 +777,8 @@ static int rfbi_update(struct omap_dss_device *dssdev, void (*callback)(void *),
static void rfbi_set_size(struct omap_dss_device *dssdev, u16 w, u16 h)
{
- rfbi.timings.x_res = w;
- rfbi.timings.y_res = h;
+ rfbi.vm.hactive = w;
+ rfbi.vm.vactive = h;
}
static void rfbi_set_pixel_size(struct omap_dss_device *dssdev, int pixel_size)
@@ -854,25 +854,30 @@ static void rfbi_config_lcd_manager(struct omap_dss_device *dssdev)
dss_mgr_set_lcd_config(mgr, &mgr_config);
/*
- * Set rfbi.timings with default values, the x_res and y_res fields
+ * Set rfbi.vm with default values, the hactive and vactive fields
* are expected to be already configured by the panel driver via
* omapdss_rfbi_set_size()
*/
- rfbi.timings.hsw = 1;
- rfbi.timings.hfp = 1;
- rfbi.timings.hbp = 1;
- rfbi.timings.vsw = 1;
- rfbi.timings.vfp = 0;
- rfbi.timings.vbp = 0;
-
- rfbi.timings.interlace = false;
- rfbi.timings.hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- rfbi.timings.vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- rfbi.timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- rfbi.timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH;
- rfbi.timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE;
-
- dss_mgr_set_timings(mgr, &rfbi.timings);
+ rfbi.vm.hsync_len = 1;
+ rfbi.vm.hfront_porch = 1;
+ rfbi.vm.hback_porch = 1;
+ rfbi.vm.vsync_len = 1;
+ rfbi.vm.vfront_porch = 0;
+ rfbi.vm.vback_porch = 0;
+
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_INTERLACED;
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_HSYNC_LOW;
+ rfbi.vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH;
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW;
+ rfbi.vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH;
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE;
+ rfbi.vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_DE_LOW;
+ rfbi.vm.flags |= DISPLAY_FLAGS_DE_HIGH;
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE;
+ rfbi.vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
+
+ dss_mgr_set_timings(mgr, &rfbi.vm);
}
static int rfbi_display_enable(struct omap_dss_device *dssdev)
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index 0a96c321ce62..b3bda2d3c08d 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -39,7 +39,7 @@ static struct {
struct regulator *vdds_sdi_reg;
struct dss_lcd_mgr_config mgr_config;
- struct omap_video_timings timings;
+ struct videomode vm;
int datapairs;
struct omap_dss_device output;
@@ -131,7 +131,7 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
{
struct omap_dss_device *out = &sdi.output;
enum omap_channel channel = dssdev->dispc_channel;
- struct omap_video_timings *t = &sdi.timings;
+ struct videomode *vm = &sdi.vm;
unsigned long fck;
struct dispc_clock_info dispc_cinfo;
unsigned long pck;
@@ -151,10 +151,9 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
goto err_get_dispc;
/* 15.5.9.1.2 */
- t->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- t->sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
+ vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE | DISPLAY_FLAGS_SYNC_POSEDGE;
- r = sdi_calc_clock_div(t->pixelclock, &fck, &dispc_cinfo);
+ r = sdi_calc_clock_div(vm->pixelclock, &fck, &dispc_cinfo);
if (r)
goto err_calc_clock_div;
@@ -162,15 +161,15 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div;
- if (pck != t->pixelclock) {
- DSSWARN("Could not find exact pixel clock. Requested %d Hz, got %lu Hz\n",
- t->pixelclock, pck);
+ if (pck != vm->pixelclock) {
+ DSSWARN("Could not find exact pixel clock. Requested %lu Hz, got %lu Hz\n",
+ vm->pixelclock, pck);
- t->pixelclock = pck;
+ vm->pixelclock = pck;
}
- dss_mgr_set_timings(channel, t);
+ dss_mgr_set_timings(channel, vm);
r = dss_set_fck_rate(fck);
if (r)
@@ -229,26 +228,26 @@ static void sdi_display_disable(struct omap_dss_device *dssdev)
}
static void sdi_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- sdi.timings = *timings;
+ sdi.vm = *vm;
}
static void sdi_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- *timings = sdi.timings;
+ *vm = sdi.vm;
}
static int sdi_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
enum omap_channel channel = dssdev->dispc_channel;
- if (!dispc_mgr_timings_ok(channel, timings))
+ if (!dispc_mgr_timings_ok(channel, vm))
return -EINVAL;
- if (timings->pixelclock == 0)
+ if (vm->pixelclock == 0)
return -EINVAL;
return 0;
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 6eedf2118708..d74f7fcc2e46 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -262,47 +262,41 @@ static const struct venc_config venc_config_pal_bdghi = {
.fid_ext_start_y__fid_ext_offset_y = 0x01380005,
};
-const struct omap_video_timings omap_dss_pal_timings = {
- .x_res = 720,
- .y_res = 574,
+const struct videomode omap_dss_pal_vm = {
+ .hactive = 720,
+ .vactive = 574,
.pixelclock = 13500000,
- .hsw = 64,
- .hfp = 12,
- .hbp = 68,
- .vsw = 5,
- .vfp = 5,
- .vbp = 41,
-
- .interlace = true,
-
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .hsync_len = 64,
+ .hfront_porch = 12,
+ .hback_porch = 68,
+ .vsync_len = 5,
+ .vfront_porch = 5,
+ .vback_porch = 41,
+
+ .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW |
+ DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_DE_HIGH |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_NEGEDGE,
};
-EXPORT_SYMBOL(omap_dss_pal_timings);
+EXPORT_SYMBOL(omap_dss_pal_vm);
-const struct omap_video_timings omap_dss_ntsc_timings = {
- .x_res = 720,
- .y_res = 482,
+const struct videomode omap_dss_ntsc_vm = {
+ .hactive = 720,
+ .vactive = 482,
.pixelclock = 13500000,
- .hsw = 64,
- .hfp = 16,
- .hbp = 58,
- .vsw = 6,
- .vfp = 6,
- .vbp = 31,
-
- .interlace = true,
-
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .hsync_len = 64,
+ .hfront_porch = 16,
+ .hback_porch = 58,
+ .vsync_len = 6,
+ .vfront_porch = 6,
+ .vback_porch = 31,
+
+ .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW |
+ DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_DE_HIGH |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_NEGEDGE,
};
-EXPORT_SYMBOL(omap_dss_ntsc_timings);
+EXPORT_SYMBOL(omap_dss_ntsc_vm);
static struct {
struct platform_device *pdev;
@@ -313,7 +307,7 @@ static struct {
struct clk *tv_dac_clk;
- struct omap_video_timings timings;
+ struct videomode vm;
enum omap_dss_venc_type type;
bool invert_polarity;
@@ -427,13 +421,12 @@ static void venc_runtime_put(void)
WARN_ON(r < 0 && r != -ENOSYS);
}
-static const struct venc_config *venc_timings_to_config(
- struct omap_video_timings *timings)
+static const struct venc_config *venc_timings_to_config(struct videomode *vm)
{
- if (memcmp(&omap_dss_pal_timings, timings, sizeof(*timings)) == 0)
+ if (memcmp(&omap_dss_pal_vm, vm, sizeof(*vm)) == 0)
return &venc_config_pal_trm;
- if (memcmp(&omap_dss_ntsc_timings, timings, sizeof(*timings)) == 0)
+ if (memcmp(&omap_dss_ntsc_vm, vm, sizeof(*vm)) == 0)
return &venc_config_ntsc_trm;
BUG();
@@ -451,7 +444,7 @@ static int venc_power_on(struct omap_dss_device *dssdev)
goto err0;
venc_reset();
- venc_write_config(venc_timings_to_config(&venc.timings));
+ venc_write_config(venc_timings_to_config(&venc.vm));
dss_set_venc_output(venc.type);
dss_set_dac_pwrdn_bgz(1);
@@ -468,7 +461,7 @@ static int venc_power_on(struct omap_dss_device *dssdev)
venc_write_reg(VENC_OUTPUT_CONTROL, l);
- dss_mgr_set_timings(channel, &venc.timings);
+ dss_mgr_set_timings(channel, &venc.vm);
r = regulator_enable(venc.vdda_dac_reg);
if (r)
@@ -546,17 +539,17 @@ static void venc_display_disable(struct omap_dss_device *dssdev)
}
static void venc_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
DSSDBG("venc_set_timings\n");
mutex_lock(&venc.venc_lock);
/* Reset WSS data when the TV standard changes. */
- if (memcmp(&venc.timings, timings, sizeof(*timings)))
+ if (memcmp(&venc.vm, vm, sizeof(*vm)))
venc.wss_data = 0;
- venc.timings = *timings;
+ venc.vm = *vm;
dispc_set_tv_pclk(13500000);
@@ -564,25 +557,25 @@ static void venc_set_timings(struct omap_dss_device *dssdev,
}
static int venc_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
DSSDBG("venc_check_timings\n");
- if (memcmp(&omap_dss_pal_timings, timings, sizeof(*timings)) == 0)
+ if (memcmp(&omap_dss_pal_vm, vm, sizeof(*vm)) == 0)
return 0;
- if (memcmp(&omap_dss_ntsc_timings, timings, sizeof(*timings)) == 0)
+ if (memcmp(&omap_dss_ntsc_vm, vm, sizeof(*vm)) == 0)
return 0;
return -EINVAL;
}
static void venc_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
mutex_lock(&venc.venc_lock);
- *timings = venc.timings;
+ *vm = venc.vm;
mutex_unlock(&venc.venc_lock);
}
@@ -602,7 +595,7 @@ static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss)
mutex_lock(&venc.venc_lock);
- config = venc_timings_to_config(&venc.timings);
+ config = venc_timings_to_config(&venc.vm);
/* Invert due to VENC_L21_WC_CTL:INV=1 */
venc.wss_data = (wss ^ 0xfffff) << 8;
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 137fe690a0da..2580e8673908 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -42,73 +42,6 @@ bool omap_connector_get_hdmi_mode(struct drm_connector *connector)
return omap_connector->hdmi_mode;
}
-void copy_timings_omap_to_drm(struct drm_display_mode *mode,
- struct omap_video_timings *timings)
-{
- mode->clock = timings->pixelclock / 1000;
-
- mode->hdisplay = timings->x_res;
- mode->hsync_start = mode->hdisplay + timings->hfp;
- mode->hsync_end = mode->hsync_start + timings->hsw;
- mode->htotal = mode->hsync_end + timings->hbp;
-
- mode->vdisplay = timings->y_res;
- mode->vsync_start = mode->vdisplay + timings->vfp;
- mode->vsync_end = mode->vsync_start + timings->vsw;
- mode->vtotal = mode->vsync_end + timings->vbp;
-
- mode->flags = 0;
-
- if (timings->interlace)
- mode->flags |= DRM_MODE_FLAG_INTERLACE;
-
- if (timings->double_pixel)
- mode->flags |= DRM_MODE_FLAG_DBLCLK;
-
- if (timings->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
- mode->flags |= DRM_MODE_FLAG_PHSYNC;
- else
- mode->flags |= DRM_MODE_FLAG_NHSYNC;
-
- if (timings->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
- mode->flags |= DRM_MODE_FLAG_PVSYNC;
- else
- mode->flags |= DRM_MODE_FLAG_NVSYNC;
-}
-
-void copy_timings_drm_to_omap(struct omap_video_timings *timings,
- struct drm_display_mode *mode)
-{
- timings->pixelclock = mode->clock * 1000;
-
- timings->x_res = mode->hdisplay;
- timings->hfp = mode->hsync_start - mode->hdisplay;
- timings->hsw = mode->hsync_end - mode->hsync_start;
- timings->hbp = mode->htotal - mode->hsync_end;
-
- timings->y_res = mode->vdisplay;
- timings->vfp = mode->vsync_start - mode->vdisplay;
- timings->vsw = mode->vsync_end - mode->vsync_start;
- timings->vbp = mode->vtotal - mode->vsync_end;
-
- timings->interlace = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
- timings->double_pixel = !!(mode->flags & DRM_MODE_FLAG_DBLCLK);
-
- if (mode->flags & DRM_MODE_FLAG_PHSYNC)
- timings->hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- else
- timings->hsync_level = OMAPDSS_SIG_ACTIVE_LOW;
-
- if (mode->flags & DRM_MODE_FLAG_PVSYNC)
- timings->vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- else
- timings->vsync_level = OMAPDSS_SIG_ACTIVE_LOW;
-
- timings->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- timings->de_level = OMAPDSS_SIG_ACTIVE_HIGH;
- timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE;
-}
-
static enum drm_connector_status omap_connector_detect(
struct drm_connector *connector, bool force)
{
@@ -185,11 +118,11 @@ static int omap_connector_get_modes(struct drm_connector *connector)
kfree(edid);
} else {
struct drm_display_mode *mode = drm_mode_create(dev);
- struct omap_video_timings timings = {0};
+ struct videomode vm = {0};
- dssdrv->get_timings(dssdev, &timings);
+ dssdrv->get_timings(dssdev, &vm);
- copy_timings_omap_to_drm(mode, &timings);
+ drm_display_mode_from_videomode(&vm, mode);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_set_name(mode);
@@ -207,12 +140,14 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
struct omap_connector *omap_connector = to_omap_connector(connector);
struct omap_dss_device *dssdev = omap_connector->dssdev;
struct omap_dss_driver *dssdrv = dssdev->driver;
- struct omap_video_timings timings = {0};
+ struct videomode vm = {0};
struct drm_device *dev = connector->dev;
struct drm_display_mode *new_mode;
int r, ret = MODE_BAD;
- copy_timings_drm_to_omap(&timings, mode);
+ drm_display_mode_to_videomode(mode, &vm);
+ vm.flags |= DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_NEGEDGE;
mode->vrefresh = drm_mode_vrefresh(mode);
/*
@@ -221,13 +156,13 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
* panel's timings
*/
if (dssdrv->check_timings) {
- r = dssdrv->check_timings(dssdev, &timings);
+ r = dssdrv->check_timings(dssdev, &vm);
} else {
- struct omap_video_timings t = {0};
+ struct videomode t = {0};
dssdrv->get_timings(dssdev, &t);
- if (memcmp(&timings, &t, sizeof(struct omap_video_timings)))
+ if (memcmp(&vm, &t, sizeof(struct videomode)))
r = -EINVAL;
else
r = 0;
@@ -236,7 +171,7 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
if (!r) {
/* check if vrefresh is still valid */
new_mode = drm_mode_duplicate(dev, mode);
- new_mode->clock = timings.pixelclock / 1000;
+ new_mode->clock = vm.pixelclock / 1000;
new_mode->vrefresh = 0;
if (mode->vrefresh == drm_mode_vrefresh(new_mode))
ret = MODE_OK;
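For reference, a minimal sketch (not part of the patch) of the conversion the connector and CRTC code now rely on after dropping copy_timings_drm_to_omap(): drm_display_mode_to_videomode() fills in the geometry and sync polarities, and the driver ORs in the flags a DRM mode cannot express. The helper name below is hypothetical.

#include <drm/drm_modes.h>
#include <video/display_timing.h>
#include <video/videomode.h>

static void example_drm_mode_to_omap_vm(const struct drm_display_mode *mode,
                                        struct videomode *vm)
{
        drm_display_mode_to_videomode(mode, vm);

        /* DISPC expects data-enable active high, pixel data driven on the
         * rising clock edge and sync on the falling edge; drm_display_mode
         * has no fields for these, so the flags are added unconditionally,
         * exactly as the hunks above do inline. */
        vm->flags |= DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
                     DISPLAY_FLAGS_SYNC_NEGEDGE;
}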
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 16c691dbc372..8dea89030e66 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -34,7 +34,7 @@ struct omap_crtc {
const char *name;
enum omap_channel channel;
- struct omap_video_timings timings;
+ struct videomode vm;
struct omap_drm_irq vblank_irq;
struct omap_drm_irq error_irq;
@@ -56,10 +56,10 @@ uint32_t pipe2vbl(struct drm_crtc *crtc)
return dispc_mgr_get_vsync_irq(omap_crtc->channel);
}
-struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc)
+struct videomode *omap_crtc_timings(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- return &omap_crtc->timings;
+ return &omap_crtc->vm;
}
enum omap_channel omap_crtc_channel(struct drm_crtc *crtc)
@@ -201,7 +201,7 @@ static int omap_crtc_dss_enable(enum omap_channel channel)
dispc_mgr_setup(omap_crtc->channel, &info);
dispc_mgr_set_timings(omap_crtc->channel,
- &omap_crtc->timings);
+ &omap_crtc->vm);
omap_crtc_set_enabled(&omap_crtc->base, true);
return 0;
@@ -215,11 +215,11 @@ static void omap_crtc_dss_disable(enum omap_channel channel)
}
static void omap_crtc_dss_set_timings(enum omap_channel channel,
- const struct omap_video_timings *timings)
+ const struct videomode *vm)
{
struct omap_crtc *omap_crtc = omap_crtcs[channel];
DBG("%s", omap_crtc->name);
- omap_crtc->timings = *timings;
+ omap_crtc->vm = *vm;
}
static void omap_crtc_dss_set_lcd_config(enum omap_channel channel,
@@ -369,7 +369,10 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
mode->vdisplay, mode->vsync_start, mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
- copy_timings_drm_to_omap(&omap_crtc->timings, mode);
+ drm_display_mode_to_videomode(mode, &omap_crtc->vm);
+ omap_crtc->vm.flags |= DISPLAY_FLAGS_DE_HIGH |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_NEGEDGE;
}
static int omap_crtc_atomic_check(struct drm_crtc *crtc,
@@ -411,19 +414,6 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
dispc_mgr_set_gamma(omap_crtc->channel, lut, length);
}
- if (crtc->state->color_mgmt_changed) {
- struct drm_color_lut *lut = NULL;
- uint length = 0;
-
- if (crtc->state->gamma_lut) {
- lut = (struct drm_color_lut *)
- crtc->state->gamma_lut->data;
- length = crtc->state->gamma_lut->length /
- sizeof(*lut);
- }
- dispc_mgr_set_gamma(omap_crtc->channel, lut, length);
- }
-
if (dispc_mgr_is_enabled(omap_crtc->channel)) {
DBG("%s: GO", omap_crtc->name);
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index dcc30a98b9d4..4c51135eb9a6 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -148,7 +148,7 @@ static inline void omap_fbdev_free(struct drm_device *dev)
}
#endif
-struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc);
+struct videomode *omap_crtc_timings(struct drm_crtc *crtc);
enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
void omap_crtc_pre_init(void);
void omap_crtc_pre_uninit(void);
@@ -171,11 +171,6 @@ struct drm_encoder *omap_connector_attached_encoder(
struct drm_connector *connector);
bool omap_connector_get_hdmi_mode(struct drm_connector *connector);
-void copy_timings_omap_to_drm(struct drm_display_mode *mode,
- struct omap_video_timings *timings);
-void copy_timings_drm_to_omap(struct omap_video_timings *timings,
- struct drm_display_mode *mode);
-
uint32_t omap_framebuffer_get_formats(uint32_t *pixel_formats,
uint32_t max_formats, enum omap_color_mode supported_modes);
struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 0bbb9c59622e..a20f30039aee 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -102,7 +102,7 @@ static void omap_encoder_disable(struct drm_encoder *encoder)
static int omap_encoder_update(struct drm_encoder *encoder,
enum omap_channel channel,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct drm_device *dev = encoder->dev;
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
@@ -111,13 +111,13 @@ static int omap_encoder_update(struct drm_encoder *encoder,
int ret;
if (dssdrv->check_timings) {
- ret = dssdrv->check_timings(dssdev, timings);
+ ret = dssdrv->check_timings(dssdev, vm);
} else {
- struct omap_video_timings t = {0};
+ struct videomode t = {0};
dssdrv->get_timings(dssdev, &t);
- if (memcmp(timings, &t, sizeof(struct omap_video_timings)))
+ if (memcmp(vm, &t, sizeof(struct videomode)))
ret = -EINVAL;
else
ret = 0;
@@ -129,7 +129,7 @@ static int omap_encoder_update(struct drm_encoder *encoder,
}
if (dssdrv->set_timings)
- dssdrv->set_timings(dssdev, timings);
+ dssdrv->set_timings(dssdev, vm);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index adb10fbe918d..8d8ac173f55d 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -82,6 +82,7 @@ fallback:
static struct fb_ops omap_fb_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
/* Note: to properly handle manual update displays, we wrap the
* basic fbdev ops which write to the framebuffer
@@ -92,11 +93,7 @@ static struct fb_ops omap_fb_ops = {
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
.fb_pan_display = omap_fbdev_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
};
static int omap_fbdev_create(struct drm_fb_helper *helper,
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 505dee0db973..d4e1e11466f8 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -336,8 +336,10 @@ static void omap_gem_detach_pages(struct drm_gem_object *obj)
if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
int i, npages = obj->size >> PAGE_SHIFT;
for (i = 0; i < npages; i++) {
- dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
- PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (omap_obj->addrs[i])
+ dma_unmap_page(obj->dev->dev,
+ omap_obj->addrs[i],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
}
}
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 0ffd5b930ec0..9c43cb481e62 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -131,7 +131,9 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
/* and finally, update omapdss: */
ret = dispc_ovl_setup(omap_plane->id, &info, false,
omap_crtc_timings(state->crtc), false);
- if (WARN_ON(ret)) {
+ if (ret) {
+ dev_err(plane->dev->dev, "Failed to setup plane %s\n",
+ omap_plane->name);
dispc_ovl_enable(omap_plane->id, false);
return;
}
@@ -157,12 +159,20 @@ static int omap_plane_atomic_check(struct drm_plane *plane,
{
struct drm_crtc_state *crtc_state;
- if (!state->crtc)
+ if (!state->fb)
return 0;
- crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
+ /* crtc should only be NULL when disabling (i.e., !state->fb) */
+ if (WARN_ON(!state->crtc))
+ return 0;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state, state->crtc);
+ /* we should have a crtc state if the plane is attached to a crtc */
+ if (WARN_ON(!crtc_state))
+ return 0;
+
+ if (!crtc_state->enable)
+ return 0;
if (state->crtc_x < 0 || state->crtc_y < 0)
return -EINVAL;
@@ -173,11 +183,9 @@ static int omap_plane_atomic_check(struct drm_plane *plane,
if (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay)
return -EINVAL;
- if (state->fb) {
- if (state->rotation != DRM_ROTATE_0 &&
- !omap_framebuffer_supports_rotation(state->fb))
- return -EINVAL;
- }
+ if (state->rotation != DRM_ROTATE_0 &&
+ !omap_framebuffer_supports_rotation(state->fb))
+ return -EINVAL;
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 2cd879a4ae15..7e305d8a4146 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -81,16 +81,10 @@ static struct fb_deferred_io qxl_defio = {
static struct fb_ops qxlfb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_sys_fillrect,
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
static void qxlfb_destroy_pinned_object(struct drm_gem_object *gobj)
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 74f99bac08b1..05f4ebe31ce2 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1156,7 +1156,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- char *format_name;
+ struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1260,9 +1260,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- format_name = drm_get_format_name(target_fb->pixel_format);
- DRM_ERROR("Unsupported screen format %s\n", format_name);
- kfree(format_name);
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->pixel_format, &format_name));
return -EINVAL;
}
@@ -1473,7 +1472,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- char *format_name;
+ struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1563,9 +1562,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- format_name = drm_get_format_name(target_fb->pixel_format);
- DRM_ERROR("Unsupported screen format %s\n", format_name);
- kfree(format_name);
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->pixel_format, &format_name));
return -EINVAL;
}
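The two radeon hunks above switch to the caller-allocated form of drm_get_format_name(): the caller passes a struct drm_format_name_buf on the stack, so there is no allocation and no kfree(). A minimal usage sketch with a hypothetical caller:

#include <drm/drmP.h>
#include <drm/drm_fourcc.h>

static void example_report_bad_format(u32 pixel_format)
{
        struct drm_format_name_buf buf;

        /* The returned pointer is the string inside buf; nothing to free. */
        DRM_ERROR("Unsupported screen format %s\n",
                  drm_get_format_name(pixel_format, &buf));
}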
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 103fc8650197..a0d4a0522fdc 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1396,9 +1396,7 @@ static void cayman_pcie_gart_fini(struct radeon_device *rdev)
void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
int ring, u32 cp_int_cntl)
{
- u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
-
- WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
+ WREG32(SRBM_GFX_CNTL, RINGID(ring));
WREG32(CP_INT_CNTL, cp_int_cntl);
}
diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
index 2d465648856a..474a8a1886f7 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
@@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
tmp &= AUX_HPD_SEL(0x7);
tmp |= AUX_HPD_SEL(chan->rec.hpd);
- tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1);
+ tmp |= AUX_EN | AUX_LS_READ_EN;
WREG32(AUX_CONTROL + aux_offset[instance], tmp);
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index f65f29911dca..899b6a1644bd 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -74,18 +74,12 @@ radeonfb_release(struct fb_info *info, int user)
static struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_open = radeonfb_open,
.fb_release = radeonfb_release,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
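A sketch of what DRM_FB_HELPER_DEFAULT_OPS buys the fbdev emulation code in these hunks: it supplies the generic drm_fb_helper hooks that the drivers here delete by hand (check_var, set_par, blank, pan_display, setcmap and the debug enter/leave pair), so an fb_ops table only spells out the pieces it customises. The struct below is a hypothetical driver, not one from this series.

#include <linux/fb.h>
#include <linux/module.h>
#include <drm/drm_fb_helper.h>

static struct fb_ops example_fb_ops = {
        .owner          = THIS_MODULE,
        /* Presumed to expand to the drm_fb_helper_check_var/set_par/blank/
         * pan_display/setcmap/debug_enter/debug_leave assignments removed
         * from the drivers above. */
        DRM_FB_HELPER_DEFAULT_OPS,
        .fb_fillrect    = drm_fb_helper_cfb_fillrect,
        .fb_copyarea    = drm_fb_helper_cfb_copyarea,
        .fb_imageblit   = drm_fb_helper_cfb_imageblit,
};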
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index be30861afae9..41b72ce6613f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -446,6 +446,10 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
int radeon_bo_init(struct radeon_device *rdev)
{
+ /* reserve PAT memory space to WC for VRAM */
+ arch_io_reserve_memtype_wc(rdev->mc.aper_base,
+ rdev->mc.aper_size);
+
/* Add an MTRR for the VRAM */
if (!rdev->fastfb_working) {
rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
@@ -463,6 +467,7 @@ void radeon_bo_fini(struct radeon_device *rdev)
{
radeon_ttm_fini(rdev);
arch_phys_wc_del(rdev->mc.vram_mtrr);
+ arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
}
/* Returns how many bytes TTM can move per IB.
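The radeon_bo_init()/radeon_bo_fini() hunks pair a PAT write-combine reservation with the existing MTRR handling for the VRAM aperture. A minimal sketch of that pairing, with hypothetical function names (the driver itself ignores the reserve return value):

#include <linux/io.h>

static int example_vram_init(resource_size_t base, resource_size_t size)
{
        int ret;

        /* Mark the aperture write-combined in PAT before adding the MTRR. */
        ret = arch_io_reserve_memtype_wc(base, size);
        if (ret)
                return ret;
        /* ... arch_phys_wc_add(), TTM initialization, etc. ... */
        return 0;
}

static void example_vram_fini(resource_size_t base, resource_size_t size)
{
        /* Undo the reservation on teardown, after arch_phys_wc_del(). */
        arch_io_free_memtype_wc(base, size);
}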
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 89bdf20344ae..c49934527a87 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2999,6 +2999,49 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
int i;
struct si_dpm_quirk *p = si_dpm_quirk_list;
+ /* limit all SI kickers */
+ if (rdev->family == CHIP_PITCAIRN) {
+ if ((rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->device == 0x6810) ||
+ (rdev->pdev->device == 0x6811) ||
+ (rdev->pdev->device == 0x6816) ||
+ (rdev->pdev->device == 0x6817) ||
+ (rdev->pdev->device == 0x6806))
+ max_mclk = 120000;
+ } else if (rdev->family == CHIP_VERDE) {
+ if ((rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->revision == 0x87) ||
+ (rdev->pdev->device == 0x6820) ||
+ (rdev->pdev->device == 0x6821) ||
+ (rdev->pdev->device == 0x6822) ||
+ (rdev->pdev->device == 0x6823) ||
+ (rdev->pdev->device == 0x682A) ||
+ (rdev->pdev->device == 0x682B)) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
+ } else if (rdev->family == CHIP_OLAND) {
+ if ((rdev->pdev->revision == 0xC7) ||
+ (rdev->pdev->revision == 0x80) ||
+ (rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->device == 0x6604) ||
+ (rdev->pdev->device == 0x6605)) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
+ } else if (rdev->family == CHIP_HAINAN) {
+ if ((rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->revision == 0xC3) ||
+ (rdev->pdev->device == 0x6664) ||
+ (rdev->pdev->device == 0x6665) ||
+ (rdev->pdev->device == 0x6667)) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
+ }
/* Apply dpm quirks */
while (p && p->chip_device != 0) {
if (rdev->pdev->vendor == p->chip_vendor &&
@@ -3011,16 +3054,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
}
++p;
}
- /* limit mclk on all R7 370 parts for stability */
- if (rdev->pdev->device == 0x6811 &&
- rdev->pdev->revision == 0x81)
- max_mclk = 120000;
- /* limit sclk/mclk on Jet parts for stability */
- if (rdev->pdev->device == 0x6665 &&
- rdev->pdev->revision == 0xc3) {
- max_sclk = 75000;
- max_mclk = 80000;
- }
if (rps->vce_active) {
rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 7316fc7fa0bd..a2ec6d8796a0 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -149,8 +149,8 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0);
/* Signal polarities */
- value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL)
- | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : DSMR_HSL)
+ value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0)
+ | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0)
| DSMR_DIPM_DISP | DSMR_CSPM;
rcar_du_crtc_write(rcrtc, DSMR, value);
@@ -172,7 +172,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
mode->crtc_vsync_start - 1);
rcar_du_crtc_write(rcrtc, VCR, mode->crtc_vtotal - 1);
- rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start);
+ rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start - 1);
rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay);
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 73c971e39b1c..c05e00872778 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -110,6 +110,27 @@ static const struct rcar_du_device_info rcar_du_r8a7791_info = {
.num_lvds = 1,
};
+static const struct rcar_du_device_info rcar_du_r8a7792_info = {
+ .gen = 2,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ | RCAR_DU_FEATURE_EXT_CTRL_REGS,
+ .num_crtcs = 2,
+ .routes = {
+ /* R8A7792 has two RGB outputs. */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(0),
+ .encoder_type = DRM_MODE_ENCODER_NONE,
+ .port = 0,
+ },
+ [RCAR_DU_OUTPUT_DPAD1] = {
+ .possible_crtcs = BIT(1),
+ .encoder_type = DRM_MODE_ENCODER_NONE,
+ .port = 1,
+ },
+ },
+ .num_lvds = 0,
+};
+
static const struct rcar_du_device_info rcar_du_r8a7794_info = {
.gen = 2,
.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
@@ -157,13 +178,39 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = {
.num_lvds = 1,
};
+static const struct rcar_du_device_info rcar_du_r8a7796_info = {
+ .gen = 3,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ | RCAR_DU_FEATURE_EXT_CTRL_REGS
+ | RCAR_DU_FEATURE_VSP1_SOURCE,
+ .num_crtcs = 3,
+ .routes = {
+ /* R8A7796 has one RGB output, one LVDS output and one
+ * (currently unsupported) HDMI output.
+ */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(2),
+ .encoder_type = DRM_MODE_ENCODER_NONE,
+ .port = 0,
+ },
+ [RCAR_DU_OUTPUT_LVDS0] = {
+ .possible_crtcs = BIT(0),
+ .encoder_type = DRM_MODE_ENCODER_LVDS,
+ .port = 2,
+ },
+ },
+ .num_lvds = 1,
+};
+
static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info },
{ .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info },
{ .compatible = "renesas,du-r8a7791", .data = &rcar_du_r8a7791_info },
+ { .compatible = "renesas,du-r8a7792", .data = &rcar_du_r8a7792_info },
{ .compatible = "renesas,du-r8a7793", .data = &rcar_du_r8a7791_info },
{ .compatible = "renesas,du-r8a7794", .data = &rcar_du_r8a7794_info },
{ .compatible = "renesas,du-r8a7795", .data = &rcar_du_r8a7795_info },
+ { .compatible = "renesas,du-r8a7796", .data = &rcar_du_r8a7796_info },
{ }
};
@@ -201,9 +248,7 @@ static const struct file_operations rcar_du_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
@@ -285,7 +330,6 @@ static int rcar_du_remove(struct platform_device *pdev)
drm_kms_helper_poll_fini(ddev);
drm_mode_config_cleanup(ddev);
- drm_vblank_cleanup(ddev);
drm_dev_unref(ddev);
@@ -294,18 +338,12 @@ static int rcar_du_remove(struct platform_device *pdev)
static int rcar_du_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
struct rcar_du_device *rcdu;
struct drm_device *ddev;
struct resource *mem;
int ret;
- if (np == NULL) {
- dev_err(&pdev->dev, "no device tree node\n");
- return -ENODEV;
- }
-
- /* Allocate and initialize the DRM and R-Car device structures. */
+ /* Allocate and initialize the R-Car device structure. */
rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL);
if (rcdu == NULL)
return -ENOMEM;
@@ -315,31 +353,22 @@ static int rcar_du_probe(struct platform_device *pdev)
rcdu->dev = &pdev->dev;
rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data;
- ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev);
- if (IS_ERR(ddev))
- return PTR_ERR(ddev);
-
- rcdu->ddev = ddev;
- ddev->dev_private = rcdu;
-
platform_set_drvdata(pdev, rcdu);
/* I/O resources */
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem);
- if (IS_ERR(rcdu->mmio)) {
- ret = PTR_ERR(rcdu->mmio);
- goto error;
- }
-
- /* Initialize vertical blanking interrupts handling. Start with vblank
- * disabled for all CRTCs.
- */
- ret = drm_vblank_init(ddev, (1 << rcdu->info->num_crtcs) - 1);
- if (ret < 0)
- goto error;
+ if (IS_ERR(rcdu->mmio))
+ return PTR_ERR(rcdu->mmio);
/* DRM/KMS objects */
+ ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev);
+ if (IS_ERR(ddev))
+ return PTR_ERR(ddev);
+
+ rcdu->ddev = ddev;
+ ddev->dev_private = rcdu;
+
ret = rcar_du_modeset_init(rcdu);
if (ret < 0) {
if (ret != -EPROBE_DEFER)
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
index 33b2fc53da3e..64738fca96d0 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -105,16 +105,20 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp)
if (rcar_du_has(rgrp->dev, RCAR_DU_FEATURE_EXT_CTRL_REGS)) {
rcar_du_group_setup_defr8(rgrp);
- /* Configure input dot clock routing. We currently hardcode the
- * configuration to routing DOTCLKINn to DUn.
+ /*
+ * Configure input dot clock routing. We currently hardcode the
+ * configuration to routing DOTCLKINn to DUn. Register fields
+ * depend on the DU generation, but the resulting value is 0 in
+ * all cases.
+ *
+ * On Gen2 a single register in the first group controls dot
+ * clock selection for all channels, while on Gen3 dot clocks
+ * are setup through per-group registers, only available when
+ * the group has two channels.
*/
- rcar_du_group_write(rgrp, DIDSR, DIDSR_CODE |
- DIDSR_LCDS_DCLKIN(2) |
- DIDSR_LCDS_DCLKIN(1) |
- DIDSR_LCDS_DCLKIN(0) |
- DIDSR_PDCS_CLK(2, 0) |
- DIDSR_PDCS_CLK(1, 0) |
- DIDSR_PDCS_CLK(0, 0));
+ if ((rcdu->info->gen < 3 && rgrp->index == 0) ||
+ (rcdu->info->gen == 3 && rgrp->num_crtcs > 1))
+ rcar_du_group_write(rgrp, DIDSR, DIDSR_CODE);
}
if (rcdu->info->gen >= 3)
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
index e03004f4588d..f9515f53cc5b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
@@ -108,7 +108,7 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
if (hdmienc == NULL)
return -ENOMEM;
- /* Locate drm bridge from the hdmi encoder DT node */
+ /* Locate the DRM bridge from the HDMI encoder DT node. */
bridge = of_drm_find_bridge(np);
if (!bridge)
return -EPROBE_DEFER;
@@ -123,7 +123,7 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
renc->hdmi = hdmienc;
hdmienc->renc = renc;
- /* Link drm_bridge to encoder */
+ /* Link the bridge to the encoder. */
bridge->encoder = encoder;
encoder->bridge = bridge;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index c76ed9ee6a01..b5d3f16cfa12 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -231,8 +231,16 @@ static int rcar_du_atomic_check(struct drm_device *dev,
struct rcar_du_device *rcdu = dev->dev_private;
int ret;
- ret = drm_atomic_helper_check(dev, state);
- if (ret < 0)
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_normalize_zpos(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret)
return ret;
if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
@@ -446,13 +454,13 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
}
ret = rcar_du_encoder_init(rcdu, enc_type, output, encoder, connector);
- of_node_put(encoder);
- of_node_put(connector);
-
if (ret && ret != -EPROBE_DEFER)
dev_warn(rcdu->dev,
- "failed to initialize encoder %s (%d), skipping\n",
- encoder->full_name, ret);
+ "failed to initialize encoder %s on output %u (%d), skipping\n",
+ of_node_full_name(encoder), output, ret);
+
+ of_node_put(encoder);
+ of_node_put(connector);
return ret;
}
@@ -560,6 +568,13 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
if (ret < 0)
return ret;
+ /* Initialize vertical blanking interrupts handling. Start with vblank
+ * disabled for all CRTCs.
+ */
+ ret = drm_vblank_init(dev, (1 << rcdu->info->num_crtcs) - 1);
+ if (ret < 0)
+ return ret;
+
/* Initialize the groups. */
num_groups = DIV_ROUND_UP(rcdu->num_crtcs, 2);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 6afd0af312ba..64e9f0b86e58 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -79,7 +79,7 @@ static const struct drm_connector_funcs connector_funcs = {
int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
struct rcar_du_encoder *renc,
- /* TODO const */ struct device_node *np)
+ const struct device_node *np)
{
struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
struct rcar_du_lvds_connector *lvdscon;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
index d4881ee0be7e..639071dd235c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
@@ -19,6 +19,6 @@ struct rcar_du_encoder;
int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
struct rcar_du_encoder *renc,
- struct device_node *np);
+ const struct device_node *np);
#endif /* __RCAR_DU_LVDSCON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
index ef3a50321ecc..e3a4985f6f3f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
@@ -104,7 +104,14 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds,
rcar_lvds_write(lvds, LVDPLLCR, pllcr);
- /* Turn the PLL on, set it to LVDS normal mode, wait for the startup
+ /* Turn all the channels on. */
+ rcar_lvds_write(lvds, LVDCR1,
+ LVDCR1_CHSTBY_GEN3(3) | LVDCR1_CHSTBY_GEN3(2) |
+ LVDCR1_CHSTBY_GEN3(1) | LVDCR1_CHSTBY_GEN3(0) |
+ LVDCR1_CLKSTBY_GEN3);
+
+ /*
+ * Turn the PLL on, set it to LVDS normal mode, wait for the startup
* delay and turn the output on.
*/
lvdcr0 = LVDCR0_PLLON;
@@ -117,12 +124,6 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds,
lvdcr0 |= LVDCR0_LVRES;
rcar_lvds_write(lvds, LVDCR0, lvdcr0);
-
- /* Turn all the channels on. */
- rcar_lvds_write(lvds, LVDCR1,
- LVDCR1_CHSTBY_GEN3(3) | LVDCR1_CHSTBY_GEN3(2) |
- LVDCR1_CHSTBY_GEN3(1) | LVDCR1_CHSTBY_GEN3(0) |
- LVDCR1_CLKSTBY_GEN3);
}
static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
@@ -241,10 +242,8 @@ int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu)
for (i = 0; i < rcdu->info->num_lvds; ++i) {
lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
- if (lvds == NULL) {
- dev_err(&pdev->dev, "failed to allocate private data\n");
+ if (lvds == NULL)
return -ENOMEM;
- }
lvds->dev = rcdu;
lvds->index = i;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 6fe161192bb4..2390c8577617 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -275,9 +275,7 @@ static const struct file_operations rockchip_drm_driver_fops = {
.poll = drm_poll,
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.release = drm_release,
};
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index a16c69f96ed5..8f639c8597a5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -37,15 +37,11 @@ static int rockchip_fbdev_mmap(struct fb_info *info,
static struct fb_ops rockchip_drm_fbdev_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_mmap = rockchip_fbdev_mmap,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_blank = drm_fb_helper_blank,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_setcmap = drm_fb_helper_setcmap,
};
static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 3b807135a5cd..78c6d8e9b42c 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -42,9 +42,7 @@ static const struct file_operations savage_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_legacy_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
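Several file_operations tables in this series lose the CONFIG_COMPAT guard around .compat_ioctl. As far as I can tell this relies on the DRM core header making drm_compat_ioctl usable in both configurations (defined away, presumably to NULL, on CONFIG_COMPAT=n builds), so the assignment is always valid. A hypothetical fops table for illustration:

#include <linux/fs.h>
#include <linux/module.h>
#include <drm/drmP.h>

static const struct file_operations example_driver_fops = {
        .owner          = THIS_MODULE,
        .open           = drm_open,
        .release        = drm_release,
        .unlocked_ioctl = drm_ioctl,
        /* Valid even without CONFIG_COMPAT; no #ifdef required. */
        .compat_ioctl   = drm_compat_ioctl,
        .llseek         = noop_llseek,
};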
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index f0492603ea88..38dd55f4af81 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -245,9 +245,7 @@ static const struct file_operations shmob_drm_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index ae9839886c4d..a836451920f0 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -72,9 +72,7 @@ static const struct file_operations sis_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_legacy_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 6aead2013b62..ff71e25ab5bf 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -196,6 +196,26 @@ static void sti_atomic_work(struct work_struct *work)
sti_atomic_complete(private, private->commit.state);
}
+static int sti_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int ret;
+
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_normalize_zpos(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
static int sti_atomic_commit(struct drm_device *drm,
struct drm_atomic_state *state, bool nonblock)
{
@@ -250,7 +270,7 @@ static void sti_output_poll_changed(struct drm_device *ddev)
static const struct drm_mode_config_funcs sti_mode_config_funcs = {
.fb_create = drm_fb_cma_create,
.output_poll_changed = sti_output_poll_changed,
- .atomic_check = drm_atomic_helper_check,
+ .atomic_check = sti_atomic_check,
.atomic_commit = sti_atomic_commit,
};
@@ -277,9 +297,7 @@ static const struct file_operations sti_driver_fops = {
.poll = drm_poll,
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.release = drm_release,
};
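Both rcar-du and sti stop using drm_atomic_helper_check() so that zpos normalization can run between the modeset and plane checks, which appears to be the only reason for open-coding it: the plane checks (and the later commit) then see normalized zpos values. A compressed sketch of the ordering, the same shape as sti_atomic_check() above:

#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>

static int example_atomic_check(struct drm_device *dev,
                                struct drm_atomic_state *state)
{
        int ret;

        ret = drm_atomic_helper_check_modeset(dev, state);
        if (ret)
                return ret;

        /* Map user-supplied zpos values onto 0..N-1 before any code
         * consumes them. */
        ret = drm_atomic_normalize_zpos(dev, state);
        if (ret)
                return ret;

        return drm_atomic_helper_check_planes(dev, state);
}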
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 32c0584e3c35..2e08f969bb64 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -95,6 +95,22 @@ static int sun4i_backend_drm_format_to_layer(struct drm_plane *plane,
*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB8888;
break;
+ case DRM_FORMAT_ARGB4444:
+ *mode = SUN4I_BACKEND_LAY_FBFMT_ARGB4444;
+ break;
+
+ case DRM_FORMAT_ARGB1555:
+ *mode = SUN4I_BACKEND_LAY_FBFMT_ARGB1555;
+ break;
+
+ case DRM_FORMAT_RGBA5551:
+ *mode = SUN4I_BACKEND_LAY_FBFMT_RGBA5551;
+ break;
+
+ case DRM_FORMAT_RGBA4444:
+ *mode = SUN4I_BACKEND_LAY_FBFMT_RGBA4444;
+ break;
+
case DRM_FORMAT_XRGB8888:
*mode = SUN4I_BACKEND_LAY_FBFMT_XRGB8888;
break;
@@ -103,6 +119,10 @@ static int sun4i_backend_drm_format_to_layer(struct drm_plane *plane,
*mode = SUN4I_BACKEND_LAY_FBFMT_RGB888;
break;
+ case DRM_FORMAT_RGB565:
+ *mode = SUN4I_BACKEND_LAY_FBFMT_RGB565;
+ break;
+
default:
return -EINVAL;
}
@@ -389,7 +409,7 @@ static void sun4i_backend_unbind(struct device *dev, struct device *master,
reset_control_assert(backend->reset);
}
-static struct component_ops sun4i_backend_ops = {
+static const struct component_ops sun4i_backend_ops = {
.bind = sun4i_backend_bind,
.unbind = sun4i_backend_unbind,
};
@@ -408,6 +428,7 @@ static int sun4i_backend_remove(struct platform_device *pdev)
static const struct of_device_id sun4i_backend_of_table[] = {
{ .compatible = "allwinner,sun5i-a13-display-backend" },
+ { .compatible = "allwinner,sun6i-a31-display-backend" },
{ .compatible = "allwinner,sun8i-a33-display-backend" },
{ }
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index b3c4ad605e81..2891aa914cfa 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -53,9 +53,7 @@ static const struct file_operations sun4i_drv_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
@@ -202,12 +200,15 @@ static const struct component_master_ops sun4i_drv_master_ops = {
static bool sun4i_drv_node_is_frontend(struct device_node *node)
{
return of_device_is_compatible(node, "allwinner,sun5i-a13-display-frontend") ||
+ of_device_is_compatible(node, "allwinner,sun6i-a31-display-frontend") ||
of_device_is_compatible(node, "allwinner,sun8i-a33-display-frontend");
}
static bool sun4i_drv_node_is_tcon(struct device_node *node)
{
return of_device_is_compatible(node, "allwinner,sun5i-a13-tcon") ||
+ of_device_is_compatible(node, "allwinner,sun6i-a31-tcon") ||
+ of_device_is_compatible(node, "allwinner,sun6i-a31s-tcon") ||
of_device_is_compatible(node, "allwinner,sun8i-a33-tcon");
}
@@ -323,6 +324,8 @@ static int sun4i_drv_remove(struct platform_device *pdev)
static const struct of_device_id sun4i_drv_of_table[] = {
{ .compatible = "allwinner,sun5i-a13-display-engine" },
+ { .compatible = "allwinner,sun6i-a31-display-engine" },
+ { .compatible = "allwinner,sun6i-a31s-display-engine" },
{ .compatible = "allwinner,sun8i-a33-display-engine" },
{ }
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index f0035bf5efea..5d53c977bca5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -73,12 +73,18 @@ static const struct drm_plane_funcs sun4i_backend_layer_funcs = {
static const uint32_t sun4i_backend_layer_formats_primary[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
};
static const uint32_t sun4i_backend_layer_formats_overlay[] = {
DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_RGBA4444,
DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index cadacb517f95..ea2906f87cb9 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -20,6 +20,7 @@
#include <linux/component.h>
#include <linux/ioport.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/of_irq.h>
#include <linux/regmap.h>
@@ -62,7 +63,7 @@ void sun4i_tcon_channel_disable(struct sun4i_tcon *tcon, int channel)
return;
}
- WARN_ON(!tcon->has_channel_1);
+ WARN_ON(!tcon->quirks->has_channel_1);
regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
SUN4I_TCON1_CTL_TCON_ENABLE, 0);
clk_disable_unprepare(tcon->sclk1);
@@ -80,7 +81,7 @@ void sun4i_tcon_channel_enable(struct sun4i_tcon *tcon, int channel)
return;
}
- WARN_ON(!tcon->has_channel_1);
+ WARN_ON(!tcon->quirks->has_channel_1);
regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
SUN4I_TCON1_CTL_TCON_ENABLE,
SUN4I_TCON1_CTL_TCON_ENABLE);
@@ -202,7 +203,7 @@ void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
u8 clk_delay;
u32 val;
- WARN_ON(!tcon->has_channel_1);
+ WARN_ON(!tcon->quirks->has_channel_1);
/* Adjust clock delay */
clk_delay = sun4i_tcon_get_clk_delay(mode, 1);
@@ -266,7 +267,7 @@ void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
/*
* FIXME: Undocumented bits
*/
- if (tcon->has_mux)
+ if (tcon->quirks->has_unknown_mux)
regmap_write(tcon->regs, SUN4I_TCON_MUX_CTRL_REG, 1);
}
EXPORT_SYMBOL(sun4i_tcon1_mode_set);
@@ -327,7 +328,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
return PTR_ERR(tcon->sclk0);
}
- if (tcon->has_channel_1) {
+ if (tcon->quirks->has_channel_1) {
tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
if (IS_ERR(tcon->sclk1)) {
dev_err(dev, "Couldn't get the TCON channel 1 clock\n");
@@ -487,14 +488,7 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
drv->tcon = tcon;
tcon->drm = drm;
tcon->dev = dev;
-
- if (of_device_is_compatible(dev->of_node, "allwinner,sun5i-a13-tcon")) {
- tcon->has_mux = true;
- tcon->has_channel_1 = true;
- } else {
- tcon->has_mux = false;
- tcon->has_channel_1 = false;
- }
+ tcon->quirks = of_device_get_match_data(dev);
tcon->lcd_rst = devm_reset_control_get(dev, "lcd");
if (IS_ERR(tcon->lcd_rst)) {
@@ -551,7 +545,7 @@ static void sun4i_tcon_unbind(struct device *dev, struct device *master,
sun4i_tcon_free_clocks(tcon);
}
-static struct component_ops sun4i_tcon_ops = {
+static const struct component_ops sun4i_tcon_ops = {
.bind = sun4i_tcon_bind,
.unbind = sun4i_tcon_unbind,
};
@@ -588,9 +582,28 @@ static int sun4i_tcon_remove(struct platform_device *pdev)
return 0;
}
+static const struct sun4i_tcon_quirks sun5i_a13_quirks = {
+ .has_unknown_mux = true,
+ .has_channel_1 = true,
+};
+
+static const struct sun4i_tcon_quirks sun6i_a31_quirks = {
+ .has_channel_1 = true,
+};
+
+static const struct sun4i_tcon_quirks sun6i_a31s_quirks = {
+ .has_channel_1 = true,
+};
+
+static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
+ /* nothing is supported */
+};
+
static const struct of_device_id sun4i_tcon_of_table[] = {
- { .compatible = "allwinner,sun5i-a13-tcon" },
- { .compatible = "allwinner,sun8i-a33-tcon" },
+ { .compatible = "allwinner,sun5i-a13-tcon", .data = &sun5i_a13_quirks },
+ { .compatible = "allwinner,sun6i-a31-tcon", .data = &sun6i_a31_quirks },
+ { .compatible = "allwinner,sun6i-a31s-tcon", .data = &sun6i_a31s_quirks },
+ { .compatible = "allwinner,sun8i-a33-tcon", .data = &sun8i_a33_quirks },
{ }
};
MODULE_DEVICE_TABLE(of, sun4i_tcon_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index 12bd48925f4d..166064bafe2e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -142,6 +142,11 @@
#define SUN4I_TCON_MAX_CHANNELS 2
+struct sun4i_tcon_quirks {
+ bool has_unknown_mux; /* sun5i has undocumented mux */
+ bool has_channel_1; /* a33 does not have channel 1 */
+};
+
struct sun4i_tcon {
struct device *dev;
struct drm_device *drm;
@@ -160,12 +165,10 @@ struct sun4i_tcon {
/* Reset control */
struct reset_control *lcd_rst;
- /* Platform adjustments */
- bool has_mux;
-
struct drm_panel *panel;
- bool has_channel_1;
+ /* Platform adjustments */
+ const struct sun4i_tcon_quirks *quirks;
};
struct drm_bridge *sun4i_tcon_find_bridge(struct device_node *node);
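The TCON driver replaces chained of_device_is_compatible() checks with per-compatible quirk data attached to the of_device_id table. A minimal sketch of the lookup side, using the struct sun4i_tcon_quirks added above; the function name is hypothetical:

#include <linux/errno.h>
#include <linux/of_device.h>

static int example_tcon_get_quirks(struct device *dev,
                                   const struct sun4i_tcon_quirks **quirks)
{
        /* Returns the .data pointer of the matching of_device_id entry. */
        *quirks = of_device_get_match_data(dev);
        if (!*quirks)
                return -EINVAL;
        return 0;
}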
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index 1dd3d9eabf2e..d430b331fed5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -667,7 +667,7 @@ static void sun4i_tv_unbind(struct device *dev, struct device *master,
clk_disable_unprepare(tv->clk);
}
-static struct component_ops sun4i_tv_ops = {
+static const struct component_ops sun4i_tv_ops = {
.bind = sun4i_tv_bind,
.unbind = sun4i_tv_unbind,
};
diff --git a/drivers/gpu/drm/sun4i/sun6i_drc.c b/drivers/gpu/drm/sun4i/sun6i_drc.c
index bf6d846d8132..09bba853e2a4 100644
--- a/drivers/gpu/drm/sun4i/sun6i_drc.c
+++ b/drivers/gpu/drm/sun4i/sun6i_drc.c
@@ -80,7 +80,7 @@ static void sun6i_drc_unbind(struct device *dev, struct device *master,
reset_control_assert(drc->reset);
}
-static struct component_ops sun6i_drc_ops = {
+static const struct component_ops sun6i_drc_ops = {
.bind = sun6i_drc_bind,
.unbind = sun6i_drc_unbind,
};
@@ -98,6 +98,8 @@ static int sun6i_drc_remove(struct platform_device *pdev)
}
static const struct of_device_id sun6i_drc_of_table[] = {
+ { .compatible = "allwinner,sun6i-a31-drc" },
+ { .compatible = "allwinner,sun6i-a31s-drc" },
{ .compatible = "allwinner,sun8i-a33-drc" },
{ }
};
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index f418892b0c71..c54138c3a376 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -49,9 +49,7 @@ static const struct file_operations tdfx_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_legacy_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index a9630c2d6cb3..b8be3ee4d3b8 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -802,9 +802,7 @@ static const struct file_operations tegra_drm_fops = {
.mmap = tegra_drm_mmap,
.poll = drm_poll,
.read = drm_read,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index e6d71fa4028e..e4a5ab0a9677 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -186,14 +186,10 @@ unreference:
#ifdef CONFIG_DRM_FBDEV_EMULATION
static struct fb_ops tegra_fb_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_sys_fillrect,
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_blank = drm_fb_helper_blank,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_setcmap = drm_fb_helper_setcmap,
};
static int tegra_fbdev_probe(struct drm_fb_helper *helper,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 147fb28287ae..0f58a74f25d1 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -573,9 +573,7 @@ static const struct file_operations fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index f6ff579e8918..d5063618efa7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1611,7 +1611,14 @@ EXPORT_SYMBOL(ttm_bo_unmap_virtual);
int ttm_bo_wait(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait)
{
- long timeout = no_wait ? 0 : 15 * HZ;
+ long timeout = 15 * HZ;
+
+ if (no_wait) {
+ if (reservation_object_test_signaled_rcu(bo->resv, true))
+ return 0;
+ else
+ return -EBUSY;
+ }
timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
interruptible, timeout);
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index cc45d98f9bb5..cd8b01727734 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -44,9 +44,7 @@ static const struct file_operations udl_driver_fops = {
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
.release = drm_release,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 611b6b9bb3cb..167f42c67c7c 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -254,16 +254,10 @@ static int udl_fb_release(struct fb_info *info, int user)
static struct fb_ops udlfb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_sys_fillrect,
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
.fb_mmap = udl_fb_mmap,
.fb_open = udl_fb_open,
.fb_release = udl_fb_release,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 8703f56b7947..1dab9e5b3689 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -61,23 +61,28 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
if (ret < 0)
return ret;
args->value = V3D_READ(V3D_IDENT0);
- pm_runtime_put(&vc4->v3d->pdev->dev);
+ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
break;
case DRM_VC4_PARAM_V3D_IDENT1:
ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
if (ret < 0)
return ret;
args->value = V3D_READ(V3D_IDENT1);
- pm_runtime_put(&vc4->v3d->pdev->dev);
+ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
break;
case DRM_VC4_PARAM_V3D_IDENT2:
ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
if (ret < 0)
return ret;
args->value = V3D_READ(V3D_IDENT2);
- pm_runtime_put(&vc4->v3d->pdev->dev);
+ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
break;
case DRM_VC4_PARAM_SUPPORTS_BRANCHES:
+ case DRM_VC4_PARAM_SUPPORTS_ETC1:
+ case DRM_VC4_PARAM_SUPPORTS_THREADED_FS:
args->value = true;
break;
default:
@@ -103,9 +108,7 @@ static const struct file_operations vc4_drm_fops = {
.mmap = vc4_mmap,
.poll = drm_poll,
.read = drm_read,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 7c1e4d97486f..fef172804345 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -381,6 +381,8 @@ struct vc4_validated_shader_info {
uint32_t num_uniform_addr_offsets;
uint32_t *uniform_addr_offsets;
+
+ bool is_threaded;
};
/**
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 47a095f392f8..db920771bfb5 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -544,14 +544,15 @@ vc4_cl_lookup_bos(struct drm_device *dev,
handles = drm_malloc_ab(exec->bo_count, sizeof(uint32_t));
if (!handles) {
+ ret = -ENOMEM;
DRM_ERROR("Failed to allocate incoming GEM handles\n");
goto fail;
}
- ret = copy_from_user(handles,
- (void __user *)(uintptr_t)args->bo_handles,
- exec->bo_count * sizeof(uint32_t));
- if (ret) {
+ if (copy_from_user(handles,
+ (void __user *)(uintptr_t)args->bo_handles,
+ exec->bo_count * sizeof(uint32_t))) {
+ ret = -EFAULT;
DRM_ERROR("Failed to copy in GEM handles\n");
goto fail;
}
@@ -708,8 +709,10 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
}
mutex_lock(&vc4->power_lock);
- if (--vc4->power_refcount == 0)
- pm_runtime_put(&vc4->v3d->pdev->dev);
+ if (--vc4->power_refcount == 0) {
+ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
+ }
mutex_unlock(&vc4->power_lock);
kfree(exec);
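The vc4_cl_lookup_bos() fix above addresses two separate issues: the allocation failure path previously left ret at zero, and copy_from_user() returns the number of bytes it could not copy rather than an errno, so its result must not be propagated as a status code. A minimal sketch of the corrected convention, with a hypothetical helper:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static int example_copy_handles(u32 *dst, const void __user *src, size_t n)
{
        /* Nonzero means some bytes were not copied; report -EFAULT. */
        if (copy_from_user(dst, src, n * sizeof(u32)))
                return -EFAULT;
        return 0;
}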
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index e6d3c6028341..7cc346ad9b0b 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -222,6 +222,8 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
return ret;
}
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, 40); /* a little over 2 frames. */
pm_runtime_enable(dev);
return 0;
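The vc4 hunks move from pm_runtime_put() to the autosuspend pattern: the V3D block stays powered for a short idle window (40 ms here) instead of being powered down after every register access, and users drop their reference with mark_last_busy plus put_autosuspend. A sketch of both sides, with hypothetical function names; the error path mirrors the driver's own handling:

#include <linux/pm_runtime.h>
#include <linux/types.h>

static int example_read_ident(struct device *dev, u32 *val)
{
        int ret = pm_runtime_get_sync(dev);

        if (ret < 0)
                return ret;
        /* ... *val = V3D_READ(...); ... */
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
        return 0;
}

static void example_enable_autosuspend(struct device *dev)
{
        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_autosuspend_delay(dev, 40);      /* ~2 frames */
        pm_runtime_enable(dev);
}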
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 26503e307438..9fd171c361c2 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -644,6 +644,13 @@ reloc_tex(struct vc4_exec_info *exec,
cpp = 1;
break;
case VC4_TEXTURE_TYPE_ETC1:
+ /* ETC1 is arranged as 64-bit blocks, where each block is 4x4
+ * pixels.
+ */
+ cpp = 8;
+ width = (width + 3) >> 2;
+ height = (height + 3) >> 2;
+ break;
case VC4_TEXTURE_TYPE_BW1:
case VC4_TEXTURE_TYPE_A4:
case VC4_TEXTURE_TYPE_A1:
@@ -782,11 +789,6 @@ validate_gl_shader_rec(struct drm_device *dev,
exec->shader_rec_v += roundup(packet_size, 16);
exec->shader_rec_size -= packet_size;
- if (!(*(uint16_t *)pkt_u & VC4_SHADER_FLAG_FS_SINGLE_THREAD)) {
- DRM_ERROR("Multi-threaded fragment shaders not supported.\n");
- return -EINVAL;
- }
-
for (i = 0; i < shader_reloc_count; i++) {
if (src_handles[i] > exec->bo_count) {
DRM_ERROR("Shader handle %d too big\n", src_handles[i]);
@@ -803,6 +805,18 @@ validate_gl_shader_rec(struct drm_device *dev,
return -EINVAL;
}
+ if (((*(uint16_t *)pkt_u & VC4_SHADER_FLAG_FS_SINGLE_THREAD) == 0) !=
+ to_vc4_bo(&bo[0]->base)->validated_shader->is_threaded) {
+ DRM_ERROR("Thread mode of CL and FS do not match\n");
+ return -EINVAL;
+ }
+
+ if (to_vc4_bo(&bo[1]->base)->validated_shader->is_threaded ||
+ to_vc4_bo(&bo[2]->base)->validated_shader->is_threaded) {
+ DRM_ERROR("cs and vs cannot be threaded\n");
+ return -EINVAL;
+ }
+
for (i = 0; i < shader_reloc_count; i++) {
struct vc4_validated_shader_info *validated_shader;
uint32_t o = shader_reloc_offsets[i];
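The ETC1 case added to reloc_tex() above sizes the texture in whole compressed blocks: ETC1 stores 4x4 pixel blocks of 8 bytes each, so both dimensions are rounded up to a multiple of 4 before the usual width * height * cpp bounds check. A standalone sketch of that arithmetic:

#include <linux/types.h>

static size_t example_etc1_size(unsigned int width, unsigned int height)
{
        /* Round each dimension up to whole 4x4 blocks. */
        unsigned int wblocks = (width + 3) >> 2;
        unsigned int hblocks = (height + 3) >> 2;

        /* 8 bytes (64 bits) per block. */
        return (size_t)wblocks * hblocks * 8;
}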
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index 2543cf5b8b51..5dba13dd1e9b 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -83,6 +83,13 @@ struct vc4_shader_validation_state {
* basic blocks.
*/
bool needs_uniform_address_for_loop;
+
+ /* Set when we find an instruction writing the top half of the
+ * register files. If we allowed writing the unusable regs in
+ * a threaded shader, then the clamp validation for the other
+ * shader running on our QPU would be invalid.
+ */
+ bool all_registers_used;
};
static uint32_t
@@ -119,6 +126,13 @@ raddr_add_a_to_live_reg_index(uint64_t inst)
}
static bool
+live_reg_is_upper_half(uint32_t lri)
+{
+ return (lri >= 16 && lri < 32) ||
+ (lri >= 32 + 16 && lri < 32 + 32);
+}
+
+static bool
is_tmu_submit(uint32_t waddr)
{
return (waddr == QPU_W_TMU0_S ||
@@ -390,6 +404,9 @@ check_reg_write(struct vc4_validated_shader_info *validated_shader,
} else {
validation_state->live_immediates[lri] = ~0;
}
+
+ if (live_reg_is_upper_half(lri))
+ validation_state->all_registers_used = true;
}
switch (waddr) {
@@ -598,6 +615,11 @@ check_instruction_reads(struct vc4_validated_shader_info *validated_shader,
}
}
+ if ((raddr_a >= 16 && raddr_a < 32) ||
+ (raddr_b >= 16 && raddr_b < 32 && sig != QPU_SIG_SMALL_IMM)) {
+ validation_state->all_registers_used = true;
+ }
+
return true;
}
@@ -608,9 +630,7 @@ static bool
vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
{
uint32_t max_branch_target = 0;
- bool found_shader_end = false;
int ip;
- int shader_end_ip = 0;
int last_branch = -2;
for (ip = 0; ip < validation_state->max_ip; ip++) {
@@ -621,8 +641,13 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
uint32_t branch_target_ip;
if (sig == QPU_SIG_PROG_END) {
- shader_end_ip = ip;
- found_shader_end = true;
+ /* There are two delay slots after program end is
+ * signaled that are still executed, then we're
+ * finished. validation_state->max_ip is the
+ * instruction after the last valid instruction in the
+ * program.
+ */
+ validation_state->max_ip = ip + 3;
continue;
}
@@ -676,15 +701,9 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
}
set_bit(after_delay_ip, validation_state->branch_targets);
max_branch_target = max(max_branch_target, after_delay_ip);
-
- /* There are two delay slots after program end is signaled
- * that are still executed, then we're finished.
- */
- if (found_shader_end && ip == shader_end_ip + 2)
- break;
}
- if (max_branch_target > shader_end_ip) {
+ if (max_branch_target > validation_state->max_ip - 3) {
DRM_ERROR("Branch landed after QPU_SIG_PROG_END");
return false;
}
@@ -756,6 +775,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
{
bool found_shader_end = false;
int shader_end_ip = 0;
+ uint32_t last_thread_switch_ip = -3;
uint32_t ip;
struct vc4_validated_shader_info *validated_shader = NULL;
struct vc4_shader_validation_state validation_state;
@@ -788,6 +808,17 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
if (!vc4_handle_branch_target(&validation_state))
goto fail;
+ if (ip == last_thread_switch_ip + 3) {
+ /* Reset r0-r3 live clamp data */
+ int i;
+
+ for (i = 64; i < LIVE_REG_COUNT; i++) {
+ validation_state.live_min_clamp_offsets[i] = ~0;
+ validation_state.live_max_clamp_regs[i] = false;
+ validation_state.live_immediates[i] = ~0;
+ }
+ }
+
switch (sig) {
case QPU_SIG_NONE:
case QPU_SIG_WAIT_FOR_SCOREBOARD:
@@ -797,6 +828,8 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
case QPU_SIG_LOAD_TMU1:
case QPU_SIG_PROG_END:
case QPU_SIG_SMALL_IMM:
+ case QPU_SIG_THREAD_SWITCH:
+ case QPU_SIG_LAST_THREAD_SWITCH:
if (!check_instruction_writes(validated_shader,
&validation_state)) {
DRM_ERROR("Bad write at ip %d\n", ip);
@@ -812,6 +845,18 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
shader_end_ip = ip;
}
+ if (sig == QPU_SIG_THREAD_SWITCH ||
+ sig == QPU_SIG_LAST_THREAD_SWITCH) {
+ validated_shader->is_threaded = true;
+
+ if (ip < last_thread_switch_ip + 3) {
+ DRM_ERROR("Thread switch too soon after "
+ "last switch at ip %d\n", ip);
+ goto fail;
+ }
+ last_thread_switch_ip = ip;
+ }
+
break;
case QPU_SIG_LOAD_IMM:
@@ -826,6 +871,13 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
if (!check_branch(inst, validated_shader,
&validation_state, ip))
goto fail;
+
+ if (ip < last_thread_switch_ip + 3) {
+ DRM_ERROR("Branch in thread switch at ip %d",
+ ip);
+ goto fail;
+ }
+
break;
default:
DRM_ERROR("Unsupported QPU signal %d at "
@@ -847,6 +899,14 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
goto fail;
}
+ /* Might corrupt the other thread */
+ if (validated_shader->is_threaded &&
+ validation_state.all_registers_used) {
+ DRM_ERROR("Shader uses threading, but uses the upper "
+ "half of the registers, too\n");
+ goto fail;
+ }
+
/* If we did a backwards branch and we haven't emitted a uniforms
* reset since then, we still need the uniforms stream to have the
* uniforms address available so that the backwards branch can do its
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index e5582bab7e3c..9e0e5392b6ec 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -64,9 +64,7 @@ static const struct file_operations via_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_legacy_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 7cf3678623c3..58048709c34e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -338,8 +338,7 @@ static void vgdev_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_modeset_disables(dev, state);
drm_atomic_helper_commit_modeset_enables(dev, state);
- drm_atomic_helper_commit_planes(dev, state,
- DRM_PLANE_COMMIT_ACTIVE_ONLY);
+ drm_atomic_helper_commit_planes(dev, state, 0);
drm_atomic_helper_commit_hw_done(state);
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 5820b7020ae5..04d98db75c64 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -108,9 +108,7 @@ static const struct file_operations virtio_gpu_driver_fops = {
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
.release = drm_release,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index 2242a80866a9..dd21f950e129 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -200,16 +200,10 @@ static void virtio_gpu_3d_imageblit(struct fb_info *info,
static struct fb_ops virtio_gpufb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = virtio_gpu_3d_fillrect,
.fb_copyarea = virtio_gpu_3d_copyarea,
.fb_imageblit = virtio_gpu_3d_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
static int virtio_gpu_vmap_fb(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index c965514b82be..e3f68cc9bb4b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -985,8 +985,9 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
info = drm_format_info(mode_cmd2->pixel_format);
if (!info || !info->depth) {
+ struct drm_format_name_buf format_name;
DRM_ERROR("Unsupported framebuffer format %s\n",
- drm_get_format_name(mode_cmd2->pixel_format));
+ drm_get_format_name(mode_cmd2->pixel_format, &format_name));
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/zte/Kconfig b/drivers/gpu/drm/zte/Kconfig
new file mode 100644
index 000000000000..4065b2840f1c
--- /dev/null
+++ b/drivers/gpu/drm/zte/Kconfig
@@ -0,0 +1,8 @@
+config DRM_ZTE
+ tristate "DRM Support for ZTE SoCs"
+ depends on DRM && ARCH_ZX
+ select DRM_KMS_CMA_HELPER
+ select DRM_KMS_FB_HELPER
+ select DRM_KMS_HELPER
+ help
+ Choose this option to enable DRM on ZTE ZX SoCs.
diff --git a/drivers/gpu/drm/zte/Makefile b/drivers/gpu/drm/zte/Makefile
new file mode 100644
index 000000000000..699180bfd57c
--- /dev/null
+++ b/drivers/gpu/drm/zte/Makefile
@@ -0,0 +1,7 @@
+zxdrm-y := \
+ zx_drm_drv.o \
+ zx_hdmi.o \
+ zx_plane.o \
+ zx_vou.o
+
+obj-$(CONFIG_DRM_ZTE) += zxdrm.o
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.c b/drivers/gpu/drm/zte/zx_drm_drv.c
new file mode 100644
index 000000000000..3e76f72c92ff
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_drm_drv.c
@@ -0,0 +1,267 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drmP.h>
+
+#include "zx_drm_drv.h"
+#include "zx_vou.h"
+
+struct zx_drm_private {
+ struct drm_fbdev_cma *fbdev;
+};
+
+static void zx_drm_fb_output_poll_changed(struct drm_device *drm)
+{
+ struct zx_drm_private *priv = drm->dev_private;
+
+ drm_fbdev_cma_hotplug_event(priv->fbdev);
+}
+
+static const struct drm_mode_config_funcs zx_drm_mode_config_funcs = {
+ .fb_create = drm_fb_cma_create,
+ .output_poll_changed = zx_drm_fb_output_poll_changed,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void zx_drm_lastclose(struct drm_device *drm)
+{
+ struct zx_drm_private *priv = drm->dev_private;
+
+ drm_fbdev_cma_restore_mode(priv->fbdev);
+}
+
+static const struct file_operations zx_drm_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = noop_llseek,
+ .mmap = drm_gem_cma_mmap,
+};
+
+static struct drm_driver zx_drm_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
+ DRIVER_ATOMIC,
+ .lastclose = zx_drm_lastclose,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
+ .enable_vblank = zx_vou_enable_vblank,
+ .disable_vblank = zx_vou_disable_vblank,
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .fops = &zx_drm_fops,
+ .name = "zx-vou",
+ .desc = "ZTE VOU Controller DRM",
+ .date = "20160811",
+ .major = 1,
+ .minor = 0,
+};
+
+static int zx_drm_bind(struct device *dev)
+{
+ struct drm_device *drm;
+ struct zx_drm_private *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ drm = drm_dev_alloc(&zx_drm_driver, dev);
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
+
+ drm->dev_private = priv;
+ dev_set_drvdata(dev, drm);
+
+ drm_mode_config_init(drm);
+ drm->mode_config.min_width = 16;
+ drm->mode_config.min_height = 16;
+ drm->mode_config.max_width = 4096;
+ drm->mode_config.max_height = 4096;
+ drm->mode_config.funcs = &zx_drm_mode_config_funcs;
+
+ ret = component_bind_all(dev, drm);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to bind all components: %d\n", ret);
+ goto out_unregister;
+ }
+
+ ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "failed to init vblank: %d\n", ret);
+ goto out_unbind;
+ }
+
+ /*
+ * We will manage the irq handler on our own. In this case, irq_enabled
+ * needs to be true for the vblank core support to work.
+ */
+ drm->irq_enabled = true;
+
+ drm_mode_config_reset(drm);
+ drm_kms_helper_poll_init(drm);
+
+ priv->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
+ drm->mode_config.num_connector);
+ if (IS_ERR(priv->fbdev)) {
+ ret = PTR_ERR(priv->fbdev);
+ DRM_DEV_ERROR(dev, "failed to init cma fbdev: %d\n", ret);
+ priv->fbdev = NULL;
+ goto out_poll_fini;
+ }
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto out_fbdev_fini;
+
+ return 0;
+
+out_fbdev_fini:
+ if (priv->fbdev) {
+ drm_fbdev_cma_fini(priv->fbdev);
+ priv->fbdev = NULL;
+ }
+out_poll_fini:
+ drm_kms_helper_poll_fini(drm);
+ drm_mode_config_cleanup(drm);
+ drm_vblank_cleanup(drm);
+out_unbind:
+ component_unbind_all(dev, drm);
+out_unregister:
+ dev_set_drvdata(dev, NULL);
+ drm->dev_private = NULL;
+ drm_dev_unref(drm);
+ return ret;
+}
+
+static void zx_drm_unbind(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct zx_drm_private *priv = drm->dev_private;
+
+ drm_dev_unregister(drm);
+ if (priv->fbdev) {
+ drm_fbdev_cma_fini(priv->fbdev);
+ priv->fbdev = NULL;
+ }
+ drm_kms_helper_poll_fini(drm);
+ drm_mode_config_cleanup(drm);
+ drm_vblank_cleanup(drm);
+ component_unbind_all(dev, drm);
+ dev_set_drvdata(dev, NULL);
+ drm->dev_private = NULL;
+ drm_dev_unref(drm);
+}
+
+static const struct component_master_ops zx_drm_master_ops = {
+ .bind = zx_drm_bind,
+ .unbind = zx_drm_unbind,
+};
+
+static int compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int zx_drm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *parent = dev->of_node;
+ struct device_node *child;
+ struct component_match *match = NULL;
+ int ret;
+
+ ret = of_platform_populate(parent, NULL, NULL, dev);
+ if (ret)
+ return ret;
+
+ for_each_available_child_of_node(parent, child) {
+ component_match_add(dev, &match, compare_of, child);
+ of_node_put(child);
+ }
+
+ return component_master_add_with_match(dev, &zx_drm_master_ops, match);
+}
+
+static int zx_drm_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &zx_drm_master_ops);
+ return 0;
+}
+
+static const struct of_device_id zx_drm_of_match[] = {
+ { .compatible = "zte,zx296718-vou", },
+ { /* end */ },
+};
+MODULE_DEVICE_TABLE(of, zx_drm_of_match);
+
+static struct platform_driver zx_drm_platform_driver = {
+ .probe = zx_drm_probe,
+ .remove = zx_drm_remove,
+ .driver = {
+ .name = "zx-drm",
+ .of_match_table = zx_drm_of_match,
+ },
+};
+
+static struct platform_driver *drivers[] = {
+ &zx_crtc_driver,
+ &zx_hdmi_driver,
+ &zx_drm_platform_driver,
+};
+
+static int zx_drm_init(void)
+{
+ return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
+}
+module_init(zx_drm_init);
+
+static void zx_drm_exit(void)
+{
+ platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
+}
+module_exit(zx_drm_exit);
+
+MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
+MODULE_DESCRIPTION("ZTE ZX VOU DRM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.h b/drivers/gpu/drm/zte/zx_drm_drv.h
new file mode 100644
index 000000000000..e65cd18a6cba
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_drm_drv.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ZX_DRM_DRV_H__
+#define __ZX_DRM_DRV_H__
+
+extern struct platform_driver zx_crtc_driver;
+extern struct platform_driver zx_hdmi_driver;
+
+static inline u32 zx_readl(void __iomem *reg)
+{
+ return readl_relaxed(reg);
+}
+
+static inline void zx_writel(void __iomem *reg, u32 val)
+{
+ writel_relaxed(val, reg);
+}
+
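+/*
+ * Read-modify-write helper: only the bits selected by @mask are updated
+ * with the corresponding bits of @val.
+ */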
+static inline void zx_writel_mask(void __iomem *reg, u32 mask, u32 val)
+{
+ u32 tmp;
+
+ tmp = zx_readl(reg);
+ tmp = (tmp & ~mask) | (val & mask);
+ zx_writel(reg, tmp);
+}
+
+#endif /* __ZX_DRM_DRV_H__ */
diff --git a/drivers/gpu/drm/zte/zx_hdmi.c b/drivers/gpu/drm/zte/zx_hdmi.c
new file mode 100644
index 000000000000..6bf6c364811e
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_hdmi.c
@@ -0,0 +1,624 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hdmi.h>
+#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
+#include <drm/drmP.h>
+
+#include "zx_hdmi_regs.h"
+#include "zx_vou.h"
+
+#define ZX_HDMI_INFOFRAME_SIZE 31
+#define DDC_SEGMENT_ADDR 0x30
+
+struct zx_hdmi_i2c {
+ struct i2c_adapter adap;
+ struct mutex lock;
+};
+
+struct zx_hdmi {
+ struct drm_connector connector;
+ struct drm_encoder encoder;
+ struct zx_hdmi_i2c *ddc;
+ struct device *dev;
+ struct drm_device *drm;
+ void __iomem *mmio;
+ struct clk *cec_clk;
+ struct clk *osc_clk;
+ struct clk *xclk;
+ bool sink_is_hdmi;
+ bool sink_has_audio;
+ const struct vou_inf *inf;
+};
+
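+/*
+ * The container_of() here relies on the macro argument (connector or
+ * encoder) also being the name of the corresponding member in
+ * struct zx_hdmi.
+ */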
+#define to_zx_hdmi(x) container_of(x, struct zx_hdmi, x)
+
+static const struct vou_inf vou_inf_hdmi = {
+ .id = VOU_HDMI,
+ .data_sel = VOU_YUV444,
+ .clocks_en_bits = BIT(24) | BIT(18) | BIT(6),
+ .clocks_sel_bits = BIT(13) | BIT(2),
+};
+
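+/*
+ * The accessors below treat HDMI registers as byte-wide values spaced
+ * on 32-bit boundaries, hence the offset * 4.
+ */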
+static inline u8 hdmi_readb(struct zx_hdmi *hdmi, u16 offset)
+{
+ return readl_relaxed(hdmi->mmio + offset * 4);
+}
+
+static inline void hdmi_writeb(struct zx_hdmi *hdmi, u16 offset, u8 val)
+{
+ writel_relaxed(val, hdmi->mmio + offset * 4);
+}
+
+static inline void hdmi_writeb_mask(struct zx_hdmi *hdmi, u16 offset,
+ u8 mask, u8 val)
+{
+ u8 tmp;
+
+ tmp = hdmi_readb(hdmi, offset);
+ tmp = (tmp & ~mask) | (val & mask);
+ hdmi_writeb(hdmi, offset, tmp);
+}
+
+static int zx_hdmi_infoframe_trans(struct zx_hdmi *hdmi,
+ union hdmi_infoframe *frame, u8 fsel)
+{
+ u8 buffer[ZX_HDMI_INFOFRAME_SIZE];
+ int num;
+ int i;
+
+ hdmi_writeb(hdmi, TPI_INFO_FSEL, fsel);
+
+ num = hdmi_infoframe_pack(frame, buffer, ZX_HDMI_INFOFRAME_SIZE);
+ if (num < 0) {
+ DRM_DEV_ERROR(hdmi->dev, "failed to pack infoframe: %d\n", num);
+ return num;
+ }
+
+ for (i = 0; i < num; i++)
+ hdmi_writeb(hdmi, TPI_INFO_B0 + i, buffer[i]);
+
+ hdmi_writeb_mask(hdmi, TPI_INFO_EN, TPI_INFO_TRANS_RPT,
+ TPI_INFO_TRANS_RPT);
+ hdmi_writeb_mask(hdmi, TPI_INFO_EN, TPI_INFO_TRANS_EN,
+ TPI_INFO_TRANS_EN);
+
+ return num;
+}
+
+static int zx_hdmi_config_video_vsi(struct zx_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ union hdmi_infoframe frame;
+ int ret;
+
+ ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
+ mode);
+ if (ret) {
+ DRM_DEV_ERROR(hdmi->dev, "failed to get vendor infoframe: %d\n",
+ ret);
+ return ret;
+ }
+
+ return zx_hdmi_infoframe_trans(hdmi, &frame, FSEL_VSIF);
+}
+
+static int zx_hdmi_config_video_avi(struct zx_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ union hdmi_infoframe frame;
+ int ret;
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode);
+ if (ret) {
+ DRM_DEV_ERROR(hdmi->dev, "failed to get avi infoframe: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* We always use YUV444 for HDMI output. */
+ frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
+
+ return zx_hdmi_infoframe_trans(hdmi, &frame, FSEL_AVI);
+}
+
+static void zx_hdmi_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct zx_hdmi *hdmi = to_zx_hdmi(encoder);
+
+ if (hdmi->sink_is_hdmi) {
+ zx_hdmi_config_video_avi(hdmi, mode);
+ zx_hdmi_config_video_vsi(hdmi, mode);
+ }
+}
+
+static void zx_hdmi_phy_start(struct zx_hdmi *hdmi)
+{
+ /* Register sequence copied from the ZTE BSP code */
+ hdmi_writeb(hdmi, 0x222, 0x0);
+ hdmi_writeb(hdmi, 0x224, 0x4);
+ hdmi_writeb(hdmi, 0x909, 0x0);
+ hdmi_writeb(hdmi, 0x7b0, 0x90);
+ hdmi_writeb(hdmi, 0x7b1, 0x00);
+ hdmi_writeb(hdmi, 0x7b2, 0xa7);
+ hdmi_writeb(hdmi, 0x7b8, 0xaa);
+ hdmi_writeb(hdmi, 0x7b2, 0xa7);
+ hdmi_writeb(hdmi, 0x7b3, 0x0f);
+ hdmi_writeb(hdmi, 0x7b4, 0x0f);
+ hdmi_writeb(hdmi, 0x7b5, 0x55);
+ hdmi_writeb(hdmi, 0x7b7, 0x03);
+ hdmi_writeb(hdmi, 0x7b9, 0x12);
+ hdmi_writeb(hdmi, 0x7ba, 0x32);
+ hdmi_writeb(hdmi, 0x7bc, 0x68);
+ hdmi_writeb(hdmi, 0x7be, 0x40);
+ hdmi_writeb(hdmi, 0x7bf, 0x84);
+ hdmi_writeb(hdmi, 0x7c1, 0x0f);
+ hdmi_writeb(hdmi, 0x7c8, 0x02);
+ hdmi_writeb(hdmi, 0x7c9, 0x03);
+ hdmi_writeb(hdmi, 0x7ca, 0x40);
+ hdmi_writeb(hdmi, 0x7dc, 0x31);
+ hdmi_writeb(hdmi, 0x7e2, 0x04);
+ hdmi_writeb(hdmi, 0x7e0, 0x06);
+ hdmi_writeb(hdmi, 0x7cb, 0x68);
+ hdmi_writeb(hdmi, 0x7f9, 0x02);
+ hdmi_writeb(hdmi, 0x7b6, 0x02);
+ hdmi_writeb(hdmi, 0x7f3, 0x0);
+}
+
+static void zx_hdmi_hw_enable(struct zx_hdmi *hdmi)
+{
+ /* Enable pclk */
+ hdmi_writeb_mask(hdmi, CLKPWD, CLKPWD_PDIDCK, CLKPWD_PDIDCK);
+
+ /* Enable HDMI for TX */
+ hdmi_writeb_mask(hdmi, FUNC_SEL, FUNC_HDMI_EN, FUNC_HDMI_EN);
+
+ /* Enable deep color packet */
+ hdmi_writeb_mask(hdmi, P2T_CTRL, P2T_DC_PKT_EN, P2T_DC_PKT_EN);
+
+ /* Enable HDMI/MHL mode for output */
+ hdmi_writeb_mask(hdmi, TEST_TXCTRL, TEST_TXCTRL_HDMI_MODE,
+ TEST_TXCTRL_HDMI_MODE);
+
+ /* Configure reg_qc_sel */
+ hdmi_writeb(hdmi, HDMICTL4, 0x3);
+
+ /* Enable interrupt */
+ hdmi_writeb_mask(hdmi, INTR1_MASK, INTR1_MONITOR_DETECT,
+ INTR1_MONITOR_DETECT);
+
+ /* Start up phy */
+ zx_hdmi_phy_start(hdmi);
+}
+
+static void zx_hdmi_hw_disable(struct zx_hdmi *hdmi)
+{
+ /* Disable interrupt */
+ hdmi_writeb_mask(hdmi, INTR1_MASK, INTR1_MONITOR_DETECT, 0);
+
+ /* Disable deep color packet */
+ hdmi_writeb_mask(hdmi, P2T_CTRL, P2T_DC_PKT_EN, P2T_DC_PKT_EN);
+
+ /* Disable HDMI for TX */
+ hdmi_writeb_mask(hdmi, FUNC_SEL, FUNC_HDMI_EN, 0);
+
+ /* Disable pclk */
+ hdmi_writeb_mask(hdmi, CLKPWD, CLKPWD_PDIDCK, 0);
+}
+
+static void zx_hdmi_encoder_enable(struct drm_encoder *encoder)
+{
+ struct zx_hdmi *hdmi = to_zx_hdmi(encoder);
+
+ clk_prepare_enable(hdmi->cec_clk);
+ clk_prepare_enable(hdmi->osc_clk);
+ clk_prepare_enable(hdmi->xclk);
+
+ zx_hdmi_hw_enable(hdmi);
+
+ vou_inf_enable(hdmi->inf, encoder->crtc);
+}
+
+static void zx_hdmi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct zx_hdmi *hdmi = to_zx_hdmi(encoder);
+
+ vou_inf_disable(hdmi->inf, encoder->crtc);
+
+ zx_hdmi_hw_disable(hdmi);
+
+ clk_disable_unprepare(hdmi->xclk);
+ clk_disable_unprepare(hdmi->osc_clk);
+ clk_disable_unprepare(hdmi->cec_clk);
+}
+
+static const struct drm_encoder_helper_funcs zx_hdmi_encoder_helper_funcs = {
+ .enable = zx_hdmi_encoder_enable,
+ .disable = zx_hdmi_encoder_disable,
+ .mode_set = zx_hdmi_encoder_mode_set,
+};
+
+static const struct drm_encoder_funcs zx_hdmi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int zx_hdmi_connector_get_modes(struct drm_connector *connector)
+{
+ struct zx_hdmi *hdmi = to_zx_hdmi(connector);
+ struct edid *edid;
+ int ret;
+
+ edid = drm_get_edid(connector, &hdmi->ddc->adap);
+ if (!edid)
+ return 0;
+
+ hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid);
+ hdmi->sink_has_audio = drm_detect_monitor_audio(edid);
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+
+ return ret;
+}
+
+static enum drm_mode_status
+zx_hdmi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static struct drm_connector_helper_funcs zx_hdmi_connector_helper_funcs = {
+ .get_modes = zx_hdmi_connector_get_modes,
+ .mode_valid = zx_hdmi_connector_mode_valid,
+};
+
+static enum drm_connector_status
+zx_hdmi_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct zx_hdmi *hdmi = to_zx_hdmi(connector);
+
+ return (hdmi_readb(hdmi, TPI_HPD_RSEN) & TPI_HPD_CONNECTION) ?
+ connector_status_connected : connector_status_disconnected;
+}
+
+static const struct drm_connector_funcs zx_hdmi_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = zx_hdmi_connector_detect,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int zx_hdmi_register(struct drm_device *drm, struct zx_hdmi *hdmi)
+{
+ struct drm_encoder *encoder = &hdmi->encoder;
+
+ encoder->possible_crtcs = VOU_CRTC_MASK;
+
+ drm_encoder_init(drm, encoder, &zx_hdmi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ drm_encoder_helper_add(encoder, &zx_hdmi_encoder_helper_funcs);
+
+ hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
+
+ drm_connector_init(drm, &hdmi->connector, &zx_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_helper_add(&hdmi->connector,
+ &zx_hdmi_connector_helper_funcs);
+
+ drm_mode_connector_attach_encoder(&hdmi->connector, encoder);
+
+ return 0;
+}
+
+static irqreturn_t zx_hdmi_irq_thread(int irq, void *dev_id)
+{
+ struct zx_hdmi *hdmi = dev_id;
+
+ drm_helper_hpd_irq_event(hdmi->connector.dev);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t zx_hdmi_irq_handler(int irq, void *dev_id)
+{
+ struct zx_hdmi *hdmi = dev_id;
+ u8 lstat;
+
+ lstat = hdmi_readb(hdmi, L1_INTR_STAT);
+
+ /* Monitor detect/HPD interrupt */
+ if (lstat & L1_INTR_STAT_INTR1) {
+ u8 stat;
+
+ stat = hdmi_readb(hdmi, INTR1_STAT);
+ hdmi_writeb(hdmi, INTR1_STAT, stat);
+
+ if (stat & INTR1_MONITOR_DETECT)
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_NONE;
+}
+
+static int zx_hdmi_i2c_read(struct zx_hdmi *hdmi, struct i2c_msg *msg)
+{
+ int len = msg->len;
+ u8 *buf = msg->buf;
+ int retry = 0;
+ int ret = 0;
+
+ /* Bits [9:8] of the byte count */
+ hdmi_writeb(hdmi, ZX_DDC_DIN_CNT2, (len >> 8) & 0xff);
+ /* Bits [7:0] of the byte count */
+ hdmi_writeb(hdmi, ZX_DDC_DIN_CNT1, len & 0xff);
+
+ /* Clear FIFO */
+ hdmi_writeb_mask(hdmi, ZX_DDC_CMD, DDC_CMD_MASK, DDC_CMD_CLEAR_FIFO);
+
+ /* Kick off the read */
+ hdmi_writeb_mask(hdmi, ZX_DDC_CMD, DDC_CMD_MASK,
+ DDC_CMD_SEQUENTIAL_READ);
+
+ while (len > 0) {
+ int cnt, i;
+
+ /* FIFO needs some time to get ready */
+ usleep_range(500, 1000);
+
+ cnt = hdmi_readb(hdmi, ZX_DDC_DOUT_CNT) & DDC_DOUT_CNT_MASK;
+ if (cnt == 0) {
+ if (++retry > 5) {
+ DRM_DEV_ERROR(hdmi->dev,
+ "DDC FIFO read timed out!");
+ return -ETIMEDOUT;
+ }
+ continue;
+ }
+
+ for (i = 0; i < cnt; i++)
+ *buf++ = hdmi_readb(hdmi, ZX_DDC_DATA);
+ len -= cnt;
+ }
+
+ return ret;
+}
+
+static int zx_hdmi_i2c_write(struct zx_hdmi *hdmi, struct i2c_msg *msg)
+{
+ /*
+ * The DDC I2C adapter is only for reading EDID data, so we assume
+ * that the write to this adapter must be the EDID data offset.
+ */
+ if ((msg->len != 1) ||
+ ((msg->addr != DDC_ADDR) && (msg->addr != DDC_SEGMENT_ADDR)))
+ return -EINVAL;
+
+ if (msg->addr == DDC_SEGMENT_ADDR)
+ hdmi_writeb(hdmi, ZX_DDC_SEGM, msg->addr << 1);
+ else if (msg->addr == DDC_ADDR)
+ hdmi_writeb(hdmi, ZX_DDC_ADDR, msg->addr << 1);
+
+ hdmi_writeb(hdmi, ZX_DDC_OFFSET, msg->buf[0]);
+
+ return 0;
+}
+
+static int zx_hdmi_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
+{
+ struct zx_hdmi *hdmi = i2c_get_adapdata(adap);
+ struct zx_hdmi_i2c *ddc = hdmi->ddc;
+ int i, ret = 0;
+
+ mutex_lock(&ddc->lock);
+
+ /* Enable DDC master access */
+ hdmi_writeb_mask(hdmi, TPI_DDC_MASTER_EN, HW_DDC_MASTER, HW_DDC_MASTER);
+
+ for (i = 0; i < num; i++) {
+ DRM_DEV_DEBUG(hdmi->dev,
+ "xfer: num: %d/%d, len: %d, flags: %#x\n",
+ i + 1, num, msgs[i].len, msgs[i].flags);
+
+ if (msgs[i].flags & I2C_M_RD)
+ ret = zx_hdmi_i2c_read(hdmi, &msgs[i]);
+ else
+ ret = zx_hdmi_i2c_write(hdmi, &msgs[i]);
+
+ if (ret < 0)
+ break;
+ }
+
+ if (!ret)
+ ret = num;
+
+ /* Disable DDC master access */
+ hdmi_writeb_mask(hdmi, TPI_DDC_MASTER_EN, HW_DDC_MASTER, 0);
+
+ mutex_unlock(&ddc->lock);
+
+ return ret;
+}
+
+static u32 zx_hdmi_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm zx_hdmi_algorithm = {
+ .master_xfer = zx_hdmi_i2c_xfer,
+ .functionality = zx_hdmi_i2c_func,
+};
+
+static int zx_hdmi_ddc_register(struct zx_hdmi *hdmi)
+{
+ struct i2c_adapter *adap;
+ struct zx_hdmi_i2c *ddc;
+ int ret;
+
+ ddc = devm_kzalloc(hdmi->dev, sizeof(*ddc), GFP_KERNEL);
+ if (!ddc)
+ return -ENOMEM;
+
+ hdmi->ddc = ddc;
+ mutex_init(&ddc->lock);
+
+ adap = &ddc->adap;
+ adap->owner = THIS_MODULE;
+ adap->class = I2C_CLASS_DDC;
+ adap->dev.parent = hdmi->dev;
+ adap->algo = &zx_hdmi_algorithm;
+ snprintf(adap->name, sizeof(adap->name), "zx hdmi i2c");
+
+ ret = i2c_add_adapter(adap);
+ if (ret) {
+ DRM_DEV_ERROR(hdmi->dev, "failed to add I2C adapter: %d\n",
+ ret);
+ return ret;
+ }
+
+ i2c_set_adapdata(adap, hdmi);
+
+ return 0;
+}
+
+static int zx_hdmi_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = data;
+ struct resource *res;
+ struct zx_hdmi *hdmi;
+ int irq;
+ int ret;
+
+ hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ hdmi->dev = dev;
+ hdmi->drm = drm;
+ hdmi->inf = &vou_inf_hdmi;
+
+ dev_set_drvdata(dev, hdmi);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hdmi->mmio = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hdmi->mmio)) {
+ ret = PTR_ERR(hdmi->mmio);
+ DRM_DEV_ERROR(dev, "failed to remap hdmi region: %d\n", ret);
+ return ret;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ hdmi->cec_clk = devm_clk_get(hdmi->dev, "osc_cec");
+ if (IS_ERR(hdmi->cec_clk)) {
+ ret = PTR_ERR(hdmi->cec_clk);
+ DRM_DEV_ERROR(dev, "failed to get cec_clk: %d\n", ret);
+ return ret;
+ }
+
+ hdmi->osc_clk = devm_clk_get(hdmi->dev, "osc_clk");
+ if (IS_ERR(hdmi->osc_clk)) {
+ ret = PTR_ERR(hdmi->osc_clk);
+ DRM_DEV_ERROR(dev, "failed to get osc_clk: %d\n", ret);
+ return ret;
+ }
+
+ hdmi->xclk = devm_clk_get(hdmi->dev, "xclk");
+ if (IS_ERR(hdmi->xclk)) {
+ ret = PTR_ERR(hdmi->xclk);
+ DRM_DEV_ERROR(dev, "failed to get xclk: %d\n", ret);
+ return ret;
+ }
+
+ ret = zx_hdmi_ddc_register(hdmi);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to register ddc: %d\n", ret);
+ return ret;
+ }
+
+ ret = zx_hdmi_register(drm, hdmi);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to register hdmi: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq, zx_hdmi_irq_handler,
+ zx_hdmi_irq_thread, IRQF_SHARED,
+ dev_name(dev), hdmi);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to request threaded irq: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void zx_hdmi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct zx_hdmi *hdmi = dev_get_drvdata(dev);
+
+ hdmi->connector.funcs->destroy(&hdmi->connector);
+ hdmi->encoder.funcs->destroy(&hdmi->encoder);
+}
+
+static const struct component_ops zx_hdmi_component_ops = {
+ .bind = zx_hdmi_bind,
+ .unbind = zx_hdmi_unbind,
+};
+
+static int zx_hdmi_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &zx_hdmi_component_ops);
+}
+
+static int zx_hdmi_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &zx_hdmi_component_ops);
+ return 0;
+}
+
+static const struct of_device_id zx_hdmi_of_match[] = {
+ { .compatible = "zte,zx296718-hdmi", },
+ { /* end */ },
+};
+MODULE_DEVICE_TABLE(of, zx_hdmi_of_match);
+
+struct platform_driver zx_hdmi_driver = {
+ .probe = zx_hdmi_probe,
+ .remove = zx_hdmi_remove,
+ .driver = {
+ .name = "zx-hdmi",
+ .of_match_table = zx_hdmi_of_match,
+ },
+};
diff --git a/drivers/gpu/drm/zte/zx_hdmi_regs.h b/drivers/gpu/drm/zte/zx_hdmi_regs.h
new file mode 100644
index 000000000000..de911f66b658
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_hdmi_regs.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ZX_HDMI_REGS_H__
+#define __ZX_HDMI_REGS_H__
+
+#define FUNC_SEL 0x000b
+#define FUNC_HDMI_EN BIT(0)
+#define CLKPWD 0x000d
+#define CLKPWD_PDIDCK BIT(2)
+#define P2T_CTRL 0x0066
+#define P2T_DC_PKT_EN BIT(7)
+#define L1_INTR_STAT 0x007e
+#define L1_INTR_STAT_INTR1 BIT(0)
+#define INTR1_STAT 0x008f
+#define INTR1_MASK 0x0095
+#define INTR1_MONITOR_DETECT (BIT(5) | BIT(6))
+#define ZX_DDC_ADDR 0x00ed
+#define ZX_DDC_SEGM 0x00ee
+#define ZX_DDC_OFFSET 0x00ef
+#define ZX_DDC_DIN_CNT1 0x00f0
+#define ZX_DDC_DIN_CNT2 0x00f1
+#define ZX_DDC_CMD 0x00f3
+#define DDC_CMD_MASK 0xf
+#define DDC_CMD_CLEAR_FIFO 0x9
+#define DDC_CMD_SEQUENTIAL_READ 0x2
+#define ZX_DDC_DATA 0x00f4
+#define ZX_DDC_DOUT_CNT 0x00f5
+#define DDC_DOUT_CNT_MASK 0x1f
+#define TEST_TXCTRL 0x00f7
+#define TEST_TXCTRL_HDMI_MODE BIT(1)
+#define HDMICTL4 0x0235
+#define TPI_HPD_RSEN 0x063b
+#define TPI_HPD_CONNECTION (BIT(1) | BIT(2))
+#define TPI_INFO_FSEL 0x06bf
+#define FSEL_AVI 0
+#define FSEL_GBD 1
+#define FSEL_AUDIO 2
+#define FSEL_SPD 3
+#define FSEL_MPEG 4
+#define FSEL_VSIF 5
+#define TPI_INFO_B0 0x06c0
+#define TPI_INFO_EN 0x06df
+#define TPI_INFO_TRANS_EN BIT(7)
+#define TPI_INFO_TRANS_RPT BIT(6)
+#define TPI_DDC_MASTER_EN 0x06f8
+#define HW_DDC_MASTER BIT(7)
+
+#endif /* __ZX_HDMI_REGS_H__ */
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
new file mode 100644
index 000000000000..546eb92a94e8
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_plane.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drmP.h>
+
+#include "zx_drm_drv.h"
+#include "zx_plane.h"
+#include "zx_plane_regs.h"
+#include "zx_vou.h"
+
+struct zx_plane {
+ struct drm_plane plane;
+ void __iomem *layer;
+ void __iomem *csc;
+ void __iomem *hbsc;
+ void __iomem *rsz;
+};
+
+#define to_zx_plane(plane) container_of(plane, struct zx_plane, plane)
+
+static const uint32_t gl_formats[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ARGB4444,
+};
+
+static int zx_gl_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *plane_state)
+{
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_crtc *crtc = plane_state->crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_rect clip;
+
+ if (!crtc || !fb)
+ return 0;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state,
+ crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ /* nothing to check when disabling or disabled */
+ if (!crtc_state->enable)
+ return 0;
+
+ /* plane must be enabled */
+ if (!plane_state->crtc)
+ return -EINVAL;
+
+ clip.x1 = 0;
+ clip.y1 = 0;
+ clip.x2 = crtc_state->adjusted_mode.hdisplay;
+ clip.y2 = crtc_state->adjusted_mode.vdisplay;
+
+ return drm_plane_helper_check_state(plane_state, &clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
+}
+
+static int zx_gl_get_fmt(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XRGB8888:
+ return GL_FMT_ARGB8888;
+ case DRM_FORMAT_RGB888:
+ return GL_FMT_RGB888;
+ case DRM_FORMAT_RGB565:
+ return GL_FMT_RGB565;
+ case DRM_FORMAT_ARGB1555:
+ return GL_FMT_ARGB1555;
+ case DRM_FORMAT_ARGB4444:
+ return GL_FMT_ARGB4444;
+ default:
+ WARN_ONCE(1, "invalid pixel format %d\n", format);
+ return -EINVAL;
+ }
+}
+
+static inline void zx_gl_set_update(struct zx_plane *zplane)
+{
+ void __iomem *layer = zplane->layer;
+
+ zx_writel_mask(layer + GL_CTRL0, GL_UPDATE, GL_UPDATE);
+}
+
+static inline void zx_gl_rsz_set_update(struct zx_plane *zplane)
+{
+ zx_writel(zplane->rsz + RSZ_ENABLE_CFG, 1);
+}
+
+void zx_plane_set_update(struct drm_plane *plane)
+{
+ struct zx_plane *zplane = to_zx_plane(plane);
+
+ zx_gl_rsz_set_update(zplane);
+ zx_gl_set_update(zplane);
+}
+
+static void zx_gl_rsz_setup(struct zx_plane *zplane, u32 src_w, u32 src_h,
+ u32 dst_w, u32 dst_h)
+{
+ void __iomem *rsz = zplane->rsz;
+
+ zx_writel(rsz + RSZ_SRC_CFG, RSZ_VER(src_h - 1) | RSZ_HOR(src_w - 1));
+ zx_writel(rsz + RSZ_DEST_CFG, RSZ_VER(dst_h - 1) | RSZ_HOR(dst_w - 1));
+
+ zx_gl_rsz_set_update(zplane);
+}
+
+static void zx_gl_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct zx_plane *zplane = to_zx_plane(plane);
+ struct drm_framebuffer *fb = plane->state->fb;
+ struct drm_gem_cma_object *cma_obj;
+ void __iomem *layer = zplane->layer;
+ void __iomem *csc = zplane->csc;
+ void __iomem *hbsc = zplane->hbsc;
+ u32 src_x, src_y, src_w, src_h;
+ u32 dst_x, dst_y, dst_w, dst_h;
+ unsigned int bpp;
+ uint32_t format;
+ dma_addr_t paddr;
+ u32 stride;
+ int fmt;
+
+ if (!fb)
+ return;
+
+ format = fb->pixel_format;
+ stride = fb->pitches[0];
+
+ src_x = plane->state->src_x >> 16;
+ src_y = plane->state->src_y >> 16;
+ src_w = plane->state->src_w >> 16;
+ src_h = plane->state->src_h >> 16;
+
+ dst_x = plane->state->crtc_x;
+ dst_y = plane->state->crtc_y;
+ dst_w = plane->state->crtc_w;
+ dst_h = plane->state->crtc_h;
+
+ bpp = drm_format_plane_cpp(format, 0);
+
+ cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ paddr = cma_obj->paddr + fb->offsets[0];
+ paddr += src_y * stride + src_x * bpp / 8;
+ zx_writel(layer + GL_ADDR, paddr);
+
+ /* Set up source height/width register */
+ zx_writel(layer + GL_SRC_SIZE, GL_SRC_W(src_w) | GL_SRC_H(src_h));
+
+ /* Set up start position register */
+ zx_writel(layer + GL_POS_START, GL_POS_X(dst_x) | GL_POS_Y(dst_y));
+
+ /* Set up end position register */
+ zx_writel(layer + GL_POS_END,
+ GL_POS_X(dst_x + dst_w) | GL_POS_Y(dst_y + dst_h));
+
+ /* Set up stride register */
+ zx_writel(layer + GL_STRIDE, stride & 0xffff);
+
+ /* Set up graphic layer data format */
+ fmt = zx_gl_get_fmt(format);
+ if (fmt >= 0)
+ zx_writel_mask(layer + GL_CTRL1, GL_DATA_FMT_MASK,
+ fmt << GL_DATA_FMT_SHIFT);
+
+ /* Initialize global alpha with a sane value */
+ zx_writel_mask(layer + GL_CTRL2, GL_GLOBAL_ALPHA_MASK,
+ 0xff << GL_GLOBAL_ALPHA_SHIFT);
+
+ /* Setup CSC for the GL */
+ if (dst_h > 720)
+ zx_writel_mask(csc + CSC_CTRL0, CSC_COV_MODE_MASK,
+ CSC_BT709_IMAGE_RGB2YCBCR << CSC_COV_MODE_SHIFT);
+ else
+ zx_writel_mask(csc + CSC_CTRL0, CSC_COV_MODE_MASK,
+ CSC_BT601_IMAGE_RGB2YCBCR << CSC_COV_MODE_SHIFT);
+ zx_writel_mask(csc + CSC_CTRL0, CSC_WORK_ENABLE, CSC_WORK_ENABLE);
+
+ /* Always use the scaler since it exists; setting this bit means "do not bypass". */
+ zx_writel_mask(layer + GL_CTRL3, GL_SCALER_BYPASS_MODE,
+ GL_SCALER_BYPASS_MODE);
+
+ zx_gl_rsz_setup(zplane, src_w, src_h, dst_w, dst_h);
+
+ /* Enable HBSC block */
+ zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, HBSC_CTRL_EN);
+
+ zx_gl_set_update(zplane);
+}
+
+static const struct drm_plane_helper_funcs zx_gl_plane_helper_funcs = {
+ .atomic_check = zx_gl_plane_atomic_check,
+ .atomic_update = zx_gl_plane_atomic_update,
+};
+
+static void zx_plane_destroy(struct drm_plane *plane)
+{
+ drm_plane_helper_disable(plane);
+ drm_plane_cleanup(plane);
+}
+
+static const struct drm_plane_funcs zx_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = zx_plane_destroy,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static void zx_plane_hbsc_init(struct zx_plane *zplane)
+{
+ void __iomem *hbsc = zplane->hbsc;
+
+ /*
+ * Initialize the HBSC block with a sane configuration, per the
+ * recommendation from the ZTE BSP code.
+ */
+ zx_writel(hbsc + HBSC_SATURATION, 0x200);
+ zx_writel(hbsc + HBSC_HUE, 0x0);
+ zx_writel(hbsc + HBSC_BRIGHT, 0x0);
+ zx_writel(hbsc + HBSC_CONTRAST, 0x200);
+
+ zx_writel(hbsc + HBSC_THRESHOLD_COL1, (0x3ac << 16) | 0x40);
+ zx_writel(hbsc + HBSC_THRESHOLD_COL2, (0x3c0 << 16) | 0x40);
+ zx_writel(hbsc + HBSC_THRESHOLD_COL3, (0x3c0 << 16) | 0x40);
+}
+
+struct drm_plane *zx_plane_init(struct drm_device *drm, struct device *dev,
+ struct zx_layer_data *data,
+ enum drm_plane_type type)
+{
+ const struct drm_plane_helper_funcs *helper;
+ struct zx_plane *zplane;
+ struct drm_plane *plane;
+ const uint32_t *formats;
+ unsigned int format_count;
+ int ret;
+
+ zplane = devm_kzalloc(dev, sizeof(*zplane), GFP_KERNEL);
+ if (!zplane)
+ return ERR_PTR(-ENOMEM);
+
+ plane = &zplane->plane;
+
+ zplane->layer = data->layer;
+ zplane->hbsc = data->hbsc;
+ zplane->csc = data->csc;
+ zplane->rsz = data->rsz;
+
+ zx_plane_hbsc_init(zplane);
+
+ switch (type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ helper = &zx_gl_plane_helper_funcs;
+ formats = gl_formats;
+ format_count = ARRAY_SIZE(gl_formats);
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ /* TODO: add video layer (vl) support */
+ break;
+ default:
+ return ERR_PTR(-ENODEV);
+ }
+
+ ret = drm_universal_plane_init(drm, plane, VOU_CRTC_MASK,
+ &zx_plane_funcs, formats, format_count,
+ type, NULL);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to init universal plane: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ drm_plane_helper_add(plane, helper);
+
+ return plane;
+}
diff --git a/drivers/gpu/drm/zte/zx_plane.h b/drivers/gpu/drm/zte/zx_plane.h
new file mode 100644
index 000000000000..2b82cd558d9d
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_plane.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ZX_PLANE_H__
+#define __ZX_PLANE_H__
+
+struct zx_layer_data {
+ void __iomem *layer;
+ void __iomem *csc;
+ void __iomem *hbsc;
+ void __iomem *rsz;
+};
+
+struct drm_plane *zx_plane_init(struct drm_device *drm, struct device *dev,
+ struct zx_layer_data *data,
+ enum drm_plane_type type);
+void zx_plane_set_update(struct drm_plane *plane);
+
+#endif /* __ZX_PLANE_H__ */
diff --git a/drivers/gpu/drm/zte/zx_plane_regs.h b/drivers/gpu/drm/zte/zx_plane_regs.h
new file mode 100644
index 000000000000..3dde6716a558
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_plane_regs.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ZX_PLANE_REGS_H__
+#define __ZX_PLANE_REGS_H__
+
+/* GL registers */
+#define GL_CTRL0 0x00
+#define GL_UPDATE BIT(5)
+#define GL_CTRL1 0x04
+#define GL_DATA_FMT_SHIFT 0
+#define GL_DATA_FMT_MASK (0xf << GL_DATA_FMT_SHIFT)
+#define GL_FMT_ARGB8888 0
+#define GL_FMT_RGB888 1
+#define GL_FMT_RGB565 2
+#define GL_FMT_ARGB1555 3
+#define GL_FMT_ARGB4444 4
+#define GL_CTRL2 0x08
+#define GL_GLOBAL_ALPHA_SHIFT 8
+#define GL_GLOBAL_ALPHA_MASK (0xff << GL_GLOBAL_ALPHA_SHIFT)
+#define GL_CTRL3 0x0c
+#define GL_SCALER_BYPASS_MODE BIT(0)
+#define GL_STRIDE 0x18
+#define GL_ADDR 0x1c
+#define GL_SRC_SIZE 0x38
+#define GL_SRC_W_SHIFT 16
+#define GL_SRC_W_MASK (0x3fff << GL_SRC_W_SHIFT)
+#define GL_SRC_H_SHIFT 0
+#define GL_SRC_H_MASK (0x3fff << GL_SRC_H_SHIFT)
+#define GL_POS_START 0x9c
+#define GL_POS_END 0xa0
+#define GL_POS_X_SHIFT 16
+#define GL_POS_X_MASK (0x1fff << GL_POS_X_SHIFT)
+#define GL_POS_Y_SHIFT 0
+#define GL_POS_Y_MASK (0x1fff << GL_POS_Y_SHIFT)
+
+#define GL_SRC_W(x) (((x) << GL_SRC_W_SHIFT) & GL_SRC_W_MASK)
+#define GL_SRC_H(x) (((x) << GL_SRC_H_SHIFT) & GL_SRC_H_MASK)
+#define GL_POS_X(x) (((x) << GL_POS_X_SHIFT) & GL_POS_X_MASK)
+#define GL_POS_Y(x) (((x) << GL_POS_Y_SHIFT) & GL_POS_Y_MASK)
+
+/* CSC registers */
+#define CSC_CTRL0 0x30
+#define CSC_COV_MODE_SHIFT 16
+#define CSC_COV_MODE_MASK (0xffff << CSC_COV_MODE_SHIFT)
+#define CSC_BT601_IMAGE_RGB2YCBCR 0
+#define CSC_BT601_IMAGE_YCBCR2RGB 1
+#define CSC_BT601_VIDEO_RGB2YCBCR 2
+#define CSC_BT601_VIDEO_YCBCR2RGB 3
+#define CSC_BT709_IMAGE_RGB2YCBCR 4
+#define CSC_BT709_IMAGE_YCBCR2RGB 5
+#define CSC_BT709_VIDEO_RGB2YCBCR 6
+#define CSC_BT709_VIDEO_YCBCR2RGB 7
+#define CSC_BT2020_IMAGE_RGB2YCBCR 8
+#define CSC_BT2020_IMAGE_YCBCR2RGB 9
+#define CSC_BT2020_VIDEO_RGB2YCBCR 10
+#define CSC_BT2020_VIDEO_YCBCR2RGB 11
+#define CSC_WORK_ENABLE BIT(0)
+
+/* RSZ registers */
+#define RSZ_SRC_CFG 0x00
+#define RSZ_DEST_CFG 0x04
+#define RSZ_ENABLE_CFG 0x14
+
+#define RSZ_VER_SHIFT 16
+#define RSZ_VER_MASK (0xffff << RSZ_VER_SHIFT)
+#define RSZ_HOR_SHIFT 0
+#define RSZ_HOR_MASK (0xffff << RSZ_HOR_SHIFT)
+
+#define RSZ_VER(x) (((x) << RSZ_VER_SHIFT) & RSZ_VER_MASK)
+#define RSZ_HOR(x) (((x) << RSZ_HOR_SHIFT) & RSZ_HOR_MASK)
+
+/* HBSC registers */
+#define HBSC_SATURATION 0x00
+#define HBSC_HUE 0x04
+#define HBSC_BRIGHT 0x08
+#define HBSC_CONTRAST 0x0c
+#define HBSC_THRESHOLD_COL1 0x10
+#define HBSC_THRESHOLD_COL2 0x14
+#define HBSC_THRESHOLD_COL3 0x18
+#define HBSC_CTRL0 0x28
+#define HBSC_CTRL_EN BIT(2)
+
+#endif /* __ZX_PLANE_REGS_H__ */
diff --git a/drivers/gpu/drm/zte/zx_vou.c b/drivers/gpu/drm/zte/zx_vou.c
new file mode 100644
index 000000000000..73fe15c17c32
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_vou.c
@@ -0,0 +1,661 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_address.h>
+#include <video/videomode.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drmP.h>
+
+#include "zx_drm_drv.h"
+#include "zx_plane.h"
+#include "zx_vou.h"
+#include "zx_vou_regs.h"
+
+#define GL_NUM 2
+#define VL_NUM 3
+
+enum vou_chn_type {
+ VOU_CHN_MAIN,
+ VOU_CHN_AUX,
+};
+
+struct zx_crtc_regs {
+ u32 fir_active;
+ u32 fir_htiming;
+ u32 fir_vtiming;
+ u32 timing_shift;
+ u32 timing_pi_shift;
+};
+
+static const struct zx_crtc_regs main_crtc_regs = {
+ .fir_active = FIR_MAIN_ACTIVE,
+ .fir_htiming = FIR_MAIN_H_TIMING,
+ .fir_vtiming = FIR_MAIN_V_TIMING,
+ .timing_shift = TIMING_MAIN_SHIFT,
+ .timing_pi_shift = TIMING_MAIN_PI_SHIFT,
+};
+
+static const struct zx_crtc_regs aux_crtc_regs = {
+ .fir_active = FIR_AUX_ACTIVE,
+ .fir_htiming = FIR_AUX_H_TIMING,
+ .fir_vtiming = FIR_AUX_V_TIMING,
+ .timing_shift = TIMING_AUX_SHIFT,
+ .timing_pi_shift = TIMING_AUX_PI_SHIFT,
+};
+
+struct zx_crtc_bits {
+ u32 polarity_mask;
+ u32 polarity_shift;
+ u32 int_frame_mask;
+ u32 tc_enable;
+ u32 gl_enable;
+};
+
+static const struct zx_crtc_bits main_crtc_bits = {
+ .polarity_mask = MAIN_POL_MASK,
+ .polarity_shift = MAIN_POL_SHIFT,
+ .int_frame_mask = TIMING_INT_MAIN_FRAME,
+ .tc_enable = MAIN_TC_EN,
+ .gl_enable = OSD_CTRL0_GL0_EN,
+};
+
+static const struct zx_crtc_bits aux_crtc_bits = {
+ .polarity_mask = AUX_POL_MASK,
+ .polarity_shift = AUX_POL_SHIFT,
+ .int_frame_mask = TIMING_INT_AUX_FRAME,
+ .tc_enable = AUX_TC_EN,
+ .gl_enable = OSD_CTRL0_GL1_EN,
+};
+
+struct zx_crtc {
+ struct drm_crtc crtc;
+ struct drm_plane *primary;
+ struct zx_vou_hw *vou;
+ void __iomem *chnreg;
+ const struct zx_crtc_regs *regs;
+ const struct zx_crtc_bits *bits;
+ enum vou_chn_type chn_type;
+ struct clk *pixclk;
+};
+
+#define to_zx_crtc(x) container_of(x, struct zx_crtc, crtc)
+
+struct zx_vou_hw {
+ struct device *dev;
+ void __iomem *osd;
+ void __iomem *timing;
+ void __iomem *vouctl;
+ void __iomem *otfppu;
+ void __iomem *dtrc;
+ struct clk *axi_clk;
+ struct clk *ppu_clk;
+ struct clk *main_clk;
+ struct clk *aux_clk;
+ struct zx_crtc *main_crtc;
+ struct zx_crtc *aux_crtc;
+};
+
+static inline struct zx_vou_hw *crtc_to_vou(struct drm_crtc *crtc)
+{
+ struct zx_crtc *zcrtc = to_zx_crtc(crtc);
+
+ return zcrtc->vou;
+}
+
+void vou_inf_enable(const struct vou_inf *inf, struct drm_crtc *crtc)
+{
+ struct zx_crtc *zcrtc = to_zx_crtc(crtc);
+ struct zx_vou_hw *vou = zcrtc->vou;
+ bool is_main = zcrtc->chn_type == VOU_CHN_MAIN;
+ u32 data_sel_shift = inf->id << 1;
+
+ /* Select data format */
+ zx_writel_mask(vou->vouctl + VOU_INF_DATA_SEL, 0x3 << data_sel_shift,
+ inf->data_sel << data_sel_shift);
+
+ /* Select channel */
+ zx_writel_mask(vou->vouctl + VOU_INF_CH_SEL, 0x1 << inf->id,
+ zcrtc->chn_type << inf->id);
+
+ /* Select interface clocks */
+ zx_writel_mask(vou->vouctl + VOU_CLK_SEL, inf->clocks_sel_bits,
+ is_main ? 0 : inf->clocks_sel_bits);
+
+ /* Enable interface clocks */
+ zx_writel_mask(vou->vouctl + VOU_CLK_EN, inf->clocks_en_bits,
+ inf->clocks_en_bits);
+
+ /* Enable the device */
+ zx_writel_mask(vou->vouctl + VOU_INF_EN, 1 << inf->id, 1 << inf->id);
+}
+
+void vou_inf_disable(const struct vou_inf *inf, struct drm_crtc *crtc)
+{
+ struct zx_vou_hw *vou = crtc_to_vou(crtc);
+
+ /* Disable the device */
+ zx_writel_mask(vou->vouctl + VOU_INF_EN, 1 << inf->id, 0);
+
+ /* Disable interface clocks */
+ zx_writel_mask(vou->vouctl + VOU_CLK_EN, inf->clocks_en_bits, 0);
+}
+
+static inline void vou_chn_set_update(struct zx_crtc *zcrtc)
+{
+ zx_writel(zcrtc->chnreg + CHN_UPDATE, 1);
+}
+
+static void zx_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ struct zx_crtc *zcrtc = to_zx_crtc(crtc);
+ struct zx_vou_hw *vou = zcrtc->vou;
+ const struct zx_crtc_regs *regs = zcrtc->regs;
+ const struct zx_crtc_bits *bits = zcrtc->bits;
+ struct videomode vm;
+ u32 pol = 0;
+ u32 val;
+ int ret;
+
+ drm_display_mode_to_videomode(mode, &vm);
+
+ /* Set up timing parameters */
+ val = V_ACTIVE(vm.vactive - 1);
+ val |= H_ACTIVE(vm.hactive - 1);
+ zx_writel(vou->timing + regs->fir_active, val);
+
+ val = SYNC_WIDE(vm.hsync_len - 1);
+ val |= BACK_PORCH(vm.hback_porch - 1);
+ val |= FRONT_PORCH(vm.hfront_porch - 1);
+ zx_writel(vou->timing + regs->fir_htiming, val);
+
+ val = SYNC_WIDE(vm.vsync_len - 1);
+ val |= BACK_PORCH(vm.vback_porch - 1);
+ val |= FRONT_PORCH(vm.vfront_porch - 1);
+ zx_writel(vou->timing + regs->fir_vtiming, val);
+
+ /* Set up polarities */
+ if (vm.flags & DISPLAY_FLAGS_VSYNC_LOW)
+ pol |= 1 << POL_VSYNC_SHIFT;
+ if (vm.flags & DISPLAY_FLAGS_HSYNC_LOW)
+ pol |= 1 << POL_HSYNC_SHIFT;
+
+ zx_writel_mask(vou->timing + TIMING_CTRL, bits->polarity_mask,
+ pol << bits->polarity_shift);
+
+ /* Set up the SHIFT register, following what the ZTE BSP does */
+ zx_writel(vou->timing + regs->timing_shift, H_SHIFT_VAL);
+ zx_writel(vou->timing + regs->timing_pi_shift, H_PI_SHIFT_VAL);
+
+ /* Enable TIMING_CTRL */
+ zx_writel_mask(vou->timing + TIMING_TC_ENABLE, bits->tc_enable,
+ bits->tc_enable);
+
+ /* Configure channel screen size */
+ zx_writel_mask(zcrtc->chnreg + CHN_CTRL1, CHN_SCREEN_W_MASK,
+ vm.hactive << CHN_SCREEN_W_SHIFT);
+ zx_writel_mask(zcrtc->chnreg + CHN_CTRL1, CHN_SCREEN_H_MASK,
+ vm.vactive << CHN_SCREEN_H_SHIFT);
+
+ /* Update channel */
+ vou_chn_set_update(zcrtc);
+
+ /* Enable channel */
+ zx_writel_mask(zcrtc->chnreg + CHN_CTRL0, CHN_ENABLE, CHN_ENABLE);
+
+ /* Enable Graphic Layer */
+ zx_writel_mask(vou->osd + OSD_CTRL0, bits->gl_enable,
+ bits->gl_enable);
+
+ drm_crtc_vblank_on(crtc);
+
+ ret = clk_set_rate(zcrtc->pixclk, mode->clock * 1000);
+ if (ret) {
+ DRM_DEV_ERROR(vou->dev, "failed to set pixclk rate: %d\n", ret);
+ return;
+ }
+
+ ret = clk_prepare_enable(zcrtc->pixclk);
+ if (ret)
+ DRM_DEV_ERROR(vou->dev, "failed to enable pixclk: %d\n", ret);
+}
+
+static void zx_crtc_disable(struct drm_crtc *crtc)
+{
+ struct zx_crtc *zcrtc = to_zx_crtc(crtc);
+ const struct zx_crtc_bits *bits = zcrtc->bits;
+ struct zx_vou_hw *vou = zcrtc->vou;
+
+ clk_disable_unprepare(zcrtc->pixclk);
+
+ drm_crtc_vblank_off(crtc);
+
+ /* Disable Graphic Layer */
+ zx_writel_mask(vou->osd + OSD_CTRL0, bits->gl_enable, 0);
+
+ /* Disable channel */
+ zx_writel_mask(zcrtc->chnreg + CHN_CTRL0, CHN_ENABLE, 0);
+
+ /* Disable TIMING_CTRL */
+ zx_writel_mask(vou->timing + TIMING_TC_ENABLE, bits->tc_enable, 0);
+}
+
+static void zx_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct drm_pending_vblank_event *event = crtc->state->event;
+
+ if (!event)
+ return;
+
+ crtc->state->event = NULL;
+
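+ /*
+ * If a vblank reference can be taken, arm the event to be sent on the
+ * next vblank; otherwise send it right away.
+ */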
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static const struct drm_crtc_helper_funcs zx_crtc_helper_funcs = {
+ .enable = zx_crtc_enable,
+ .disable = zx_crtc_disable,
+ .atomic_flush = zx_crtc_atomic_flush,
+};
+
+static const struct drm_crtc_funcs zx_crtc_funcs = {
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static int zx_crtc_init(struct drm_device *drm, struct zx_vou_hw *vou,
+ enum vou_chn_type chn_type)
+{
+ struct device *dev = vou->dev;
+ struct zx_layer_data data;
+ struct zx_crtc *zcrtc;
+ int ret;
+
+ zcrtc = devm_kzalloc(dev, sizeof(*zcrtc), GFP_KERNEL);
+ if (!zcrtc)
+ return -ENOMEM;
+
+ zcrtc->vou = vou;
+ zcrtc->chn_type = chn_type;
+
+ if (chn_type == VOU_CHN_MAIN) {
+ data.layer = vou->osd + MAIN_GL_OFFSET;
+ data.csc = vou->osd + MAIN_CSC_OFFSET;
+ data.hbsc = vou->osd + MAIN_HBSC_OFFSET;
+ data.rsz = vou->otfppu + MAIN_RSZ_OFFSET;
+ zcrtc->chnreg = vou->osd + OSD_MAIN_CHN;
+ zcrtc->regs = &main_crtc_regs;
+ zcrtc->bits = &main_crtc_bits;
+ } else {
+ data.layer = vou->osd + AUX_GL_OFFSET;
+ data.csc = vou->osd + AUX_CSC_OFFSET;
+ data.hbsc = vou->osd + AUX_HBSC_OFFSET;
+ data.rsz = vou->otfppu + AUX_RSZ_OFFSET;
+ zcrtc->chnreg = vou->osd + OSD_AUX_CHN;
+ zcrtc->regs = &aux_crtc_regs;
+ zcrtc->bits = &aux_crtc_bits;
+ }
+
+ zcrtc->pixclk = devm_clk_get(dev, (chn_type == VOU_CHN_MAIN) ?
+ "main_wclk" : "aux_wclk");
+ if (IS_ERR(zcrtc->pixclk)) {
+ ret = PTR_ERR(zcrtc->pixclk);
+ DRM_DEV_ERROR(dev, "failed to get pix clk: %d\n", ret);
+ return ret;
+ }
+
+ zcrtc->primary = zx_plane_init(drm, dev, &data, DRM_PLANE_TYPE_PRIMARY);
+ if (IS_ERR(zcrtc->primary)) {
+ ret = PTR_ERR(zcrtc->primary);
+ DRM_DEV_ERROR(dev, "failed to init primary plane: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_crtc_init_with_planes(drm, &zcrtc->crtc, zcrtc->primary, NULL,
+ &zx_crtc_funcs, NULL);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to init drm crtc: %d\n", ret);
+ return ret;
+ }
+
+ drm_crtc_helper_add(&zcrtc->crtc, &zx_crtc_helper_funcs);
+
+ if (chn_type == VOU_CHN_MAIN)
+ vou->main_crtc = zcrtc;
+ else
+ vou->aux_crtc = zcrtc;
+
+ return 0;
+}
+
+static inline struct drm_crtc *zx_find_crtc(struct drm_device *drm, int pipe)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
+ if (crtc->index == pipe)
+ return crtc;
+
+ return NULL;
+}
+
+int zx_vou_enable_vblank(struct drm_device *drm, unsigned int pipe)
+{
+ struct drm_crtc *crtc;
+ struct zx_crtc *zcrtc;
+ struct zx_vou_hw *vou;
+ u32 int_frame_mask;
+
+ crtc = zx_find_crtc(drm, pipe);
+ if (!crtc)
+ return 0;
+
+ vou = crtc_to_vou(crtc);
+ zcrtc = to_zx_crtc(crtc);
+ int_frame_mask = zcrtc->bits->int_frame_mask;
+
+ zx_writel_mask(vou->timing + TIMING_INT_CTRL, int_frame_mask,
+ int_frame_mask);
+
+ return 0;
+}
+
+void zx_vou_disable_vblank(struct drm_device *drm, unsigned int pipe)
+{
+ struct drm_crtc *crtc;
+ struct zx_crtc *zcrtc;
+ struct zx_vou_hw *vou;
+
+ crtc = zx_find_crtc(drm, pipe);
+ if (!crtc)
+ return;
+
+ vou = crtc_to_vou(crtc);
+ zcrtc = to_zx_crtc(crtc);
+
+ zx_writel_mask(vou->timing + TIMING_INT_CTRL,
+ zcrtc->bits->int_frame_mask, 0);
+}
+
+static irqreturn_t vou_irq_handler(int irq, void *dev_id)
+{
+ struct zx_vou_hw *vou = dev_id;
+ u32 state;
+
+ /* Handle TIMING_CTRL frame interrupts */
+ state = zx_readl(vou->timing + TIMING_INT_STATE);
+ zx_writel(vou->timing + TIMING_INT_STATE, state);
+
+ if (state & TIMING_INT_MAIN_FRAME)
+ drm_crtc_handle_vblank(&vou->main_crtc->crtc);
+
+ if (state & TIMING_INT_AUX_FRAME)
+ drm_crtc_handle_vblank(&vou->aux_crtc->crtc);
+
+ /* Handle OSD interrupts */
+ state = zx_readl(vou->osd + OSD_INT_STA);
+ zx_writel(vou->osd + OSD_INT_CLRSTA, state);
+
+ if (state & OSD_INT_MAIN_UPT) {
+ vou_chn_set_update(vou->main_crtc);
+ zx_plane_set_update(vou->main_crtc->primary);
+ }
+
+ if (state & OSD_INT_AUX_UPT) {
+ vou_chn_set_update(vou->aux_crtc);
+ zx_plane_set_update(vou->aux_crtc->primary);
+ }
+
+ if (state & OSD_INT_ERROR)
+ DRM_DEV_ERROR(vou->dev, "OSD ERROR: 0x%08x!\n", state);
+
+ return IRQ_HANDLED;
+}
+
+static void vou_dtrc_init(struct zx_vou_hw *vou)
+{
+ /* Clear bit for bypass by ID */
+ zx_writel_mask(vou->dtrc + DTRC_DETILE_CTRL,
+ TILE2RASTESCAN_BYPASS_MODE, 0);
+
+ /* Select ARIDR mode */
+ zx_writel_mask(vou->dtrc + DTRC_DETILE_CTRL, DETILE_ARIDR_MODE_MASK,
+ DETILE_ARID_IN_ARIDR);
+
+ /* Bypass decompression for both frames */
+ zx_writel_mask(vou->dtrc + DTRC_F0_CTRL, DTRC_DECOMPRESS_BYPASS,
+ DTRC_DECOMPRESS_BYPASS);
+ zx_writel_mask(vou->dtrc + DTRC_F1_CTRL, DTRC_DECOMPRESS_BYPASS,
+ DTRC_DECOMPRESS_BYPASS);
+
+ /* Set up ARID register */
+ zx_writel(vou->dtrc + DTRC_ARID, DTRC_ARID3(0xf) | DTRC_ARID2(0xe) |
+ DTRC_ARID1(0xf) | DTRC_ARID0(0xe));
+}
+
+static void vou_hw_init(struct zx_vou_hw *vou)
+{
+ /* Set GL0 to main channel and GL1 to aux channel */
+ zx_writel_mask(vou->osd + OSD_CTRL0, OSD_CTRL0_GL0_SEL, 0);
+ zx_writel_mask(vou->osd + OSD_CTRL0, OSD_CTRL0_GL1_SEL,
+ OSD_CTRL0_GL1_SEL);
+
+ /* Release reset for all VOU modules */
+ zx_writel(vou->vouctl + VOU_SOFT_RST, ~0);
+
+ /* Select main clock for GL0 and aux clock for GL1 module */
+ zx_writel_mask(vou->vouctl + VOU_CLK_SEL, VOU_CLK_GL0_SEL, 0);
+ zx_writel_mask(vou->vouctl + VOU_CLK_SEL, VOU_CLK_GL1_SEL,
+ VOU_CLK_GL1_SEL);
+
+ /* Enable clock auto-gating for all VOU modules */
+ zx_writel(vou->vouctl + VOU_CLK_REQEN, ~0);
+
+ /* Enable all VOU module clocks */
+ zx_writel(vou->vouctl + VOU_CLK_EN, ~0);
+
+ /* Clear both OSD and TIMING_CTRL interrupt state */
+ zx_writel(vou->osd + OSD_INT_CLRSTA, ~0);
+ zx_writel(vou->timing + TIMING_INT_STATE, ~0);
+
+ /* Enable OSD and TIMING_CTRL interrupts */
+ zx_writel(vou->osd + OSD_INT_MSK, OSD_INT_ENABLE);
+ zx_writel(vou->timing + TIMING_INT_CTRL, TIMING_INT_ENABLE);
+
+ /* Select GPC as input to gl/vl scaler as a sane default setting */
+ zx_writel(vou->otfppu + OTFPPU_RSZ_DATA_SOURCE, 0x2a);
+
+ /*
+ * The channel and layer logic needs to be reset at each frame start
+ * for the VOU to work properly.
+ */
+ zx_writel_mask(vou->osd + OSD_RST_CLR, RST_PER_FRAME, RST_PER_FRAME);
+
+ vou_dtrc_init(vou);
+}
+
+static int zx_crtc_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = data;
+ struct zx_vou_hw *vou;
+ struct resource *res;
+ int irq;
+ int ret;
+
+ vou = devm_kzalloc(dev, sizeof(*vou), GFP_KERNEL);
+ if (!vou)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "osd");
+ vou->osd = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vou->osd)) {
+ ret = PTR_ERR(vou->osd);
+ DRM_DEV_ERROR(dev, "failed to remap osd region: %d\n", ret);
+ return ret;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "timing_ctrl");
+ vou->timing = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vou->timing)) {
+ ret = PTR_ERR(vou->timing);
+ DRM_DEV_ERROR(dev, "failed to remap timing_ctrl region: %d\n",
+ ret);
+ return ret;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dtrc");
+ vou->dtrc = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vou->dtrc)) {
+ ret = PTR_ERR(vou->dtrc);
+ DRM_DEV_ERROR(dev, "failed to remap dtrc region: %d\n", ret);
+ return ret;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vou_ctrl");
+ vou->vouctl = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vou->vouctl)) {
+ ret = PTR_ERR(vou->vouctl);
+ DRM_DEV_ERROR(dev, "failed to remap vou_ctrl region: %d\n",
+ ret);
+ return ret;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otfppu");
+ vou->otfppu = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vou->otfppu)) {
+ ret = PTR_ERR(vou->otfppu);
+ DRM_DEV_ERROR(dev, "failed to remap otfppu region: %d\n", ret);
+ return ret;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ vou->axi_clk = devm_clk_get(dev, "aclk");
+ if (IS_ERR(vou->axi_clk)) {
+ ret = PTR_ERR(vou->axi_clk);
+ DRM_DEV_ERROR(dev, "failed to get axi_clk: %d\n", ret);
+ return ret;
+ }
+
+ vou->ppu_clk = devm_clk_get(dev, "ppu_wclk");
+ if (IS_ERR(vou->ppu_clk)) {
+ ret = PTR_ERR(vou->ppu_clk);
+ DRM_DEV_ERROR(dev, "failed to get ppu_clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(vou->axi_clk);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to enable axi_clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(vou->ppu_clk);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to enable ppu_clk: %d\n", ret);
+ goto disable_axi_clk;
+ }
+
+ vou->dev = dev;
+ dev_set_drvdata(dev, vou);
+
+ vou_hw_init(vou);
+
+ ret = devm_request_irq(dev, irq, vou_irq_handler, 0, "zx_vou", vou);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "failed to request vou irq: %d\n", ret);
+ goto disable_ppu_clk;
+ }
+
+ ret = zx_crtc_init(drm, vou, VOU_CHN_MAIN);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to init main channel crtc: %d\n",
+ ret);
+ goto disable_ppu_clk;
+ }
+
+ ret = zx_crtc_init(drm, vou, VOU_CHN_AUX);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to init aux channel crtc: %d\n",
+ ret);
+ goto disable_ppu_clk;
+ }
+
+ return 0;
+
+disable_ppu_clk:
+ clk_disable_unprepare(vou->ppu_clk);
+disable_axi_clk:
+ clk_disable_unprepare(vou->axi_clk);
+ return ret;
+}
+
+static void zx_crtc_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct zx_vou_hw *vou = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(vou->axi_clk);
+ clk_disable_unprepare(vou->ppu_clk);
+}
+
+static const struct component_ops zx_crtc_component_ops = {
+ .bind = zx_crtc_bind,
+ .unbind = zx_crtc_unbind,
+};
+
+static int zx_crtc_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &zx_crtc_component_ops);
+}
+
+static int zx_crtc_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &zx_crtc_component_ops);
+ return 0;
+}
+
+static const struct of_device_id zx_crtc_of_match[] = {
+ { .compatible = "zte,zx296718-dpc", },
+ { /* end */ },
+};
+MODULE_DEVICE_TABLE(of, zx_crtc_of_match);
+
+struct platform_driver zx_crtc_driver = {
+ .probe = zx_crtc_probe,
+ .remove = zx_crtc_remove,
+ .driver = {
+ .name = "zx-crtc",
+ .of_match_table = zx_crtc_of_match,
+ },
+};
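
The enable/disable and clock-select writes above all go through zx_writel_mask(), which is not defined in this file; in the ZTE DRM driver it is expected to be a simple read-modify-write helper. A minimal user-space sketch of that pattern, applied to the TIMING_INT_CTRL usage in zx_vou_enable_vblank()/zx_vou_disable_vblank() (the helper name and the plain-variable "register" are stand-ins, not the driver's MMIO accessor):

#include <stdint.h>
#include <stdio.h>

#define TIMING_INT_MAIN_FRAME	(1u << 1)	/* BIT(1), as in zx_vou_regs.h */

/* Stand-in for zx_writel_mask(): read-modify-write of the masked field. */
static void writel_mask(uint32_t *reg, uint32_t mask, uint32_t val)
{
	*reg = (*reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t timing_int_ctrl = 0;

	/* enable_vblank: set the frame interrupt bit for the main channel */
	writel_mask(&timing_int_ctrl, TIMING_INT_MAIN_FRAME, TIMING_INT_MAIN_FRAME);
	printf("enabled:  0x%08x\n", timing_int_ctrl);	/* 0x00000002 */

	/* disable_vblank: clear the same bit, leaving other bits untouched */
	writel_mask(&timing_int_ctrl, TIMING_INT_MAIN_FRAME, 0);
	printf("disabled: 0x%08x\n", timing_int_ctrl);	/* 0x00000000 */
	return 0;
}
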
diff --git a/drivers/gpu/drm/zte/zx_vou.h b/drivers/gpu/drm/zte/zx_vou.h
new file mode 100644
index 000000000000..349e06cd86f4
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_vou.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ZX_VOU_H__
+#define __ZX_VOU_H__
+
+#define VOU_CRTC_MASK 0x3
+
+/* VOU output interfaces */
+enum vou_inf_id {
+ VOU_HDMI = 0,
+ VOU_RGB_LCD = 1,
+ VOU_TV_ENC = 2,
+ VOU_MIPI_DSI = 3,
+ VOU_LVDS = 4,
+ VOU_VGA = 5,
+};
+
+enum vou_inf_data_sel {
+ VOU_YUV444 = 0,
+ VOU_RGB_101010 = 1,
+ VOU_RGB_888 = 2,
+ VOU_RGB_666 = 3,
+};
+
+struct vou_inf {
+ enum vou_inf_id id;
+ enum vou_inf_data_sel data_sel;
+ u32 clocks_en_bits;
+ u32 clocks_sel_bits;
+};
+
+void vou_inf_enable(const struct vou_inf *inf, struct drm_crtc *crtc);
+void vou_inf_disable(const struct vou_inf *inf, struct drm_crtc *crtc);
+
+int zx_vou_enable_vblank(struct drm_device *drm, unsigned int pipe);
+void zx_vou_disable_vblank(struct drm_device *drm, unsigned int pipe);
+
+#endif /* __ZX_VOU_H__ */
diff --git a/drivers/gpu/drm/zte/zx_vou_regs.h b/drivers/gpu/drm/zte/zx_vou_regs.h
new file mode 100644
index 000000000000..f44e7a4ae441
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_vou_regs.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ZX_VOU_REGS_H__
+#define __ZX_VOU_REGS_H__
+
+/* Sub-module offset */
+#define MAIN_GL_OFFSET 0x130
+#define MAIN_CSC_OFFSET 0x580
+#define MAIN_HBSC_OFFSET 0x820
+#define MAIN_RSZ_OFFSET 0x600 /* OTFPPU sub-module */
+
+#define AUX_GL_OFFSET 0x200
+#define AUX_CSC_OFFSET 0x5d0
+#define AUX_HBSC_OFFSET 0x860
+#define AUX_RSZ_OFFSET 0x800
+
+/* OSD (GPC_GLOBAL) registers */
+#define OSD_INT_STA 0x04
+#define OSD_INT_CLRSTA 0x08
+#define OSD_INT_MSK 0x0c
+#define OSD_INT_AUX_UPT BIT(14)
+#define OSD_INT_MAIN_UPT BIT(13)
+#define OSD_INT_GL1_LBW BIT(10)
+#define OSD_INT_GL0_LBW BIT(9)
+#define OSD_INT_VL2_LBW BIT(8)
+#define OSD_INT_VL1_LBW BIT(7)
+#define OSD_INT_VL0_LBW BIT(6)
+#define OSD_INT_BUS_ERR BIT(3)
+#define OSD_INT_CFG_ERR BIT(2)
+#define OSD_INT_ERROR (\
+ OSD_INT_GL1_LBW | OSD_INT_GL0_LBW | \
+ OSD_INT_VL2_LBW | OSD_INT_VL1_LBW | OSD_INT_VL0_LBW | \
+ OSD_INT_BUS_ERR | OSD_INT_CFG_ERR \
+)
+#define OSD_INT_ENABLE (OSD_INT_ERROR | OSD_INT_AUX_UPT | OSD_INT_MAIN_UPT)
+#define OSD_CTRL0 0x10
+#define OSD_CTRL0_GL0_EN BIT(7)
+#define OSD_CTRL0_GL0_SEL BIT(6)
+#define OSD_CTRL0_GL1_EN BIT(5)
+#define OSD_CTRL0_GL1_SEL BIT(4)
+#define OSD_RST_CLR 0x1c
+#define RST_PER_FRAME BIT(19)
+
+/* Main/Aux channel registers */
+#define OSD_MAIN_CHN 0x470
+#define OSD_AUX_CHN 0x4d0
+#define CHN_CTRL0 0x00
+#define CHN_ENABLE BIT(0)
+#define CHN_CTRL1 0x04
+#define CHN_SCREEN_W_SHIFT 18
+#define CHN_SCREEN_W_MASK (0x1fff << CHN_SCREEN_W_SHIFT)
+#define CHN_SCREEN_H_SHIFT 5
+#define CHN_SCREEN_H_MASK (0x1fff << CHN_SCREEN_H_SHIFT)
+#define CHN_UPDATE 0x08
+
+/* TIMING_CTRL registers */
+#define TIMING_TC_ENABLE 0x04
+#define AUX_TC_EN BIT(1)
+#define MAIN_TC_EN BIT(0)
+#define FIR_MAIN_ACTIVE 0x08
+#define FIR_AUX_ACTIVE 0x0c
+#define V_ACTIVE_SHIFT 16
+#define V_ACTIVE_MASK (0xffff << V_ACTIVE_SHIFT)
+#define H_ACTIVE_SHIFT 0
+#define H_ACTIVE_MASK (0xffff << H_ACTIVE_SHIFT)
+#define FIR_MAIN_H_TIMING 0x10
+#define FIR_MAIN_V_TIMING 0x14
+#define FIR_AUX_H_TIMING 0x18
+#define FIR_AUX_V_TIMING 0x1c
+#define SYNC_WIDE_SHIFT 22
+#define SYNC_WIDE_MASK (0x3ff << SYNC_WIDE_SHIFT)
+#define BACK_PORCH_SHIFT 11
+#define BACK_PORCH_MASK (0x7ff << BACK_PORCH_SHIFT)
+#define FRONT_PORCH_SHIFT 0
+#define FRONT_PORCH_MASK (0x7ff << FRONT_PORCH_SHIFT)
+#define TIMING_CTRL 0x20
+#define AUX_POL_SHIFT 3
+#define AUX_POL_MASK (0x7 << AUX_POL_SHIFT)
+#define MAIN_POL_SHIFT 0
+#define MAIN_POL_MASK (0x7 << MAIN_POL_SHIFT)
+#define POL_DE_SHIFT 2
+#define POL_VSYNC_SHIFT 1
+#define POL_HSYNC_SHIFT 0
+#define TIMING_INT_CTRL 0x24
+#define TIMING_INT_STATE 0x28
+#define TIMING_INT_AUX_FRAME BIT(3)
+#define TIMING_INT_MAIN_FRAME BIT(1)
+#define TIMING_INT_AUX_FRAME_SEL_VSW (0x2 << 10)
+#define TIMING_INT_MAIN_FRAME_SEL_VSW (0x2 << 6)
+#define TIMING_INT_ENABLE (\
+ TIMING_INT_MAIN_FRAME_SEL_VSW | TIMING_INT_AUX_FRAME_SEL_VSW | \
+ TIMING_INT_MAIN_FRAME | TIMING_INT_AUX_FRAME \
+)
+#define TIMING_MAIN_SHIFT 0x2c
+#define TIMING_AUX_SHIFT 0x30
+#define H_SHIFT_VAL 0x0048
+#define TIMING_MAIN_PI_SHIFT 0x68
+#define TIMING_AUX_PI_SHIFT 0x6c
+#define H_PI_SHIFT_VAL 0x000f
+
+#define V_ACTIVE(x) (((x) << V_ACTIVE_SHIFT) & V_ACTIVE_MASK)
+#define H_ACTIVE(x) (((x) << H_ACTIVE_SHIFT) & H_ACTIVE_MASK)
+
+#define SYNC_WIDE(x) (((x) << SYNC_WIDE_SHIFT) & SYNC_WIDE_MASK)
+#define BACK_PORCH(x) (((x) << BACK_PORCH_SHIFT) & BACK_PORCH_MASK)
+#define FRONT_PORCH(x) (((x) << FRONT_PORCH_SHIFT) & FRONT_PORCH_MASK)
+
+/* DTRC registers */
+#define DTRC_F0_CTRL 0x2c
+#define DTRC_F1_CTRL 0x5c
+#define DTRC_DECOMPRESS_BYPASS BIT(17)
+#define DTRC_DETILE_CTRL 0x68
+#define TILE2RASTESCAN_BYPASS_MODE BIT(30)
+#define DETILE_ARIDR_MODE_MASK (0x3 << 0)
+#define DETILE_ARID_ALL 0
+#define DETILE_ARID_IN_ARIDR 1
+#define DETILE_ARID_BYP_BUT_ARIDR 2
+#define DETILE_ARID_IN_ARIDR2 3
+#define DTRC_ARID 0x6c
+#define DTRC_ARID3_SHIFT 24
+#define DTRC_ARID3_MASK (0xff << DTRC_ARID3_SHIFT)
+#define DTRC_ARID2_SHIFT 16
+#define DTRC_ARID2_MASK (0xff << DTRC_ARID2_SHIFT)
+#define DTRC_ARID1_SHIFT 8
+#define DTRC_ARID1_MASK (0xff << DTRC_ARID1_SHIFT)
+#define DTRC_ARID0_SHIFT 0
+#define DTRC_ARID0_MASK (0xff << DTRC_ARID0_SHIFT)
+#define DTRC_DEC2DDR_ARID 0x70
+
+#define DTRC_ARID3(x) (((x) << DTRC_ARID3_SHIFT) & DTRC_ARID3_MASK)
+#define DTRC_ARID2(x) (((x) << DTRC_ARID2_SHIFT) & DTRC_ARID2_MASK)
+#define DTRC_ARID1(x) (((x) << DTRC_ARID1_SHIFT) & DTRC_ARID1_MASK)
+#define DTRC_ARID0(x) (((x) << DTRC_ARID0_SHIFT) & DTRC_ARID0_MASK)
+
+/* VOU_CTRL registers */
+#define VOU_INF_EN 0x00
+#define VOU_INF_CH_SEL 0x04
+#define VOU_INF_DATA_SEL 0x08
+#define VOU_SOFT_RST 0x14
+#define VOU_CLK_SEL 0x18
+#define VOU_CLK_GL1_SEL BIT(5)
+#define VOU_CLK_GL0_SEL BIT(4)
+#define VOU_CLK_REQEN 0x20
+#define VOU_CLK_EN 0x24
+
+/* OTFPPU_CTRL registers */
+#define OTFPPU_RSZ_DATA_SOURCE 0x04
+
+#endif /* __ZX_VOU_REGS_H__ */
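
The FIR_*_ACTIVE and FIR_*_H/V_TIMING registers above are plain shift-and-mask packings, so the values the driver will program can be checked in isolation. A small stand-alone sketch re-deriving two of the macros for a hypothetical 1920x1080 mode with a 44/148/88 hsync/back-porch/front-porch split (the mode numbers are illustrative, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

/* Same packings as zx_vou_regs.h above. */
#define V_ACTIVE(x)	(((uint32_t)(x) << 16) & (0xffffu << 16))
#define H_ACTIVE(x)	(((uint32_t)(x) << 0) & (0xffffu << 0))
#define SYNC_WIDE(x)	(((uint32_t)(x) << 22) & (0x3ffu << 22))
#define BACK_PORCH(x)	(((uint32_t)(x) << 11) & (0x7ffu << 11))
#define FRONT_PORCH(x)	(((uint32_t)(x) << 0) & (0x7ffu << 0))

int main(void)
{
	uint32_t active = V_ACTIVE(1080) | H_ACTIVE(1920);
	uint32_t htiming = SYNC_WIDE(44) | BACK_PORCH(148) | FRONT_PORCH(88);

	printf("FIR_MAIN_ACTIVE   = 0x%08x\n", active);	/* 0x04380780 */
	printf("FIR_MAIN_H_TIMING = 0x%08x\n", htiming);	/* 0x0b04a058 */
	return 0;
}
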
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index b9539f7c5e9a..97218af4fe75 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -88,6 +88,8 @@ enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc)
case DRM_FORMAT_YVU420:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
@@ -1284,8 +1286,11 @@ static int ipu_irq_init(struct ipu_soc *ipu)
return ret;
}
- for (i = 0; i < IPU_NUM_IRQS; i += 32)
+ /* Mask and clear all interrupts */
+ for (i = 0; i < IPU_NUM_IRQS; i += 32) {
ipu_cm_write(ipu, 0, IPU_INT_CTRL(i / 32));
+ ipu_cm_write(ipu, ~unused[i / 32], IPU_INT_STAT(i / 32));
+ }
for (i = 0; i < IPU_NUM_IRQS; i += 32) {
gc = irq_get_domain_generic_chip(ipu->domain, i);
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index fcb7dc86167b..4b2b67113d92 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -417,42 +417,6 @@ void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full);
-void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
- u32 pixel_format, int stride, int height)
-{
- int fourcc, u_offset, v_offset;
- int uv_stride = 0;
-
- fourcc = v4l2_pix_fmt_to_drm_fourcc(pixel_format);
- switch (fourcc) {
- case DRM_FORMAT_YUV420:
- uv_stride = stride / 2;
- u_offset = stride * height;
- v_offset = u_offset + (uv_stride * height / 2);
- break;
- case DRM_FORMAT_YVU420:
- uv_stride = stride / 2;
- v_offset = stride * height;
- u_offset = v_offset + (uv_stride * height / 2);
- break;
- case DRM_FORMAT_YUV422:
- uv_stride = stride / 2;
- u_offset = stride * height;
- v_offset = u_offset + (uv_stride * height);
- break;
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV16:
- uv_stride = stride;
- u_offset = stride * height;
- v_offset = 0;
- break;
- default:
- return;
- }
- ipu_cpmem_set_yuv_planar_full(ch, uv_stride, u_offset, v_offset);
-}
-EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar);
-
static const struct ipu_rgb def_xrgb_32 = {
.red = { .offset = 16, .length = 8, },
.green = { .offset = 8, .length = 8, },
@@ -590,6 +554,13 @@ int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc)
/* burst size */
ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
break;
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ /* pix format */
+ ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0);
+ /* burst size */
+ ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
+ break;
case DRM_FORMAT_NV12:
/* pix format */
ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 4);
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index d6e5ded24418..63c7292f427a 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -529,6 +529,22 @@ void ipu_csi_set_window(struct ipu_csi *csi, struct v4l2_rect *w)
}
EXPORT_SYMBOL_GPL(ipu_csi_set_window);
+void ipu_csi_set_downsize(struct ipu_csi *csi, bool horiz, bool vert)
+{
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&csi->lock, flags);
+
+ reg = ipu_csi_read(csi, CSI_OUT_FRM_CTRL);
+ reg &= ~(CSI_HORI_DOWNSIZE_EN | CSI_VERT_DOWNSIZE_EN);
+ reg |= (horiz ? CSI_HORI_DOWNSIZE_EN : 0) |
+ (vert ? CSI_VERT_DOWNSIZE_EN : 0);
+ ipu_csi_write(csi, reg, CSI_OUT_FRM_CTRL);
+
+ spin_unlock_irqrestore(&csi->lock, flags);
+}
+
void ipu_csi_set_test_generator(struct ipu_csi *csi, bool active,
u32 r_value, u32 g_value, u32 b_value,
u32 pix_clk)
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
index a8d87ddd8a17..d2f1bd9d3deb 100644
--- a/drivers/gpu/ipu-v3/ipu-di.c
+++ b/drivers/gpu/ipu-v3/ipu-di.c
@@ -535,7 +535,7 @@ int ipu_di_adjust_videomode(struct ipu_di *di, struct videomode *mode)
return -EINVAL;
}
- dev_warn(di->ipu->dev, "videomode adapted for IPU restrictions\n");
+ dev_dbg(di->ipu->dev, "videomode adapted for IPU restrictions\n");
return 0;
}
EXPORT_SYMBOL_GPL(ipu_di_adjust_videomode);
diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
index 2ba7d437a2af..805b6fa7b5f4 100644
--- a/drivers/gpu/ipu-v3/ipu-image-convert.c
+++ b/drivers/gpu/ipu-v3/ipu-image-convert.c
@@ -1617,7 +1617,7 @@ ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode,
complete, complete_context);
if (IS_ERR(ctx))
- return ERR_PTR(PTR_ERR(ctx));
+ return ERR_CAST(ctx);
run = kzalloc(sizeof(*run), GFP_KERNEL);
if (!run) {
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 4aa3cb63fd41..bcd06306f3e8 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -314,10 +314,14 @@ static void heartbeat_onchannelcallback(void *context)
u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
struct icmsg_negotiate *negop = NULL;
- vmbus_recvpacket(channel, hbeat_txf_buf,
- PAGE_SIZE, &recvlen, &requestid);
+ while (1) {
+
+ vmbus_recvpacket(channel, hbeat_txf_buf,
+ PAGE_SIZE, &recvlen, &requestid);
+
+ if (!recvlen)
+ break;
- if (recvlen > 0) {
icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
sizeof(struct vmbuspipe_hdr)];
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 6d94e2ec5b4f..d252276feadf 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -79,12 +79,12 @@ config I2C_AMD8111
config I2C_HIX5HD2
tristate "Hix5hd2 high-speed I2C driver"
- depends on ARCH_HIX5HD2 || COMPILE_TEST
+ depends on ARCH_HISI || ARCH_HIX5HD2 || COMPILE_TEST
help
- Say Y here to include support for high-speed I2C controller in the
- Hisilicon based hix5hd2 SoCs.
+ Say Y here to include support for the high-speed I2C controller
+ used in HiSilicon hix5hd2 SoCs.
- This driver can also be built as a module. If so, the module
+ This driver can also be built as a module. If so, the module
will be called i2c-hix5hd2.
config I2C_I801
@@ -589,10 +589,10 @@ config I2C_IMG
config I2C_IMX
tristate "IMX I2C interface"
- depends on ARCH_MXC || ARCH_LAYERSCAPE
+ depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE
help
Say Y here if you want to use the IIC bus controller on
- the Freescale i.MX/MXC or Layerscape processors.
+ the Freescale i.MX/MXC, Layerscape or ColdFire processors.
This driver can also be built as a module. If so, the module
will be called i2c-imx.
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 1fe93c43215c..11e866d05368 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -95,6 +95,9 @@
#define DW_IC_STATUS_TFE BIT(2)
#define DW_IC_STATUS_MST_ACTIVITY BIT(5)
+#define DW_IC_SDA_HOLD_RX_SHIFT 16
+#define DW_IC_SDA_HOLD_RX_MASK GENMASK(23, DW_IC_SDA_HOLD_RX_SHIFT)
+
#define DW_IC_ERR_TX_ABRT 0x1
#define DW_IC_TAR_10BITADDR_MASTER BIT(12)
@@ -420,12 +423,20 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
/* Configure SDA Hold Time if required */
reg = dw_readl(dev, DW_IC_COMP_VERSION);
if (reg >= DW_IC_SDA_HOLD_MIN_VERS) {
- if (dev->sda_hold_time) {
- dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
- } else {
+ if (!dev->sda_hold_time) {
/* Keep previous hold time setting if no one set it */
dev->sda_hold_time = dw_readl(dev, DW_IC_SDA_HOLD);
}
+ /*
+ * Workaround to avoid losing TX arbitration when an I2C slave
+ * pulls SDA down "too quickly" after the falling edge of SCL, by
+ * enabling a non-zero SDA RX hold. The specification says this only
+ * extends the incoming SDA low-to-high transition while SCL is
+ * high, but it appears to also help with the issue above.
+ */
+ if (!(dev->sda_hold_time & DW_IC_SDA_HOLD_RX_MASK))
+ dev->sda_hold_time |= 1 << DW_IC_SDA_HOLD_RX_SHIFT;
+ dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
} else {
dev_warn(dev->dev,
"Hardware too old to adjust SDA hold time.\n");
diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c
index 9604024e0eb0..49f2084f7bb5 100644
--- a/drivers/i2c/busses/i2c-digicolor.c
+++ b/drivers/i2c/busses/i2c-digicolor.c
@@ -368,6 +368,7 @@ static const struct of_device_id dc_i2c_match[] = {
{ .compatible = "cnxt,cx92755-i2c" },
{ },
};
+MODULE_DEVICE_TABLE(of, dc_i2c_match);
static struct platform_driver dc_i2c_driver = {
.probe = dc_i2c_probe,
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 08847e8b8998..eb3627f35d12 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -146,6 +146,7 @@
#define SMBHSTCFG_HST_EN 1
#define SMBHSTCFG_SMB_SMI_EN 2
#define SMBHSTCFG_I2C_EN 4
+#define SMBHSTCFG_SPD_WD 0x10
/* TCO configuration bits for TCOCTL */
#define TCOCTL_EN 0x0100
@@ -865,9 +866,16 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
block = 1;
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
- /* NB: page 240 of ICH5 datasheet shows that the R/#W
- * bit should be cleared here, even when reading */
- outb_p((addr & 0x7f) << 1, SMBHSTADD(priv));
+ /*
+ * NB: page 240 of ICH5 datasheet shows that the R/#W
+ * bit should be cleared here, even when reading.
+ * However if SPD Write Disable is set (Lynx Point and later),
+ * the read will fail if we don't set the R/#W bit.
+ */
+ outb_p(((addr & 0x7f) << 1) |
+ ((priv->original_hstcfg & SMBHSTCFG_SPD_WD) ?
+ (read_write & 0x01) : 0),
+ SMBHSTADD(priv));
if (read_write == I2C_SMBUS_READ) {
/* NB: page 240 of ICH5 datasheet also shows
* that DATA1 is the cmd field when reading */
@@ -1573,6 +1581,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* Disable SMBus interrupt feature if SMBus using SMI# */
priv->features &= ~FEATURE_IRQ;
}
+ if (temp & SMBHSTCFG_SPD_WD)
+ dev_info(&dev->dev, "SPD Write Disable is set\n");
/* Clear special mode bits */
if (priv->features & (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER))
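
For the I2C block transfer change above: when SPD Write Disable is active, the R/#W bit is now ORed into the address byte instead of being left clear. A short illustration of the resulting SMBHSTADD value for a read from a hypothetical SPD EEPROM at address 0x50 (the address is an example, not from the patch):

#include <stdint.h>
#include <stdio.h>

#define SMBHSTCFG_SPD_WD	0x10
#define I2C_SMBUS_READ		1

int main(void)
{
	uint8_t addr = 0x50;			/* hypothetical SPD EEPROM address */
	uint8_t original_hstcfg = SMBHSTCFG_SPD_WD;
	uint8_t read_write = I2C_SMBUS_READ;

	/* Same composition as the i801_access() hunk above. */
	uint8_t hstadd = ((addr & 0x7f) << 1) |
			 ((original_hstcfg & SMBHSTCFG_SPD_WD) ?
			  (read_write & 0x01) : 0);

	printf("SMBHSTADD = 0x%02x\n", hstadd);	/* 0xa1: address 0x50, R/#W = 1 */
	return 0;
}
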
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 592a8f26a708..47fc1f1acff7 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1009,10 +1009,13 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
rinfo->sda_gpio = of_get_named_gpio(pdev->dev.of_node, "sda-gpios", 0);
rinfo->scl_gpio = of_get_named_gpio(pdev->dev.of_node, "scl-gpios", 0);
- if (!gpio_is_valid(rinfo->sda_gpio) ||
- !gpio_is_valid(rinfo->scl_gpio) ||
- IS_ERR(i2c_imx->pinctrl_pins_default) ||
- IS_ERR(i2c_imx->pinctrl_pins_gpio)) {
+ if (rinfo->sda_gpio == -EPROBE_DEFER ||
+ rinfo->scl_gpio == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else if (!gpio_is_valid(rinfo->sda_gpio) ||
+ !gpio_is_valid(rinfo->scl_gpio) ||
+ IS_ERR(i2c_imx->pinctrl_pins_default) ||
+ IS_ERR(i2c_imx->pinctrl_pins_gpio)) {
dev_dbg(&pdev->dev, "recovery information incomplete\n");
return 0;
}
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index b8ea62105f42..30132c3957cd 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -729,6 +729,7 @@ static const struct of_device_id jz4780_i2c_of_matches[] = {
{ .compatible = "ingenic,jz4780-i2c", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, jz4780_i2c_of_matches);
static int jz4780_i2c_probe(struct platform_device *pdev)
{
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 50702c7bb244..df220666d627 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -694,6 +694,8 @@ static int rk3x_i2c_v0_calc_timings(unsigned long clk_rate,
t_calc->div_low--;
t_calc->div_high--;
+ /* Set the tuning value to 0 so that the con register is not updated */
+ t_calc->tuning = 0;
/* Maximum divider supported by hw is 0xffff */
if (t_calc->div_low > 0xffff) {
t_calc->div_low = 0xffff;
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index 263685c7a512..05cf192ef1ac 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -105,7 +105,7 @@ struct slimpro_i2c_dev {
struct mbox_chan *mbox_chan;
struct mbox_client mbox_client;
struct completion rd_complete;
- u8 dma_buffer[I2C_SMBUS_BLOCK_MAX];
+ u8 dma_buffer[I2C_SMBUS_BLOCK_MAX + 1]; /* dma_buffer[0] is used for length */
u32 *resp_msg;
};
diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
index 2a972ed7aa0d..e29ff37a43bd 100644
--- a/drivers/i2c/busses/i2c-xlp9xx.c
+++ b/drivers/i2c/busses/i2c-xlp9xx.c
@@ -426,6 +426,7 @@ static const struct of_device_id xlp9xx_i2c_of_match[] = {
{ .compatible = "netlogic,xlp980-i2c", },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, xlp9xx_i2c_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id xlp9xx_i2c_acpi_ids[] = {
diff --git a/drivers/i2c/busses/i2c-xlr.c b/drivers/i2c/busses/i2c-xlr.c
index 0968f59b6df5..ad17d88d8573 100644
--- a/drivers/i2c/busses/i2c-xlr.c
+++ b/drivers/i2c/busses/i2c-xlr.c
@@ -358,6 +358,7 @@ static const struct of_device_id xlr_i2c_dt_ids[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(of, xlr_i2c_dt_ids);
static int xlr_i2c_probe(struct platform_device *pdev)
{
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 5ab67219f71e..b432b64e307a 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1681,6 +1681,7 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
static void of_i2c_register_devices(struct i2c_adapter *adap)
{
struct device_node *bus, *node;
+ struct i2c_client *client;
/* Only register child devices if the adapter has a node pointer set */
if (!adap->dev.of_node)
@@ -1695,7 +1696,14 @@ static void of_i2c_register_devices(struct i2c_adapter *adap)
for_each_available_child_of_node(bus, node) {
if (of_node_test_and_set_flag(node, OF_POPULATED))
continue;
- of_i2c_register_device(adap, node);
+
+ client = of_i2c_register_device(adap, node);
+ if (IS_ERR(client)) {
+ dev_warn(&adap->dev,
+ "Failed to create I2C device for %s\n",
+ node->full_name);
+ of_node_clear_flag(node, OF_POPULATED);
+ }
}
of_node_put(bus);
@@ -2171,6 +2179,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
/* add the driver to the list of i2c drivers in the driver core */
driver->driver.owner = owner;
driver->driver.bus = &i2c_bus_type;
+ INIT_LIST_HEAD(&driver->clients);
/* When registration returns, the driver core
* will have called probe() for all matching-but-unbound devices.
@@ -2181,7 +2190,6 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
pr_debug("driver [%s] registered\n", driver->driver.name);
- INIT_LIST_HEAD(&driver->clients);
/* Walk the adapters that are already present */
i2c_for_each_dev(driver, __process_new_driver);
@@ -2299,6 +2307,7 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
if (IS_ERR(client)) {
dev_err(&adap->dev, "failed to create client for '%s'\n",
rd->dn->full_name);
+ of_node_clear_flag(rd->dn, OF_POPULATED);
return notifier_from_errno(PTR_ERR(client));
}
break;
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 7edcf3238620..99c051490eff 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -437,6 +437,8 @@ config STX104
config TI_ADC081C
tristate "Texas Instruments ADC081C/ADC101C/ADC121C family"
depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
If you say yes here you get support for Texas Instruments ADC081C,
ADC101C and ADC121C ADC chips.
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
index bd321b305a0a..ef761a508630 100644
--- a/drivers/iio/chemical/atlas-ph-sensor.c
+++ b/drivers/iio/chemical/atlas-ph-sensor.c
@@ -213,13 +213,14 @@ static int atlas_check_ec_calibration(struct atlas_data *data)
struct device *dev = &data->client->dev;
int ret;
unsigned int val;
+ __be16 rval;
- ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &val, 2);
+ ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &rval, 2);
if (ret)
return ret;
- dev_info(dev, "probe set to K = %d.%.2d", be16_to_cpu(val) / 100,
- be16_to_cpu(val) % 100);
+ val = be16_to_cpu(rval);
+ dev_info(dev, "probe set to K = %d.%.2d", val / 100, val % 100);
ret = regmap_read(data->regmap, ATLAS_REG_EC_CALIB_STATUS, &val);
if (ret)
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index 39dd2026ccc9..066161a4bccd 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -123,22 +123,24 @@ static int maxim_thermocouple_read(struct maxim_thermocouple_data *data,
{
unsigned int storage_bytes = data->chip->read_size;
unsigned int shift = chan->scan_type.shift + (chan->address * 8);
- unsigned int buf;
+ __be16 buf16;
+ __be32 buf32;
int ret;
- ret = spi_read(data->spi, (void *) &buf, storage_bytes);
- if (ret)
- return ret;
-
switch (storage_bytes) {
case 2:
- *val = be16_to_cpu(buf);
+ ret = spi_read(data->spi, (void *)&buf16, storage_bytes);
+ *val = be16_to_cpu(buf16);
break;
case 4:
- *val = be32_to_cpu(buf);
+ ret = spi_read(data->spi, (void *)&buf32, storage_bytes);
+ *val = be32_to_cpu(buf32);
break;
}
+ if (ret)
+ return ret;
+
/* check to be sure this is a valid reading */
if (*val & data->chip->status_bit)
return -EINVAL;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 22174774dbb8..63036c731626 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1019,7 +1019,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
- resp.cache_line_size = L1_CACHE_BYTES;
+ resp.cache_line_size = cache_line_size();
resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 41f4c2afbcdd..7ce97daf26c6 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -52,7 +52,6 @@ enum {
enum {
MLX5_IB_SQ_STRIDE = 6,
- MLX5_IB_CACHE_LINE_SIZE = 64,
};
static const u32 mlx5_ib_opcode[] = {
diff --git a/drivers/infiniband/hw/qedr/Kconfig b/drivers/infiniband/hw/qedr/Kconfig
index 7c06d85568d4..6c9f3923e838 100644
--- a/drivers/infiniband/hw/qedr/Kconfig
+++ b/drivers/infiniband/hw/qedr/Kconfig
@@ -2,6 +2,7 @@ config INFINIBAND_QEDR
tristate "QLogic RoCE driver"
depends on 64BIT && QEDE
select QED_LL2
+ select QED_RDMA
---help---
This driver provides low-level InfiniBand over Ethernet
support for QLogic QED host channel adapters (HCAs).
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 7b8d2d9e2263..da12717a3eb7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -63,6 +63,8 @@ enum ipoib_flush_level {
enum {
IPOIB_ENCAP_LEN = 4,
+ IPOIB_PSEUDO_LEN = 20,
+ IPOIB_HARD_LEN = IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN,
IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
IPOIB_UD_RX_SG = 2, /* max buffer needed for 4K mtu */
@@ -134,15 +136,21 @@ struct ipoib_header {
u16 reserved;
};
-struct ipoib_cb {
- struct qdisc_skb_cb qdisc_cb;
- u8 hwaddr[INFINIBAND_ALEN];
+struct ipoib_pseudo_header {
+ u8 hwaddr[INFINIBAND_ALEN];
};
-static inline struct ipoib_cb *ipoib_skb_cb(const struct sk_buff *skb)
+static inline void skb_add_pseudo_hdr(struct sk_buff *skb)
{
- BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct ipoib_cb));
- return (struct ipoib_cb *)skb->cb;
+ char *data = skb_push(skb, IPOIB_PSEUDO_LEN);
+
+ /*
+ * Only the IPoIB header is present now; make room for a dummy
+ * pseudo header and set the skb fields accordingly.
+ */
+ memset(data, 0, IPOIB_PSEUDO_LEN);
+ skb_reset_mac_header(skb);
+ skb_pull(skb, IPOIB_HARD_LEN);
}
/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
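
The pseudo-header rework above changes the hard-header arithmetic: the device now advertises IPOIB_HARD_LEN (encap plus pseudo header), and ipoib_cm.c, a little further down, reserves ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN bytes of head room. A stand-alone check of those constants:

#include <stdio.h>

#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define IPOIB_ENCAP_LEN		4
#define IPOIB_PSEUDO_LEN	20
#define IPOIB_HARD_LEN		(IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN)
#define IPOIB_CM_RX_RESERVE	(ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN)

int main(void)
{
	printf("IPOIB_HARD_LEN      = %d\n", IPOIB_HARD_LEN);		/* 24 */
	printf("IPOIB_CM_RX_RESERVE = %d\n", IPOIB_CM_RX_RESERVE);	/* 28 */
	return 0;
}
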
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 4ad297d3de89..339a1eecdfe3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -63,6 +63,8 @@ MODULE_PARM_DESC(cm_data_debug_level,
#define IPOIB_CM_RX_DELAY (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)
+#define IPOIB_CM_RX_RESERVE (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN)
+
static struct ib_qp_attr ipoib_cm_err_attr = {
.qp_state = IB_QPS_ERR
};
@@ -146,15 +148,15 @@ static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
struct sk_buff *skb;
int i;
- skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
+ skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16));
if (unlikely(!skb))
return NULL;
/*
- * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
+ * IPoIB adds an IPOIB_ENCAP_LEN byte header; this will align the
* IP header to a multiple of 16.
*/
- skb_reserve(skb, 12);
+ skb_reserve(skb, IPOIB_CM_RX_RESERVE);
mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
DMA_FROM_DEVICE);
@@ -624,9 +626,9 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
if (wc->byte_len < IPOIB_CM_COPYBREAK) {
int dlen = wc->byte_len;
- small_skb = dev_alloc_skb(dlen + 12);
+ small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE);
if (small_skb) {
- skb_reserve(small_skb, 12);
+ skb_reserve(small_skb, IPOIB_CM_RX_RESERVE);
ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
dlen, DMA_FROM_DEVICE);
skb_copy_from_linear_data(skb, small_skb->data, dlen);
@@ -663,8 +665,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
copied:
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
- skb_reset_mac_header(skb);
- skb_pull(skb, IPOIB_ENCAP_LEN);
+ skb_add_pseudo_hdr(skb);
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index be11d5d5b8c1..830fecb6934c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -128,16 +128,15 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
- skb = dev_alloc_skb(buf_size + IPOIB_ENCAP_LEN);
+ skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN);
if (unlikely(!skb))
return NULL;
/*
- * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
- * header. So we need 4 more bytes to get to 48 and align the
- * IP header to a multiple of 16.
+ * the IP header will be at IPOIB_HARD_LEN + IB_GRH_BYTES, which is
+ * 64-byte aligned
*/
- skb_reserve(skb, 4);
+ skb_reserve(skb, sizeof(struct ipoib_pseudo_header));
mapping = priv->rx_ring[id].mapping;
mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
@@ -253,8 +252,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
skb_pull(skb, IB_GRH_BYTES);
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
- skb_reset_mac_header(skb);
- skb_pull(skb, IPOIB_ENCAP_LEN);
+ skb_add_pseudo_hdr(skb);
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 5636fc3da6b8..b58d9dca5c93 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -925,9 +925,12 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
ipoib_neigh_free(neigh);
goto err_drop;
}
- if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
+ if (skb_queue_len(&neigh->queue) <
+ IPOIB_MAX_PATH_REC_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, IPOIB_PSEUDO_LEN);
__skb_queue_tail(&neigh->queue, skb);
- else {
+ } else {
ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
skb_queue_len(&neigh->queue));
goto err_drop;
@@ -964,7 +967,7 @@ err_drop:
}
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
- struct ipoib_cb *cb)
+ struct ipoib_pseudo_header *phdr)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_path *path;
@@ -972,16 +975,18 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
spin_lock_irqsave(&priv->lock, flags);
- path = __path_find(dev, cb->hwaddr + 4);
+ path = __path_find(dev, phdr->hwaddr + 4);
if (!path || !path->valid) {
int new_path = 0;
if (!path) {
- path = path_rec_create(dev, cb->hwaddr + 4);
+ path = path_rec_create(dev, phdr->hwaddr + 4);
new_path = 1;
}
if (path) {
if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, IPOIB_PSEUDO_LEN);
__skb_queue_tail(&path->queue, skb);
} else {
++dev->stats.tx_dropped;
@@ -1009,10 +1014,12 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
be16_to_cpu(path->pathrec.dlid));
spin_unlock_irqrestore(&priv->lock, flags);
- ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
+ ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
return;
} else if ((path->query || !path_rec_start(dev, path)) &&
skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, IPOIB_PSEUDO_LEN);
__skb_queue_tail(&path->queue, skb);
} else {
++dev->stats.tx_dropped;
@@ -1026,13 +1033,15 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_neigh *neigh;
- struct ipoib_cb *cb = ipoib_skb_cb(skb);
+ struct ipoib_pseudo_header *phdr;
struct ipoib_header *header;
unsigned long flags;
+ phdr = (struct ipoib_pseudo_header *) skb->data;
+ skb_pull(skb, sizeof(*phdr));
header = (struct ipoib_header *) skb->data;
- if (unlikely(cb->hwaddr[4] == 0xff)) {
+ if (unlikely(phdr->hwaddr[4] == 0xff)) {
/* multicast, arrange "if" according to probability */
if ((header->proto != htons(ETH_P_IP)) &&
(header->proto != htons(ETH_P_IPV6)) &&
@@ -1045,13 +1054,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
/* Add in the P_Key for multicast*/
- cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
- cb->hwaddr[9] = priv->pkey & 0xff;
+ phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
+ phdr->hwaddr[9] = priv->pkey & 0xff;
- neigh = ipoib_neigh_get(dev, cb->hwaddr);
+ neigh = ipoib_neigh_get(dev, phdr->hwaddr);
if (likely(neigh))
goto send_using_neigh;
- ipoib_mcast_send(dev, cb->hwaddr, skb);
+ ipoib_mcast_send(dev, phdr->hwaddr, skb);
return NETDEV_TX_OK;
}
@@ -1060,16 +1069,16 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
case htons(ETH_P_IP):
case htons(ETH_P_IPV6):
case htons(ETH_P_TIPC):
- neigh = ipoib_neigh_get(dev, cb->hwaddr);
+ neigh = ipoib_neigh_get(dev, phdr->hwaddr);
if (unlikely(!neigh)) {
- neigh_add_path(skb, cb->hwaddr, dev);
+ neigh_add_path(skb, phdr->hwaddr, dev);
return NETDEV_TX_OK;
}
break;
case htons(ETH_P_ARP):
case htons(ETH_P_RARP):
/* for unicast ARP and RARP should always perform path find */
- unicast_arp_send(skb, dev, cb);
+ unicast_arp_send(skb, dev, phdr);
return NETDEV_TX_OK;
default:
/* ethertype not supported by IPoIB */
@@ -1086,11 +1095,13 @@ send_using_neigh:
goto unref;
}
} else if (neigh->ah) {
- ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(cb->hwaddr));
+ ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(phdr->hwaddr));
goto unref;
}
if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, sizeof(*phdr));
spin_lock_irqsave(&priv->lock, flags);
__skb_queue_tail(&neigh->queue, skb);
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1122,8 +1133,8 @@ static int ipoib_hard_header(struct sk_buff *skb,
unsigned short type,
const void *daddr, const void *saddr, unsigned len)
{
+ struct ipoib_pseudo_header *phdr;
struct ipoib_header *header;
- struct ipoib_cb *cb = ipoib_skb_cb(skb);
header = (struct ipoib_header *) skb_push(skb, sizeof *header);
@@ -1132,12 +1143,13 @@ static int ipoib_hard_header(struct sk_buff *skb,
/*
* we don't rely on dst_entry structure, always stuff the
- * destination address into skb->cb so we can figure out where
+ * destination address into skb hard header so we can figure out where
* to send the packet later.
*/
- memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
+ phdr = (struct ipoib_pseudo_header *) skb_push(skb, sizeof(*phdr));
+ memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
- return sizeof *header;
+ return IPOIB_HARD_LEN;
}
static void ipoib_set_mcast_list(struct net_device *dev)
@@ -1759,7 +1771,7 @@ void ipoib_setup(struct net_device *dev)
dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
- dev->hard_header_len = IPOIB_ENCAP_LEN;
+ dev->hard_header_len = IPOIB_HARD_LEN;
dev->addr_len = INFINIBAND_ALEN;
dev->type = ARPHRD_INFINIBAND;
dev->tx_queue_len = ipoib_sendq_size * 2;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index d3394b6add24..1909dd252c94 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -796,9 +796,11 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
__ipoib_mcast_add(dev, mcast);
list_add_tail(&mcast->list, &priv->multicast_list);
}
- if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
+ if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, sizeof(struct ipoib_pseudo_header));
skb_queue_tail(&mcast->pkt_queue, skb);
- else {
+ } else {
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
}
diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c
index 54eceb30ede5..a7d39689bbfb 100644
--- a/drivers/input/mouse/focaltech.c
+++ b/drivers/input/mouse/focaltech.c
@@ -43,7 +43,7 @@ int focaltech_detect(struct psmouse *psmouse, bool set_properties)
if (set_properties) {
psmouse->vendor = "FocalTech";
- psmouse->name = "FocalTech Touchpad";
+ psmouse->name = "Touchpad";
}
return 0;
@@ -146,8 +146,8 @@ static void focaltech_report_state(struct psmouse *psmouse)
}
input_mt_report_pointer_emulation(dev, true);
- input_report_key(psmouse->dev, BTN_LEFT, state->pressed);
- input_sync(psmouse->dev);
+ input_report_key(dev, BTN_LEFT, state->pressed);
+ input_sync(dev);
}
static void focaltech_process_touch_packet(struct psmouse *psmouse,
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index f4bfb4b2d50a..073246c7d163 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -877,6 +877,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
},
},
+ {
+ /* Schenker XMG C504 - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
+ },
+ },
{ }
};
diff --git a/drivers/ipack/ipack.c b/drivers/ipack/ipack.c
index c0e7b624ce54..12102448fddd 100644
--- a/drivers/ipack/ipack.c
+++ b/drivers/ipack/ipack.c
@@ -178,7 +178,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
idev->id_vendor, idev->id_device);
}
-ipack_device_attr(id_format, "0x%hhu\n");
+ipack_device_attr(id_format, "0x%hhx\n");
static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(id_device);
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 8abde6b8cedc..6d53810963f7 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -266,7 +266,7 @@ static struct raid_type {
{"raid10_offset", "raid10 offset (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_OFFSET},
{"raid10_near", "raid10 near (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_NEAR},
{"raid10", "raid10 (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_DEFAULT},
- {"raid4", "raid4 (dedicated last parity disk)", 1, 2, 4, ALGORITHM_PARITY_N}, /* raid4 layout = raid5_n */
+ {"raid4", "raid4 (dedicated first parity disk)", 1, 2, 5, ALGORITHM_PARITY_0}, /* raid4 layout = raid5_0 */
{"raid5_n", "raid5 (dedicated last parity disk)", 1, 2, 5, ALGORITHM_PARITY_N},
{"raid5_ls", "raid5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},
{"raid5_rs", "raid5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},
@@ -2087,11 +2087,11 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
/*
* No takeover/reshaping, because we don't have the extended v1.9.0 metadata
*/
- if (le32_to_cpu(sb->level) != mddev->level) {
+ if (le32_to_cpu(sb->level) != mddev->new_level) {
DMERR("Reshaping/takeover raid sets not yet supported. (raid level/stripes/size change)");
return -EINVAL;
}
- if (le32_to_cpu(sb->layout) != mddev->layout) {
+ if (le32_to_cpu(sb->layout) != mddev->new_layout) {
DMERR("Reshaping raid sets not yet supported. (raid layout change)");
DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
DMERR(" Old layout: %s w/ %d copies",
@@ -2102,7 +2102,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
raid10_md_layout_to_copies(mddev->layout));
return -EINVAL;
}
- if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
+ if (le32_to_cpu(sb->stripe_sectors) != mddev->new_chunk_sectors) {
DMERR("Reshaping raid sets not yet supported. (stripe sectors change)");
return -EINVAL;
}
@@ -2115,6 +2115,8 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
return -EINVAL;
}
+ DMINFO("Discovered old metadata format; upgrading to extended metadata format");
+
/* Table line is checked vs. authoritative superblock */
rs_set_new(rs);
}
@@ -2258,7 +2260,8 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
if (!mddev->events && super_init_validation(rs, rdev))
return -EINVAL;
- if (le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
+ if (le32_to_cpu(sb->compat_features) &&
+ le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags";
return -EINVAL;
}
@@ -3646,7 +3649,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 9, 0},
+ .version = {1, 9, 1},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index bdf1606f67bc..9a8b71067c6e 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -145,7 +145,6 @@ static void dispatch_bios(void *context, struct bio_list *bio_list)
struct dm_raid1_bio_record {
struct mirror *m;
- /* if details->bi_bdev == NULL, details were not saved */
struct dm_bio_details details;
region_t write_region;
};
@@ -1200,8 +1199,6 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
struct dm_raid1_bio_record *bio_record =
dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
- bio_record->details.bi_bdev = NULL;
-
if (rw == WRITE) {
/* Save region for mirror_end_io() handler */
bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
@@ -1260,22 +1257,12 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
}
if (error == -EOPNOTSUPP)
- goto out;
+ return error;
if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
- goto out;
+ return error;
if (unlikely(error)) {
- if (!bio_record->details.bi_bdev) {
- /*
- * There wasn't enough memory to record necessary
- * information for a retry or there was no other
- * mirror in-sync.
- */
- DMERR_LIMIT("Mirror read failed.");
- return -EIO;
- }
-
m = bio_record->m;
DMERR("Mirror read failed from %s. Trying alternative device.",
@@ -1291,7 +1278,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
bd = &bio_record->details;
dm_bio_restore(bd, bio);
- bio_record->details.bi_bdev = NULL;
+ bio->bi_error = 0;
queue_bio(ms, bio, rw);
return DM_ENDIO_INCOMPLETE;
@@ -1299,9 +1286,6 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
DMERR("All replicated volumes dead, failing I/O");
}
-out:
- bio_record->details.bi_bdev = NULL;
-
return error;
}
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index dc75bea0d541..1d0d2adc050a 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -856,8 +856,11 @@ int dm_old_init_request_queue(struct mapped_device *md)
kthread_init_worker(&md->kworker);
md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
"kdmwork-%s", dm_device_name(md));
- if (IS_ERR(md->kworker_task))
- return PTR_ERR(md->kworker_task);
+ if (IS_ERR(md->kworker_task)) {
+ int error = PTR_ERR(md->kworker_task);
+ md->kworker_task = NULL;
+ return error;
+ }
elv_register_queue(md->queue);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 3e407a9cde1f..c4b53b332607 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -695,37 +695,32 @@ int dm_table_add_target(struct dm_table *t, const char *type,
tgt->type = dm_get_target_type(type);
if (!tgt->type) {
- DMERR("%s: %s: unknown target type", dm_device_name(t->md),
- type);
+ DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
return -EINVAL;
}
if (dm_target_needs_singleton(tgt->type)) {
if (t->num_targets) {
- DMERR("%s: target type %s must appear alone in table",
- dm_device_name(t->md), type);
- return -EINVAL;
+ tgt->error = "singleton target type must appear alone in table";
+ goto bad;
}
t->singleton = true;
}
if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
- DMERR("%s: target type %s may not be included in read-only tables",
- dm_device_name(t->md), type);
- return -EINVAL;
+ tgt->error = "target type may not be included in a read-only table";
+ goto bad;
}
if (t->immutable_target_type) {
if (t->immutable_target_type != tgt->type) {
- DMERR("%s: immutable target type %s cannot be mixed with other target types",
- dm_device_name(t->md), t->immutable_target_type->name);
- return -EINVAL;
+ tgt->error = "immutable target type cannot be mixed with other target types";
+ goto bad;
}
} else if (dm_target_is_immutable(tgt->type)) {
if (t->num_targets) {
- DMERR("%s: immutable target type %s cannot be mixed with other target types",
- dm_device_name(t->md), tgt->type->name);
- return -EINVAL;
+ tgt->error = "immutable target type cannot be mixed with other target types";
+ goto bad;
}
t->immutable_target_type = tgt->type;
}
@@ -740,7 +735,6 @@ int dm_table_add_target(struct dm_table *t, const char *type,
*/
if (!adjoin(t, tgt)) {
tgt->error = "Gap in table";
- r = -EINVAL;
goto bad;
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 147af9536d0c..ef7bf1dd6900 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1423,8 +1423,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
if (md->bs)
bioset_free(md->bs);
- cleanup_srcu_struct(&md->io_barrier);
-
if (md->disk) {
spin_lock(&_minor_lock);
md->disk->private_data = NULL;
@@ -1436,6 +1434,8 @@ static void cleanup_mapped_device(struct mapped_device *md)
if (md->queue)
blk_cleanup_queue(md->queue);
+ cleanup_srcu_struct(&md->io_barrier);
+
if (md->bdev) {
bdput(md->bdev);
md->bdev = NULL;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index eac84d8ff724..2089d46b0eb8 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3887,10 +3887,10 @@ array_state_show(struct mddev *mddev, char *page)
st = read_auto;
break;
case 0:
- if (mddev->in_sync)
- st = clean;
- else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
+ if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
st = write_pending;
+ else if (mddev->in_sync)
+ st = clean;
else if (mddev->safemode)
st = active_idle;
else
@@ -8144,14 +8144,14 @@ void md_do_sync(struct md_thread *thread)
if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
- mddev->curr_resync > 2) {
+ mddev->curr_resync > 3) {
mddev->curr_resync_completed = mddev->curr_resync;
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
mddev->pers->sync_request(mddev, max_sectors, &skipped);
if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
- mddev->curr_resync > 2) {
+ mddev->curr_resync > 3) {
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
if (mddev->curr_resync >= mddev->recovery_cp) {
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 1961d827dbd1..29e2df5cd77b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -403,11 +403,14 @@ static void raid1_end_write_request(struct bio *bio)
struct bio *to_put = NULL;
int mirror = find_bio_disk(r1_bio, bio);
struct md_rdev *rdev = conf->mirrors[mirror].rdev;
+ bool discard_error;
+
+ discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
/*
* 'one mirror IO has finished' event handler:
*/
- if (bio->bi_error) {
+ if (bio->bi_error && !discard_error) {
set_bit(WriteErrorSeen, &rdev->flags);
if (!test_and_set_bit(WantReplacement, &rdev->flags))
set_bit(MD_RECOVERY_NEEDED, &
@@ -444,7 +447,7 @@ static void raid1_end_write_request(struct bio *bio)
/* Maybe we can clear some bad blocks. */
if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
- &first_bad, &bad_sectors)) {
+ &first_bad, &bad_sectors) && !discard_error) {
r1_bio->bios[mirror] = IO_MADE_GOOD;
set_bit(R1BIO_MadeGood, &r1_bio->state);
}
@@ -2294,17 +2297,23 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
* This is all done synchronously while the array is
* frozen
*/
+
+ bio = r1_bio->bios[r1_bio->read_disk];
+ bdevname(bio->bi_bdev, b);
+ bio_put(bio);
+ r1_bio->bios[r1_bio->read_disk] = NULL;
+
if (mddev->ro == 0) {
freeze_array(conf, 1);
fix_read_error(conf, r1_bio->read_disk,
r1_bio->sector, r1_bio->sectors);
unfreeze_array(conf);
- } else
- md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
+ } else {
+ r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
+ }
+
rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
- bio = r1_bio->bios[r1_bio->read_disk];
- bdevname(bio->bi_bdev, b);
read_more:
disk = read_balance(conf, r1_bio, &max_sectors);
if (disk == -1) {
@@ -2315,11 +2324,6 @@ read_more:
} else {
const unsigned long do_sync
= r1_bio->master_bio->bi_opf & REQ_SYNC;
- if (bio) {
- r1_bio->bios[r1_bio->read_disk] =
- mddev->ro ? IO_BLOCKED : NULL;
- bio_put(bio);
- }
r1_bio->read_disk = disk;
bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index be1a9fca3b2d..39fddda2fef2 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -447,6 +447,9 @@ static void raid10_end_write_request(struct bio *bio)
struct r10conf *conf = r10_bio->mddev->private;
int slot, repl;
struct md_rdev *rdev = NULL;
+ bool discard_error;
+
+ discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
@@ -460,7 +463,7 @@ static void raid10_end_write_request(struct bio *bio)
/*
* this branch is our 'one mirror IO has finished' event handler:
*/
- if (bio->bi_error) {
+ if (bio->bi_error && !discard_error) {
if (repl)
/* Never record new bad blocks to replacement,
* just fail it.
@@ -503,7 +506,7 @@ static void raid10_end_write_request(struct bio *bio)
if (is_badblock(rdev,
r10_bio->devs[slot].addr,
r10_bio->sectors,
- &first_bad, &bad_sectors)) {
+ &first_bad, &bad_sectors) && !discard_error) {
bio_put(bio);
if (repl)
r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 1b1ab4a1d132..a227a9f3ee65 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1087,7 +1087,7 @@ static int r5l_recovery_log(struct r5l_log *log)
* 1's seq + 10 and let superblock points to meta2. The same recovery will
* not think meta 3 is a valid meta, because its seq doesn't match
*/
- if (ctx.seq > log->last_cp_seq + 1) {
+ if (ctx.seq > log->last_cp_seq) {
int ret;
ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10);
@@ -1096,6 +1096,8 @@ static int r5l_recovery_log(struct r5l_log *log)
log->seq = ctx.seq + 11;
log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
r5l_write_super(log, ctx.pos);
+ log->last_checkpoint = ctx.pos;
+ log->next_checkpoint = ctx.pos;
} else {
log->log_start = ctx.pos;
log->seq = ctx.seq;
@@ -1154,6 +1156,7 @@ create:
if (create_super) {
log->last_cp_seq = prandom_u32();
cp = 0;
+ r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
/*
* Make sure super points to correct address. Log might have
* data very soon. If super hasn't correct log tail address,
@@ -1168,6 +1171,7 @@ create:
if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
log->max_free_space = RECLAIM_MAX_FREE_SPACE;
log->last_checkpoint = cp;
+ log->next_checkpoint = cp;
__free_page(page);
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index d4bdba60b0f7..52bc42da8a4c 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -73,23 +73,34 @@ static int flexcop_usb_readwrite_dw(struct flexcop_device *fc, u16 wRegOffsPCI,
u8 request_type = (read ? USB_DIR_IN : USB_DIR_OUT) | USB_TYPE_VENDOR;
u8 wAddress = B2C2_FLEX_PCIOFFSET_TO_INTERNALADDR(wRegOffsPCI) |
(read ? 0x80 : 0);
+ int ret;
+
+ mutex_lock(&fc_usb->data_mutex);
+ if (!read)
+ memcpy(fc_usb->data, val, sizeof(*val));
- int len = usb_control_msg(fc_usb->udev,
+ ret = usb_control_msg(fc_usb->udev,
read ? B2C2_USB_CTRL_PIPE_IN : B2C2_USB_CTRL_PIPE_OUT,
request,
request_type, /* 0xc0 read or 0x40 write */
wAddress,
0,
- val,
+ fc_usb->data,
sizeof(u32),
B2C2_WAIT_FOR_OPERATION_RDW * HZ);
- if (len != sizeof(u32)) {
+ if (ret != sizeof(u32)) {
err("error while %s dword from %d (%d).", read ? "reading" :
"writing", wAddress, wRegOffsPCI);
- return -EIO;
+ if (ret >= 0)
+ ret = -EIO;
}
- return 0;
+
+ if (read && ret >= 0)
+ memcpy(val, fc_usb->data, sizeof(*val));
+ mutex_unlock(&fc_usb->data_mutex);
+
+ return ret;
}
/*
* DKT 010817 - add support for V8 memory read/write and flash update
@@ -100,9 +111,14 @@ static int flexcop_usb_v8_memory_req(struct flexcop_usb *fc_usb,
{
u8 request_type = USB_TYPE_VENDOR;
u16 wIndex;
- int nWaitTime, pipe, len;
+ int nWaitTime, pipe, ret;
wIndex = page << 8;
+ if (buflen > sizeof(fc_usb->data)) {
+ err("Buffer size bigger than max URB control message\n");
+ return -EIO;
+ }
+
switch (req) {
case B2C2_USB_READ_V8_MEM:
nWaitTime = B2C2_WAIT_FOR_OPERATION_V8READ;
@@ -127,17 +143,32 @@ static int flexcop_usb_v8_memory_req(struct flexcop_usb *fc_usb,
deb_v8("v8mem: %02x %02x %04x %04x, len: %d\n", request_type, req,
wAddress, wIndex, buflen);
- len = usb_control_msg(fc_usb->udev, pipe,
+ mutex_lock(&fc_usb->data_mutex);
+
+ if ((request_type & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT)
+ memcpy(fc_usb->data, pbBuffer, buflen);
+
+ ret = usb_control_msg(fc_usb->udev, pipe,
req,
request_type,
wAddress,
wIndex,
- pbBuffer,
+ fc_usb->data,
buflen,
nWaitTime * HZ);
+ if (ret != buflen)
+ ret = -EIO;
+
+ if (ret >= 0) {
+ ret = 0;
+ if ((request_type & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
+ memcpy(pbBuffer, fc_usb->data, buflen);
+ }
- debug_dump(pbBuffer, len, deb_v8);
- return len == buflen ? 0 : -EIO;
+ mutex_unlock(&fc_usb->data_mutex);
+
+ debug_dump(pbBuffer, ret, deb_v8);
+ return ret;
}
#define bytes_left_to_read_on_page(paddr,buflen) \
@@ -196,29 +227,6 @@ static int flexcop_usb_get_mac_addr(struct flexcop_device *fc, int extended)
fc->dvb_adapter.proposed_mac, 6);
}
-#if 0
-static int flexcop_usb_utility_req(struct flexcop_usb *fc_usb, int set,
- flexcop_usb_utility_function_t func, u8 extra, u16 wIndex,
- u16 buflen, u8 *pvBuffer)
-{
- u16 wValue;
- u8 request_type = (set ? USB_DIR_OUT : USB_DIR_IN) | USB_TYPE_VENDOR;
- int nWaitTime = 2,
- pipe = set ? B2C2_USB_CTRL_PIPE_OUT : B2C2_USB_CTRL_PIPE_IN, len;
- wValue = (func << 8) | extra;
-
- len = usb_control_msg(fc_usb->udev,pipe,
- B2C2_USB_UTILITY,
- request_type,
- wValue,
- wIndex,
- pvBuffer,
- buflen,
- nWaitTime * HZ);
- return len == buflen ? 0 : -EIO;
-}
-#endif
-
/* usb i2c stuff */
static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,
flexcop_usb_request_t req, flexcop_usb_i2c_function_t func,
@@ -226,9 +234,14 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,
{
struct flexcop_usb *fc_usb = i2c->fc->bus_specific;
u16 wValue, wIndex;
- int nWaitTime,pipe,len;
+ int nWaitTime, pipe, ret;
u8 request_type = USB_TYPE_VENDOR;
+ if (buflen > sizeof(fc_usb->data)) {
+ err("Buffer size bigger than max URB control message\n");
+ return -EIO;
+ }
+
switch (func) {
case USB_FUNC_I2C_WRITE:
case USB_FUNC_I2C_MULTIWRITE:
@@ -257,15 +270,32 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,
wValue & 0xff, wValue >> 8,
wIndex & 0xff, wIndex >> 8);
- len = usb_control_msg(fc_usb->udev,pipe,
+ mutex_lock(&fc_usb->data_mutex);
+
+ if ((request_type & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT)
+ memcpy(fc_usb->data, buf, buflen);
+
+ ret = usb_control_msg(fc_usb->udev, pipe,
req,
request_type,
wValue,
wIndex,
- buf,
+ fc_usb->data,
buflen,
nWaitTime * HZ);
- return len == buflen ? 0 : -EREMOTEIO;
+
+ if (ret != buflen)
+ ret = -EIO;
+
+ if (ret >= 0) {
+ ret = 0;
+ if ((request_type & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
+ memcpy(buf, fc_usb->data, buflen);
+ }
+
+ mutex_unlock(&fc_usb->data_mutex);
+
+ return 0;
}
/* actual bus specific access functions,
@@ -516,6 +546,7 @@ static int flexcop_usb_probe(struct usb_interface *intf,
/* general flexcop init */
fc_usb = fc->bus_specific;
fc_usb->fc_dev = fc;
+ mutex_init(&fc_usb->data_mutex);
fc->read_ibi_reg = flexcop_usb_read_ibi_reg;
fc->write_ibi_reg = flexcop_usb_write_ibi_reg;
diff --git a/drivers/media/usb/b2c2/flexcop-usb.h b/drivers/media/usb/b2c2/flexcop-usb.h
index 92529a9c4475..25ad43166e78 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.h
+++ b/drivers/media/usb/b2c2/flexcop-usb.h
@@ -29,6 +29,10 @@ struct flexcop_usb {
u8 tmp_buffer[1023+190];
int tmp_buffer_length;
+
+ /* for URB control messages */
+ u8 data[80];
+ struct mutex data_mutex;
};
#if 0
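The flexcop-usb changes above follow a pattern that recurs through this series: buffers passed to usb_control_msg() may be handed to the DMA engine, so they must live in kmalloc'd memory (here, the driver's private state) rather than on the stack, and a mutex serializes users of that shared buffer. A minimal sketch of the pattern, using hypothetical names (my_state, my_read_reg) rather than the flexcop functions, with the length check and locked bounce copy done the same way:

#include <linux/usb.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/errno.h>

struct my_state {
	u8 data[64];			/* DMA-safe: embedded in kmalloc'd driver state */
	struct mutex data_mutex;	/* serializes users of the shared buffer */
};

static int my_read_reg(struct usb_device *udev, struct my_state *st,
		       u8 request, u16 value, u8 *out, u16 len)
{
	int ret;

	if (len > sizeof(st->data))
		return -EIO;

	mutex_lock(&st->data_mutex);
	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
			      USB_DIR_IN | USB_TYPE_VENDOR, value, 0,
			      st->data, len, 1000);
	if (ret < 0)
		goto unlock;
	if (ret != len) {
		ret = -EIO;
		goto unlock;
	}
	memcpy(out, st->data, len);
	ret = 0;
unlock:
	mutex_unlock(&st->data_mutex);
	return ret;
}

mutex_init(&st->data_mutex) is assumed to run once at probe time, as the flexcop_usb_probe() hunk above does for data_mutex.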
diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
index 13620cdf0599..e9100a235831 100644
--- a/drivers/media/usb/cpia2/cpia2_usb.c
+++ b/drivers/media/usb/cpia2/cpia2_usb.c
@@ -545,18 +545,30 @@ static void free_sbufs(struct camera_data *cam)
static int write_packet(struct usb_device *udev,
u8 request, u8 * registers, u16 start, size_t size)
{
+ unsigned char *buf;
+ int ret;
+
if (!registers || size <= 0)
return -EINVAL;
- return usb_control_msg(udev,
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf, registers, size);
+
+ ret = usb_control_msg(udev,
usb_sndctrlpipe(udev, 0),
request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE,
start, /* value */
0, /* index */
- registers, /* buffer */
+ buf, /* buffer */
size,
HZ);
+
+ kfree(buf);
+ return ret;
}
/****************************************************************************
@@ -567,18 +579,32 @@ static int write_packet(struct usb_device *udev,
static int read_packet(struct usb_device *udev,
u8 request, u8 * registers, u16 start, size_t size)
{
+ unsigned char *buf;
+ int ret;
+
if (!registers || size <= 0)
return -EINVAL;
- return usb_control_msg(udev,
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(udev,
usb_rcvctrlpipe(udev, 0),
request,
USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_DEVICE,
start, /* value */
0, /* index */
- registers, /* buffer */
+ buf, /* buffer */
size,
HZ);
+
+ if (ret >= 0)
+ memcpy(registers, buf, size);
+
+ kfree(buf);
+
+ return ret;
}
/******************************************************************************
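Where no long-lived private state is convenient, the series instead bounces the caller's buffer through a temporary kmalloc() allocation on every call, as in the cpia2 write_packet()/read_packet() changes above. A minimal sketch of that variant (my_write_vendor_req is hypothetical; the timeout value is arbitrary):

#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Copy 'regs' into a heap bounce buffer before handing it to the USB core. */
static int my_write_vendor_req(struct usb_device *udev, u8 request,
			       const u8 *regs, u16 start, size_t size)
{
	u8 *buf;
	int ret;

	buf = kmemdup(regs, size, GFP_KERNEL);	/* kmalloc + memcpy in one step */
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
			      USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      start, 0, buf, size, 1000);

	kfree(buf);
	return ret < 0 ? ret : 0;
}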
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
index efa782ed6e2d..b257780fb380 100644
--- a/drivers/media/usb/dvb-usb/af9005.c
+++ b/drivers/media/usb/dvb-usb/af9005.c
@@ -52,17 +52,16 @@ u8 regmask[8] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7f, 0xff };
struct af9005_device_state {
u8 sequence;
int led_state;
+ unsigned char data[256];
+ struct mutex data_mutex;
};
static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg,
int readwrite, int type, u8 * values, int len)
{
struct af9005_device_state *st = d->priv;
- u8 obuf[16] = { 0 };
- u8 ibuf[17] = { 0 };
- u8 command;
- int i;
- int ret;
+ u8 command, seq;
+ int i, ret;
if (len < 1) {
err("generic read/write, less than 1 byte. Makes no sense.");
@@ -73,16 +72,17 @@ static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg,
return -EINVAL;
}
- obuf[0] = 14; /* rest of buffer length low */
- obuf[1] = 0; /* rest of buffer length high */
+ mutex_lock(&st->data_mutex);
+ st->data[0] = 14; /* rest of buffer length low */
+ st->data[1] = 0; /* rest of buffer length high */
- obuf[2] = AF9005_REGISTER_RW; /* register operation */
- obuf[3] = 12; /* rest of buffer length */
+ st->data[2] = AF9005_REGISTER_RW; /* register operation */
+ st->data[3] = 12; /* rest of buffer length */
- obuf[4] = st->sequence++; /* sequence number */
+ st->data[4] = seq = st->sequence++; /* sequence number */
- obuf[5] = (u8) (reg >> 8); /* register address */
- obuf[6] = (u8) (reg & 0xff);
+ st->data[5] = (u8) (reg >> 8); /* register address */
+ st->data[6] = (u8) (reg & 0xff);
if (type == AF9005_OFDM_REG) {
command = AF9005_CMD_OFDM_REG;
@@ -96,51 +96,52 @@ static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg,
command |= readwrite;
if (readwrite == AF9005_CMD_WRITE)
for (i = 0; i < len; i++)
- obuf[8 + i] = values[i];
+ st->data[8 + i] = values[i];
else if (type == AF9005_TUNER_REG)
/* read command for tuner, the first byte contains the i2c address */
- obuf[8] = values[0];
- obuf[7] = command;
+ st->data[8] = values[0];
+ st->data[7] = command;
- ret = dvb_usb_generic_rw(d, obuf, 16, ibuf, 17, 0);
+ ret = dvb_usb_generic_rw(d, st->data, 16, st->data, 17, 0);
if (ret)
- return ret;
+ goto ret;
/* sanity check */
- if (ibuf[2] != AF9005_REGISTER_RW_ACK) {
+ if (st->data[2] != AF9005_REGISTER_RW_ACK) {
err("generic read/write, wrong reply code.");
- return -EIO;
+ ret = -EIO;
+ goto ret;
}
- if (ibuf[3] != 0x0d) {
+ if (st->data[3] != 0x0d) {
err("generic read/write, wrong length in reply.");
- return -EIO;
+ ret = -EIO;
+ goto ret;
}
- if (ibuf[4] != obuf[4]) {
+ if (st->data[4] != seq) {
err("generic read/write, wrong sequence in reply.");
- return -EIO;
+ ret = -EIO;
+ goto ret;
}
/*
- Windows driver doesn't check these fields, in fact sometimes
- the register in the reply is different that what has been sent
-
- if (ibuf[5] != obuf[5] || ibuf[6] != obuf[6]) {
- err("generic read/write, wrong register in reply.");
- return -EIO;
- }
- if (ibuf[7] != command) {
- err("generic read/write wrong command in reply.");
- return -EIO;
- }
+ * In theory, both input and output buffers should have
+ * identical values for st->data[5] to st->data[8].
+ * However, the Windows driver doesn't check these fields; in fact,
+ * sometimes the register in the reply is different from what
+ * has been sent
*/
- if (ibuf[16] != 0x01) {
+ if (st->data[16] != 0x01) {
err("generic read/write wrong status code in reply.");
- return -EIO;
+ ret = -EIO;
+ goto ret;
}
+
if (readwrite == AF9005_CMD_READ)
for (i = 0; i < len; i++)
- values[i] = ibuf[8 + i];
+ values[i] = st->data[8 + i];
- return 0;
+ret:
+ mutex_unlock(&st->data_mutex);
+ return ret;
}
@@ -464,8 +465,7 @@ int af9005_send_command(struct dvb_usb_device *d, u8 command, u8 * wbuf,
struct af9005_device_state *st = d->priv;
int ret, i, packet_len;
- u8 buf[64];
- u8 ibuf[64];
+ u8 seq;
if (wlen < 0) {
err("send command, wlen less than 0 bytes. Makes no sense.");
@@ -480,94 +480,97 @@ int af9005_send_command(struct dvb_usb_device *d, u8 command, u8 * wbuf,
return -EINVAL;
}
packet_len = wlen + 5;
- buf[0] = (u8) (packet_len & 0xff);
- buf[1] = (u8) ((packet_len & 0xff00) >> 8);
-
- buf[2] = 0x26; /* packet type */
- buf[3] = wlen + 3;
- buf[4] = st->sequence++;
- buf[5] = command;
- buf[6] = wlen;
+
+ mutex_lock(&st->data_mutex);
+
+ st->data[0] = (u8) (packet_len & 0xff);
+ st->data[1] = (u8) ((packet_len & 0xff00) >> 8);
+
+ st->data[2] = 0x26; /* packet type */
+ st->data[3] = wlen + 3;
+ st->data[4] = seq = st->sequence++;
+ st->data[5] = command;
+ st->data[6] = wlen;
for (i = 0; i < wlen; i++)
- buf[7 + i] = wbuf[i];
- ret = dvb_usb_generic_rw(d, buf, wlen + 7, ibuf, rlen + 7, 0);
- if (ret)
- return ret;
- if (ibuf[2] != 0x27) {
+ st->data[7 + i] = wbuf[i];
+ ret = dvb_usb_generic_rw(d, st->data, wlen + 7, st->data, rlen + 7, 0);
+ if (st->data[2] != 0x27) {
err("send command, wrong reply code.");
- return -EIO;
- }
- if (ibuf[4] != buf[4]) {
+ ret = -EIO;
+ } else if (st->data[4] != seq) {
err("send command, wrong sequence in reply.");
- return -EIO;
- }
- if (ibuf[5] != 0x01) {
+ ret = -EIO;
+ } else if (st->data[5] != 0x01) {
err("send command, wrong status code in reply.");
- return -EIO;
- }
- if (ibuf[6] != rlen) {
+ ret = -EIO;
+ } else if (st->data[6] != rlen) {
err("send command, invalid data length in reply.");
- return -EIO;
+ ret = -EIO;
}
- for (i = 0; i < rlen; i++)
- rbuf[i] = ibuf[i + 7];
- return 0;
+ if (!ret) {
+ for (i = 0; i < rlen; i++)
+ rbuf[i] = st->data[i + 7];
+ }
+
+ mutex_unlock(&st->data_mutex);
+ return ret;
}
int af9005_read_eeprom(struct dvb_usb_device *d, u8 address, u8 * values,
int len)
{
struct af9005_device_state *st = d->priv;
- u8 obuf[16], ibuf[14];
+ u8 seq;
int ret, i;
- memset(obuf, 0, sizeof(obuf));
- memset(ibuf, 0, sizeof(ibuf));
+ mutex_lock(&st->data_mutex);
- obuf[0] = 14; /* length of rest of packet low */
- obuf[1] = 0; /* length of rest of packer high */
+ memset(st->data, 0, sizeof(st->data));
- obuf[2] = 0x2a; /* read/write eeprom */
+ st->data[0] = 14; /* length of rest of packet low */
+ st->data[1] = 0; /* length of rest of packet high */
- obuf[3] = 12; /* size */
+ st->data[2] = 0x2a; /* read/write eeprom */
- obuf[4] = st->sequence++;
+ st->data[3] = 12; /* size */
- obuf[5] = 0; /* read */
+ st->data[4] = seq = st->sequence++;
- obuf[6] = len;
- obuf[7] = address;
- ret = dvb_usb_generic_rw(d, obuf, 16, ibuf, 14, 0);
- if (ret)
- return ret;
- if (ibuf[2] != 0x2b) {
+ st->data[5] = 0; /* read */
+
+ st->data[6] = len;
+ st->data[7] = address;
+ ret = dvb_usb_generic_rw(d, st->data, 16, st->data, 14, 0);
+ if (st->data[2] != 0x2b) {
err("Read eeprom, invalid reply code");
- return -EIO;
- }
- if (ibuf[3] != 10) {
+ ret = -EIO;
+ } else if (st->data[3] != 10) {
err("Read eeprom, invalid reply length");
- return -EIO;
- }
- if (ibuf[4] != obuf[4]) {
+ ret = -EIO;
+ } else if (st->data[4] != seq) {
err("Read eeprom, wrong sequence in reply ");
- return -EIO;
- }
- if (ibuf[5] != 1) {
+ ret = -EIO;
+ } else if (st->data[5] != 1) {
err("Read eeprom, wrong status in reply ");
- return -EIO;
+ ret = -EIO;
}
- for (i = 0; i < len; i++) {
- values[i] = ibuf[6 + i];
+
+ if (!ret) {
+ for (i = 0; i < len; i++)
+ values[i] = st->data[6 + i];
}
- return 0;
+ mutex_unlock(&st->data_mutex);
+
+ return ret;
}
-static int af9005_boot_packet(struct usb_device *udev, int type, u8 * reply)
+static int af9005_boot_packet(struct usb_device *udev, int type, u8 *reply,
+ u8 *buf, int size)
{
- u8 buf[FW_BULKOUT_SIZE + 2];
u16 checksum;
int act_len, i, ret;
- memset(buf, 0, sizeof(buf));
+
+ memset(buf, 0, size);
buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff);
buf[1] = (u8) ((FW_BULKOUT_SIZE >> 8) & 0xff);
switch (type) {
@@ -720,15 +723,21 @@ static int af9005_download_firmware(struct usb_device *udev, const struct firmwa
{
int i, packets, ret, act_len;
- u8 buf[FW_BULKOUT_SIZE + 2];
+ u8 *buf;
u8 reply;
- ret = af9005_boot_packet(udev, FW_CONFIG, &reply);
+ buf = kmalloc(FW_BULKOUT_SIZE + 2, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = af9005_boot_packet(udev, FW_CONFIG, &reply, buf,
+ FW_BULKOUT_SIZE + 2);
if (ret)
- return ret;
+ goto err;
if (reply != 0x01) {
err("before downloading firmware, FW_CONFIG expected 0x01, received 0x%x", reply);
- return -EIO;
+ ret = -EIO;
+ goto err;
}
packets = fw->size / FW_BULKOUT_SIZE;
buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff);
@@ -743,28 +752,35 @@ static int af9005_download_firmware(struct usb_device *udev, const struct firmwa
buf, FW_BULKOUT_SIZE + 2, &act_len, 1000);
if (ret) {
err("firmware download failed at packet %d with code %d", i, ret);
- return ret;
+ goto err;
}
}
- ret = af9005_boot_packet(udev, FW_CONFIRM, &reply);
+ ret = af9005_boot_packet(udev, FW_CONFIRM, &reply,
+ buf, FW_BULKOUT_SIZE + 2);
if (ret)
- return ret;
+ goto err;
if (reply != (u8) (packets & 0xff)) {
err("after downloading firmware, FW_CONFIRM expected 0x%x, received 0x%x", packets & 0xff, reply);
- return -EIO;
+ ret = -EIO;
+ goto err;
}
- ret = af9005_boot_packet(udev, FW_BOOT, &reply);
+ ret = af9005_boot_packet(udev, FW_BOOT, &reply, buf,
+ FW_BULKOUT_SIZE + 2);
if (ret)
- return ret;
- ret = af9005_boot_packet(udev, FW_CONFIG, &reply);
+ goto err;
+ ret = af9005_boot_packet(udev, FW_CONFIG, &reply, buf,
+ FW_BULKOUT_SIZE + 2);
if (ret)
- return ret;
+ goto err;
if (reply != 0x02) {
err("after downloading firmware, FW_CONFIG expected 0x02, received 0x%x", reply);
- return -EIO;
+ ret = -EIO;
+ goto err;
}
- return 0;
+err:
+ kfree(buf);
+ return ret;
}
@@ -823,53 +839,59 @@ static int af9005_rc_query(struct dvb_usb_device *d, u32 * event, int *state)
{
struct af9005_device_state *st = d->priv;
int ret, len;
-
- u8 obuf[5];
- u8 ibuf[256];
+ u8 seq;
*state = REMOTE_NO_KEY_PRESSED;
if (rc_decode == NULL) {
/* it shouldn't never come here */
return 0;
}
+
+ mutex_lock(&st->data_mutex);
+
/* deb_info("rc_query\n"); */
- obuf[0] = 3; /* rest of packet length low */
- obuf[1] = 0; /* rest of packet lentgh high */
- obuf[2] = 0x40; /* read remote */
- obuf[3] = 1; /* rest of packet length */
- obuf[4] = st->sequence++; /* sequence number */
- ret = dvb_usb_generic_rw(d, obuf, 5, ibuf, 256, 0);
+ st->data[0] = 3; /* rest of packet length low */
+ st->data[1] = 0; /* rest of packet length high */
+ st->data[2] = 0x40; /* read remote */
+ st->data[3] = 1; /* rest of packet length */
+ st->data[4] = seq = st->sequence++; /* sequence number */
+ ret = dvb_usb_generic_rw(d, st->data, 5, st->data, 256, 0);
if (ret) {
err("rc query failed");
- return ret;
+ goto ret;
}
- if (ibuf[2] != 0x41) {
+ if (st->data[2] != 0x41) {
err("rc query bad header.");
- return -EIO;
- }
- if (ibuf[4] != obuf[4]) {
+ ret = -EIO;
+ goto ret;
+ } else if (st->data[4] != seq) {
err("rc query bad sequence.");
- return -EIO;
+ ret = -EIO;
+ goto ret;
}
- len = ibuf[5];
+ len = st->data[5];
if (len > 246) {
err("rc query invalid length");
- return -EIO;
+ ret = -EIO;
+ goto ret;
}
if (len > 0) {
deb_rc("rc data (%d) ", len);
- debug_dump((ibuf + 6), len, deb_rc);
- ret = rc_decode(d, &ibuf[6], len, event, state);
+ debug_dump((st->data + 6), len, deb_rc);
+ ret = rc_decode(d, &st->data[6], len, event, state);
if (ret) {
err("rc_decode failed");
- return ret;
+ goto ret;
} else {
deb_rc("rc_decode state %x event %x\n", *state, *event);
if (*state == REMOTE_KEY_REPEAT)
*event = d->last_event;
}
}
- return 0;
+
+ret:
+ mutex_unlock(&st->data_mutex);
+ return ret;
}
static int af9005_power_ctrl(struct dvb_usb_device *d, int onoff)
@@ -953,10 +975,16 @@ static int af9005_identify_state(struct usb_device *udev,
int *cold)
{
int ret;
- u8 reply;
- ret = af9005_boot_packet(udev, FW_CONFIG, &reply);
+ u8 reply, *buf;
+
+ buf = kmalloc(FW_BULKOUT_SIZE + 2, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = af9005_boot_packet(udev, FW_CONFIG, &reply,
+ buf, FW_BULKOUT_SIZE + 2);
if (ret)
- return ret;
+ goto err;
deb_info("result of FW_CONFIG in identify state %d\n", reply);
if (reply == 0x01)
*cold = 1;
@@ -965,7 +993,10 @@ static int af9005_identify_state(struct usb_device *udev,
else
return -EIO;
deb_info("Identify state cold = %d\n", *cold);
- return 0;
+
+err:
+ kfree(buf);
+ return ret;
}
static struct dvb_usb_device_properties af9005_properties;
@@ -973,8 +1004,20 @@ static struct dvb_usb_device_properties af9005_properties;
static int af9005_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- return dvb_usb_device_init(intf, &af9005_properties,
- THIS_MODULE, NULL, adapter_nr);
+ struct dvb_usb_device *d;
+ struct af9005_device_state *st;
+ int ret;
+
+ ret = dvb_usb_device_init(intf, &af9005_properties,
+ THIS_MODULE, &d, adapter_nr);
+
+ if (ret < 0)
+ return ret;
+
+ st = d->priv;
+ mutex_init(&st->data_mutex);
+
+ return 0;
}
enum af9005_usb_table_entry {
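For the dvb-usb drivers the same idea is expressed through the framework's private state: the buffer and its mutex live in the device state struct, the struct is sized via .size_of_priv in the properties, and the mutex is initialized right after dvb_usb_device_init() in probe, as the af9005 hunks above do. A compressed sketch with hypothetical names (my_state, my_cmd, my_probe, my_properties); it assumes the dvb-usb framework headers and an adapter_nr/properties definition like the drivers above:

struct my_state {
	unsigned char data[80];		/* shared URB buffer inside d->priv */
	struct mutex data_mutex;
};

static int my_cmd(struct dvb_usb_device *d, u8 cmd)
{
	struct my_state *st = d->priv;
	int ret;

	mutex_lock(&st->data_mutex);
	st->data[0] = cmd;
	/* reuse the same DMA-safe buffer for both directions */
	ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 3, 0);
	mutex_unlock(&st->data_mutex);

	return ret;
}

static int my_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	struct dvb_usb_device *d;
	struct my_state *st;
	int ret;

	ret = dvb_usb_device_init(intf, &my_properties, THIS_MODULE, &d,
				  adapter_nr);
	if (ret < 0)
		return ret;

	st = d->priv;
	mutex_init(&st->data_mutex);
	return 0;
}

The properties struct would carry .size_of_priv = sizeof(struct my_state) so that d->priv is large enough, matching the .size_of_priv additions later in this series.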
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
index 9fd1527494eb..8ac825413d5a 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
@@ -41,6 +41,8 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
struct cinergyt2_state {
u8 rc_counter;
+ unsigned char data[64];
+ struct mutex data_mutex;
};
/* We are missing a release hook with usb_device data */
@@ -50,38 +52,57 @@ static struct dvb_usb_device_properties cinergyt2_properties;
static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
{
- char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
- char result[64];
- return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
- sizeof(result), 0);
+ struct dvb_usb_device *d = adap->dev;
+ struct cinergyt2_state *st = d->priv;
+ int ret;
+
+ mutex_lock(&st->data_mutex);
+ st->data[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
+ st->data[1] = enable ? 1 : 0;
+
+ ret = dvb_usb_generic_rw(d, st->data, 2, st->data, 64, 0);
+ mutex_unlock(&st->data_mutex);
+
+ return ret;
}
static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
{
- char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
- char state[3];
- return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
+ struct cinergyt2_state *st = d->priv;
+ int ret;
+
+ mutex_lock(&st->data_mutex);
+ st->data[0] = CINERGYT2_EP1_SLEEP_MODE;
+ st->data[1] = enable ? 0 : 1;
+
+ ret = dvb_usb_generic_rw(d, st->data, 2, st->data, 3, 0);
+ mutex_unlock(&st->data_mutex);
+
+ return ret;
}
static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
{
- char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
- char state[3];
+ struct dvb_usb_device *d = adap->dev;
+ struct cinergyt2_state *st = d->priv;
int ret;
adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
- ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
- sizeof(state), 0);
+ mutex_lock(&st->data_mutex);
+ st->data[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
+
+ ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 3, 0);
if (ret < 0) {
deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
"state info\n");
}
+ mutex_unlock(&st->data_mutex);
/* Copy this pointer as we are gonna need it in the release phase */
cinergyt2_usb_device = adap->dev;
- return 0;
+ return ret;
}
static struct rc_map_table rc_map_cinergyt2_table[] = {
@@ -141,13 +162,18 @@ static int repeatable_keys[] = {
static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
{
struct cinergyt2_state *st = d->priv;
- u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
- int i;
+ int i, ret;
*state = REMOTE_NO_KEY_PRESSED;
- dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
- if (key[4] == 0xff) {
+ mutex_lock(&st->data_mutex);
+ st->data[0] = CINERGYT2_EP1_GET_RC_EVENTS;
+
+ ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 5, 0);
+ if (ret < 0)
+ goto ret;
+
+ if (st->data[4] == 0xff) {
/* key repeat */
st->rc_counter++;
if (st->rc_counter > RC_REPEAT_DELAY) {
@@ -157,31 +183,45 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
*event = d->last_event;
deb_rc("repeat key, event %x\n",
*event);
- return 0;
+ goto ret;
}
}
deb_rc("repeated key (non repeatable)\n");
}
- return 0;
+ goto ret;
}
/* hack to pass checksum on the custom field */
- key[2] = ~key[1];
- dvb_usb_nec_rc_key_to_event(d, key, event, state);
- if (key[0] != 0) {
+ st->data[2] = ~st->data[1];
+ dvb_usb_nec_rc_key_to_event(d, st->data, event, state);
+ if (st->data[0] != 0) {
if (*event != d->last_event)
st->rc_counter = 0;
- deb_rc("key: %*ph\n", 5, key);
+ deb_rc("key: %*ph\n", 5, st->data);
}
- return 0;
+
+ret:
+ mutex_unlock(&st->data_mutex);
+ return ret;
}
static int cinergyt2_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- return dvb_usb_device_init(intf, &cinergyt2_properties,
- THIS_MODULE, NULL, adapter_nr);
+ struct dvb_usb_device *d;
+ struct cinergyt2_state *st;
+ int ret;
+
+ ret = dvb_usb_device_init(intf, &cinergyt2_properties,
+ THIS_MODULE, &d, adapter_nr);
+ if (ret < 0)
+ return ret;
+
+ st = d->priv;
+ mutex_init(&st->data_mutex);
+
+ return 0;
}
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
index b3ec743a7a2e..2d29b4174dba 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
@@ -139,32 +139,42 @@ static uint16_t compute_tps(struct dtv_frontend_properties *op)
struct cinergyt2_fe_state {
struct dvb_frontend fe;
struct dvb_usb_device *d;
+
+ unsigned char data[64];
+ struct mutex data_mutex;
+
+ struct dvbt_get_status_msg status;
};
static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
enum fe_status *status)
{
struct cinergyt2_fe_state *state = fe->demodulator_priv;
- struct dvbt_get_status_msg result;
- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
int ret;
- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
- sizeof(result), 0);
+ mutex_lock(&state->data_mutex);
+ state->data[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
+
+ ret = dvb_usb_generic_rw(state->d, state->data, 1,
+ state->data, sizeof(state->status), 0);
+ if (!ret)
+ memcpy(&state->status, state->data, sizeof(state->status));
+ mutex_unlock(&state->data_mutex);
+
if (ret < 0)
return ret;
*status = 0;
- if (0xffff - le16_to_cpu(result.gain) > 30)
+ if (0xffff - le16_to_cpu(state->status.gain) > 30)
*status |= FE_HAS_SIGNAL;
- if (result.lock_bits & (1 << 6))
+ if (state->status.lock_bits & (1 << 6))
*status |= FE_HAS_LOCK;
- if (result.lock_bits & (1 << 5))
+ if (state->status.lock_bits & (1 << 5))
*status |= FE_HAS_SYNC;
- if (result.lock_bits & (1 << 4))
+ if (state->status.lock_bits & (1 << 4))
*status |= FE_HAS_CARRIER;
- if (result.lock_bits & (1 << 1))
+ if (state->status.lock_bits & (1 << 1))
*status |= FE_HAS_VITERBI;
if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
@@ -177,34 +187,16 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
{
struct cinergyt2_fe_state *state = fe->demodulator_priv;
- struct dvbt_get_status_msg status;
- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
- int ret;
-
- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
- sizeof(status), 0);
- if (ret < 0)
- return ret;
- *ber = le32_to_cpu(status.viterbi_error_rate);
+ *ber = le32_to_cpu(state->status.viterbi_error_rate);
return 0;
}
static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
{
struct cinergyt2_fe_state *state = fe->demodulator_priv;
- struct dvbt_get_status_msg status;
- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
- int ret;
- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
- sizeof(status), 0);
- if (ret < 0) {
- err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
- ret);
- return ret;
- }
- *unc = le32_to_cpu(status.uncorrected_block_count);
+ *unc = le32_to_cpu(state->status.uncorrected_block_count);
return 0;
}
@@ -212,35 +204,16 @@ static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
u16 *strength)
{
struct cinergyt2_fe_state *state = fe->demodulator_priv;
- struct dvbt_get_status_msg status;
- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
- int ret;
- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
- sizeof(status), 0);
- if (ret < 0) {
- err("cinergyt2_fe_read_signal_strength() Failed!"
- " (Error=%d)\n", ret);
- return ret;
- }
- *strength = (0xffff - le16_to_cpu(status.gain));
+ *strength = (0xffff - le16_to_cpu(state->status.gain));
return 0;
}
static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
{
struct cinergyt2_fe_state *state = fe->demodulator_priv;
- struct dvbt_get_status_msg status;
- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
- int ret;
- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
- sizeof(status), 0);
- if (ret < 0) {
- err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
- return ret;
- }
- *snr = (status.snr << 8) | status.snr;
+ *snr = (state->status.snr << 8) | state->status.snr;
return 0;
}
@@ -266,34 +239,36 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct cinergyt2_fe_state *state = fe->demodulator_priv;
- struct dvbt_set_parameters_msg param;
- char result[2];
+ struct dvbt_set_parameters_msg *param;
int err;
- param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
- param.tps = cpu_to_le16(compute_tps(fep));
- param.freq = cpu_to_le32(fep->frequency / 1000);
- param.flags = 0;
+ mutex_lock(&state->data_mutex);
+
+ param = (void *)state->data;
+ param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
+ param->tps = cpu_to_le16(compute_tps(fep));
+ param->freq = cpu_to_le32(fep->frequency / 1000);
+ param->flags = 0;
switch (fep->bandwidth_hz) {
default:
case 8000000:
- param.bandwidth = 8;
+ param->bandwidth = 8;
break;
case 7000000:
- param.bandwidth = 7;
+ param->bandwidth = 7;
break;
case 6000000:
- param.bandwidth = 6;
+ param->bandwidth = 6;
break;
}
- err = dvb_usb_generic_rw(state->d,
- (char *)&param, sizeof(param),
- result, sizeof(result), 0);
+ err = dvb_usb_generic_rw(state->d, state->data, sizeof(*param),
+ state->data, 2, 0);
if (err < 0)
err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
+ mutex_unlock(&state->data_mutex);
return (err < 0) ? err : 0;
}
@@ -315,6 +290,7 @@ struct dvb_frontend *cinergyt2_fe_attach(struct dvb_usb_device *d)
s->d = d;
memcpy(&s->fe.ops, &cinergyt2_fe_ops, sizeof(struct dvb_frontend_ops));
s->fe.demodulator_priv = s;
+ mutex_init(&s->data_mutex);
return &s->fe;
}
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 907ac01ae297..39772812269d 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -45,9 +45,6 @@
#include "si2168.h"
#include "si2157.h"
-/* Max transfer size done by I2C transfer functions */
-#define MAX_XFER_SIZE 80
-
/* debug */
static int dvb_usb_cxusb_debug;
module_param_named(debug, dvb_usb_cxusb_debug, int, 0644);
@@ -61,23 +58,27 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
static int cxusb_ctrl_msg(struct dvb_usb_device *d,
u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen)
{
- int wo = (rbuf == NULL || rlen == 0); /* write-only */
- u8 sndbuf[MAX_XFER_SIZE];
+ struct cxusb_state *st = d->priv;
+ int ret, wo;
- if (1 + wlen > sizeof(sndbuf)) {
- warn("i2c wr: len=%d is too big!\n",
- wlen);
+ if (1 + wlen > MAX_XFER_SIZE) {
+ warn("i2c wr: len=%d is too big!\n", wlen);
return -EOPNOTSUPP;
}
- memset(sndbuf, 0, 1+wlen);
+ wo = (rbuf == NULL || rlen == 0); /* write-only */
- sndbuf[0] = cmd;
- memcpy(&sndbuf[1], wbuf, wlen);
+ mutex_lock(&st->data_mutex);
+ st->data[0] = cmd;
+ memcpy(&st->data[1], wbuf, wlen);
if (wo)
- return dvb_usb_generic_write(d, sndbuf, 1+wlen);
+ ret = dvb_usb_generic_write(d, st->data, 1 + wlen);
else
- return dvb_usb_generic_rw(d, sndbuf, 1+wlen, rbuf, rlen, 0);
+ ret = dvb_usb_generic_rw(d, st->data, 1 + wlen,
+ rbuf, rlen, 0);
+
+ mutex_unlock(&st->data_mutex);
+ return ret;
}
/* GPIO */
@@ -1460,36 +1461,43 @@ static struct dvb_usb_device_properties cxusb_mygica_t230_properties;
static int cxusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
+ struct dvb_usb_device *d;
+ struct cxusb_state *st;
+
if (0 == dvb_usb_device_init(intf, &cxusb_medion_properties,
- THIS_MODULE, NULL, adapter_nr) ||
+ THIS_MODULE, &d, adapter_nr) ||
0 == dvb_usb_device_init(intf, &cxusb_bluebird_lgh064f_properties,
- THIS_MODULE, NULL, adapter_nr) ||
+ THIS_MODULE, &d, adapter_nr) ||
0 == dvb_usb_device_init(intf, &cxusb_bluebird_dee1601_properties,
- THIS_MODULE, NULL, adapter_nr) ||
+ THIS_MODULE, &d, adapter_nr) ||
0 == dvb_usb_device_init(intf, &cxusb_bluebird_lgz201_properties,
- THIS_MODULE, NULL, adapter_nr) ||
+ THIS_MODULE, &d, adapter_nr) ||
0 == dvb_usb_device_init(intf, &cxusb_bluebird_dtt7579_properties,
- THIS_MODULE, NULL, adapter_nr) ||
+ THIS_MODULE, &d, adapter_nr) ||
0 == dvb_usb_device_init(intf, &cxusb_bluebird_dualdig4_properties,
- THIS_MODULE, NULL, adapter_nr) ||
+ THIS_MODULE, &d, adapter_nr) ||
0 == dvb_usb_device_init(intf, &cxusb_bluebird_nano2_properties,
- THIS_MODULE, NULL, adapter_nr) ||
+ THIS_MODULE, &d, adapter_nr) ||
0 == dvb_usb_device_init(intf,
&cxusb_bluebird_nano2_needsfirmware_properties,
- THIS_MODULE, NULL, adapter_nr) ||
+ THIS_MODULE, &d, adapter_nr) ||
0 == dvb_usb_device_init(intf, &cxusb_aver_a868r_properties,
- THIS_MODULE, NULL, adapter_nr) ||
+ THIS_MODULE, &d, adapter_nr) ||
0 == dvb_usb_device_init(intf,
&cxusb_bluebird_dualdig4_rev2_properties,
- THIS_MODULE, NULL, adapter_nr) ||
+ THIS_MODULE, &d, adapter_nr) ||
0 == dvb_usb_device_init(intf, &cxusb_d680_dmb_properties,
- THIS_MODULE, NULL, adapter_nr) ||
+ THIS_MODULE, &d, adapter_nr) ||
0 == dvb_usb_device_init(intf, &cxusb_mygica_d689_properties,
- THIS_MODULE, NULL, adapter_nr) ||
+ THIS_MODULE, &d, adapter_nr) ||
0 == dvb_usb_device_init(intf, &cxusb_mygica_t230_properties,
- THIS_MODULE, NULL, adapter_nr) ||
- 0)
+ THIS_MODULE, &d, adapter_nr) ||
+ 0) {
+ st = d->priv;
+ mutex_init(&st->data_mutex);
+
return 0;
+ }
return -EINVAL;
}
diff --git a/drivers/media/usb/dvb-usb/cxusb.h b/drivers/media/usb/dvb-usb/cxusb.h
index 527ff7905e15..9f3ee0e47d5c 100644
--- a/drivers/media/usb/dvb-usb/cxusb.h
+++ b/drivers/media/usb/dvb-usb/cxusb.h
@@ -28,10 +28,16 @@
#define CMD_ANALOG 0x50
#define CMD_DIGITAL 0x51
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 80
+
struct cxusb_state {
u8 gpio_write_state[3];
struct i2c_client *i2c_client_demod;
struct i2c_client *i2c_client_tuner;
+
+ unsigned char data[MAX_XFER_SIZE];
+ struct mutex data_mutex;
};
#endif
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index f3196658fb70..92d5408684ac 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -213,7 +213,7 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
usb_rcvctrlpipe(d->udev, 0),
REQUEST_NEW_I2C_READ,
USB_TYPE_VENDOR | USB_DIR_IN,
- value, index, msg[i].buf,
+ value, index, st->buf,
msg[i].len,
USB_CTRL_GET_TIMEOUT);
if (result < 0) {
@@ -221,6 +221,14 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
break;
}
+ if (msg[i].len > sizeof(st->buf)) {
+ deb_info("buffer too small to fit %d bytes\n",
+ msg[i].len);
+ return -EIO;
+ }
+
+ memcpy(msg[i].buf, st->buf, msg[i].len);
+
deb_data("<<< ");
debug_dump(msg[i].buf, msg[i].len, deb_data);
@@ -238,6 +246,13 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
/* I2C ctrl + FE bus; */
st->buf[3] = ((gen_mode << 6) & 0xC0) |
((bus_mode << 4) & 0x30);
+
+ if (msg[i].len > sizeof(st->buf) - 4) {
+ deb_info("i2c message to big: %d\n",
+ msg[i].len);
+ return -EIO;
+ }
+
/* The Actual i2c payload */
memcpy(&st->buf[4], msg[i].buf, msg[i].len);
@@ -283,6 +298,11 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap,
/* fill in the address */
st->buf[1] = msg[i].addr << 1;
/* fill the buffer */
+ if (msg[i].len > sizeof(st->buf) - 2) {
+ deb_info("i2c xfer to big: %d\n",
+ msg[i].len);
+ return -EIO;
+ }
memcpy(&st->buf[2], msg[i].buf, msg[i].len);
/* write/read request */
@@ -292,13 +312,20 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap,
/* special thing in the current firmware: when length is zero the read-failed */
len = dib0700_ctrl_rd(d, st->buf, msg[i].len + 2,
- msg[i+1].buf, msg[i+1].len);
+ st->buf, msg[i + 1].len);
if (len <= 0) {
deb_info("I2C read failed on address 0x%02x\n",
msg[i].addr);
break;
}
+ if (msg[i + 1].len > sizeof(st->buf)) {
+ deb_info("i2c xfer buffer to small for %d\n",
+ msg[i].len);
+ return -EIO;
+ }
+ memcpy(msg[i + 1].buf, st->buf, msg[i + 1].len);
+
msg[i+1].len = len;
i++;
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index 0857b56e652c..ef1b8ee75c57 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -508,8 +508,6 @@ static int stk7700ph_tuner_attach(struct dvb_usb_adapter *adap)
#define DEFAULT_RC_INTERVAL 50
-static u8 rc_request[] = { REQUEST_POLL_RC, 0 };
-
/*
* This function is used only when firmware is < 1.20 version. Newer
* firmwares use bulk mode, with functions implemented at dib0700_core,
@@ -517,7 +515,6 @@ static u8 rc_request[] = { REQUEST_POLL_RC, 0 };
*/
static int dib0700_rc_query_old_firmware(struct dvb_usb_device *d)
{
- u8 key[4];
enum rc_type protocol;
u32 scancode;
u8 toggle;
@@ -532,39 +529,43 @@ static int dib0700_rc_query_old_firmware(struct dvb_usb_device *d)
return 0;
}
- i = dib0700_ctrl_rd(d, rc_request, 2, key, 4);
+ st->buf[0] = REQUEST_POLL_RC;
+ st->buf[1] = 0;
+
+ i = dib0700_ctrl_rd(d, st->buf, 2, st->buf, 4);
if (i <= 0) {
err("RC Query Failed");
- return -1;
+ return -EIO;
}
/* losing half of KEY_0 events from Philipps rc5 remotes.. */
- if (key[0] == 0 && key[1] == 0 && key[2] == 0 && key[3] == 0)
+ if (st->buf[0] == 0 && st->buf[1] == 0
+ && st->buf[2] == 0 && st->buf[3] == 0)
return 0;
- /* info("%d: %2X %2X %2X %2X",dvb_usb_dib0700_ir_proto,(int)key[3-2],(int)key[3-3],(int)key[3-1],(int)key[3]); */
+ /* info("%d: %2X %2X %2X %2X",dvb_usb_dib0700_ir_proto,(int)st->buf[3 - 2],(int)st->buf[3 - 3],(int)st->buf[3 - 1],(int)st->buf[3]); */
dib0700_rc_setup(d, NULL); /* reset ir sensor data to prevent false events */
switch (d->props.rc.core.protocol) {
case RC_BIT_NEC:
/* NEC protocol sends repeat code as 0 0 0 FF */
- if ((key[3-2] == 0x00) && (key[3-3] == 0x00) &&
- (key[3] == 0xff)) {
+ if ((st->buf[3 - 2] == 0x00) && (st->buf[3 - 3] == 0x00) &&
+ (st->buf[3] == 0xff)) {
rc_repeat(d->rc_dev);
return 0;
}
protocol = RC_TYPE_NEC;
- scancode = RC_SCANCODE_NEC(key[3-2], key[3-3]);
+ scancode = RC_SCANCODE_NEC(st->buf[3 - 2], st->buf[3 - 3]);
toggle = 0;
break;
default:
/* RC-5 protocol changes toggle bit on new keypress */
protocol = RC_TYPE_RC5;
- scancode = RC_SCANCODE_RC5(key[3-2], key[3-3]);
- toggle = key[3-1];
+ scancode = RC_SCANCODE_RC5(st->buf[3 - 2], st->buf[3 - 3]);
+ toggle = st->buf[3 - 1];
break;
}
diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
index 18ed3bfbb5e2..de3ee2547479 100644
--- a/drivers/media/usb/dvb-usb/dibusb-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-common.c
@@ -62,72 +62,117 @@ EXPORT_SYMBOL(dibusb_pid_filter_ctrl);
int dibusb_power_ctrl(struct dvb_usb_device *d, int onoff)
{
- u8 b[3];
+ u8 *b;
int ret;
+
+ b = kmalloc(3, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
b[0] = DIBUSB_REQ_SET_IOCTL;
b[1] = DIBUSB_IOCTL_CMD_POWER_MODE;
b[2] = onoff ? DIBUSB_IOCTL_POWER_WAKEUP : DIBUSB_IOCTL_POWER_SLEEP;
- ret = dvb_usb_generic_write(d,b,3);
+
+ ret = dvb_usb_generic_write(d, b, 3);
+
+ kfree(b);
+
msleep(10);
+
return ret;
}
EXPORT_SYMBOL(dibusb_power_ctrl);
int dibusb2_0_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
- u8 b[3] = { 0 };
int ret;
+ u8 *b;
+
+ b = kmalloc(3, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
if ((ret = dibusb_streaming_ctrl(adap,onoff)) < 0)
- return ret;
+ goto ret;
if (onoff) {
b[0] = DIBUSB_REQ_SET_STREAMING_MODE;
b[1] = 0x00;
- if ((ret = dvb_usb_generic_write(adap->dev,b,2)) < 0)
- return ret;
+ ret = dvb_usb_generic_write(adap->dev, b, 2);
+ if (ret < 0)
+ goto ret;
}
b[0] = DIBUSB_REQ_SET_IOCTL;
b[1] = onoff ? DIBUSB_IOCTL_CMD_ENABLE_STREAM : DIBUSB_IOCTL_CMD_DISABLE_STREAM;
- return dvb_usb_generic_write(adap->dev,b,3);
+ ret = dvb_usb_generic_write(adap->dev, b, 3);
+
+ret:
+ kfree(b);
+ return ret;
}
EXPORT_SYMBOL(dibusb2_0_streaming_ctrl);
int dibusb2_0_power_ctrl(struct dvb_usb_device *d, int onoff)
{
- if (onoff) {
- u8 b[3] = { DIBUSB_REQ_SET_IOCTL, DIBUSB_IOCTL_CMD_POWER_MODE, DIBUSB_IOCTL_POWER_WAKEUP };
- return dvb_usb_generic_write(d,b,3);
- } else
+ u8 *b;
+ int ret;
+
+ if (!onoff)
return 0;
+
+ b = kmalloc(3, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
+ b[0] = DIBUSB_REQ_SET_IOCTL;
+ b[1] = DIBUSB_IOCTL_CMD_POWER_MODE;
+ b[2] = DIBUSB_IOCTL_POWER_WAKEUP;
+
+ ret = dvb_usb_generic_write(d, b, 3);
+
+ kfree(b);
+
+ return ret;
}
EXPORT_SYMBOL(dibusb2_0_power_ctrl);
static int dibusb_i2c_msg(struct dvb_usb_device *d, u8 addr,
u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
{
- u8 sndbuf[MAX_XFER_SIZE]; /* lead(1) devaddr,direction(1) addr(2) data(wlen) (len(2) (when reading)) */
+ u8 *sndbuf;
+ int ret, wo, len;
+
/* write only ? */
- int wo = (rbuf == NULL || rlen == 0),
- len = 2 + wlen + (wo ? 0 : 2);
+ wo = (rbuf == NULL || rlen == 0);
+
+ len = 2 + wlen + (wo ? 0 : 2);
+
+ sndbuf = kmalloc(MAX_XFER_SIZE, GFP_KERNEL);
+ if (!sndbuf)
+ return -ENOMEM;
- if (4 + wlen > sizeof(sndbuf)) {
+ if (4 + wlen > MAX_XFER_SIZE) {
warn("i2c wr: len=%d is too big!\n", wlen);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto ret;
}
sndbuf[0] = wo ? DIBUSB_REQ_I2C_WRITE : DIBUSB_REQ_I2C_READ;
sndbuf[1] = (addr << 1) | (wo ? 0 : 1);
- memcpy(&sndbuf[2],wbuf,wlen);
+ memcpy(&sndbuf[2], wbuf, wlen);
if (!wo) {
- sndbuf[wlen+2] = (rlen >> 8) & 0xff;
- sndbuf[wlen+3] = rlen & 0xff;
+ sndbuf[wlen + 2] = (rlen >> 8) & 0xff;
+ sndbuf[wlen + 3] = rlen & 0xff;
}
- return dvb_usb_generic_rw(d,sndbuf,len,rbuf,rlen,0);
+ ret = dvb_usb_generic_rw(d, sndbuf, len, rbuf, rlen, 0);
+
+ret:
+ kfree(sndbuf);
+ return ret;
}
/*
@@ -319,11 +364,27 @@ EXPORT_SYMBOL(rc_map_dibusb_table);
int dibusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
{
- u8 key[5],cmd = DIBUSB_REQ_POLL_REMOTE;
- dvb_usb_generic_rw(d,&cmd,1,key,5,0);
- dvb_usb_nec_rc_key_to_event(d,key,event,state);
- if (key[0] != 0)
- deb_info("key: %*ph\n", 5, key);
- return 0;
+ u8 *buf;
+ int ret;
+
+ buf = kmalloc(5, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[0] = DIBUSB_REQ_POLL_REMOTE;
+
+ ret = dvb_usb_generic_rw(d, buf, 1, buf, 5, 0);
+ if (ret < 0)
+ goto ret;
+
+ dvb_usb_nec_rc_key_to_event(d, buf, event, state);
+
+ if (buf[0] != 0)
+ deb_info("key: %*ph\n", 5, buf);
+
+ kfree(buf);
+
+ret:
+ return ret;
}
EXPORT_SYMBOL(dibusb_rc_query);
diff --git a/drivers/media/usb/dvb-usb/dibusb.h b/drivers/media/usb/dvb-usb/dibusb.h
index 3f82163d8ab8..697be2a17ade 100644
--- a/drivers/media/usb/dvb-usb/dibusb.h
+++ b/drivers/media/usb/dvb-usb/dibusb.h
@@ -96,6 +96,9 @@
#define DIBUSB_IOCTL_CMD_ENABLE_STREAM 0x01
#define DIBUSB_IOCTL_CMD_DISABLE_STREAM 0x02
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
struct dibusb_state {
struct dib_fe_xfer_ops ops;
int mt2060_present;
diff --git a/drivers/media/usb/dvb-usb/digitv.c b/drivers/media/usb/dvb-usb/digitv.c
index 63134335c994..4284f6984dc1 100644
--- a/drivers/media/usb/dvb-usb/digitv.c
+++ b/drivers/media/usb/dvb-usb/digitv.c
@@ -28,22 +28,26 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
static int digitv_ctrl_msg(struct dvb_usb_device *d,
u8 cmd, u8 vv, u8 *wbuf, int wlen, u8 *rbuf, int rlen)
{
- int wo = (rbuf == NULL || rlen == 0); /* write-only */
- u8 sndbuf[7],rcvbuf[7];
- memset(sndbuf,0,7); memset(rcvbuf,0,7);
+ struct digitv_state *st = d->priv;
+ int ret, wo;
- sndbuf[0] = cmd;
- sndbuf[1] = vv;
- sndbuf[2] = wo ? wlen : rlen;
+ wo = (rbuf == NULL || rlen == 0); /* write-only */
+
+ memset(st->sndbuf, 0, 7);
+ memset(st->rcvbuf, 0, 7);
+
+ st->sndbuf[0] = cmd;
+ st->sndbuf[1] = vv;
+ st->sndbuf[2] = wo ? wlen : rlen;
if (wo) {
- memcpy(&sndbuf[3],wbuf,wlen);
- dvb_usb_generic_write(d,sndbuf,7);
+ memcpy(&st->sndbuf[3], wbuf, wlen);
+ ret = dvb_usb_generic_write(d, st->sndbuf, 7);
} else {
- dvb_usb_generic_rw(d,sndbuf,7,rcvbuf,7,10);
- memcpy(rbuf,&rcvbuf[3],rlen);
+ ret = dvb_usb_generic_rw(d, st->sndbuf, 7, st->rcvbuf, 7, 10);
+ memcpy(rbuf, &st->rcvbuf[3], rlen);
}
- return 0;
+ return ret;
}
/* I2C */
diff --git a/drivers/media/usb/dvb-usb/digitv.h b/drivers/media/usb/dvb-usb/digitv.h
index 908c09f4966b..581e09c25491 100644
--- a/drivers/media/usb/dvb-usb/digitv.h
+++ b/drivers/media/usb/dvb-usb/digitv.h
@@ -5,7 +5,10 @@
#include "dvb-usb.h"
struct digitv_state {
- int is_nxt6000;
+ int is_nxt6000;
+
+ unsigned char sndbuf[7];
+ unsigned char rcvbuf[7];
};
/* protocol (from usblogging and the SDK:
diff --git a/drivers/media/usb/dvb-usb/dtt200u-fe.c b/drivers/media/usb/dvb-usb/dtt200u-fe.c
index c09332bd99cb..f5c042baa254 100644
--- a/drivers/media/usb/dvb-usb/dtt200u-fe.c
+++ b/drivers/media/usb/dvb-usb/dtt200u-fe.c
@@ -18,17 +18,28 @@ struct dtt200u_fe_state {
struct dtv_frontend_properties fep;
struct dvb_frontend frontend;
+
+ unsigned char data[80];
+ struct mutex data_mutex;
};
static int dtt200u_fe_read_status(struct dvb_frontend *fe,
enum fe_status *stat)
{
struct dtt200u_fe_state *state = fe->demodulator_priv;
- u8 st = GET_TUNE_STATUS, b[3];
+ int ret;
+
+ mutex_lock(&state->data_mutex);
+ state->data[0] = GET_TUNE_STATUS;
- dvb_usb_generic_rw(state->d,&st,1,b,3,0);
+ ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 3, 0);
+ if (ret < 0) {
+ *stat = 0;
+ mutex_unlock(&state->data_mutex);
+ return ret;
+ }
- switch (b[0]) {
+ switch (state->data[0]) {
case 0x01:
*stat = FE_HAS_SIGNAL | FE_HAS_CARRIER |
FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
@@ -41,51 +52,86 @@ static int dtt200u_fe_read_status(struct dvb_frontend *fe,
*stat = 0;
break;
}
+ mutex_unlock(&state->data_mutex);
return 0;
}
static int dtt200u_fe_read_ber(struct dvb_frontend* fe, u32 *ber)
{
struct dtt200u_fe_state *state = fe->demodulator_priv;
- u8 bw = GET_VIT_ERR_CNT,b[3];
- dvb_usb_generic_rw(state->d,&bw,1,b,3,0);
- *ber = (b[0] << 16) | (b[1] << 8) | b[2];
- return 0;
+ int ret;
+
+ mutex_lock(&state->data_mutex);
+ state->data[0] = GET_VIT_ERR_CNT;
+
+ ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 3, 0);
+ if (ret >= 0)
+ *ber = (state->data[0] << 16) | (state->data[1] << 8) | state->data[2];
+
+ mutex_unlock(&state->data_mutex);
+ return ret;
}
static int dtt200u_fe_read_unc_blocks(struct dvb_frontend* fe, u32 *unc)
{
struct dtt200u_fe_state *state = fe->demodulator_priv;
- u8 bw = GET_RS_UNCOR_BLK_CNT,b[2];
+ int ret;
- dvb_usb_generic_rw(state->d,&bw,1,b,2,0);
- *unc = (b[0] << 8) | b[1];
- return 0;
+ mutex_lock(&state->data_mutex);
+ state->data[0] = GET_RS_UNCOR_BLK_CNT;
+
+ ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 2, 0);
+ if (ret >= 0)
+ *unc = (state->data[0] << 8) | state->data[1];
+
+ mutex_unlock(&state->data_mutex);
+ return ret;
}
static int dtt200u_fe_read_signal_strength(struct dvb_frontend* fe, u16 *strength)
{
struct dtt200u_fe_state *state = fe->demodulator_priv;
- u8 bw = GET_AGC, b;
- dvb_usb_generic_rw(state->d,&bw,1,&b,1,0);
- *strength = (b << 8) | b;
- return 0;
+ int ret;
+
+ mutex_lock(&state->data_mutex);
+ state->data[0] = GET_AGC;
+
+ ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 1, 0);
+ if (ret >= 0)
+ *strength = (state->data[0] << 8) | state->data[0];
+
+ mutex_unlock(&state->data_mutex);
+ return ret;
}
static int dtt200u_fe_read_snr(struct dvb_frontend* fe, u16 *snr)
{
struct dtt200u_fe_state *state = fe->demodulator_priv;
- u8 bw = GET_SNR,br;
- dvb_usb_generic_rw(state->d,&bw,1,&br,1,0);
- *snr = ~((br << 8) | br);
- return 0;
+ int ret;
+
+ mutex_lock(&state->data_mutex);
+ state->data[0] = GET_SNR;
+
+ ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 1, 0);
+ if (ret >= 0)
+ *snr = ~((state->data[0] << 8) | state->data[0]);
+
+ mutex_unlock(&state->data_mutex);
+ return ret;
}
static int dtt200u_fe_init(struct dvb_frontend* fe)
{
struct dtt200u_fe_state *state = fe->demodulator_priv;
- u8 b = SET_INIT;
- return dvb_usb_generic_write(state->d,&b,1);
+ int ret;
+
+ mutex_lock(&state->data_mutex);
+ state->data[0] = SET_INIT;
+
+ ret = dvb_usb_generic_write(state->d, state->data, 1);
+ mutex_unlock(&state->data_mutex);
+
+ return ret;
}
static int dtt200u_fe_sleep(struct dvb_frontend* fe)
@@ -105,39 +151,40 @@ static int dtt200u_fe_set_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct dtt200u_fe_state *state = fe->demodulator_priv;
- int i;
- enum fe_status st;
+ int ret;
u16 freq = fep->frequency / 250000;
- u8 bwbuf[2] = { SET_BANDWIDTH, 0 },freqbuf[3] = { SET_RF_FREQ, 0, 0 };
+ mutex_lock(&state->data_mutex);
+ state->data[0] = SET_BANDWIDTH;
switch (fep->bandwidth_hz) {
case 8000000:
- bwbuf[1] = 8;
+ state->data[1] = 8;
break;
case 7000000:
- bwbuf[1] = 7;
+ state->data[1] = 7;
break;
case 6000000:
- bwbuf[1] = 6;
+ state->data[1] = 6;
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ goto ret;
}
- dvb_usb_generic_write(state->d,bwbuf,2);
+ ret = dvb_usb_generic_write(state->d, state->data, 2);
+ if (ret < 0)
+ goto ret;
- freqbuf[1] = freq & 0xff;
- freqbuf[2] = (freq >> 8) & 0xff;
- dvb_usb_generic_write(state->d,freqbuf,3);
+ state->data[0] = SET_RF_FREQ;
+ state->data[1] = freq & 0xff;
+ state->data[2] = (freq >> 8) & 0xff;
+ ret = dvb_usb_generic_write(state->d, state->data, 3);
+ if (ret < 0)
+ goto ret;
- for (i = 0; i < 30; i++) {
- msleep(20);
- dtt200u_fe_read_status(fe, &st);
- if (st & FE_TIMEDOUT)
- continue;
- }
-
- return 0;
+ret:
+ mutex_unlock(&state->data_mutex);
+ return ret;
}
static int dtt200u_fe_get_frontend(struct dvb_frontend* fe,
@@ -169,6 +216,7 @@ struct dvb_frontend* dtt200u_fe_attach(struct dvb_usb_device *d)
deb_info("attaching frontend dtt200u\n");
state->d = d;
+ mutex_init(&state->data_mutex);
memcpy(&state->frontend.ops,&dtt200u_fe_ops,sizeof(struct dvb_frontend_ops));
state->frontend.demodulator_priv = state;
diff --git a/drivers/media/usb/dvb-usb/dtt200u.c b/drivers/media/usb/dvb-usb/dtt200u.c
index d2a01b50af0d..f88572c7ae7c 100644
--- a/drivers/media/usb/dvb-usb/dtt200u.c
+++ b/drivers/media/usb/dvb-usb/dtt200u.c
@@ -20,75 +20,114 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info,xfer=2 (or-able))." DVB_USB
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+struct dtt200u_state {
+ unsigned char data[80];
+ struct mutex data_mutex;
+};
+
static int dtt200u_power_ctrl(struct dvb_usb_device *d, int onoff)
{
- u8 b = SET_INIT;
+ struct dtt200u_state *st = d->priv;
+ int ret = 0;
+
+ mutex_lock(&st->data_mutex);
+
+ st->data[0] = SET_INIT;
if (onoff)
- dvb_usb_generic_write(d,&b,2);
+ ret = dvb_usb_generic_write(d, st->data, 2);
- return 0;
+ mutex_unlock(&st->data_mutex);
+ return ret;
}
static int dtt200u_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
- u8 b_streaming[2] = { SET_STREAMING, onoff };
- u8 b_rst_pid = RESET_PID_FILTER;
+ struct dtt200u_state *st = adap->dev->priv;
+ int ret;
- dvb_usb_generic_write(adap->dev, b_streaming, 2);
+ mutex_lock(&st->data_mutex);
+ st->data[0] = SET_STREAMING;
+ st->data[1] = onoff;
- if (onoff == 0)
- dvb_usb_generic_write(adap->dev, &b_rst_pid, 1);
- return 0;
+ ret = dvb_usb_generic_write(adap->dev, st->data, 2);
+ if (ret < 0)
+ goto ret;
+
+ if (onoff)
+ goto ret;
+
+ st->data[0] = RESET_PID_FILTER;
+ ret = dvb_usb_generic_write(adap->dev, st->data, 1);
+
+ret:
+ mutex_unlock(&st->data_mutex);
+
+ return ret;
}
static int dtt200u_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onoff)
{
- u8 b_pid[4];
+ struct dtt200u_state *st = adap->dev->priv;
+ int ret;
+
pid = onoff ? pid : 0;
- b_pid[0] = SET_PID_FILTER;
- b_pid[1] = index;
- b_pid[2] = pid & 0xff;
- b_pid[3] = (pid >> 8) & 0x1f;
+ mutex_lock(&st->data_mutex);
+ st->data[0] = SET_PID_FILTER;
+ st->data[1] = index;
+ st->data[2] = pid & 0xff;
+ st->data[3] = (pid >> 8) & 0x1f;
- return dvb_usb_generic_write(adap->dev, b_pid, 4);
+ ret = dvb_usb_generic_write(adap->dev, st->data, 4);
+ mutex_unlock(&st->data_mutex);
+
+ return ret;
}
static int dtt200u_rc_query(struct dvb_usb_device *d)
{
- u8 key[5],cmd = GET_RC_CODE;
+ struct dtt200u_state *st = d->priv;
u32 scancode;
+ int ret;
+
+ mutex_lock(&st->data_mutex);
+ st->data[0] = GET_RC_CODE;
+
+ ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 5, 0);
+ if (ret < 0)
+ goto ret;
- dvb_usb_generic_rw(d,&cmd,1,key,5,0);
- if (key[0] == 1) {
+ if (st->data[0] == 1) {
enum rc_type proto = RC_TYPE_NEC;
- scancode = key[1];
- if ((u8) ~key[1] != key[2]) {
+ scancode = st->data[1];
+ if ((u8) ~st->data[1] != st->data[2]) {
/* Extended NEC */
scancode = scancode << 8;
- scancode |= key[2];
+ scancode |= st->data[2];
proto = RC_TYPE_NECX;
}
scancode = scancode << 8;
- scancode |= key[3];
+ scancode |= st->data[3];
/* Check command checksum is ok */
- if ((u8) ~key[3] == key[4])
+ if ((u8) ~st->data[3] == st->data[4])
rc_keydown(d->rc_dev, proto, scancode, 0);
else
rc_keyup(d->rc_dev);
- } else if (key[0] == 2) {
+ } else if (st->data[0] == 2) {
rc_repeat(d->rc_dev);
} else {
rc_keyup(d->rc_dev);
}
- if (key[0] != 0)
- deb_info("key: %*ph\n", 5, key);
+ if (st->data[0] != 0)
+ deb_info("st->data: %*ph\n", 5, st->data);
- return 0;
+ret:
+ mutex_unlock(&st->data_mutex);
+ return ret;
}
static int dtt200u_frontend_attach(struct dvb_usb_adapter *adap)
@@ -106,17 +145,24 @@ static struct dvb_usb_device_properties wt220u_miglia_properties;
static int dtt200u_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
+ struct dvb_usb_device *d;
+ struct dtt200u_state *st;
+
if (0 == dvb_usb_device_init(intf, &dtt200u_properties,
- THIS_MODULE, NULL, adapter_nr) ||
+ THIS_MODULE, &d, adapter_nr) ||
0 == dvb_usb_device_init(intf, &wt220u_properties,
- THIS_MODULE, NULL, adapter_nr) ||
+ THIS_MODULE, &d, adapter_nr) ||
0 == dvb_usb_device_init(intf, &wt220u_fc_properties,
- THIS_MODULE, NULL, adapter_nr) ||
+ THIS_MODULE, &d, adapter_nr) ||
0 == dvb_usb_device_init(intf, &wt220u_zl0353_properties,
- THIS_MODULE, NULL, adapter_nr) ||
+ THIS_MODULE, &d, adapter_nr) ||
0 == dvb_usb_device_init(intf, &wt220u_miglia_properties,
- THIS_MODULE, NULL, adapter_nr))
+ THIS_MODULE, &d, adapter_nr)) {
+ st = d->priv;
+ mutex_init(&st->data_mutex);
+
return 0;
+ }
return -ENODEV;
}
@@ -140,6 +186,8 @@ static struct dvb_usb_device_properties dtt200u_properties = {
.usb_ctrl = CYPRESS_FX2,
.firmware = "dvb-usb-dtt200u-01.fw",
+ .size_of_priv = sizeof(struct dtt200u_state),
+
.num_adapters = 1,
.adapter = {
{
@@ -190,6 +238,8 @@ static struct dvb_usb_device_properties wt220u_properties = {
.usb_ctrl = CYPRESS_FX2,
.firmware = "dvb-usb-wt220u-02.fw",
+ .size_of_priv = sizeof(struct dtt200u_state),
+
.num_adapters = 1,
.adapter = {
{
@@ -240,6 +290,8 @@ static struct dvb_usb_device_properties wt220u_fc_properties = {
.usb_ctrl = CYPRESS_FX2,
.firmware = "dvb-usb-wt220u-fc03.fw",
+ .size_of_priv = sizeof(struct dtt200u_state),
+
.num_adapters = 1,
.adapter = {
{
@@ -290,6 +342,8 @@ static struct dvb_usb_device_properties wt220u_zl0353_properties = {
.usb_ctrl = CYPRESS_FX2,
.firmware = "dvb-usb-wt220u-zl0353-01.fw",
+ .size_of_priv = sizeof(struct dtt200u_state),
+
.num_adapters = 1,
.adapter = {
{
@@ -340,6 +394,8 @@ static struct dvb_usb_device_properties wt220u_miglia_properties = {
.usb_ctrl = CYPRESS_FX2,
.firmware = "dvb-usb-wt220u-miglia-01.fw",
+ .size_of_priv = sizeof(struct dtt200u_state),
+
.num_adapters = 1,
.generic_bulk_ctrl_endpoint = 0x01,
diff --git a/drivers/media/usb/dvb-usb/dtv5100.c b/drivers/media/usb/dvb-usb/dtv5100.c
index 3d11df41cac0..c60fb54f445f 100644
--- a/drivers/media/usb/dvb-usb/dtv5100.c
+++ b/drivers/media/usb/dvb-usb/dtv5100.c
@@ -31,9 +31,14 @@ module_param_named(debug, dvb_usb_dtv5100_debug, int, 0644);
MODULE_PARM_DESC(debug, "set debugging level" DVB_USB_DEBUG_STATUS);
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+struct dtv5100_state {
+ unsigned char data[80];
+};
+
static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
{
+ struct dtv5100_state *st = d->priv;
u8 request;
u8 type;
u16 value;
@@ -60,9 +65,10 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
}
index = (addr << 8) + wbuf[0];
+ memcpy(st->data, rbuf, rlen);
msleep(1); /* avoid I2C errors */
return usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), request,
- type, value, index, rbuf, rlen,
+ type, value, index, st->data, rlen,
DTV5100_USB_TIMEOUT);
}
@@ -176,7 +182,7 @@ static struct dvb_usb_device_properties dtv5100_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = DEVICE_SPECIFIC,
- .size_of_priv = 0,
+ .size_of_priv = sizeof(struct dtv5100_state),
.num_adapters = 1,
.adapter = {{
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 5fb0c650926e..2c720cb2fb00 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -852,7 +852,7 @@ static int su3000_power_ctrl(struct dvb_usb_device *d, int i)
if (i && !state->initialized) {
state->initialized = 1;
/* reset board */
- dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0);
+ return dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0);
}
return 0;
diff --git a/drivers/media/usb/dvb-usb/gp8psk.c b/drivers/media/usb/dvb-usb/gp8psk.c
index 5d0384dd45b5..adfd76491451 100644
--- a/drivers/media/usb/dvb-usb/gp8psk.c
+++ b/drivers/media/usb/dvb-usb/gp8psk.c
@@ -24,6 +24,10 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info,xfer=2,rc=4 (or-able))." DV
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+struct gp8psk_state {
+ unsigned char data[80];
+};
+
static int gp8psk_get_fw_version(struct dvb_usb_device *d, u8 *fw_vers)
{
return (gp8psk_usb_in_op(d, GET_FW_VERS, 0, 0, fw_vers, 6));
@@ -53,17 +57,22 @@ static void gp8psk_info(struct dvb_usb_device *d)
int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen)
{
+ struct gp8psk_state *st = d->priv;
int ret = 0,try = 0;
+ if (blen > sizeof(st->data))
+ return -EIO;
+
if ((ret = mutex_lock_interruptible(&d->usb_mutex)))
return ret;
while (ret >= 0 && ret != blen && try < 3) {
+ memcpy(st->data, b, blen);
ret = usb_control_msg(d->udev,
usb_rcvctrlpipe(d->udev,0),
req,
USB_TYPE_VENDOR | USB_DIR_IN,
- value,index,b,blen,
+ value, index, st->data, blen,
2000);
deb_info("reading number %d (ret: %d)\n",try,ret);
try++;
@@ -86,19 +95,24 @@ int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8
int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
u16 index, u8 *b, int blen)
{
+ struct gp8psk_state *st = d->priv;
int ret;
deb_xfer("out: req. %x, val: %x, ind: %x, buffer: ",req,value,index);
debug_dump(b,blen,deb_xfer);
+ if (blen > sizeof(st->data))
+ return -EIO;
+
if ((ret = mutex_lock_interruptible(&d->usb_mutex)))
return ret;
+ memcpy(st->data, b, blen);
if (usb_control_msg(d->udev,
usb_sndctrlpipe(d->udev,0),
req,
USB_TYPE_VENDOR | USB_DIR_OUT,
- value,index,b,blen,
+ value, index, st->data, blen,
2000) != blen) {
warn("usb out operation failed.");
ret = -EIO;
@@ -143,6 +157,11 @@ static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d)
err("failed to load bcm4500 firmware.");
goto out_free;
}
+ if (buflen > 64) {
+ err("firmare chunk size bigger than 64 bytes.");
+ goto out_free;
+ }
+
memcpy(buf, ptr, buflen);
if (dvb_usb_generic_write(d, buf, buflen)) {
err("failed to load bcm4500 firmware.");
@@ -265,6 +284,8 @@ static struct dvb_usb_device_properties gp8psk_properties = {
.usb_ctrl = CYPRESS_FX2,
.firmware = "dvb-usb-gp8psk-01.fw",
+ .size_of_priv = sizeof(struct gp8psk_state),
+
.num_adapters = 1,
.adapter = {
{
diff --git a/drivers/media/usb/dvb-usb/nova-t-usb2.c b/drivers/media/usb/dvb-usb/nova-t-usb2.c
index fc7569e2728d..1babd3341910 100644
--- a/drivers/media/usb/dvb-usb/nova-t-usb2.c
+++ b/drivers/media/usb/dvb-usb/nova-t-usb2.c
@@ -74,22 +74,31 @@ static struct rc_map_table rc_map_haupp_table[] = {
*/
static int nova_t_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
{
- u8 key[5],cmd[2] = { DIBUSB_REQ_POLL_REMOTE, 0x35 }, data,toggle,custom;
+ u8 *buf, data, toggle, custom;
u16 raw;
- int i;
+ int i, ret;
struct dibusb_device_state *st = d->priv;
- dvb_usb_generic_rw(d,cmd,2,key,5,0);
+ buf = kmalloc(5, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[0] = DIBUSB_REQ_POLL_REMOTE;
+ buf[1] = 0x35;
+ ret = dvb_usb_generic_rw(d, buf, 2, buf, 5, 0);
+ if (ret < 0)
+ goto ret;
*state = REMOTE_NO_KEY_PRESSED;
- switch (key[0]) {
+ switch (buf[0]) {
case DIBUSB_RC_HAUPPAUGE_KEY_PRESSED:
- raw = ((key[1] << 8) | key[2]) >> 3;
+ raw = ((buf[1] << 8) | buf[2]) >> 3;
toggle = !!(raw & 0x800);
data = raw & 0x3f;
custom = (raw >> 6) & 0x1f;
- deb_rc("raw key code 0x%02x, 0x%02x, 0x%02x to c: %02x d: %02x toggle: %d\n",key[1],key[2],key[3],custom,data,toggle);
+ deb_rc("raw key code 0x%02x, 0x%02x, 0x%02x to c: %02x d: %02x toggle: %d\n",
+ buf[1], buf[2], buf[3], custom, data, toggle);
for (i = 0; i < ARRAY_SIZE(rc_map_haupp_table); i++) {
if (rc5_data(&rc_map_haupp_table[i]) == data &&
@@ -117,7 +126,9 @@ static int nova_t_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
break;
}
- return 0;
+ret:
+ kfree(buf);
+ return ret;
}
static int nova_t_read_mac_address (struct dvb_usb_device *d, u8 mac[6])
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
index c05de1b088a4..07fa08be9e99 100644
--- a/drivers/media/usb/dvb-usb/pctv452e.c
+++ b/drivers/media/usb/dvb-usb/pctv452e.c
@@ -97,48 +97,53 @@ struct pctv452e_state {
u8 c; /* transaction counter, wraps around... */
u8 initialized; /* set to 1 if 0x15 has been sent */
u16 last_rc_key;
+
+ unsigned char data[80];
};
static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
unsigned int write_len, unsigned int read_len)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
- u8 buf[64];
u8 id;
unsigned int rlen;
int ret;
- BUG_ON(NULL == data && 0 != (write_len | read_len));
- BUG_ON(write_len > 64 - 4);
- BUG_ON(read_len > 64 - 4);
+ if (!data || (write_len > 64 - 4) || (read_len > 64 - 4)) {
+ err("%s: transfer data invalid", __func__);
+ return -EIO;
+ }
+ mutex_lock(&state->ca_mutex);
id = state->c++;
- buf[0] = SYNC_BYTE_OUT;
- buf[1] = id;
- buf[2] = cmd;
- buf[3] = write_len;
+ state->data[0] = SYNC_BYTE_OUT;
+ state->data[1] = id;
+ state->data[2] = cmd;
+ state->data[3] = write_len;
- memcpy(buf + 4, data, write_len);
+ memcpy(state->data + 4, data, write_len);
rlen = (read_len > 0) ? 64 : 0;
- ret = dvb_usb_generic_rw(d, buf, 4 + write_len,
- buf, rlen, /* delay_ms */ 0);
+ ret = dvb_usb_generic_rw(d, state->data, 4 + write_len,
+ state->data, rlen, /* delay_ms */ 0);
if (0 != ret)
goto failed;
ret = -EIO;
- if (SYNC_BYTE_IN != buf[0] || id != buf[1])
+ if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
goto failed;
- memcpy(data, buf + 4, read_len);
+ memcpy(data, state->data + 4, read_len);
+ mutex_unlock(&state->ca_mutex);
return 0;
failed:
err("CI error %d; %02X %02X %02X -> %*ph.",
- ret, SYNC_BYTE_OUT, id, cmd, 3, buf);
+ ret, SYNC_BYTE_OUT, id, cmd, 3, state->data);
+ mutex_unlock(&state->ca_mutex);
return ret;
}
@@ -405,52 +410,53 @@ static int pctv452e_i2c_msg(struct dvb_usb_device *d, u8 addr,
u8 *rcv_buf, u8 rcv_len)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
- u8 buf[64];
u8 id;
int ret;
+ mutex_lock(&state->ca_mutex);
id = state->c++;
ret = -EINVAL;
if (snd_len > 64 - 7 || rcv_len > 64 - 7)
goto failed;
- buf[0] = SYNC_BYTE_OUT;
- buf[1] = id;
- buf[2] = PCTV_CMD_I2C;
- buf[3] = snd_len + 3;
- buf[4] = addr << 1;
- buf[5] = snd_len;
- buf[6] = rcv_len;
+ state->data[0] = SYNC_BYTE_OUT;
+ state->data[1] = id;
+ state->data[2] = PCTV_CMD_I2C;
+ state->data[3] = snd_len + 3;
+ state->data[4] = addr << 1;
+ state->data[5] = snd_len;
+ state->data[6] = rcv_len;
- memcpy(buf + 7, snd_buf, snd_len);
+ memcpy(state->data + 7, snd_buf, snd_len);
- ret = dvb_usb_generic_rw(d, buf, 7 + snd_len,
- buf, /* rcv_len */ 64,
+ ret = dvb_usb_generic_rw(d, state->data, 7 + snd_len,
+ state->data, /* rcv_len */ 64,
/* delay_ms */ 0);
if (ret < 0)
goto failed;
/* TT USB protocol error. */
ret = -EIO;
- if (SYNC_BYTE_IN != buf[0] || id != buf[1])
+ if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
goto failed;
/* I2C device didn't respond as expected. */
ret = -EREMOTEIO;
- if (buf[5] < snd_len || buf[6] < rcv_len)
+ if (state->data[5] < snd_len || state->data[6] < rcv_len)
goto failed;
- memcpy(rcv_buf, buf + 7, rcv_len);
+ memcpy(rcv_buf, state->data + 7, rcv_len);
+ mutex_unlock(&state->ca_mutex);
return rcv_len;
failed:
- err("I2C error %d; %02X %02X %02X %02X %02X -> "
- "%02X %02X %02X %02X %02X.",
+ err("I2C error %d; %02X %02X %02X %02X %02X -> %*ph",
ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len,
- buf[0], buf[1], buf[4], buf[5], buf[6]);
+ 7, state->data);
+ mutex_unlock(&state->ca_mutex);
return ret;
}
@@ -499,8 +505,7 @@ static u32 pctv452e_i2c_func(struct i2c_adapter *adapter)
static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
- u8 b0[] = { 0xaa, 0, PCTV_CMD_RESET, 1, 0 };
- u8 rx[PCTV_ANSWER_LEN];
+ u8 *rx;
int ret;
info("%s: %d\n", __func__, i);
@@ -511,6 +516,11 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
if (state->initialized)
return 0;
+ rx = kmalloc(PCTV_ANSWER_LEN, GFP_KERNEL);
+ if (!rx)
+ return -ENOMEM;
+
+ mutex_lock(&state->ca_mutex);
/* hmm, where should this go? */
ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE);
if (ret != 0)
@@ -518,65 +528,75 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
__func__, ret);
/* this is a one-time initialization, don't know where to put it */
- b0[1] = state->c++;
+ state->data[0] = 0xaa;
+ state->data[1] = state->c++;
+ state->data[2] = PCTV_CMD_RESET;
+ state->data[3] = 1;
+ state->data[4] = 0;
/* reset board */
- ret = dvb_usb_generic_rw(d, b0, sizeof(b0), rx, PCTV_ANSWER_LEN, 0);
+ ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
if (ret)
- return ret;
+ goto ret;
- b0[1] = state->c++;
- b0[4] = 1;
+ state->data[1] = state->c++;
+ state->data[4] = 1;
/* reset board (again?) */
- ret = dvb_usb_generic_rw(d, b0, sizeof(b0), rx, PCTV_ANSWER_LEN, 0);
+ ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
if (ret)
- return ret;
+ goto ret;
state->initialized = 1;
- return 0;
+ret:
+ mutex_unlock(&state->ca_mutex);
+ kfree(rx);
+ return ret;
}
static int pctv452e_rc_query(struct dvb_usb_device *d)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
- u8 b[CMD_BUFFER_SIZE];
- u8 rx[PCTV_ANSWER_LEN];
int ret, i;
- u8 id = state->c++;
+ u8 id;
+
+ mutex_lock(&state->ca_mutex);
+ id = state->c++;
/* prepare command header */
- b[0] = SYNC_BYTE_OUT;
- b[1] = id;
- b[2] = PCTV_CMD_IR;
- b[3] = 0;
+ state->data[0] = SYNC_BYTE_OUT;
+ state->data[1] = id;
+ state->data[2] = PCTV_CMD_IR;
+ state->data[3] = 0;
/* send ir request */
- ret = dvb_usb_generic_rw(d, b, 4, rx, PCTV_ANSWER_LEN, 0);
+ ret = dvb_usb_generic_rw(d, state->data, 4,
+ state->data, PCTV_ANSWER_LEN, 0);
if (ret != 0)
- return ret;
+ goto ret;
if (debug > 3) {
- info("%s: read: %2d: %*ph: ", __func__, ret, 3, rx);
- for (i = 0; (i < rx[3]) && ((i+3) < PCTV_ANSWER_LEN); i++)
- info(" %02x", rx[i+3]);
+ info("%s: read: %2d: %*ph: ", __func__, ret, 3, state->data);
+ for (i = 0; (i < state->data[3]) && ((i + 3) < PCTV_ANSWER_LEN); i++)
+ info(" %02x", state->data[i + 3]);
info("\n");
}
- if ((rx[3] == 9) && (rx[12] & 0x01)) {
+ if ((state->data[3] == 9) && (state->data[12] & 0x01)) {
/* got a "press" event */
- state->last_rc_key = RC_SCANCODE_RC5(rx[7], rx[6]);
+ state->last_rc_key = RC_SCANCODE_RC5(state->data[7], state->data[6]);
if (debug > 2)
info("%s: cmd=0x%02x sys=0x%02x\n",
- __func__, rx[6], rx[7]);
+ __func__, state->data[6], state->data[7]);
rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0);
} else if (state->last_rc_key) {
rc_keyup(d->rc_dev);
state->last_rc_key = 0;
}
-
- return 0;
+ret:
+ mutex_unlock(&state->ca_mutex);
+ return ret;
}
static int pctv452e_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
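
A note on the pattern in the pctv452e hunks above: the transfer buffer moves
into the per-device state (so it is heap-backed and safe for USB/DMA) and every
build/submit/parse cycle is serialized on ca_mutex. A minimal sketch of that
pattern, assuming a kernel build with the dvb-usb core headers; struct my_state,
my_cmd and the 80-byte size are illustrative placeholders, not part of the patch:

#include <linux/mutex.h>
#include "dvb-usb.h"

struct my_state {
	struct mutex buf_mutex;	/* mutex_init()ed at probe time */
	u8 data[80];		/* heap-backed, DMA-safe staging buffer */
};

static int my_cmd(struct dvb_usb_device *d, u8 cmd)
{
	struct my_state *st = d->priv;
	int ret;

	mutex_lock(&st->buf_mutex);
	st->data[0] = cmd;
	/* the same state buffer carries both the request and the reply */
	ret = dvb_usb_generic_rw(d, st->data, 1, st->data, sizeof(st->data), 0);
	mutex_unlock(&st->buf_mutex);

	return ret;
}
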
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index d9f3262bf071..4706628a3ed5 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -89,9 +89,13 @@ struct technisat_usb2_state {
static int technisat_usb2_i2c_access(struct usb_device *udev,
u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
{
- u8 b[64];
+ u8 *b;
int ret, actual_length;
+ b = kmalloc(64, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
deb_i2c("i2c-access: %02x, tx: ", device_addr);
debug_dump(tx, txlen, deb_i2c);
deb_i2c(" ");
@@ -123,7 +127,7 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
if (ret < 0) {
err("i2c-error: out failed %02x = %d", device_addr, ret);
- return -ENODEV;
+ goto err;
}
ret = usb_bulk_msg(udev,
@@ -131,7 +135,7 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
b, 64, &actual_length, 1000);
if (ret < 0) {
err("i2c-error: in failed %02x = %d", device_addr, ret);
- return -ENODEV;
+ goto err;
}
if (b[0] != I2C_STATUS_OK) {
@@ -140,7 +144,7 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
if (!(b[0] == I2C_STATUS_NAK &&
device_addr == 0x60
/* && device_is_technisat_usb2 */))
- return -ENODEV;
+ goto err;
}
deb_i2c("status: %d, ", b[0]);
@@ -154,7 +158,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
deb_i2c("\n");
- return 0;
+err:
+ kfree(b);
+ return ret;
}
static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index c3a0e87066eb..f7bb78c1873c 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -1901,19 +1901,30 @@ static long s2255_vendor_req(struct s2255_dev *dev, unsigned char Request,
s32 TransferBufferLength, int bOut)
{
int r;
+ unsigned char *buf;
+
+ buf = kmalloc(TransferBufferLength, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
if (!bOut) {
r = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
Request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE |
USB_DIR_IN,
- Value, Index, TransferBuffer,
+ Value, Index, buf,
TransferBufferLength, HZ * 5);
+
+ if (r >= 0)
+ memcpy(TransferBuffer, buf, TransferBufferLength);
} else {
+ memcpy(buf, TransferBuffer, TransferBufferLength);
r = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
Request, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- Value, Index, TransferBuffer,
+ Value, Index, buf,
TransferBufferLength, HZ * 5);
}
+ kfree(buf);
return r;
}
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index db200c9d796d..22a9aae16291 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -147,20 +147,26 @@ int stk_camera_write_reg(struct stk_camera *dev, u16 index, u8 value)
int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
{
struct usb_device *udev = dev->udev;
+ unsigned char *buf;
int ret;
+ buf = kmalloc(sizeof(u8), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
0x00,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x00,
index,
- (u8 *) value,
+ buf,
sizeof(u8),
500);
- if (ret < 0)
- return ret;
- else
- return 0;
+ if (ret >= 0)
+ memcpy(value, buf, sizeof(u8));
+
+ kfree(buf);
+ return ret;
}
static int stk_start_stream(struct stk_camera *dev)
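
The s2255 and stk-webcam hunks above apply the same bounce-buffer idea: a USB
control transfer must not target stack or arbitrary caller memory, so the data
is staged through a kmalloc'd buffer and copied back only on success. A
condensed sketch of that pattern, assuming a kernel context; my_vendor_read and
its parameters are hypothetical:

#include <linux/slab.h>
#include <linux/usb.h>

static int my_vendor_read(struct usb_device *udev, u8 request,
			  u16 value, u16 index, void *dst, u16 len)
{
	u8 *buf;
	int ret;

	buf = kmalloc(len, GFP_KERNEL);	/* DMA-able, unlike the caller's stack */
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
			      USB_TYPE_VENDOR | USB_DIR_IN,
			      value, index, buf, len, 5000);
	if (ret >= 0)
		memcpy(dst, buf, len);	/* hand the data back only on success */

	kfree(buf);
	return ret;
}
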
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index af23d7dfe752..2e5233b60971 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -247,7 +247,9 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
cxl_ctx_get();
if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
+ put_pid(ctx->glpid);
put_pid(ctx->pid);
+ ctx->glpid = ctx->pid = NULL;
cxl_adapter_context_put(ctx->afu->adapter);
cxl_ctx_put();
goto out;
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index d0b421f49b39..77080cc5fa0a 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -194,6 +194,16 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF);
/*
+ * Increment the mapped context count for adapter. This also checks
+ * if adapter_context_lock is taken.
+ */
+ rc = cxl_adapter_context_get(ctx->afu->adapter);
+ if (rc) {
+ afu_release_irqs(ctx, ctx);
+ goto out;
+ }
+
+ /*
* We grab the PID here and not in the file open to allow for the case
* where a process (master, some daemon, etc) has opened the chardev on
* behalf of another process, so the AFU's mm gets bound to the process
@@ -205,15 +215,6 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
ctx->pid = get_task_pid(current, PIDTYPE_PID);
ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);
- /*
- * Increment the mapped context count for adapter. This also checks
- * if adapter_context_lock is taken.
- */
- rc = cxl_adapter_context_get(ctx->afu->adapter);
- if (rc) {
- afu_release_irqs(ctx, ctx);
- goto out;
- }
trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
@@ -221,6 +222,9 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
amr))) {
afu_release_irqs(ctx, ctx);
cxl_adapter_context_put(ctx->afu->adapter);
+ put_pid(ctx->glpid);
+ put_pid(ctx->pid);
+ ctx->glpid = ctx->pid = NULL;
goto out;
}
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 8a679ecc8fd1..fc2794b513fa 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -352,17 +352,27 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
if (copy_from_user(sgl->lpage, user_addr + user_size -
sgl->lpage_size, sgl->lpage_size)) {
rc = -EFAULT;
- goto err_out1;
+ goto err_out2;
}
}
return 0;
+ err_out2:
+ __genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
+ sgl->lpage_dma_addr);
+ sgl->lpage = NULL;
+ sgl->lpage_dma_addr = 0;
err_out1:
__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
sgl->fpage_dma_addr);
+ sgl->fpage = NULL;
+ sgl->fpage_dma_addr = 0;
err_out:
__genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
sgl->sgl_dma_addr);
+ sgl->sgl = NULL;
+ sgl->sgl_dma_addr = 0;
+ sgl->sgl_size = 0;
return -ENOMEM;
}
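
The genwqe hunk above extends the usual staged-cleanup idiom: every allocation
gets its own error label, a failure jumps to the label that unwinds everything
allocated so far, and freed pointers are cleared so a later cleanup path cannot
free them twice. A small generic sketch of the idiom; struct my_ctx and its
members are hypothetical:

#include <linux/slab.h>

struct my_ctx {
	void *first;
	void *second;
};

static int my_setup(struct my_ctx *ctx)
{
	ctx->first = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!ctx->first)
		goto err_out;

	ctx->second = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!ctx->second)
		goto err_free_first;

	return 0;

err_free_first:
	kfree(ctx->first);
	ctx->first = NULL;	/* no dangling pointer for later cleanup paths */
err_out:
	return -ENOMEM;
}
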
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index e6e5e55a12ed..60415a2bfcbd 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -981,11 +981,13 @@ static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack)
hisr = mei_txe_br_reg_read(hw, HISR_REG);
aliveness = mei_txe_aliveness_get(dev);
- if (hhisr & IPC_HHIER_SEC && aliveness)
+ if (hhisr & IPC_HHIER_SEC && aliveness) {
ipc_isr = mei_txe_sec_reg_read_silent(hw,
SEC_IPC_HOST_INT_STATUS_REG);
- else
+ } else {
ipc_isr = 0;
+ hhisr &= ~IPC_HHIER_SEC;
+ }
generated = generated ||
(hisr & HISR_INT_STS_MSK) ||
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index 1525870f460a..33741ad4a74a 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -283,7 +283,7 @@ static void gru_unload_mm_tracker(struct gru_state *gru,
spin_lock(&gru->gs_asid_lock);
BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
asids->mt_ctxbitmap ^= ctxbitmap;
- gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum 0x%d, asidmap 0x%lx\n",
+ gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n",
gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
spin_unlock(&gru->gs_asid_lock);
spin_unlock(&gms->ms_asid_lock);
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
index a8cee33ae8d2..b3fa738ae005 100644
--- a/drivers/misc/vmw_vmci/vmci_doorbell.c
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
@@ -431,6 +431,12 @@ int vmci_doorbell_create(struct vmci_handle *handle,
if (vmci_handle_is_invalid(*handle)) {
u32 context_id = vmci_get_context_id();
+ if (context_id == VMCI_INVALID_ID) {
+ pr_warn("Failed to get context ID\n");
+ result = VMCI_ERROR_NO_RESOURCES;
+ goto free_mem;
+ }
+
/* Let resource code allocate a free ID for us */
new_handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
} else {
@@ -525,7 +531,7 @@ int vmci_doorbell_destroy(struct vmci_handle handle)
entry = container_of(resource, struct dbell_entry, resource);
- if (vmci_guest_code_active()) {
+ if (!hlist_unhashed(&entry->node)) {
int result;
dbell_index_table_remove(entry);
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
index 896be150e28f..d7eaf1eb11e7 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.c
+++ b/drivers/misc/vmw_vmci/vmci_driver.c
@@ -113,5 +113,5 @@ module_exit(vmci_drv_exit);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
-MODULE_VERSION("1.1.4.0-k");
+MODULE_VERSION("1.1.5.0-k");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index c0bb0c793e84..dbbc4303bdd0 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -46,12 +46,13 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
host->pdata = pdev->dev.platform_data;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- /* Get registers' physical base address */
- host->phy_regs = regs->start;
host->regs = devm_ioremap_resource(&pdev->dev, regs);
if (IS_ERR(host->regs))
return PTR_ERR(host->regs);
+ /* Get registers' physical base address */
+ host->phy_regs = regs->start;
+
platform_set_drvdata(pdev, host);
return dw_mci_probe(host);
}
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 8ef44a2a2fd9..90ed2e12d345 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -647,6 +647,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
if (msm_host->pwr_irq < 0) {
dev_err(&pdev->dev, "Get pwr_irq failed (%d)\n",
msm_host->pwr_irq);
+ ret = msm_host->pwr_irq;
goto clk_disable;
}
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index 0f68a99fc4ad..141bd70a49c2 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -161,7 +161,7 @@ int gpmi_init(struct gpmi_nand_data *this)
ret = gpmi_enable_clk(this);
if (ret)
- goto err_out;
+ return ret;
ret = gpmi_reset_block(r->gpmi_regs, false);
if (ret)
goto err_out;
@@ -197,6 +197,7 @@ int gpmi_init(struct gpmi_nand_data *this)
gpmi_disable_clk(this);
return 0;
err_out:
+ gpmi_disable_clk(this);
return ret;
}
@@ -270,7 +271,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
ret = gpmi_enable_clk(this);
if (ret)
- goto err_out;
+ return ret;
/*
* Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
@@ -308,6 +309,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
gpmi_disable_clk(this);
return 0;
err_out:
+ gpmi_disable_clk(this);
return ret;
}
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
index d54f666417e1..dbf256217b3e 100644
--- a/drivers/mtd/nand/mtk_ecc.c
+++ b/drivers/mtd/nand/mtk_ecc.c
@@ -86,6 +86,8 @@ struct mtk_ecc {
struct completion done;
struct mutex lock;
u32 sectors;
+
+ u8 eccdata[112];
};
static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
@@ -366,9 +368,8 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
u8 *data, u32 bytes)
{
dma_addr_t addr;
- u8 *p;
- u32 len, i, val;
- int ret = 0;
+ u32 len;
+ int ret;
addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
ret = dma_mapping_error(ecc->dev, addr);
@@ -393,14 +394,12 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
/* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
- p = data + bytes;
- /* write the parity bytes generated by the ECC back to the OOB region */
- for (i = 0; i < len; i++) {
- if ((i % 4) == 0)
- val = readl(ecc->regs + ECC_ENCPAR(i / 4));
- p[i] = (val >> ((i % 4) * 8)) & 0xff;
- }
+ /* write the parity bytes generated by the ECC back to temp buffer */
+ __ioread32_copy(ecc->eccdata, ecc->regs + ECC_ENCPAR(0), round_up(len, 4));
+
+ /* copy into possibly unaligned OOB region with actual length */
+ memcpy(data + bytes, ecc->eccdata, len);
timeout:
dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index e5718e5ecf92..3bde96a3f7bf 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -1095,10 +1095,11 @@ static void nand_release_data_interface(struct nand_chip *chip)
/**
* nand_reset - Reset and initialize a NAND device
* @chip: The NAND chip
+ * @chipnr: Internal die id
*
* Returns 0 for success or negative error code otherwise
*/
-int nand_reset(struct nand_chip *chip)
+int nand_reset(struct nand_chip *chip, int chipnr)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
@@ -1107,9 +1108,17 @@ int nand_reset(struct nand_chip *chip)
if (ret)
return ret;
+ /*
+ * The CS line has to be released before we can apply the new NAND
+ * interface settings, hence this weird ->select_chip() dance.
+ */
+ chip->select_chip(mtd, chipnr);
chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ chip->select_chip(mtd, -1);
+ chip->select_chip(mtd, chipnr);
ret = nand_setup_data_interface(chip);
+ chip->select_chip(mtd, -1);
if (ret)
return ret;
@@ -1185,8 +1194,6 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
/* Shift to get chip number */
chipnr = ofs >> chip->chip_shift;
- chip->select_chip(mtd, chipnr);
-
/*
* Reset the chip.
* If we want to check the WP through READ STATUS and check the bit 7
@@ -1194,7 +1201,9 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
* some operation can also clear the bit 7 of status register
* eg. erase/program a locked block
*/
- nand_reset(chip);
+ nand_reset(chip, chipnr);
+
+ chip->select_chip(mtd, chipnr);
/* Check, if it is write protected */
if (nand_check_wp(mtd)) {
@@ -1244,8 +1253,6 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
/* Shift to get chip number */
chipnr = ofs >> chip->chip_shift;
- chip->select_chip(mtd, chipnr);
-
/*
* Reset the chip.
* If we want to check the WP through READ STATUS and check the bit 7
@@ -1253,7 +1260,9 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
* some operation can also clear the bit 7 of status register
* eg. erase/program a locked block
*/
- nand_reset(chip);
+ nand_reset(chip, chipnr);
+
+ chip->select_chip(mtd, chipnr);
/* Check, if it is write protected */
if (nand_check_wp(mtd)) {
@@ -2940,10 +2949,6 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
}
chipnr = (int)(to >> chip->chip_shift);
- chip->select_chip(mtd, chipnr);
-
- /* Shift to get page */
- page = (int)(to >> chip->page_shift);
/*
* Reset the chip. Some chips (like the Toshiba TC5832DC found in one
@@ -2951,7 +2956,12 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
* if we don't do this. I have no clue why, but I seem to have 'fixed'
* it in the doc2000 driver in August 1999. dwmw2.
*/
- nand_reset(chip);
+ nand_reset(chip, chipnr);
+
+ chip->select_chip(mtd, chipnr);
+
+ /* Shift to get page */
+ page = (int)(to >> chip->page_shift);
/* Check, if it is write protected */
if (nand_check_wp(mtd)) {
@@ -3984,14 +3994,14 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
int i, maf_idx;
u8 id_data[8];
- /* Select the device */
- chip->select_chip(mtd, 0);
-
/*
* Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
* after power-up.
*/
- nand_reset(chip);
+ nand_reset(chip, 0);
+
+ /* Select the device */
+ chip->select_chip(mtd, 0);
/* Send the command for reading device ID */
chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
@@ -4329,17 +4339,31 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
return PTR_ERR(type);
}
+ /* Initialize the ->data_interface field. */
ret = nand_init_data_interface(chip);
if (ret)
return ret;
+ /*
+ * Setup the data interface correctly on the chip and controller side.
+ * This explicit call to nand_setup_data_interface() is only required
+ * for the first die, because nand_reset() has been called before
+ * ->data_interface and ->default_onfi_timing_mode were set.
+ * For the other dies, nand_reset() will automatically switch to the
+ * best mode for us.
+ */
+ ret = nand_setup_data_interface(chip);
+ if (ret)
+ return ret;
+
chip->select_chip(mtd, -1);
/* Check for a chip array */
for (i = 1; i < maxchips; i++) {
- chip->select_chip(mtd, i);
/* See comment in nand_get_flash_type for reset */
- nand_reset(chip);
+ nand_reset(chip, i);
+
+ chip->select_chip(mtd, i);
/* Send the command for reading device ID */
chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
/* Read manufacturer and device IDs */
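
The nand_base.c hunks above change the calling convention: nand_reset() now
takes the die number and does its own select_chip() dance around the RESET
command and the data-interface setup, so callers select the die only after the
reset has finished. A sketch of the resulting caller pattern; my_wake_die is a
hypothetical helper, not part of the patch:

#include <linux/mtd/nand.h>

static int my_wake_die(struct mtd_info *mtd, struct nand_chip *chip, int die)
{
	int ret;

	/* nand_reset() selects and deselects the die internally */
	ret = nand_reset(chip, die);
	if (ret)
		return ret;

	/* only now claim the die for the actual I/O */
	chip->select_chip(mtd, die);

	return 0;
}
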
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 2ff62157d3bb..c1f5c29e458e 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -707,11 +707,11 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
fmvhdr->vol_type,
be32_to_cpu(fmvhdr->last_eb_bytes));
- if (!av)
- goto fail_bad;
- if (PTR_ERR(av) == -EINVAL) {
- ubi_err(ubi, "volume (ID %i) already exists",
- fmvhdr->vol_id);
+ if (IS_ERR(av)) {
+ if (PTR_ERR(av) == -EEXIST)
+ ubi_err(ubi, "volume (ID %i) already exists",
+ fmvhdr->vol_id);
+
goto fail_bad;
}
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index 76fb8552c9d9..ef63d24fef81 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -256,6 +256,7 @@ static const struct of_device_id b53_mmap_of_table[] = {
{ .compatible = "brcm,bcm63xx-switch" },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, b53_mmap_of_table);
static struct platform_driver b53_mmap_driver = {
.probe = b53_mmap_probe,
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index e218887f18b7..e3ee27ce13dd 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1133,6 +1133,20 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
return 0;
}
+static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
+{
+ struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
+
+ /* For a kernel about to be kexec'd we want to keep the GPHY on for a
+ * successful MDIO bus scan to occur. If we did turn off the GPHY
+ * before (e.g: port_disable), this will also power it back on.
+ *
+ * Do not rely on kexec_in_progress, just power the PHY on.
+ */
+ if (priv->hw_params.num_gphy == 1)
+ bcm_sf2_gphy_enable_set(priv->dev->ds, true);
+}
+
#ifdef CONFIG_PM_SLEEP
static int bcm_sf2_suspend(struct device *dev)
{
@@ -1158,10 +1172,12 @@ static const struct of_device_id bcm_sf2_of_match[] = {
{ .compatible = "brcm,bcm7445-switch-v4.0" },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
static struct platform_driver bcm_sf2_driver = {
.probe = bcm_sf2_sw_probe,
.remove = bcm_sf2_sw_remove,
+ .shutdown = bcm_sf2_sw_shutdown,
.driver = {
.name = "brcm-sf2",
.of_match_table = bcm_sf2_of_match,
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index b047fd607b83..00c38bf151e6 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -1358,6 +1358,7 @@ static const struct of_device_id nb8800_dt_ids[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(of, nb8800_dt_ids);
static int nb8800_probe(struct platform_device *pdev)
{
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index ae364c74baf3..537090952c45 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1126,7 +1126,8 @@ out_freeirq:
free_irq(dev->irq, dev);
out_phy_disconnect:
- phy_disconnect(phydev);
+ if (priv->has_phy)
+ phy_disconnect(phydev);
return ret;
}
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 856379cbb402..31ca204b38d2 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1449,7 +1449,7 @@ static int bgmac_phy_connect(struct bgmac *bgmac)
phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(phy_dev)) {
- dev_err(bgmac->dev, "PHY connecton failed\n");
+ dev_err(bgmac->dev, "PHY connection failed\n");
return PTR_ERR(phy_dev);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 27f11a5d5fe2..b3791b394715 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -271,22 +271,25 @@ static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
+ unsigned long flags;
u32 val;
- spin_lock_bh(&bp->indirect_lock);
+ spin_lock_irqsave(&bp->indirect_lock, flags);
BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
- spin_unlock_bh(&bp->indirect_lock);
+ spin_unlock_irqrestore(&bp->indirect_lock, flags);
return val;
}
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
- spin_lock_bh(&bp->indirect_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bp->indirect_lock, flags);
BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
- spin_unlock_bh(&bp->indirect_lock);
+ spin_unlock_irqrestore(&bp->indirect_lock, flags);
}
static void
@@ -304,8 +307,10 @@ bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
+ unsigned long flags;
+
offset += cid_addr;
- spin_lock_bh(&bp->indirect_lock);
+ spin_lock_irqsave(&bp->indirect_lock, flags);
if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
int i;
@@ -322,7 +327,7 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
BNX2_WR(bp, BNX2_CTX_DATA, val);
}
- spin_unlock_bh(&bp->indirect_lock);
+ spin_unlock_irqrestore(&bp->indirect_lock, flags);
}
#ifdef BCM_CNIC
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 20fe6a8c35c1..0cee4c0283f9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -15241,7 +15241,7 @@ static void bnx2x_init_cyclecounter(struct bnx2x *bp)
memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
bp->cyclecounter.read = bnx2x_cyclecounter_read;
bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
- bp->cyclecounter.shift = 1;
+ bp->cyclecounter.shift = 0;
bp->cyclecounter.mult = 1;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index f320497368f4..57eb4e1345cb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4057,7 +4057,7 @@ static void cfg_queues(struct adapter *adap)
* capped by the number of available cores.
*/
if (n10g) {
- i = num_online_cpus();
+ i = min_t(int, MAX_OFLD_QSETS, num_online_cpus());
s->ofldqsets = roundup(i, adap->params.nports);
} else {
s->ofldqsets = adap->params.nports;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 0945fa49a5dd..2471ff465d5c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -135,15 +135,17 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
}
static int alloc_uld_rxqs(struct adapter *adap,
- struct sge_uld_rxq_info *rxq_info,
- unsigned int nq, unsigned int offset, bool lro)
+ struct sge_uld_rxq_info *rxq_info, bool lro)
{
struct sge *s = &adap->sge;
- struct sge_ofld_rxq *q = rxq_info->uldrxq + offset;
- unsigned short *ids = rxq_info->rspq_id + offset;
- unsigned int per_chan = nq / adap->params.nports;
+ unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
+ struct sge_ofld_rxq *q = rxq_info->uldrxq;
+ unsigned short *ids = rxq_info->rspq_id;
unsigned int bmap_idx = 0;
- int i, err, msi_idx;
+ unsigned int per_chan;
+ int i, err, msi_idx, que_idx = 0;
+
+ per_chan = rxq_info->nrxq / adap->params.nports;
if (adap->flags & USING_MSIX)
msi_idx = 1;
@@ -151,12 +153,18 @@ static int alloc_uld_rxqs(struct adapter *adap,
msi_idx = -((int)s->intrq.abs_id + 1);
for (i = 0; i < nq; i++, q++) {
+ if (i == rxq_info->nrxq) {
+ /* start allocation of concentrator queues */
+ per_chan = rxq_info->nciq / adap->params.nports;
+ que_idx = 0;
+ }
+
if (msi_idx >= 0) {
bmap_idx = get_msix_idx_from_bmap(adap);
msi_idx = adap->msix_info_ulds[bmap_idx].idx;
}
err = t4_sge_alloc_rxq(adap, &q->rspq, false,
- adap->port[i / per_chan],
+ adap->port[que_idx++ / per_chan],
msi_idx,
q->fl.size ? &q->fl : NULL,
uldrx_handler,
@@ -165,29 +173,19 @@ static int alloc_uld_rxqs(struct adapter *adap,
if (err)
goto freeout;
if (msi_idx >= 0)
- rxq_info->msix_tbl[i + offset] = bmap_idx;
+ rxq_info->msix_tbl[i] = bmap_idx;
memset(&q->stats, 0, sizeof(q->stats));
if (ids)
ids[i] = q->rspq.abs_id;
}
return 0;
freeout:
- q = rxq_info->uldrxq + offset;
+ q = rxq_info->uldrxq;
for ( ; i; i--, q++) {
if (q->rspq.desc)
free_rspq_fl(adap, &q->rspq,
q->fl.size ? &q->fl : NULL);
}
-
- /* We need to free rxq also in case of ciq allocation failure */
- if (offset) {
- q = rxq_info->uldrxq + offset;
- for ( ; i; i--, q++) {
- if (q->rspq.desc)
- free_rspq_fl(adap, &q->rspq,
- q->fl.size ? &q->fl : NULL);
- }
- }
return err;
}
@@ -205,9 +203,7 @@ setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
return -ENOMEM;
}
- ret = !(!alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro) &&
- !alloc_uld_rxqs(adap, rxq_info, rxq_info->nciq,
- rxq_info->nrxq, lro));
+ ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));
/* Tell uP to route control queue completions to rdma rspq */
if (adap->flags & FULL_INIT_DONE &&
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index 539de764bbd3..cbd68a8fe2e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -210,8 +210,10 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
/* Unbind queue from any existing class */
err = t4_sched_queue_unbind(pi, p);
- if (err)
+ if (err) {
+ t4_free_mem(qe);
goto out;
+ }
/* Bind queue to specified class */
memset(qe, 0, sizeof(*qe));
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index e572a527b18d..36bc2c71fba9 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -169,19 +169,28 @@ int vnic_rq_disable(struct vnic_rq *rq)
{
unsigned int wait;
struct vnic_dev *vdev = rq->vdev;
+ int i;
- iowrite32(0, &rq->ctrl->enable);
+ /* Due to a race condition with clearing RQ "mini-cache" in hw, we need
+ * to disable the RQ twice to guarantee that stale descriptors are not
+ * used when this RQ is re-enabled.
+ */
+ for (i = 0; i < 2; i++) {
+ iowrite32(0, &rq->ctrl->enable);
- /* Wait for HW to ACK disable request */
- for (wait = 0; wait < 1000; wait++) {
- if (!(ioread32(&rq->ctrl->running)))
- return 0;
- udelay(10);
- }
+ /* Wait for HW to ACK disable request */
+ for (wait = 20000; wait > 0; wait--)
+ if (!ioread32(&rq->ctrl->running))
+ break;
+ if (!wait) {
+ vdev_neterr(vdev, "Failed to disable RQ[%d]\n",
+ rq->index);
- vdev_neterr(vdev, "Failed to disable RQ[%d]\n", rq->index);
+ return -ETIMEDOUT;
+ }
+ }
- return -ETIMEDOUT;
+ return 0;
}
void vnic_rq_clean(struct vnic_rq *rq,
@@ -212,6 +221,11 @@ void vnic_rq_clean(struct vnic_rq *rq,
[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
iowrite32(fetch_index, &rq->ctrl->posted_index);
+ /* Anytime we write fetch_index, we need to re-write 0 to rq->enable
+ * to re-sync internal VIC state.
+ */
+ iowrite32(0, &rq->ctrl->enable);
+
vnic_dev_clear_desc_ring(&rq->ring);
}
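
The comments in the vnic_rq hunk above spell out the two quirks being worked
around: the RQ has to be disabled twice because of the hardware "mini-cache"
race, and the disable request is polled with a bounded count so a wedged queue
turns into -ETIMEDOUT instead of an endless wait. A condensed sketch of the
bounded disable/poll step, assuming the same register layout; my_rq_disable_once
is a hypothetical helper:

#include <linux/io.h>
#include "vnic_rq.h"

static int my_rq_disable_once(struct vnic_rq *rq)
{
	unsigned int wait;

	iowrite32(0, &rq->ctrl->enable);

	/* wait for the hardware to acknowledge, but never forever */
	for (wait = 20000; wait > 0; wait--)
		if (!ioread32(&rq->ctrl->running))
			return 0;

	return -ETIMEDOUT;
}
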
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index f928e6f79c89..223f35cc034c 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -669,6 +669,7 @@ static const struct of_device_id nps_enet_dt_ids[] = {
{ .compatible = "ezchip,nps-mgt-enet" },
{ /* Sentinel */ }
};
+MODULE_DEVICE_TABLE(of, nps_enet_dt_ids);
static struct platform_driver nps_enet_driver = {
.probe = nps_enet_probe,
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 48a033e64423..5aa9d4ded214 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1430,14 +1430,14 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
skb_put(skb, pkt_len - 4);
data = skb->data;
+ if (!is_copybreak && need_swap)
+ swap_buffer(data, pkt_len);
+
#if !defined(CONFIG_M5272)
if (fep->quirks & FEC_QUIRK_HAS_RACC)
data = skb_pull_inline(skb, 2);
#endif
- if (!is_copybreak && need_swap)
- swap_buffer(data, pkt_len);
-
/* Extract the enhanced buffer descriptor */
ebdp = NULL;
if (fep->bufdesc_ex)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 8d70377f6624..8ea3d95fa483 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -2751,6 +2751,7 @@ static const struct of_device_id g_dsaf_match[] = {
{.compatible = "hisilicon,hns-dsaf-v2"},
{}
};
+MODULE_DEVICE_TABLE(of, g_dsaf_match);
static struct platform_driver g_dsaf_driver = {
.probe = hns_dsaf_probe,
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 33f4c483af0f..501eb2090ca6 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -563,6 +563,7 @@ static const struct of_device_id hns_mdio_match[] = {
{.compatible = "hisilicon,hns-mdio"},
{}
};
+MODULE_DEVICE_TABLE(of, hns_mdio_match);
static const struct acpi_device_id hns_mdio_acpi_match[] = {
{ "HISI0141", 0 },
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index bfe17d9c022d..5f44c5520fbc 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1190,7 +1190,7 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
if (!scrq)
return NULL;
- scrq->msgs = (union sub_crq *)__get_free_pages(GFP_KERNEL, 2);
+ scrq->msgs = (union sub_crq *)__get_free_pages(GFP_ATOMIC, 2);
memset(scrq->msgs, 0, 4 * PAGE_SIZE);
if (!scrq->msgs) {
dev_warn(dev, "Couldn't allocate crq queue messages page\n");
@@ -1461,14 +1461,16 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
return rc;
req_rx_irq_failed:
- for (j = 0; j < i; j++)
+ for (j = 0; j < i; j++) {
free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+ }
i = adapter->req_tx_queues;
req_tx_irq_failed:
- for (j = 0; j < i; j++)
+ for (j = 0; j < i; j++) {
free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+ }
release_sub_crqs_no_irqs(adapter);
return rc;
}
@@ -3232,6 +3234,27 @@ static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
spin_unlock_irqrestore(&adapter->inflight_lock, flags);
}
+static void ibmvnic_xport_event(struct work_struct *work)
+{
+ struct ibmvnic_adapter *adapter = container_of(work,
+ struct ibmvnic_adapter,
+ ibmvnic_xport);
+ struct device *dev = &adapter->vdev->dev;
+ long rc;
+
+ ibmvnic_free_inflight(adapter);
+ release_sub_crqs(adapter);
+ if (adapter->migrated) {
+ rc = ibmvnic_reenable_crq_queue(adapter);
+ if (rc)
+ dev_err(dev, "Error after enable rc=%ld\n", rc);
+ adapter->migrated = false;
+ rc = ibmvnic_send_crq_init(adapter);
+ if (rc)
+ dev_err(dev, "Error sending init rc=%ld\n", rc);
+ }
+}
+
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
struct ibmvnic_adapter *adapter)
{
@@ -3267,15 +3290,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
dev_info(dev, "Re-enabling adapter\n");
adapter->migrated = true;
- ibmvnic_free_inflight(adapter);
- release_sub_crqs(adapter);
- rc = ibmvnic_reenable_crq_queue(adapter);
- if (rc)
- dev_err(dev, "Error after enable rc=%ld\n", rc);
- adapter->migrated = false;
- rc = ibmvnic_send_crq_init(adapter);
- if (rc)
- dev_err(dev, "Error sending init rc=%ld\n", rc);
+ schedule_work(&adapter->ibmvnic_xport);
} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
dev_info(dev, "Backing device failover detected\n");
netif_carrier_off(netdev);
@@ -3284,8 +3299,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
/* The adapter lost the connection */
dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
gen_crq->cmd);
- ibmvnic_free_inflight(adapter);
- release_sub_crqs(adapter);
+ schedule_work(&adapter->ibmvnic_xport);
}
return;
case IBMVNIC_CRQ_CMD_RSP:
@@ -3654,6 +3668,7 @@ static void handle_crq_init_rsp(struct work_struct *work)
goto task_failed;
netdev->real_num_tx_queues = adapter->req_tx_queues;
+ netdev->mtu = adapter->req_mtu;
if (adapter->failover) {
adapter->failover = false;
@@ -3725,6 +3740,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
SET_NETDEV_DEV(netdev, &dev->dev);
INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
+ INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);
spin_lock_init(&adapter->stats_lock);
@@ -3792,6 +3808,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
}
netdev->real_num_tx_queues = adapter->req_tx_queues;
+ netdev->mtu = adapter->req_mtu;
rc = register_netdev(netdev);
if (rc) {
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index bfc84c7d0e11..dd775d951b73 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -27,7 +27,7 @@
/**************************************************************************/
#define IBMVNIC_NAME "ibmvnic"
-#define IBMVNIC_DRIVER_VERSION "1.0"
+#define IBMVNIC_DRIVER_VERSION "1.0.1"
#define IBMVNIC_INVALID_MAP -1
#define IBMVNIC_STATS_TIMEOUT 1
/* basic structures plus 100 2k buffers */
@@ -1048,5 +1048,6 @@ struct ibmvnic_adapter {
u8 map_id;
struct work_struct vnic_crq_init;
+ struct work_struct ibmvnic_xport;
bool failover;
};
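
The ibmvnic hunks above move the transport-event handling out of the CRQ
interrupt path: the handler only schedules a work item, and the heavy
release/re-enable sequence runs later in process context from
ibmvnic_xport_event(). A minimal sketch of that deferral pattern; my_adapter,
my_event_work_fn and my_irq_path are hypothetical names:

#include <linux/workqueue.h>

struct my_adapter {
	struct work_struct event_work;
};

static void my_event_work_fn(struct work_struct *work)
{
	struct my_adapter *adapter =
		container_of(work, struct my_adapter, event_work);

	/* process context: safe to sleep, allocate and take mutexes */
	(void)adapter;
}

static void my_probe_init(struct my_adapter *adapter)
{
	INIT_WORK(&adapter->event_work, my_event_work_fn);
}

static void my_irq_path(struct my_adapter *adapter)
{
	/* interrupt context: just queue the work, no heavy lifting here */
	schedule_work(&adapter->event_work);
}
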
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 2030d7c1dc94..6d61e443bdf8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -92,6 +92,7 @@
#define I40E_AQ_LEN 256
#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */
#define I40E_MAX_USER_PRIORITY 8
+#define I40E_DEFAULT_TRAFFIC_CLASS BIT(0)
#define I40E_DEFAULT_MSG_ENABLE 4
#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index ac1faee2a5b8..31c97e3937a4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4641,29 +4641,6 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
}
/**
- * i40e_pf_get_default_tc - Get bitmap for first enabled TC
- * @pf: PF being queried
- *
- * Return a bitmap for first enabled traffic class for this PF.
- **/
-static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
-{
- u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
- u8 i = 0;
-
- if (!enabled_tc)
- return 0x1; /* TC0 */
-
- /* Find the first enabled TC */
- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- if (enabled_tc & BIT(i))
- break;
- }
-
- return BIT(i);
-}
-
-/**
* i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
* @pf: PF being queried
*
@@ -4673,7 +4650,7 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
{
/* If DCB is not enabled for this PF then just return default TC */
if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
- return i40e_pf_get_default_tc(pf);
+ return I40E_DEFAULT_TRAFFIC_CLASS;
/* SFP mode we want PF to be enabled for all TCs */
if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
@@ -4683,7 +4660,7 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
if (pf->hw.func_caps.iscsi)
return i40e_get_iscsi_tc_map(pf);
else
- return i40e_pf_get_default_tc(pf);
+ return I40E_DEFAULT_TRAFFIC_CLASS;
}
/**
@@ -5029,7 +5006,7 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
if (v == pf->lan_vsi)
tc_map = i40e_pf_get_tc_map(pf);
else
- tc_map = i40e_pf_get_default_tc(pf);
+ tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
#ifdef I40E_FCOE
if (pf->vsi[v]->type == I40E_VSI_FCOE)
tc_map = i40e_get_fcoe_tc_map(pf);
@@ -5717,7 +5694,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
u8 type;
/* Not DCB capable or capability disabled */
- if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
return ret;
/* Ignore if event is not for Nearest Bridge */
@@ -7707,6 +7684,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
kfree(pf->msix_entries);
pf->msix_entries = NULL;
+ pci_disable_msix(pf->pdev);
return -ENODEV;
} else if (v_actual == I40E_MIN_MSIX) {
@@ -9056,7 +9034,7 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
return 0;
return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
- nlflags, 0, 0, filter_mask, NULL);
+ 0, 0, nlflags, filter_mask, NULL);
}
/* Hardware supports L4 tunnel length of 128B (=2^7) which includes
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index a244d9a67264..bd93d823cc25 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -9135,10 +9135,14 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
goto fwd_add_err;
fwd_adapter->pool = pool;
fwd_adapter->real_adapter = adapter;
- err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
- if (err)
- goto fwd_add_err;
- netif_tx_start_all_queues(vdev);
+
+ if (netif_running(pdev)) {
+ err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
+ if (err)
+ goto fwd_add_err;
+ netif_tx_start_all_queues(vdev);
+ }
+
return fwd_adapter;
fwd_add_err:
/* unwind counter and free adapter struct */
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 55831188bc32..bf5cc55ba24c 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2968,6 +2968,22 @@ static void set_params(struct mv643xx_eth_private *mp,
mp->txq_count = pd->tx_queue_count ? : 1;
}
+static int get_phy_mode(struct mv643xx_eth_private *mp)
+{
+ struct device *dev = mp->dev->dev.parent;
+ int iface = -1;
+
+ if (dev->of_node)
+ iface = of_get_phy_mode(dev->of_node);
+
+ /* Historical default if unspecified. We could also read/write
+ * the interface state in the PSC1
+ */
+ if (iface < 0)
+ iface = PHY_INTERFACE_MODE_GMII;
+ return iface;
+}
+
static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
int phy_addr)
{
@@ -2994,7 +3010,7 @@ static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
"orion-mdio-mii", addr);
phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link,
- PHY_INTERFACE_MODE_GMII);
+ get_phy_mode(mp));
if (!IS_ERR(phydev)) {
phy_addr_set(mp, addr);
break;
@@ -3090,6 +3106,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
+ SET_NETDEV_DEV(dev, &pdev->dev);
mp = netdev_priv(dev);
platform_set_drvdata(pdev, mp);
@@ -3129,7 +3146,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
if (pd->phy_node) {
mp->phy = of_phy_connect(mp->dev, pd->phy_node,
mv643xx_eth_adjust_link, 0,
- PHY_INTERFACE_MODE_GMII);
+ get_phy_mode(mp));
if (!mp->phy)
err = -ENODEV;
else
@@ -3187,8 +3204,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
dev->priv_flags |= IFF_UNICAST_FLT;
dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;
- SET_NETDEV_DEV(dev, &pdev->dev);
-
if (mp->shared->win_protect)
wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index b1cef7a0f7ca..e36bebcab3f2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2469,6 +2469,7 @@ err_comm_admin:
kfree(priv->mfunc.master.slave_state);
err_comm:
iounmap(priv->mfunc.comm);
+ priv->mfunc.comm = NULL;
err_vhcr:
dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
priv->mfunc.vhcr,
@@ -2537,6 +2538,13 @@ void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
int slave;
u32 slave_read;
+ /* If the comm channel has not yet been initialized,
+ * skip reporting the internal error event to all
+ * the communication channels.
+ */
+ if (!priv->mfunc.comm)
+ return;
+
/* Report an internal error event to all
* communication channels.
*/
@@ -2571,6 +2579,7 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
}
iounmap(priv->mfunc.comm);
+ priv->mfunc.comm = NULL;
}
void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 08fc5fc56d43..a5fc46bbcbe2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -245,8 +245,11 @@ static u32 freq_to_shift(u16 freq)
{
u32 freq_khz = freq * 1000;
u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
+ u64 tmp_rounded =
+ roundup_pow_of_two(max_val_cycles) > max_val_cycles ?
+ roundup_pow_of_two(max_val_cycles) - 1 : UINT_MAX;
u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
- max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1;
+ max_val_cycles : tmp_rounded;
/* calculate max possible multiplier in order to fit in 64bit */
u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 132cea655920..e3be7e44ff51 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -127,7 +127,15 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
/* For TX we use the same irq per
ring we assigned for the RX */
struct mlx4_en_cq *rx_cq;
-
+ int xdp_index;
+
+ /* The xdp tx irq must align with the rx ring that forwards to
+ * it, so reindex these from 0. This should only happen when
+ * tx_ring_num is not a multiple of rx_ring_num.
+ */
+ xdp_index = (priv->xdp_ring_num - priv->tx_ring_num) + cq_idx;
+ if (xdp_index >= 0)
+ cq_idx = xdp_index;
cq_idx = cq_idx % priv->rx_ring_num;
rx_cq = priv->rx_cq[cq_idx];
cq->vector = rx_cq->vector;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 7e703bed7b82..12c99a2655f2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1733,6 +1733,13 @@ int mlx4_en_start_port(struct net_device *dev)
udp_tunnel_get_rx_info(dev);
priv->port_up = true;
+
+ /* Process all completions, if any exist, to prevent
+ * the queues from freezing when they are full
+ */
+ for (i = 0; i < priv->rx_ring_num; i++)
+ napi_schedule(&priv->rx_cq[i]->napi);
+
netif_tx_start_all_queues(dev);
netif_device_attach(dev);
@@ -1910,8 +1917,9 @@ static void mlx4_en_clear_stats(struct net_device *dev)
struct mlx4_en_dev *mdev = priv->mdev;
int i;
- if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
- en_dbg(HW, priv, "Failed dumping statistics\n");
+ if (!mlx4_is_slave(mdev->dev))
+ if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
+ en_dbg(HW, priv, "Failed dumping statistics\n");
memset(&priv->pstats, 0, sizeof(priv->pstats));
memset(&priv->pkstats, 0, sizeof(priv->pkstats));
@@ -2194,6 +2202,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
if (!shutdown)
free_netdev(dev);
+ dev->ethtool_ops = NULL;
}
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 5aa8b751f417..59473a0ebcdf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -166,7 +166,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
return PTR_ERR(mailbox);
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
- MLX4_CMD_WRAPPED);
+ MLX4_CMD_NATIVE);
if (err)
goto out;
@@ -322,7 +322,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma,
in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
0, MLX4_CMD_DUMP_ETH_STATS,
- MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
if (err)
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index b66e03d9711f..c06346a82496 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -118,6 +118,29 @@ mlx4_en_test_loopback_exit:
return !loopback_ok;
}
+static int mlx4_en_test_interrupts(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err = 0;
+ int i = 0;
+
+ err = mlx4_test_async(mdev->dev);
+ /* When not in MSI_X or slave, test only async */
+ if (!(mdev->dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(mdev->dev))
+ return err;
+
+ /* Loop over all completion vectors of the current port; for each
+ * vector, check whether it works by mapping command completions to
+ * that vector and performing a NOP command
+ */
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ err = mlx4_test_interrupt(mdev->dev, priv->rx_cq[i]->vector);
+ if (err)
+ break;
+ }
+
+ return err;
+}
static int mlx4_en_test_link(struct mlx4_en_priv *priv)
{
@@ -151,7 +174,6 @@ static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
int i, carrier_ok;
memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);
@@ -177,7 +199,7 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
netif_carrier_on(dev);
}
- buf[0] = mlx4_test_interrupts(mdev->dev);
+ buf[0] = mlx4_en_test_interrupts(priv);
buf[1] = mlx4_en_test_link(priv);
buf[2] = mlx4_en_test_speed(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index cf8f8a72a801..cd3638e6fe25 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -1361,53 +1361,49 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
kfree(priv->eq_table.uar_map);
}
-/* A test that verifies that we can accept interrupts on all
- * the irq vectors of the device.
+/* A test that verifies that we can accept interrupts
+ * on the vector allocated for asynchronous events
+ */
+int mlx4_test_async(struct mlx4_dev *dev)
+{
+ return mlx4_NOP(dev);
+}
+EXPORT_SYMBOL(mlx4_test_async);
+
+/* A test that verifies that we can accept interrupts
+ * on the given irq vector of the tested port.
* Interrupts are checked using the NOP command.
*/
-int mlx4_test_interrupts(struct mlx4_dev *dev)
+int mlx4_test_interrupt(struct mlx4_dev *dev, int vector)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- int i;
int err;
- err = mlx4_NOP(dev);
- /* When not in MSI_X, there is only one irq to check */
- if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
- return err;
-
- /* A loop over all completion vectors, for each vector we will check
- * whether it works by mapping command completions to that vector
- * and performing a NOP command
- */
- for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
- /* Make sure request_irq was called */
- if (!priv->eq_table.eq[i].have_irq)
- continue;
-
- /* Temporary use polling for command completions */
- mlx4_cmd_use_polling(dev);
-
- /* Map the new eq to handle all asynchronous events */
- err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
- priv->eq_table.eq[i].eqn);
- if (err) {
- mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
- mlx4_cmd_use_events(dev);
- break;
- }
+ /* Temporary use polling for command completions */
+ mlx4_cmd_use_polling(dev);
- /* Go back to using events */
- mlx4_cmd_use_events(dev);
- err = mlx4_NOP(dev);
+ /* Map the new eq to handle all asynchronous events */
+ err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
+ priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn);
+ if (err) {
+ mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
+ goto out;
}
+ /* Go back to using events */
+ mlx4_cmd_use_events(dev);
+ err = mlx4_NOP(dev);
+
/* Return to default */
+ mlx4_cmd_use_polling(dev);
+out:
mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
+ mlx4_cmd_use_events(dev);
+
return err;
}
-EXPORT_SYMBOL(mlx4_test_interrupts);
+EXPORT_SYMBOL(mlx4_test_interrupt);
bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector)
{
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index c41ab31a39f8..84bab9f0732e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -49,9 +49,9 @@ enum {
extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);
-static bool enable_qos = true;
+static bool enable_qos;
module_param(enable_qos, bool, 0444);
-MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: on)");
+MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: off)");
#define MLX4_GET(dest, source, offset) \
do { \
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 7183ac4135d2..6f4e67bc3538 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1102,6 +1102,14 @@ static int __set_port_type(struct mlx4_port_info *info,
int i;
int err = 0;
+ if ((port_type & mdev->caps.supported_type[info->port]) != port_type) {
+ mlx4_err(mdev,
+ "Requested port type for port %d is not supported on this HCA\n",
+ info->port);
+ err = -EINVAL;
+ goto err_sup;
+ }
+
mlx4_stop_sense(mdev);
mutex_lock(&priv->port_mutex);
info->tmp_type = port_type;
@@ -1147,7 +1155,7 @@ static int __set_port_type(struct mlx4_port_info *info,
out:
mlx4_start_sense(mdev);
mutex_unlock(&priv->port_mutex);
-
+err_sup:
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index e4878f31e45d..88ee7d8a5923 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -145,9 +145,10 @@ enum mlx4_resource {
RES_MTT,
RES_MAC,
RES_VLAN,
- RES_EQ,
+ RES_NPORT_ID,
RES_COUNTER,
RES_FS_RULE,
+ RES_EQ,
MLX4_NUM_OF_RESOURCE_TYPE
};
@@ -1329,8 +1330,6 @@ int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_info *cmd);
int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function,
int port, void *buf);
-int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, u32 in_mod,
- struct mlx4_cmd_mailbox *outbox);
int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index c5b2064297a1..b656dd5772e5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1728,24 +1728,13 @@ int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
return err;
}
-int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
- u32 in_mod, struct mlx4_cmd_mailbox *outbox)
-{
- return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
- MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
- MLX4_CMD_NATIVE);
-}
-
int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
- if (slave != dev->caps.function)
- return 0;
- return mlx4_common_dump_eth_stats(dev, slave,
- vhcr->in_modifier, outbox);
+ return 0;
}
int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 84d7857ccc27..c548beaaf910 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1605,13 +1605,14 @@ static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
r->com.from_state = r->com.state;
r->com.to_state = state;
r->com.state = RES_EQ_BUSY;
- if (eq)
- *eq = r;
}
}
spin_unlock_irq(mlx4_tlock(dev));
+ if (!err && eq)
+ *eq = r;
+
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 6cb38304669f..2c6e3c7b7417 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -41,6 +41,13 @@
#include "mlx5_core.h"
+struct mlx5_db_pgdir {
+ struct list_head list;
+ unsigned long *bitmap;
+ __be32 *db_page;
+ dma_addr_t db_dma;
+};
+
/* Handling for queue buffers -- we allocate a bunch of memory and
* register it in a memory region at HCA virtual address 0.
*/
@@ -102,17 +109,28 @@ EXPORT_SYMBOL_GPL(mlx5_buf_free);
static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
int node)
{
+ u32 db_per_page = PAGE_SIZE / cache_line_size();
struct mlx5_db_pgdir *pgdir;
pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
if (!pgdir)
return NULL;
- bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
+ pgdir->bitmap = kcalloc(BITS_TO_LONGS(db_per_page),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+
+ if (!pgdir->bitmap) {
+ kfree(pgdir);
+ return NULL;
+ }
+
+ bitmap_fill(pgdir->bitmap, db_per_page);
pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
&pgdir->db_dma, node);
if (!pgdir->db_page) {
+ kfree(pgdir->bitmap);
kfree(pgdir);
return NULL;
}
@@ -123,18 +141,19 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
struct mlx5_db *db)
{
+ u32 db_per_page = PAGE_SIZE / cache_line_size();
int offset;
int i;
- i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE);
- if (i >= MLX5_DB_PER_PAGE)
+ i = find_first_bit(pgdir->bitmap, db_per_page);
+ if (i >= db_per_page)
return -ENOMEM;
__clear_bit(i, pgdir->bitmap);
db->u.pgdir = pgdir;
db->index = i;
- offset = db->index * L1_CACHE_BYTES;
+ offset = db->index * cache_line_size();
db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page);
db->dma = pgdir->db_dma + offset;
@@ -181,14 +200,16 @@ EXPORT_SYMBOL_GPL(mlx5_db_alloc);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
+ u32 db_per_page = PAGE_SIZE / cache_line_size();
mutex_lock(&dev->priv.pgdir_mutex);
__set_bit(db->index, db->u.pgdir->bitmap);
- if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) {
+ if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
db->u.pgdir->db_page, db->u.pgdir->db_dma);
list_del(&db->u.pgdir->list);
+ kfree(db->u.pgdir->bitmap);
kfree(db->u.pgdir);
}
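With MLX5_DB_PER_PAGE gone, the number of doorbell slots per page is now derived from the actual cache line size and the tracking bitmap is allocated to match. A stand-alone sketch of the sizing arithmetic, assuming 4 KB pages and using calloc/memset in place of kcalloc/bitmap_fill:

#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE        4096u                 /* assumption for the sketch */
#define BITS_PER_LONG    (8 * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* One bit per doorbell slot; slots are cache-line sized so that two
 * doorbells never share a cache line.
 */
static unsigned long *alloc_db_bitmap(unsigned int cache_line_size,
                                      unsigned int *db_per_page)
{
        unsigned long *bitmap;

        *db_per_page = PAGE_SIZE / cache_line_size;
        bitmap = calloc(BITS_TO_LONGS(*db_per_page), sizeof(unsigned long));
        if (!bitmap)
                return NULL;
        /* mark all slots free (the driver uses bitmap_fill() for this) */
        memset(bitmap, 0xff,
               BITS_TO_LONGS(*db_per_page) * sizeof(unsigned long));
        return bitmap;
}

A slot's byte offset inside the page is then index * cache_line_size, which matches the offset calculation in mlx5_alloc_db_from_pgdir().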
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 460363b66cb1..7a43502a89cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -85,6 +85,9 @@
#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128)
#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
+#define MLX5E_DEFAULT_LRO_TIMEOUT 32
+#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4
+
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
@@ -221,6 +224,7 @@ struct mlx5e_params {
struct ieee_ets ets;
#endif
bool rx_am_enabled;
+ u32 lro_timeout;
};
struct mlx5e_tstamp {
@@ -888,5 +892,6 @@ int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
+u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
#endif /* __MLX5_EN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 7eaf38020a8f..f4c687ce4c59 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1971,9 +1971,7 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
(priv->params.lro_wqe_sz -
ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
- MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
- MLX5_CAP_ETH(priv->mdev,
- lro_timer_supported_periods[2]));
+ MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
}
void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
@@ -3401,6 +3399,18 @@ static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev,
}
}
+u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
+{
+ int i;
+
+ /* The supported periods are organized in ascending order */
+ for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
+ if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
+ break;
+
+ return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
+}
+
static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
struct net_device *netdev,
const struct mlx5e_profile *profile,
@@ -3419,6 +3429,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
priv->profile = profile;
priv->ppriv = ppriv;
+ priv->params.lro_timeout =
+ mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
+
priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
/* set CQE compression */
@@ -4035,7 +4048,6 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
const struct mlx5e_profile *profile = priv->profile;
struct net_device *netdev = priv->netdev;
- unregister_netdev(netdev);
destroy_workqueue(priv->wq);
if (profile->cleanup)
profile->cleanup(priv);
@@ -4052,6 +4064,7 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
for (vport = 1; vport < total_vfs; vport++)
mlx5_eswitch_unregister_vport_rep(esw, vport);
+ unregister_netdev(priv->netdev);
mlx5e_detach(mdev, vpriv);
mlx5e_destroy_netdev(mdev, priv);
}
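mlx5e_choose_lro_timeout() picks the smallest device-supported LRO timer period that is at least the requested value, and falls back to the largest entry when nothing is big enough. A minimal sketch over a plain array; the example periods in the usage note are invented, the real ones come from the device capabilities via MLX5_CAP_ETH():

#include <stdint.h>

#define LRO_TIMEOUT_ARR_SIZE 4

static uint32_t choose_lro_timeout(const uint32_t supported[LRO_TIMEOUT_ARR_SIZE],
                                   uint32_t wanted)
{
        int i;

        /* supported periods are sorted in ascending order */
        for (i = 0; i < LRO_TIMEOUT_ARR_SIZE - 1; i++)
                if (supported[i] >= wanted)
                        break;

        return supported[i];
}

With supported periods {8, 16, 32, 1024} and a wanted value of 32 the helper returns 32; with a wanted value of 4096 it returns 1024.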
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 3c97da103d30..7fe6559e4ab3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -457,6 +457,7 @@ void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
struct mlx5e_priv *priv = rep->priv_data;
struct net_device *netdev = priv->netdev;
+ unregister_netdev(netdev);
mlx5e_detach_netdev(esw->dev, netdev);
mlx5e_destroy_netdev(esw->dev, priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index abbf2c369923..be1f7333ab7f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -931,8 +931,8 @@ static void esw_vport_change_handler(struct work_struct *work)
mutex_unlock(&esw->state_lock);
}
-static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *vlan_grp = NULL;
@@ -949,9 +949,11 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
int table_size = 2;
int err = 0;
- if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support) ||
- !IS_ERR_OR_NULL(vport->egress.acl))
- return;
+ if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
+ return -EOPNOTSUPP;
+
+ if (!IS_ERR_OR_NULL(vport->egress.acl))
+ return 0;
esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
@@ -959,12 +961,12 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
- return;
+ return -EIO;
}
flow_group_in = mlx5_vzalloc(inlen);
if (!flow_group_in)
- return;
+ return -ENOMEM;
acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
if (IS_ERR(acl)) {
@@ -1009,6 +1011,7 @@ out:
mlx5_destroy_flow_group(vlan_grp);
if (err && !IS_ERR_OR_NULL(acl))
mlx5_destroy_flow_table(acl);
+ return err;
}
static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
@@ -1041,8 +1044,8 @@ static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
vport->egress.acl = NULL;
}
-static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_core_dev *dev = esw->dev;
@@ -1063,9 +1066,11 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
int table_size = 4;
int err = 0;
- if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support) ||
- !IS_ERR_OR_NULL(vport->ingress.acl))
- return;
+ if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
+ return -EOPNOTSUPP;
+
+ if (!IS_ERR_OR_NULL(vport->ingress.acl))
+ return 0;
esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
@@ -1073,12 +1078,12 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
- return;
+ return -EIO;
}
flow_group_in = mlx5_vzalloc(inlen);
if (!flow_group_in)
- return;
+ return -ENOMEM;
acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
if (IS_ERR(acl)) {
@@ -1167,6 +1172,7 @@ out:
}
kvfree(flow_group_in);
+ return err;
}
static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@@ -1225,7 +1231,13 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
return 0;
}
- esw_vport_enable_ingress_acl(esw, vport);
+ err = esw_vport_enable_ingress_acl(esw, vport);
+ if (err) {
+ mlx5_core_warn(esw->dev,
+ "failed to enable ingress acl (%d) on vport[%d]\n",
+ err, vport->vport);
+ return err;
+ }
esw_debug(esw->dev,
"vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
@@ -1299,7 +1311,13 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
return 0;
}
- esw_vport_enable_egress_acl(esw, vport);
+ err = esw_vport_enable_egress_acl(esw, vport);
+ if (err) {
+ mlx5_core_warn(esw->dev,
+ "failed to enable egress acl (%d) on vport[%d]\n",
+ err, vport->vport);
+ return err;
+ }
esw_debug(esw->dev,
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 5da2cc878582..89696048b045 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -436,6 +436,9 @@ static void del_flow_group(struct fs_node *node)
fs_get_obj(ft, fg->node.parent);
dev = get_dev(&ft->node);
+ if (ft->autogroup.active)
+ ft->autogroup.num_groups--;
+
if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
fg->id, ft->id);
@@ -879,7 +882,7 @@ static struct mlx5_flow_group *create_flow_group_common(struct mlx5_flow_table *
tree_init_node(&fg->node, !is_auto_fg, del_flow_group);
tree_add_node(&fg->node, &ft->node);
/* Add node to group list */
- list_add(&fg->node.list, ft->node.children.prev);
+ list_add(&fg->node.list, prev_fg);
return fg;
}
@@ -893,7 +896,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
return ERR_PTR(-EPERM);
lock_ref_node(&ft->node);
- fg = create_flow_group_common(ft, fg_in, &ft->node.children, false);
+ fg = create_flow_group_common(ft, fg_in, ft->node.children.prev, false);
unlock_ref_node(&ft->node);
return fg;
@@ -1012,7 +1015,7 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
u32 *match_criteria)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct list_head *prev = &ft->node.children;
+ struct list_head *prev = ft->node.children.prev;
unsigned int candidate_index = 0;
struct mlx5_flow_group *fg;
void *match_criteria_addr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 3a9195b4169d..3b026c151cf2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -218,6 +218,7 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
goto err_out;
if (aging) {
+ counter->cache.lastuse = jiffies;
counter->aging = true;
spin_lock(&fc_stats->addlist_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 1a05fb965c8d..5bcf93422ee0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -61,10 +61,15 @@ enum {
enum {
MLX5_NIC_IFC_FULL = 0,
MLX5_NIC_IFC_DISABLED = 1,
- MLX5_NIC_IFC_NO_DRAM_NIC = 2
+ MLX5_NIC_IFC_NO_DRAM_NIC = 2,
+ MLX5_NIC_IFC_INVALID = 3
};
-static u8 get_nic_interface(struct mlx5_core_dev *dev)
+enum {
+ MLX5_DROP_NEW_HEALTH_WORK,
+};
+
+static u8 get_nic_state(struct mlx5_core_dev *dev)
{
return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3;
}
@@ -97,7 +102,7 @@ static int in_fatal(struct mlx5_core_dev *dev)
struct mlx5_core_health *health = &dev->priv.health;
struct health_buffer __iomem *h = health->health;
- if (get_nic_interface(dev) == MLX5_NIC_IFC_DISABLED)
+ if (get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
return 1;
if (ioread32be(&h->fw_ver) == 0xffffffff)
@@ -127,7 +132,7 @@ unlock:
static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
{
- u8 nic_interface = get_nic_interface(dev);
+ u8 nic_interface = get_nic_state(dev);
switch (nic_interface) {
case MLX5_NIC_IFC_FULL:
@@ -149,8 +154,34 @@ static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
mlx5_disable_device(dev);
}
+static void health_recover(struct work_struct *work)
+{
+ struct mlx5_core_health *health;
+ struct delayed_work *dwork;
+ struct mlx5_core_dev *dev;
+ struct mlx5_priv *priv;
+ u8 nic_state;
+
+ dwork = container_of(work, struct delayed_work, work);
+ health = container_of(dwork, struct mlx5_core_health, recover_work);
+ priv = container_of(health, struct mlx5_priv, health);
+ dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ nic_state = get_nic_state(dev);
+ if (nic_state == MLX5_NIC_IFC_INVALID) {
+ dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n");
+ return;
+ }
+
+ dev_err(&dev->pdev->dev, "starting health recovery flow\n");
+ mlx5_recover_device(dev);
+}
+
+/* How long to wait before the health recovery flow resets the driver (in msecs) */
+#define MLX5_RECOVERY_DELAY_MSECS 60000
static void health_care(struct work_struct *work)
{
+ unsigned long recover_delay = msecs_to_jiffies(MLX5_RECOVERY_DELAY_MSECS);
struct mlx5_core_health *health;
struct mlx5_core_dev *dev;
struct mlx5_priv *priv;
@@ -160,6 +191,14 @@ static void health_care(struct work_struct *work)
dev = container_of(priv, struct mlx5_core_dev, priv);
mlx5_core_warn(dev, "handling bad device here\n");
mlx5_handle_bad_state(dev);
+
+ spin_lock(&health->wq_lock);
+ if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
+ schedule_delayed_work(&health->recover_work, recover_delay);
+ else
+ dev_err(&dev->pdev->dev,
+ "new health works are not permitted at this stage\n");
+ spin_unlock(&health->wq_lock);
}
static const char *hsynd_str(u8 synd)
@@ -272,7 +311,13 @@ static void poll_health(unsigned long data)
if (in_fatal(dev) && !health->sick) {
health->sick = true;
print_health_info(dev);
- schedule_work(&health->work);
+ spin_lock(&health->wq_lock);
+ if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
+ queue_work(health->wq, &health->work);
+ else
+ dev_err(&dev->pdev->dev,
+ "new health works are not permitted at this stage\n");
+ spin_unlock(&health->wq_lock);
}
}
@@ -281,6 +326,8 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
struct mlx5_core_health *health = &dev->priv.health;
init_timer(&health->timer);
+ health->sick = 0;
+ clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
health->health = &dev->iseg->health;
health->health_counter = &dev->iseg->health_counter;
@@ -297,11 +344,22 @@ void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
del_timer_sync(&health->timer);
}
+void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_health *health = &dev->priv.health;
+
+ spin_lock(&health->wq_lock);
+ set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+ spin_unlock(&health->wq_lock);
+ cancel_delayed_work_sync(&health->recover_work);
+ cancel_work_sync(&health->work);
+}
+
void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
- flush_work(&health->work);
+ destroy_workqueue(health->wq);
}
int mlx5_health_init(struct mlx5_core_dev *dev)
@@ -316,9 +374,13 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
strcpy(name, "mlx5_health");
strcat(name, dev_name(&dev->pdev->dev));
+ health->wq = create_singlethread_workqueue(name);
kfree(name);
-
+ if (!health->wq)
+ return -ENOMEM;
+ spin_lock_init(&health->wq_lock);
INIT_WORK(&health->work, health_care);
+ INIT_DELAYED_WORK(&health->recover_work, health_recover);
return 0;
}
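mlx5_drain_health_wq() follows the usual drain pattern: take the lock that guards work submission, raise a "no new work" flag, drop the lock, then cancel whatever is already queued, so nothing can be re-armed behind the cancel. A compact pthread-based sketch of the same shape; the struct and names are invented and the workqueue is reduced to a single boolean:

#include <pthread.h>
#include <stdbool.h>

struct health {
        pthread_mutex_t wq_lock;
        bool drop_new_work;   /* mirrors MLX5_DROP_NEW_HEALTH_WORK */
        bool work_queued;     /* stands in for the real workqueue */
};

/* Poll-timer side: only queue new work while draining is not in progress. */
static void queue_health_work(struct health *h)
{
        pthread_mutex_lock(&h->wq_lock);
        if (!h->drop_new_work)
                h->work_queued = true;
        pthread_mutex_unlock(&h->wq_lock);
}

/* Teardown / PCI-error side: block new submissions first, then flush
 * what is already pending (cancel_work_sync() territory in the driver).
 */
static void drain_health_work(struct health *h)
{
        pthread_mutex_lock(&h->wq_lock);
        h->drop_new_work = true;
        pthread_mutex_unlock(&h->wq_lock);

        h->work_queued = false;   /* stand-in for cancelling queued work */
}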
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index d9c3c70b29e4..d5433c49b2b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -844,12 +844,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
struct pci_dev *pdev = dev->pdev;
int err;
- err = mlx5_query_hca_caps(dev);
- if (err) {
- dev_err(&pdev->dev, "query hca failed\n");
- goto out;
- }
-
err = mlx5_query_board_id(dev);
if (err) {
dev_err(&pdev->dev, "query board id failed\n");
@@ -1023,6 +1017,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_start_health_poll(dev);
+ err = mlx5_query_hca_caps(dev);
+ if (err) {
+ dev_err(&pdev->dev, "query hca failed\n");
+ goto err_stop_poll;
+ }
+
if (boot && mlx5_init_once(dev, priv)) {
dev_err(&pdev->dev, "sw objs init failed\n");
goto err_stop_poll;
@@ -1313,10 +1313,16 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
struct mlx5_priv *priv = &dev->priv;
dev_info(&pdev->dev, "%s was called\n", __func__);
+
mlx5_enter_error_state(dev);
mlx5_unload_one(dev, priv, false);
- pci_save_state(pdev);
- mlx5_pci_disable_device(dev);
+ /* When called by the PCI core (state != 0), save the PCI state and drain the health wq */
+ if (state) {
+ pci_save_state(pdev);
+ mlx5_drain_health_wq(dev);
+ mlx5_pci_disable_device(dev);
+ }
+
return state == pci_channel_io_perm_failure ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
@@ -1373,11 +1379,6 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
-void mlx5_disable_device(struct mlx5_core_dev *dev)
-{
- mlx5_pci_err_detected(dev->pdev, 0);
-}
-
static void mlx5_pci_resume(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
@@ -1427,6 +1428,18 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
+void mlx5_disable_device(struct mlx5_core_dev *dev)
+{
+ mlx5_pci_err_detected(dev->pdev, 0);
+}
+
+void mlx5_recover_device(struct mlx5_core_dev *dev)
+{
+ mlx5_pci_disable_device(dev);
+ if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
+ mlx5_pci_resume(dev->pdev);
+}
+
static struct pci_driver mlx5_core_driver = {
.name = DRIVER_NAME,
.id_table = mlx5_core_pci_table,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 3d0cfb9f18f9..187662c8ea96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -83,6 +83,7 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
unsigned long param);
void mlx5_enter_error_state(struct mlx5_core_dev *dev);
void mlx5_disable_device(struct mlx5_core_dev *dev);
+void mlx5_recover_device(struct mlx5_core_dev *dev);
int mlx5_sriov_init(struct mlx5_core_dev *dev);
void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
int mlx5_sriov_attach(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index cc4fd61914d3..a57d5a81eb05 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -209,6 +209,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr)
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
struct page *page;
+ u64 zero_addr = 1;
u64 addr;
int err;
int nid = dev_to_node(&dev->pdev->dev);
@@ -218,26 +219,35 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
mlx5_core_warn(dev, "failed to allocate page\n");
return -ENOMEM;
}
+map:
addr = dma_map_page(&dev->pdev->dev, page, 0,
PAGE_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(&dev->pdev->dev, addr)) {
mlx5_core_warn(dev, "failed dma mapping page\n");
err = -ENOMEM;
- goto out_alloc;
+ goto err_mapping;
}
+
+ /* Firmware doesn't support page with physical address 0 */
+ if (addr == 0) {
+ zero_addr = addr;
+ goto map;
+ }
+
err = insert_page(dev, addr, page, func_id);
if (err) {
mlx5_core_err(dev, "failed to track allocated page\n");
- goto out_mapping;
+ dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
}
- return 0;
-
-out_mapping:
- dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+err_mapping:
+ if (err)
+ __free_page(page);
-out_alloc:
- __free_page(page);
+ if (zero_addr == 0)
+ dma_unmap_page(&dev->pdev->dev, zero_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
return err;
}
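alloc_system_page() now retries the DMA mapping when it comes back as address 0, because the firmware cannot use a page at bus address 0, and it keeps that zero mapping around so it can be released once a usable address has been obtained. A sketch of the retry shape with hypothetical stand-ins for the DMA helpers:

#include <stdint.h>

/* Hypothetical stand-ins for dma_map_page()/dma_unmap_page()/insert_page() */
static uint64_t map_page(void *page)       { return (uint64_t)(uintptr_t)page; }
static void unmap_page(uint64_t addr)      { (void)addr; }
static int insert_page(uint64_t addr, void *page)
{
        (void)addr;
        (void)page;
        return 0;
}

static int register_page(void *page)
{
        uint64_t zero_addr = 1;   /* 1 means "no zero mapping held" */
        uint64_t addr;
        int err;

map:
        addr = map_page(page);
        if (addr == 0) {          /* firmware rejects bus address 0: map again */
                zero_addr = addr;
                goto map;
        }

        err = insert_page(addr, page);
        if (err)
                unmap_page(addr);

        if (zero_addr == 0)       /* release the unusable zero mapping */
                unmap_page(zero_addr);
        return err;
}

The mapping-failure path of the real function (freeing the page when dma_map_page() itself fails) is left out here to keep the retry logic visible.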
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index e742bd4e8894..912f71f84209 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1838,11 +1838,17 @@ static const struct mlxsw_bus mlxsw_pci_bus = {
.cmd_exec = mlxsw_pci_cmd_exec,
};
-static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci)
+static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
+ const struct pci_device_id *id)
{
unsigned long end;
mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT);
+ if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
+ msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
+ return 0;
+ }
+
wmb(); /* reset needs to be written before we read control register */
end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
do {
@@ -1909,7 +1915,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mlxsw_pci->pdev = pdev;
pci_set_drvdata(pdev, mlxsw_pci);
- err = mlxsw_pci_sw_reset(mlxsw_pci);
+ err = mlxsw_pci_sw_reset(mlxsw_pci, id);
if (err) {
dev_err(&pdev->dev, "Software reset failed\n");
goto err_sw_reset;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 78fc557d6dd7..4573da2c5560 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -320,6 +320,8 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
lpm_tree);
if (err)
goto err_left_struct_set;
+ memcpy(&lpm_tree->prefix_usage, prefix_usage,
+ sizeof(lpm_tree->prefix_usage));
return lpm_tree;
err_left_struct_set:
@@ -343,7 +345,8 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
lpm_tree = &mlxsw_sp->router.lpm_trees[i];
- if (lpm_tree->proto == proto &&
+ if (lpm_tree->ref_count != 0 &&
+ lpm_tree->proto == proto &&
mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
prefix_usage))
goto inc_ref_count;
@@ -1820,19 +1823,17 @@ err_fib_entry_insert:
return err;
}
-static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
- struct fib_entry_notifier_info *fen_info)
+static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
+ struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib_entry *fib_entry;
if (mlxsw_sp->router.aborted)
- return 0;
+ return;
fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fen_info);
- if (!fib_entry) {
- dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n");
- return -ENOENT;
- }
+ if (!fib_entry)
+ return;
if (fib_entry->ref_count == 1) {
mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
@@ -1840,7 +1841,6 @@ static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
}
mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
- return 0;
}
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
@@ -1862,7 +1862,8 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
if (err)
return err;
- mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4, 0);
+ mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4,
+ MLXSW_SP_LPM_TREE_MIN);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index c0c23e2f3275..92bda8703f87 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -1088,6 +1088,7 @@ err_port_stp_state_set:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_set:
+ mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
err_port_system_port_mapping_set:
port_not_usable:
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 1e8339a67f6e..32f2a45f4ab2 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -107,4 +107,7 @@ config QEDE
---help---
This enables the support for ...
+config QED_RDMA
+ bool
+
endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index cda0af7fbc20..967acf322c09 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -5,4 +5,4 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
qed_selftest.o qed_dcbx.o qed_debug.o
qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
qed-$(CONFIG_QED_LL2) += qed_ll2.o
-qed-$(CONFIG_INFINIBAND_QEDR) += qed_roce.o
+qed-$(CONFIG_QED_RDMA) += qed_roce.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 82370a1a59ad..0c42c240b5cf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -47,13 +47,8 @@
#define TM_ALIGN BIT(TM_SHIFT)
#define TM_ELEM_SIZE 4
-/* ILT constants */
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */
-#define ILT_DEFAULT_HW_P_SIZE 4
-#else
-#define ILT_DEFAULT_HW_P_SIZE 3
-#endif
+#define ILT_DEFAULT_HW_P_SIZE (IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
@@ -349,14 +344,14 @@ static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
return NULL;
}
-void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
+static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
{
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
p_mgr->srq_count = num_srqs;
}
-u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
+static u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
@@ -1804,8 +1799,8 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
return 0;
}
-void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
- struct qed_rdma_pf_params *p_params)
+static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_pf_params *p_params)
{
u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs;
enum protocol_type proto;
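The qed changes above introduce CONFIG_QED_RDMA and swap preprocessor #if IS_ENABLED(CONFIG_INFINIBAND_QEDR) blocks for run-time IS_ENABLED() checks, so the RDMA-only branches are always parsed and type-checked while the compiler still drops the dead path. A stand-alone illustration; the one-line IS_ENABLED() below is a deliberate simplification of the kernel macro (which also copes with symbols that Kconfig leaves undefined), and CONFIG_QED_RDMA is defined by hand for the sketch:

#define CONFIG_QED_RDMA 1          /* hypothetical: normally set by Kconfig */
#define IS_ENABLED(option) (option)

/* 64 KB ILT pages when RDMA may need 256K tasks, 32 KB otherwise. */
#define ILT_DEFAULT_HW_P_SIZE (IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)

static unsigned int ilt_page_bytes(void)
{
        return 1u << (ILT_DEFAULT_HW_P_SIZE + 12);
}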
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 130da1c0490b..a4789a93b692 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -1190,6 +1190,7 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
if (!dcbx_info)
return -ENOMEM;
+ memset(dcbx_info, 0, sizeof(*dcbx_info));
rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB);
if (rc) {
kfree(dcbx_info);
@@ -1225,6 +1226,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
if (!dcbx_info)
return NULL;
+ memset(dcbx_info, 0, sizeof(*dcbx_info));
if (qed_dcbx_query_params(hwfn, dcbx_info, type)) {
kfree(dcbx_info);
return NULL;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 88e7d5bef909..68f19ca57f96 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -405,7 +405,7 @@ struct phy_defs {
/***************************** Constant Arrays *******************************/
/* Debug arrays */
-static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} };
+static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
/* Chip constant definitions array */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
@@ -4028,10 +4028,10 @@ static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
}
/* Dump MCP Trace */
-enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- bool dump, u32 *num_dumped_dwords)
+static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
{
u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
u32 trace_meta_size_dwords, running_bundle_id, offset = 0;
@@ -4130,10 +4130,10 @@ enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
}
/* Dump GRC FIFO */
-enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- bool dump, u32 *num_dumped_dwords)
+static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
{
u32 offset = 0, dwords_read, size_param_offset;
bool fifo_has_data;
@@ -4192,10 +4192,10 @@ enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
}
/* Dump IGU FIFO */
-enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- bool dump, u32 *num_dumped_dwords)
+static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
{
u32 offset = 0, dwords_read, size_param_offset;
bool fifo_has_data;
@@ -4255,10 +4255,11 @@ enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
}
/* Protection Override dump */
-enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- bool dump, u32 *num_dumped_dwords)
+static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ u32 *num_dumped_dwords)
{
u32 offset = 0, size_param_offset, override_window_dwords;
@@ -6339,10 +6340,11 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
}
/* Wrapper for unifying the idle_chk and mcp_trace api */
-enum dbg_status qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf)
+static enum dbg_status
+qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
{
u32 num_errors, num_warnnings;
@@ -6413,8 +6415,8 @@ static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
#define QED_RESULTS_BUF_MIN_SIZE 16
/* Generic function for decoding debug feature info */
-enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
- enum qed_dbg_features feature_idx)
+static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
+ enum qed_dbg_features feature_idx)
{
struct qed_dbg_feature *feature =
&p_hwfn->cdev->dbg_params.features[feature_idx];
@@ -6480,8 +6482,9 @@ enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
}
/* Generic function for performing the dump of a debug feature. */
-enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- enum qed_dbg_features feature_idx)
+static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_dbg_features feature_idx)
{
struct qed_dbg_feature *feature =
&p_hwfn->cdev->dbg_params.features[feature_idx];
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 754f6a908858..edae5fc5fccd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -497,12 +497,13 @@ int qed_resc_alloc(struct qed_dev *cdev)
if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
PROTOCOLID_ROCE,
- 0) * 2;
+ NULL) * 2;
n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
} else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
num_cons =
qed_cxt_get_proto_cid_count(p_hwfn,
- PROTOCOLID_ISCSI, 0);
+ PROTOCOLID_ISCSI,
+ NULL);
n_eqes += 2 * num_cons;
}
@@ -1422,19 +1423,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
u32 *feat_num = p_hwfn->hw_info.feat_num;
int num_features = 1;
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
- /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide the
- * status blocks equally between L2 / RoCE but with consideration as
- * to how many l2 queues / cnqs we have
- */
- if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+ if (IS_ENABLED(CONFIG_QED_RDMA) &&
+ p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+ /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide
+ * the status blocks equally between L2 / RoCE but with
+ * consideration as to how many l2 queues / cnqs we have.
+ */
num_features++;
feat_num[QED_RDMA_CNQ] =
min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features,
RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
}
-#endif
+
feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
num_features,
RESC_NUM(p_hwfn, QED_L2_QUEUE));
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 02a8be2faed7..63e1a1b0ef8e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -38,6 +38,7 @@
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
+#include "qed_roce.h"
#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred)
@@ -140,11 +141,11 @@ static void qed_ll2_kill_buffers(struct qed_dev *cdev)
qed_ll2_dealloc_buffer(cdev, buffer);
}
-void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- struct qed_ll2_rx_packet *p_pkt,
- struct core_rx_fast_path_cqe *p_cqe,
- bool b_last_packet)
+static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ struct qed_ll2_rx_packet *p_pkt,
+ struct core_rx_fast_path_cqe *p_cqe,
+ bool b_last_packet)
{
u16 packet_length = le16_to_cpu(p_cqe->packet_length);
struct qed_ll2_buffer *buffer = p_pkt->cookie;
@@ -515,7 +516,7 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
return rc;
}
-void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
struct qed_ll2_info *p_ll2_conn = NULL;
struct qed_ll2_rx_packet *p_pkt = NULL;
@@ -537,8 +538,7 @@ void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
if (!p_pkt)
break;
- list_del(&p_pkt->list_entry);
- list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
+ list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
rx_buf_addr = p_pkt->rx_buf_addr;
cookie = p_pkt->cookie;
@@ -992,9 +992,8 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
p_posting_packet = list_first_entry(&p_rx->posting_descq,
struct qed_ll2_rx_packet,
list_entry);
- list_del(&p_posting_packet->list_entry);
- list_add_tail(&p_posting_packet->list_entry,
- &p_rx->active_descq);
+ list_move_tail(&p_posting_packet->list_entry,
+ &p_rx->active_descq);
b_notify_fw = true;
}
@@ -1123,9 +1122,6 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
DMA_REGPAIR_LE(start_bd->addr, first_frag);
start_bd->nbytes = cpu_to_le16(first_frag_len);
- SET_FIELD(start_bd->bd_flags.as_bitfield, CORE_TX_BD_FLAGS_ROCE_FLAV,
- type);
-
DP_VERBOSE(p_hwfn,
(NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
"LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
@@ -1188,8 +1184,7 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
if (!p_pkt)
break;
- list_del(&p_pkt->list_entry);
- list_add_tail(&p_pkt->list_entry, &p_tx->active_descq);
+ list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
}
SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index 80a5dc2d652d..4e3d62a16cab 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -293,24 +293,4 @@ void qed_ll2_setup(struct qed_hwfn *p_hwfn,
*/
void qed_ll2_free(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_connections);
-void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t rx_buf_addr,
- u16 data_length,
- u8 data_length_error,
- u16 parse_flags,
- u16 vlan,
- u32 src_mac_addr_hi,
- u16 src_mac_addr_lo, bool b_last_packet);
-void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t first_frag_addr,
- bool b_last_fragment, bool b_last_packet);
-void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t first_frag_addr,
- bool b_last_fragment, bool b_last_packet);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 4ee3151e80c2..c418360ba02a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -33,10 +33,8 @@
#include "qed_hw.h"
#include "qed_selftest.h"
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
#define QED_ROCE_QPS (8192)
#define QED_ROCE_DPIS (8)
-#endif
static char version[] =
"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -682,9 +680,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
enum qed_int_mode int_mode)
{
struct qed_sb_cnt_info sb_cnt_info;
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
- int num_l2_queues;
-#endif
+ int num_l2_queues = 0;
int rc;
int i;
@@ -715,8 +711,9 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
cdev->num_hwfns;
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
- num_l2_queues = 0;
+ if (!IS_ENABLED(CONFIG_QED_RDMA))
+ return 0;
+
for_each_hwfn(cdev, i)
num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
@@ -738,7 +735,6 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
cdev->int_params.rdma_msix_cnt,
cdev->int_params.rdma_msix_base);
-#endif
return 0;
}
@@ -843,18 +839,20 @@ static void qed_update_pf_params(struct qed_dev *cdev,
{
int i;
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
- params->rdma_pf_params.num_qps = QED_ROCE_QPS;
- params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
- /* divide by 3 the MRs to avoid MF ILT overflow */
- params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
- params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
-#endif
for (i = 0; i < cdev->num_hwfns; i++) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
p_hwfn->pf_params = *params;
}
+
+ if (!IS_ENABLED(CONFIG_QED_RDMA))
+ return;
+
+ params->rdma_pf_params.num_qps = QED_ROCE_QPS;
+ params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
+ /* divide by 3 the MRs to avoid MF ILT overflow */
+ params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
+ params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
}
static int qed_slowpath_start(struct qed_dev *cdev,
@@ -880,6 +878,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
}
}
+ cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
rc = qed_nic_setup(cdev);
if (rc)
goto err;
@@ -1432,7 +1431,7 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
return status;
}
-struct qed_selftest_ops qed_selftest_ops_pass = {
+static struct qed_selftest_ops qed_selftest_ops_pass = {
.selftest_memory = &qed_selftest_memory,
.selftest_interrupt = &qed_selftest_interrupt,
.selftest_register = &qed_selftest_register,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 76831a398bed..f3a825a8f8d5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -129,17 +129,12 @@ static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
}
}
-u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
+static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
{
/* First sb id for RoCE is after all the l2 sb */
return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
}
-u32 qed_rdma_query_cau_timer_res(void *rdma_cxt)
-{
- return QED_CAU_DEF_RX_TIMER_RES;
-}
-
static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_rdma_start_in_params *params)
@@ -162,7 +157,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
p_hwfn->p_rdma_info = p_rdma_info;
p_rdma_info->proto = PROTOCOLID_ROCE;
- num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, 0);
+ num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
+ NULL);
p_rdma_info->num_qps = num_cons / 2;
@@ -275,7 +271,7 @@ free_rdma_info:
return rc;
}
-void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
+static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
@@ -527,6 +523,26 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
+static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_hwfn->p_rdma_info->tid_map, itid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ if (rc)
+ goto out;
+
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
+out:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
+ return rc;
+}
+
static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
{
struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
@@ -573,7 +589,7 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
return qed_rdma_start_fw(p_hwfn, params, p_ptt);
}
-int qed_rdma_stop(void *rdma_cxt)
+static int qed_rdma_stop(void *rdma_cxt)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_close_func_ramrod_data *p_ramrod;
@@ -629,8 +645,8 @@ out:
return rc;
}
-int qed_rdma_add_user(void *rdma_cxt,
- struct qed_rdma_add_user_out_params *out_params)
+static int qed_rdma_add_user(void *rdma_cxt,
+ struct qed_rdma_add_user_out_params *out_params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
u32 dpi_start_offset;
@@ -664,7 +680,7 @@ int qed_rdma_add_user(void *rdma_cxt,
return rc;
}
-struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
+static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
@@ -680,7 +696,7 @@ struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
return p_port;
}
-struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
+static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@ -690,7 +706,7 @@ struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
return p_hwfn->p_rdma_info->dev;
}
-void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
+static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@ -701,27 +717,7 @@ void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
-int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
-
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- rc = qed_rdma_bmap_alloc_id(p_hwfn,
- &p_hwfn->p_rdma_info->tid_map, itid);
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
- if (rc)
- goto out;
-
- rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
-out:
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
- return rc;
-}
-
-void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
+static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
{
struct qed_hwfn *p_hwfn;
u16 qz_num;
@@ -816,7 +812,7 @@ static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
return 0;
}
-int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
+static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
u32 returned_id;
@@ -836,7 +832,7 @@ int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
return rc;
}
-void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
+static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@ -873,8 +869,9 @@ qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
return toggle_bit;
}
-int qed_rdma_create_cq(void *rdma_cxt,
- struct qed_rdma_create_cq_in_params *params, u16 *icid)
+static int qed_rdma_create_cq(void *rdma_cxt,
+ struct qed_rdma_create_cq_in_params *params,
+ u16 *icid)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
@@ -957,98 +954,10 @@ err:
return rc;
}
-int qed_rdma_resize_cq(void *rdma_cxt,
- struct qed_rdma_resize_cq_in_params *in_params,
- struct qed_rdma_resize_cq_out_params *out_params)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct rdma_resize_cq_output_params *p_ramrod_res;
- struct rdma_resize_cq_ramrod_data *p_ramrod;
- enum qed_rdma_toggle_bit toggle_bit;
- struct qed_sp_init_data init_data;
- struct qed_spq_entry *p_ent;
- dma_addr_t ramrod_res_phys;
- u8 fw_return_code;
- int rc = -ENOMEM;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
-
- p_ramrod_res =
- (struct rdma_resize_cq_output_params *)
- dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(struct rdma_resize_cq_output_params),
- &ramrod_res_phys, GFP_KERNEL);
- if (!p_ramrod_res) {
- DP_NOTICE(p_hwfn,
- "qed resize cq failed: cannot allocate memory (ramrod)\n");
- return rc;
- }
-
- /* Get SPQ entry */
- memset(&init_data, 0, sizeof(init_data));
- init_data.cid = in_params->icid;
- init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
- init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
-
- rc = qed_sp_init_request(p_hwfn, &p_ent,
- RDMA_RAMROD_RESIZE_CQ,
- p_hwfn->p_rdma_info->proto, &init_data);
- if (rc)
- goto err;
-
- p_ramrod = &p_ent->ramrod.rdma_resize_cq;
-
- p_ramrod->flags = 0;
-
- /* toggle the bit for every resize or create cq for a given icid */
- toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn,
- in_params->icid);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT, toggle_bit);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL,
- in_params->pbl_two_level);
-
- p_ramrod->pbl_log_page_size = in_params->pbl_page_size_log - 12;
- p_ramrod->pbl_num_pages = cpu_to_le16(in_params->pbl_num_pages);
- p_ramrod->max_cqes = cpu_to_le32(in_params->cq_size);
- DMA_REGPAIR_LE(p_ramrod->pbl_addr, in_params->pbl_ptr);
- DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
-
- rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
- if (rc)
- goto err;
-
- if (fw_return_code != RDMA_RETURN_OK) {
- DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
- rc = -EINVAL;
- goto err;
- }
-
- out_params->prod = le32_to_cpu(p_ramrod_res->old_cq_prod);
- out_params->cons = le32_to_cpu(p_ramrod_res->old_cq_cons);
-
- dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(struct rdma_resize_cq_output_params),
- p_ramrod_res, ramrod_res_phys);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Resized CQ, rc = %d\n", rc);
-
- return rc;
-
-err: dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(struct rdma_resize_cq_output_params),
- p_ramrod_res, ramrod_res_phys);
- DP_NOTICE(p_hwfn, "Resized CQ, Failed - rc = %d\n", rc);
-
- return rc;
-}
-
-int qed_rdma_destroy_cq(void *rdma_cxt,
- struct qed_rdma_destroy_cq_in_params *in_params,
- struct qed_rdma_destroy_cq_out_params *out_params)
+static int
+qed_rdma_destroy_cq(void *rdma_cxt,
+ struct qed_rdma_destroy_cq_in_params *in_params,
+ struct qed_rdma_destroy_cq_out_params *out_params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_destroy_cq_output_params *p_ramrod_res;
@@ -1169,7 +1078,7 @@ static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
return flavor;
}
-int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
+static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
u32 responder_icid;
@@ -1793,9 +1702,9 @@ err:
return rc;
}
-int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
- struct qed_rdma_qp *qp,
- struct qed_rdma_query_qp_out_params *out_params)
+static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params)
{
struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
struct roce_query_qp_req_output_params *p_req_ramrod_res;
@@ -1936,7 +1845,7 @@ err_resp:
return rc;
}
-int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
+static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
u32 num_invalidated_mw = 0;
u32 num_bound_mw = 0;
@@ -1985,9 +1894,9 @@ int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
return 0;
}
-int qed_rdma_query_qp(void *rdma_cxt,
- struct qed_rdma_qp *qp,
- struct qed_rdma_query_qp_out_params *out_params)
+static int qed_rdma_query_qp(void *rdma_cxt,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
int rc;
@@ -2022,7 +1931,7 @@ int qed_rdma_query_qp(void *rdma_cxt,
return rc;
}
-int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
+static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
int rc = 0;
@@ -2038,7 +1947,7 @@ int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
return rc;
}
-struct qed_rdma_qp *
+static struct qed_rdma_qp *
qed_rdma_create_qp(void *rdma_cxt,
struct qed_rdma_create_qp_in_params *in_params,
struct qed_rdma_create_qp_out_params *out_params)
@@ -2215,9 +2124,9 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
return rc;
}
-int qed_rdma_modify_qp(void *rdma_cxt,
- struct qed_rdma_qp *qp,
- struct qed_rdma_modify_qp_in_params *params)
+static int qed_rdma_modify_qp(void *rdma_cxt,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_modify_qp_in_params *params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
enum qed_roce_qp_state prev_state;
@@ -2312,8 +2221,9 @@ int qed_rdma_modify_qp(void *rdma_cxt,
return rc;
}
-int qed_rdma_register_tid(void *rdma_cxt,
- struct qed_rdma_register_tid_in_params *params)
+static int
+qed_rdma_register_tid(void *rdma_cxt,
+ struct qed_rdma_register_tid_in_params *params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_register_tid_ramrod_data *p_ramrod;
@@ -2450,7 +2360,7 @@ int qed_rdma_register_tid(void *rdma_cxt,
return rc;
}
-int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
+static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_deregister_tid_ramrod_data *p_ramrod;
@@ -2561,7 +2471,8 @@ void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_rdma_dpm_conf(p_hwfn, p_ptt);
}
-int qed_rdma_start(void *rdma_cxt, struct qed_rdma_start_in_params *params)
+static int qed_rdma_start(void *rdma_cxt,
+ struct qed_rdma_start_in_params *params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct qed_ptt *p_ptt;
@@ -2601,7 +2512,7 @@ static int qed_rdma_init(struct qed_dev *cdev,
return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
}
-void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
+static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@ -2809,11 +2720,6 @@ static int qed_roce_ll2_stop(struct qed_dev *cdev)
struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
int rc;
- if (!cdev) {
- DP_ERR(cdev, "qed roce ll2 stop: invalid cdev\n");
- return -EINVAL;
- }
-
if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) {
DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n");
return -EINVAL;
@@ -2850,7 +2756,7 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev,
int rc;
int i;
- if (!cdev || !pkt || !params) {
+ if (!pkt || !params) {
DP_ERR(cdev,
"roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n",
cdev, pkt, params);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
index 2f091e8a0f40..279f342af8db 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h
@@ -95,26 +95,6 @@ struct qed_rdma_info {
enum protocol_type proto;
};
-struct qed_rdma_resize_cq_in_params {
- u16 icid;
- u32 cq_size;
- bool pbl_two_level;
- u64 pbl_ptr;
- u16 pbl_num_pages;
- u8 pbl_page_size_log;
-};
-
-struct qed_rdma_resize_cq_out_params {
- u32 prod;
- u32 cons;
-};
-
-struct qed_rdma_resize_cnq_in_params {
- u32 cnq_id;
- u32 pbl_page_size_log;
- u64 pbl_ptr;
-};
-
struct qed_rdma_qp {
struct regpair qp_handle;
struct regpair qp_handle_async;
@@ -181,36 +161,55 @@ struct qed_rdma_qp {
dma_addr_t shared_queue_phys_addr;
};
-int
-qed_rdma_add_user(void *rdma_cxt,
- struct qed_rdma_add_user_out_params *out_params);
-int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd);
-int qed_rdma_alloc_tid(void *rdma_cxt, u32 *tid);
-int qed_rdma_deregister_tid(void *rdma_cxt, u32 tid);
-void qed_rdma_free_tid(void *rdma_cxt, u32 tid);
-struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt);
-struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt);
-int
-qed_rdma_register_tid(void *rdma_cxt,
- struct qed_rdma_register_tid_in_params *params);
-void qed_rdma_remove_user(void *rdma_cxt, u16 dpi);
-int qed_rdma_start(void *p_hwfn, struct qed_rdma_start_in_params *params);
-int qed_rdma_stop(void *rdma_cxt);
-u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id);
-u32 qed_rdma_query_cau_timer_res(void *p_hwfn);
-void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);
-void qed_rdma_resc_free(struct qed_hwfn *p_hwfn);
+#if IS_ENABLED(CONFIG_QED_RDMA)
+void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
void qed_async_roce_event(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe);
-int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp);
-int qed_rdma_modify_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
- struct qed_rdma_modify_qp_in_params *params);
-int qed_rdma_query_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
- struct qed_rdma_query_qp_out_params *out_params);
-
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
-void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet);
+void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet);
+void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t rx_buf_addr,
+ u16 data_length,
+ u8 data_length_error,
+ u16 parse_flags,
+ u16 vlan,
+ u32 src_mac_addr_hi,
+ u16 src_mac_addr_lo, bool b_last_packet);
#else
-void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
+static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
+static inline void qed_async_roce_event(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) {}
+static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment,
+ bool b_last_packet) {}
+static inline void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment,
+ bool b_last_packet) {}
+static inline void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t rx_buf_addr,
+ u16 data_length,
+ u8 data_length_error,
+ u16 parse_flags,
+ u16 vlan,
+ u32 src_mac_addr_hi,
+ u16 src_mac_addr_lo,
+ bool b_last_packet) {}
#endif
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 652c90819758..b2c08e4d2a9b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -80,7 +80,6 @@ union ramrod_data {
struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp;
struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req;
struct rdma_create_cq_ramrod_data rdma_create_cq;
- struct rdma_resize_cq_ramrod_data rdma_resize_cq;
struct rdma_destroy_cq_ramrod_data rdma_destroy_cq;
struct rdma_srq_create_ramrod_data rdma_create_srq;
struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index caff41544898..9fbaf9429fd0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -28,9 +28,7 @@
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
#include "qed_roce.h"
-#endif
/***************************************************************************
* Structures & Definitions
@@ -240,11 +238,9 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
switch (p_eqe->protocol_id) {
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
case PROTOCOLID_ROCE:
qed_async_roce_event(p_hwfn, p_eqe);
return 0;
-#endif
case PROTOCOLID_COMMON:
return qed_sriov_eqe_event(p_hwfn,
p_eqe->opcode,
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
index 28dc58919c85..048a230c3ce0 100644
--- a/drivers/net/ethernet/qlogic/qede/Makefile
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -2,4 +2,4 @@ obj-$(CONFIG_QEDE) := qede.o
qede-y := qede_main.o qede_ethtool.o
qede-$(CONFIG_DCB) += qede_dcbnl.o
-qede-$(CONFIG_INFINIBAND_QEDR) += qede_roce.o
+qede-$(CONFIG_QED_RDMA) += qede_roce.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 28c0e9f42c9e..974689a13337 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -348,12 +348,13 @@ bool qede_has_rx_work(struct qede_rx_queue *rxq);
int qede_txq_has_work(struct qede_tx_queue *txq);
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
u8 count);
+void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
#define RX_RING_SIZE_POW 13
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1)
#define NUM_RX_BDS_MIN 128
-#define NUM_RX_BDS_DEF NUM_RX_BDS_MAX
+#define NUM_RX_BDS_DEF ((u16)BIT(10) - 1)
#define TX_RING_SIZE_POW 13
#define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW))
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 25a9b293ee8f..12251a1032d1 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -756,6 +756,8 @@ static void qede_get_channels(struct net_device *dev,
struct qede_dev *edev = netdev_priv(dev);
channels->max_combined = QEDE_MAX_RSS_CNT(edev);
+ channels->max_rx = QEDE_MAX_RSS_CNT(edev);
+ channels->max_tx = QEDE_MAX_RSS_CNT(edev);
channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx -
edev->fp_num_rx;
channels->tx_count = edev->fp_num_tx;
@@ -820,6 +822,13 @@ static int qede_set_channels(struct net_device *dev,
edev->req_queues = count;
edev->req_num_tx = channels->tx_count;
edev->req_num_rx = channels->rx_count;
+ /* Reset the indirection table if rx queue count is updated */
+ if ((edev->req_queues - edev->req_num_tx) != QEDE_RSS_COUNT(edev)) {
+ edev->rss_params_inited &= ~QEDE_RSS_INDIR_INITED;
+ memset(&edev->rss_params.rss_ind_table, 0,
+ sizeof(edev->rss_params.rss_ind_table));
+ }
+
if (netif_running(dev))
qede_reload(edev, NULL, NULL);
@@ -1053,6 +1062,12 @@ static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
struct qede_dev *edev = netdev_priv(dev);
int i;
+ if (edev->dev_info.common.num_hwfns > 1) {
+ DP_INFO(edev,
+ "RSS configuration is not supported for 100G devices\n");
+ return -EOPNOTSUPP;
+ }
+
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
@@ -1184,8 +1199,8 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
}
first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
- dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
- BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
+ dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
txq->sw_tx_cons++;
txq->sw_tx_ring[idx].skb = NULL;
@@ -1199,8 +1214,8 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
struct qede_rx_queue *rxq = NULL;
struct sw_rx_data *sw_rx_data;
union eth_rx_cqe *cqe;
+ int i, rc = 0;
u8 *data_ptr;
- int i;
for_each_queue(i) {
if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
@@ -1219,46 +1234,60 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
* queue and that the loopback traffic is not IP.
*/
for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
- if (qede_has_rx_work(rxq))
+ if (!qede_has_rx_work(rxq)) {
+ usleep_range(100, 200);
+ continue;
+ }
+
+ hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ /* Memory barrier to prevent the CPU from doing speculative
+ * reads of CQE/BD before reading hw_comp_cons. If the CQE is
+ * read before it is written by FW, then FW writes CQE and SB,
+ * and then the CPU reads the hw_comp_cons, it will use an old
+ * CQE.
+ */
+ rmb();
+
+ /* Get the CQE from the completion ring */
+ cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
+
+ /* Get the data from the SW ring */
+ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+ sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+ fp_cqe = &cqe->fast_path_regular;
+ len = le16_to_cpu(fp_cqe->len_on_first_bd);
+ data_ptr = (u8 *)(page_address(sw_rx_data->data) +
+ fp_cqe->placement_offset +
+ sw_rx_data->page_offset);
+ if (ether_addr_equal(data_ptr, edev->ndev->dev_addr) &&
+ ether_addr_equal(data_ptr + ETH_ALEN,
+ edev->ndev->dev_addr)) {
+ for (i = ETH_HLEN; i < len; i++)
+ if (data_ptr[i] != (unsigned char)(i & 0xff)) {
+ rc = -1;
+ break;
+ }
+
+ qede_recycle_rx_bd_ring(rxq, edev, 1);
+ qed_chain_recycle_consumed(&rxq->rx_comp_ring);
break;
- usleep_range(100, 200);
+ }
+
+ DP_INFO(edev, "Not the transmitted packet\n");
+ qede_recycle_rx_bd_ring(rxq, edev, 1);
+ qed_chain_recycle_consumed(&rxq->rx_comp_ring);
}
- if (!qede_has_rx_work(rxq)) {
+ if (i == QEDE_SELFTEST_POLL_COUNT) {
DP_NOTICE(edev, "Failed to receive the traffic\n");
return -1;
}
- hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
- sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+ qede_update_rx_prod(edev, rxq);
- /* Memory barrier to prevent the CPU from doing speculative reads of CQE
- * / BD before reading hw_comp_cons. If the CQE is read before it is
- * written by FW, then FW writes CQE and SB, and then the CPU reads the
- * hw_comp_cons, it will use an old CQE.
- */
- rmb();
-
- /* Get the CQE from the completion ring */
- cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
-
- /* Get the data from the SW ring */
- sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
- sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
- fp_cqe = &cqe->fast_path_regular;
- len = le16_to_cpu(fp_cqe->len_on_first_bd);
- data_ptr = (u8 *)(page_address(sw_rx_data->data) +
- fp_cqe->placement_offset + sw_rx_data->page_offset);
- for (i = ETH_HLEN; i < len; i++)
- if (data_ptr[i] != (unsigned char)(i & 0xff)) {
- DP_NOTICE(edev, "Loopback test failed\n");
- qede_recycle_rx_bd_ring(rxq, edev, 1);
- return -1;
- }
-
- qede_recycle_rx_bd_ring(rxq, edev, 1);
-
- return 0;
+ return rc;
}
static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 343038ca047d..7def29aaf65c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -313,8 +313,8 @@ static int qede_free_tx_pkt(struct qede_dev *edev,
split_bd_len = BD_UNMAP_LEN(split);
bds_consumed++;
}
- dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
- BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+ dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
/* Unmap the data of the skb frags */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
@@ -359,8 +359,8 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
nbd--;
}
- dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
- BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+ dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
/* Unmap the data of the skb frags */
for (i = 0; i < nbd; i++) {
@@ -943,8 +943,7 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
return 0;
}
-static inline void qede_update_rx_prod(struct qede_dev *edev,
- struct qede_rx_queue *rxq)
+void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
@@ -2941,7 +2940,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
txq->num_tx_buffers = edev->q_num_tx_buffers;
/* Allocate the parallel driver ring for Tx buffers */
- size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX;
+ size = sizeof(*txq->sw_tx_ring) * TX_RING_SIZE;
txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
if (!txq->sw_tx_ring) {
DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
@@ -2952,7 +2951,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
- NUM_TX_BDS_MAX,
+ TX_RING_SIZE,
sizeof(*p_virt), &txq->tx_pbl);
if (rc)
goto err;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index e97968ed4b8f..6fb3bee904d3 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -1021,14 +1021,18 @@ void emac_mac_down(struct emac_adapter *adpt)
napi_disable(&adpt->rx_q.napi);
phy_stop(adpt->phydev);
- phy_disconnect(adpt->phydev);
- /* disable mac irq */
+ /* Interrupts must be disabled before the PHY is disconnected, to
+ * avoid a race condition where adjust_link is null when we get
+ * an interrupt.
+ */
writel(DIS_INT, adpt->base + EMAC_INT_STATUS);
writel(0, adpt->base + EMAC_INT_MASK);
synchronize_irq(adpt->irq.irq);
free_irq(adpt->irq.irq, &adpt->irq);
+ phy_disconnect(adpt->phydev);
+
emac_mac_reset(adpt);
emac_tx_q_descs_free(adpt);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 9bf3b2b82e95..4fede4b86538 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -575,6 +575,7 @@ static const struct of_device_id emac_dt_match[] = {
},
{}
};
+MODULE_DEVICE_TABLE(of, emac_dt_match);
#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id emac_acpi_match[] = {
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index e55638c7505a..bf000d819a21 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -8273,7 +8273,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((sizeof(dma_addr_t) > 4) &&
(use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) &&
tp->mac_version >= RTL_GIGA_MAC_VER_18)) &&
- !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
/* CPlusCmd Dual Access Cycle is only needed for non-PCIe */
if (!pci_is_pcie(pdev))
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 5424fb341613..24b746406bc7 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1471,7 +1471,7 @@ static int rocker_world_check_init(struct rocker_port *rocker_port)
if (rocker->wops) {
if (rocker->wops->mode != mode) {
dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n");
- return err;
+ return -EINVAL;
}
return 0;
}
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 431a60804272..4ca461322d60 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1493,8 +1493,6 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);
found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);
- if (found)
- *index = found->index;
updating = found && adding;
removing = found && !adding;
@@ -1508,9 +1506,11 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
resolved = false;
} else if (removing) {
ofdpa_neigh_del(trans, found);
+ *index = found->index;
} else if (updating) {
ofdpa_neigh_update(found, trans, NULL, false);
resolved = !is_zero_ether_addr(found->eth_dst);
+ *index = found->index;
} else {
err = -ENOENT;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 4ec7397e7fb3..a1b17cd7886b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -347,10 +347,9 @@ static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
for (i = 0; i < size; i++) {
- if (p->des0)
- pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
- i, (unsigned int)virt_to_phys(p),
- p->des0, p->des1, p->des2, p->des3);
+ pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, (unsigned int)virt_to_phys(p),
+ p->des0, p->des1, p->des2, p->des3);
p++;
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 8dc9056c1001..b15fc55f1b96 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -145,7 +145,7 @@ int stmmac_mdio_register(struct net_device *ndev);
int stmmac_mdio_reset(struct mii_bus *mii);
void stmmac_set_ethtool_ops(struct net_device *netdev);
-int stmmac_ptp_register(struct stmmac_priv *priv);
+void stmmac_ptp_register(struct stmmac_priv *priv);
void stmmac_ptp_unregister(struct stmmac_priv *priv);
int stmmac_resume(struct device *dev);
int stmmac_suspend(struct device *dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6c85b61aaa0b..48e71fad4210 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -676,7 +676,9 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
priv->hwts_tx_en = 0;
priv->hwts_rx_en = 0;
- return stmmac_ptp_register(priv);
+ stmmac_ptp_register(priv);
+
+ return 0;
}
static void stmmac_release_ptp(struct stmmac_priv *priv)
@@ -1710,7 +1712,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
if (init_ptp) {
ret = stmmac_init_ptp(priv);
if (ret)
- netdev_warn(priv->dev, "PTP support cannot init.\n");
+ netdev_warn(priv->dev, "fail to init PTP.\n");
}
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 289d52725a6c..1477471f8d44 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -177,7 +177,7 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = {
* Description: this function will register the ptp clock driver
* to kernel. It also does some house keeping work.
*/
-int stmmac_ptp_register(struct stmmac_priv *priv)
+void stmmac_ptp_register(struct stmmac_priv *priv)
{
spin_lock_init(&priv->ptp_lock);
priv->ptp_clock_ops = stmmac_ptp_clock_ops;
@@ -185,15 +185,10 @@ int stmmac_ptp_register(struct stmmac_priv *priv)
priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops,
priv->device);
if (IS_ERR(priv->ptp_clock)) {
+ netdev_err(priv->dev, "ptp_clock_register failed\n");
priv->ptp_clock = NULL;
- return PTR_ERR(priv->ptp_clock);
- }
-
- spin_lock_init(&priv->ptp_lock);
-
- netdev_dbg(priv->dev, "Added PTP HW clock successfully\n");
-
- return 0;
+ } else if (priv->ptp_clock)
+ netdev_info(priv->dev, "registered PTP clock\n");
}
/**
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index 0d0053128542..5eedac495077 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -982,11 +982,13 @@ static int dwceqos_mii_probe(struct net_device *ndev)
if (netif_msg_probe(lp))
phy_attached_info(phydev);
- phydev->supported &= PHY_GBIT_FEATURES;
+ phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause;
lp->link = 0;
lp->speed = 0;
lp->duplex = DUPLEX_UNKNOWN;
+ lp->flowcontrol.autoneg = AUTONEG_ENABLE;
return 0;
}
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 3c20e87bb761..42edd7b7902f 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -58,9 +58,9 @@ struct geneve_dev {
struct hlist_node hlist; /* vni hash table */
struct net *net; /* netns for packet i/o */
struct net_device *dev; /* netdev for geneve tunnel */
- struct geneve_sock *sock4; /* IPv4 socket used for geneve tunnel */
+ struct geneve_sock __rcu *sock4; /* IPv4 socket used for geneve tunnel */
#if IS_ENABLED(CONFIG_IPV6)
- struct geneve_sock *sock6; /* IPv6 socket used for geneve tunnel */
+ struct geneve_sock __rcu *sock6; /* IPv6 socket used for geneve tunnel */
#endif
u8 vni[3]; /* virtual network ID for tunnel */
u8 ttl; /* TTL override */
@@ -453,7 +453,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk,
skb_gro_pull(skb, gh_len);
skb_gro_postpull_rcsum(skb, gh, gh_len);
- pp = ptype->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
flush = 0;
out_unlock:
@@ -543,9 +543,19 @@ static void __geneve_sock_release(struct geneve_sock *gs)
static void geneve_sock_release(struct geneve_dev *geneve)
{
- __geneve_sock_release(geneve->sock4);
+ struct geneve_sock *gs4 = rtnl_dereference(geneve->sock4);
#if IS_ENABLED(CONFIG_IPV6)
- __geneve_sock_release(geneve->sock6);
+ struct geneve_sock *gs6 = rtnl_dereference(geneve->sock6);
+
+ rcu_assign_pointer(geneve->sock6, NULL);
+#endif
+
+ rcu_assign_pointer(geneve->sock4, NULL);
+ synchronize_net();
+
+ __geneve_sock_release(gs4);
+#if IS_ENABLED(CONFIG_IPV6)
+ __geneve_sock_release(gs6);
#endif
}
@@ -586,10 +596,10 @@ out:
gs->flags = geneve->flags;
#if IS_ENABLED(CONFIG_IPV6)
if (ipv6)
- geneve->sock6 = gs;
+ rcu_assign_pointer(geneve->sock6, gs);
else
#endif
- geneve->sock4 = gs;
+ rcu_assign_pointer(geneve->sock4, gs);
hash = geneve_net_vni_hash(geneve->vni);
hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]);
@@ -603,9 +613,7 @@ static int geneve_open(struct net_device *dev)
bool metadata = geneve->collect_md;
int ret = 0;
- geneve->sock4 = NULL;
#if IS_ENABLED(CONFIG_IPV6)
- geneve->sock6 = NULL;
if (ipv6 || metadata)
ret = geneve_sock_add(geneve, true);
#endif
@@ -720,6 +728,9 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
struct rtable *rt = NULL;
__u8 tos;
+ if (!rcu_dereference(geneve->sock4))
+ return ERR_PTR(-EIO);
+
memset(fl4, 0, sizeof(*fl4));
fl4->flowi4_mark = skb->mark;
fl4->flowi4_proto = IPPROTO_UDP;
@@ -772,11 +783,15 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
{
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct geneve_dev *geneve = netdev_priv(dev);
- struct geneve_sock *gs6 = geneve->sock6;
struct dst_entry *dst = NULL;
struct dst_cache *dst_cache;
+ struct geneve_sock *gs6;
__u8 prio;
+ gs6 = rcu_dereference(geneve->sock6);
+ if (!gs6)
+ return ERR_PTR(-EIO);
+
memset(fl6, 0, sizeof(*fl6));
fl6->flowi6_mark = skb->mark;
fl6->flowi6_proto = IPPROTO_UDP;
@@ -842,7 +857,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct ip_tunnel_info *info)
{
struct geneve_dev *geneve = netdev_priv(dev);
- struct geneve_sock *gs4 = geneve->sock4;
+ struct geneve_sock *gs4;
struct rtable *rt = NULL;
const struct iphdr *iip; /* interior IP header */
int err = -EINVAL;
@@ -853,6 +868,10 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
u32 flags = geneve->flags;
+ gs4 = rcu_dereference(geneve->sock4);
+ if (!gs4)
+ goto tx_error;
+
if (geneve->collect_md) {
if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
netdev_dbg(dev, "no tunnel metadata\n");
@@ -932,9 +951,9 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct ip_tunnel_info *info)
{
struct geneve_dev *geneve = netdev_priv(dev);
- struct geneve_sock *gs6 = geneve->sock6;
struct dst_entry *dst = NULL;
const struct iphdr *iip; /* interior IP header */
+ struct geneve_sock *gs6;
int err = -EINVAL;
struct flowi6 fl6;
__u8 prio, ttl;
@@ -943,6 +962,10 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
u32 flags = geneve->flags;
+ gs6 = rcu_dereference(geneve->sock6);
+ if (!gs6)
+ goto tx_error;
+
if (geneve->collect_md) {
if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
netdev_dbg(dev, "no tunnel metadata\n");
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index f0919bd3a563..f6382150b16a 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -447,7 +447,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
* Setup the sendside checksum offload only if this is not a
* GSO packet.
*/
- if (skb_is_gso(skb)) {
+ if ((net_trans_info & (INFO_TCP | INFO_UDP)) && skb_is_gso(skb)) {
struct ndis_tcp_lso_info *lso_info;
rndis_msg_size += NDIS_LSO_PPI_SIZE;
@@ -607,15 +607,18 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
packet->total_data_buflen);
skb->protocol = eth_type_trans(skb, net);
- if (csum_info) {
- /* We only look at the IP checksum here.
- * Should we be dropping the packet if checksum
- * failed? How do we deal with other checksums - TCP/UDP?
- */
- if (csum_info->receive.ip_checksum_succeeded)
+
+ /* skb is already created with CHECKSUM_NONE */
+ skb_checksum_none_assert(skb);
+
+ /*
+ * In Linux, the IP checksum is always checked.
+ * Do L4 checksum offload if enabled and present.
+ */
+ if (csum_info && (net->features & NETIF_F_RXCSUM)) {
+ if (csum_info->receive.tcp_checksum_succeeded ||
+ csum_info->receive.udp_checksum_succeeded)
skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- skb->ip_summed = CHECKSUM_NONE;
}
if (vlan_tci & VLAN_TAG_PRESENT)
@@ -696,12 +699,8 @@ int netvsc_recv_callback(struct hv_device *device_obj,
static void netvsc_get_drvinfo(struct net_device *net,
struct ethtool_drvinfo *info)
{
- struct net_device_context *net_device_ctx = netdev_priv(net);
- struct hv_device *dev = net_device_ctx->device_ctx;
-
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
- strlcpy(info->bus_info, vmbus_dev_name(dev), sizeof(info->bus_info));
}
static void netvsc_get_channels(struct net_device *net,
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 3ea47f28e143..d2e61e002926 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -397,6 +397,14 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
+static bool send_sci(const struct macsec_secy *secy)
+{
+ const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+
+ return tx_sc->send_sci ||
+ (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
+}
+
static sci_t make_sci(u8 *addr, __be16 port)
{
sci_t sci;
@@ -437,15 +445,15 @@ static unsigned int macsec_extra_len(bool sci_present)
/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
- const struct macsec_secy *secy, u32 pn)
+ const struct macsec_secy *secy, u32 pn,
+ bool sci_present)
{
const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
- memset(&h->tci_an, 0, macsec_sectag_len(tx_sc->send_sci));
+ memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
h->eth.h_proto = htons(ETH_P_MACSEC);
- if (tx_sc->send_sci ||
- (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb)) {
+ if (sci_present) {
h->tci_an |= MACSEC_TCI_SC;
memcpy(&h->secure_channel_id, &secy->sci,
sizeof(h->secure_channel_id));
@@ -650,6 +658,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
struct macsec_tx_sc *tx_sc;
struct macsec_tx_sa *tx_sa;
struct macsec_dev *macsec = macsec_priv(dev);
+ bool sci_present;
u32 pn;
secy = &macsec->secy;
@@ -687,7 +696,8 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
unprotected_len = skb->len;
eth = eth_hdr(skb);
- hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(tx_sc->send_sci));
+ sci_present = send_sci(secy);
+ hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(sci_present));
memmove(hh, eth, 2 * ETH_ALEN);
pn = tx_sa_update_pn(tx_sa, secy);
@@ -696,7 +706,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
kfree_skb(skb);
return ERR_PTR(-ENOLINK);
}
- macsec_fill_sectag(hh, secy, pn);
+ macsec_fill_sectag(hh, secy, pn, sci_present);
macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
skb_put(skb, secy->icv_len);
@@ -726,10 +736,10 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
skb_to_sgvec(skb, sg, 0, skb->len);
if (tx_sc->encrypt) {
- int len = skb->len - macsec_hdr_len(tx_sc->send_sci) -
+ int len = skb->len - macsec_hdr_len(sci_present) -
secy->icv_len;
aead_request_set_crypt(req, sg, sg, len, iv);
- aead_request_set_ad(req, macsec_hdr_len(tx_sc->send_sci));
+ aead_request_set_ad(req, macsec_hdr_len(sci_present));
} else {
aead_request_set_crypt(req, sg, sg, 0, iv);
aead_request_set_ad(req, skb->len - secy->icv_len);
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index f279a897a5c7..a52b560e428b 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -42,19 +42,24 @@
#define AT803X_MMD_ACCESS_CONTROL 0x0D
#define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E
#define AT803X_FUNC_DATA 0x4003
+#define AT803X_REG_CHIP_CONFIG 0x1f
+#define AT803X_BT_BX_REG_SEL 0x8000
#define AT803X_DEBUG_ADDR 0x1D
#define AT803X_DEBUG_DATA 0x1E
+#define AT803X_MODE_CFG_MASK 0x0F
+#define AT803X_MODE_CFG_SGMII 0x01
+
+#define AT803X_PSSR 0x11 /*PHY-Specific Status Register*/
+#define AT803X_PSSR_MR_AN_COMPLETE 0x0200
+
#define AT803X_DEBUG_REG_0 0x00
#define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15)
#define AT803X_DEBUG_REG_5 0x05
#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
-#define AT803X_REG_CHIP_CONFIG 0x1f
-#define AT803X_BT_BX_REG_SEL 0x8000
-
#define ATH8030_PHY_ID 0x004dd076
#define ATH8031_PHY_ID 0x004dd074
#define ATH8035_PHY_ID 0x004dd072
@@ -209,7 +214,6 @@ static int at803x_suspend(struct phy_device *phydev)
{
int value;
int wol_enabled;
- int ccr;
mutex_lock(&phydev->lock);
@@ -225,16 +229,6 @@ static int at803x_suspend(struct phy_device *phydev)
phy_write(phydev, MII_BMCR, value);
- if (phydev->interface != PHY_INTERFACE_MODE_SGMII)
- goto done;
-
- /* also power-down SGMII interface */
- ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
- phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL);
- phy_write(phydev, MII_BMCR, phy_read(phydev, MII_BMCR) | BMCR_PDOWN);
- phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL);
-
-done:
mutex_unlock(&phydev->lock);
return 0;
@@ -243,7 +237,6 @@ done:
static int at803x_resume(struct phy_device *phydev)
{
int value;
- int ccr;
mutex_lock(&phydev->lock);
@@ -251,17 +244,6 @@ static int at803x_resume(struct phy_device *phydev)
value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
phy_write(phydev, MII_BMCR, value);
- if (phydev->interface != PHY_INTERFACE_MODE_SGMII)
- goto done;
-
- /* also power-up SGMII interface */
- ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
- phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL);
- value = phy_read(phydev, MII_BMCR) & ~(BMCR_PDOWN | BMCR_ISOLATE);
- phy_write(phydev, MII_BMCR, value);
- phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL);
-
-done:
mutex_unlock(&phydev->lock);
return 0;
@@ -381,6 +363,36 @@ static void at803x_link_change_notify(struct phy_device *phydev)
}
}
+static int at803x_aneg_done(struct phy_device *phydev)
+{
+ int ccr;
+
+ int aneg_done = genphy_aneg_done(phydev);
+ if (aneg_done != BMSR_ANEGCOMPLETE)
+ return aneg_done;
+
+ /*
+ * In SGMII mode, if the copper-side autoneg completed successfully,
+ * also check the SGMII-side autoneg result.
+ */
+ ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
+ if ((ccr & AT803X_MODE_CFG_MASK) != AT803X_MODE_CFG_SGMII)
+ return aneg_done;
+
+ /* switch to SGMII/fiber page */
+ phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL);
+
+ /* check if the SGMII link is OK. */
+ if (!(phy_read(phydev, AT803X_PSSR) & AT803X_PSSR_MR_AN_COMPLETE)) {
+ pr_warn("803x_aneg_done: SGMII link is not ok\n");
+ aneg_done = 0;
+ }
+ /* switch back to copper page */
+ phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL);
+
+ return aneg_done;
+}
+
static struct phy_driver at803x_driver[] = {
{
/* ATHEROS 8035 */
@@ -432,6 +444,7 @@ static struct phy_driver at803x_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
+ .aneg_done = at803x_aneg_done,
.ack_interrupt = &at803x_ack_interrupt,
.config_intr = &at803x_config_intr,
} };
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 03d54c4adc88..800b39f06279 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -19,6 +19,7 @@
#define TI_DP83848C_PHY_ID 0x20005ca0
#define NS_DP83848C_PHY_ID 0x20005c90
#define TLK10X_PHY_ID 0x2000a210
+#define TI_DP83822_PHY_ID 0x2000a240
/* Registers */
#define DP83848_MICR 0x11 /* MII Interrupt Control Register */
@@ -77,6 +78,7 @@ static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
{ TI_DP83848C_PHY_ID, 0xfffffff0 },
{ NS_DP83848C_PHY_ID, 0xfffffff0 },
{ TLK10X_PHY_ID, 0xfffffff0 },
+ { TI_DP83822_PHY_ID, 0xfffffff0 },
{ }
};
MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
@@ -105,6 +107,7 @@ static struct phy_driver dp83848_driver[] = {
DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"),
DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"),
DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"),
+ DP83848_PHY_DRIVER(TI_DP83822_PHY_ID, "TI DP83822 10/100 Mbps PHY"),
};
module_phy_driver(dp83848_driver);
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index f79eb12c326a..125cff57c759 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -433,13 +433,13 @@ int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
mutex_lock(&dev->phy_mutex);
do {
ret = asix_set_sw_mii(dev, 0);
- if (ret == -ENODEV)
+ if (ret == -ENODEV || ret == -ETIMEDOUT)
break;
usleep_range(1000, 1100);
ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
0, 0, 1, &smsr, 0);
} while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
- if (ret == -ENODEV) {
+ if (ret == -ENODEV || ret == -ETIMEDOUT) {
mutex_unlock(&dev->phy_mutex);
return ret;
}
@@ -497,13 +497,13 @@ int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc)
mutex_lock(&dev->phy_mutex);
do {
ret = asix_set_sw_mii(dev, 1);
- if (ret == -ENODEV)
+ if (ret == -ENODEV || ret == -ETIMEDOUT)
break;
usleep_range(1000, 1100);
ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
0, 0, 1, &smsr, 1);
} while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
- if (ret == -ENODEV) {
+ if (ret == -ENODEV || ret == -ETIMEDOUT) {
mutex_unlock(&dev->phy_mutex);
return ret;
}
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index 5662babf0583..3e37724d30ae 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -151,7 +151,7 @@ kalmia_bind(struct usbnet *dev, struct usb_interface *intf)
status = kalmia_init_and_get_ethernet_addr(dev, ethernet_addr);
- if (status < 0) {
+ if (status) {
usb_set_intfdata(intf, NULL);
usb_driver_release_interface(driver_of(intf), intf);
return status;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index b5554f2ebee4..ef83ae3b0a44 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2279,6 +2279,7 @@ vmxnet3_set_mc(struct net_device *netdev)
&adapter->shared->devRead.rxFilterConf;
u8 *new_table = NULL;
dma_addr_t new_table_pa = 0;
+ bool new_table_pa_valid = false;
u32 new_mode = VMXNET3_RXM_UCAST;
if (netdev->flags & IFF_PROMISC) {
@@ -2307,13 +2308,15 @@ vmxnet3_set_mc(struct net_device *netdev)
new_table,
sz,
PCI_DMA_TODEVICE);
+ if (!dma_mapping_error(&adapter->pdev->dev,
+ new_table_pa)) {
+ new_mode |= VMXNET3_RXM_MCAST;
+ new_table_pa_valid = true;
+ rxConf->mfTablePA = cpu_to_le64(
+ new_table_pa);
+ }
}
-
- if (!dma_mapping_error(&adapter->pdev->dev,
- new_table_pa)) {
- new_mode |= VMXNET3_RXM_MCAST;
- rxConf->mfTablePA = cpu_to_le64(new_table_pa);
- } else {
+ if (!new_table_pa_valid) {
netdev_info(netdev,
"failed to copy mcast list, setting ALL_MULTI\n");
new_mode |= VMXNET3_RXM_ALL_MULTI;
@@ -2338,7 +2341,7 @@ vmxnet3_set_mc(struct net_device *netdev)
VMXNET3_CMD_UPDATE_MAC_FILTERS);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
- if (new_table_pa)
+ if (new_table_pa_valid)
dma_unmap_single(&adapter->pdev->dev, new_table_pa,
rxConf->mfTableLen, PCI_DMA_TODEVICE);
kfree(new_table);
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 85c271c70d42..820de6a9ddde 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -956,6 +956,7 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
if (skb->pkt_type == PACKET_LOOPBACK) {
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
+ IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
skb->pkt_type = PACKET_HOST;
goto out;
}
@@ -996,6 +997,7 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
{
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
+ IPCB(skb)->flags |= IPSKB_L3SLAVE;
/* loopback traffic; do not push through packet taps again.
* Reset pkt_type for upper layers to process skb
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index e7d16687538b..f3c2fa3ab0d5 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -583,7 +583,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
}
}
- pp = eth_gro_receive(head, skb);
+ pp = call_gro_receive(eth_gro_receive, head, skb);
flush = 0;
out:
@@ -943,17 +943,20 @@ static bool vxlan_snoop(struct net_device *dev,
static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
{
struct vxlan_dev *vxlan;
+ struct vxlan_sock *sock4;
+ struct vxlan_sock *sock6 = NULL;
unsigned short family = dev->default_dst.remote_ip.sa.sa_family;
+ sock4 = rtnl_dereference(dev->vn4_sock);
+
/* The vxlan_sock is only used by dev, leaving group has
* no effect on other vxlan devices.
*/
- if (family == AF_INET && dev->vn4_sock &&
- atomic_read(&dev->vn4_sock->refcnt) == 1)
+ if (family == AF_INET && sock4 && atomic_read(&sock4->refcnt) == 1)
return false;
#if IS_ENABLED(CONFIG_IPV6)
- if (family == AF_INET6 && dev->vn6_sock &&
- atomic_read(&dev->vn6_sock->refcnt) == 1)
+ sock6 = rtnl_dereference(dev->vn6_sock);
+ if (family == AF_INET6 && sock6 && atomic_read(&sock6->refcnt) == 1)
return false;
#endif
@@ -961,10 +964,12 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
if (!netif_running(vxlan->dev) || vxlan == dev)
continue;
- if (family == AF_INET && vxlan->vn4_sock != dev->vn4_sock)
+ if (family == AF_INET &&
+ rtnl_dereference(vxlan->vn4_sock) != sock4)
continue;
#if IS_ENABLED(CONFIG_IPV6)
- if (family == AF_INET6 && vxlan->vn6_sock != dev->vn6_sock)
+ if (family == AF_INET6 &&
+ rtnl_dereference(vxlan->vn6_sock) != sock6)
continue;
#endif
@@ -1005,22 +1010,25 @@ static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
static void vxlan_sock_release(struct vxlan_dev *vxlan)
{
- bool ipv4 = __vxlan_sock_release_prep(vxlan->vn4_sock);
+ struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
#if IS_ENABLED(CONFIG_IPV6)
- bool ipv6 = __vxlan_sock_release_prep(vxlan->vn6_sock);
+ struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
+
+ rcu_assign_pointer(vxlan->vn6_sock, NULL);
#endif
+ rcu_assign_pointer(vxlan->vn4_sock, NULL);
synchronize_net();
- if (ipv4) {
- udp_tunnel_sock_release(vxlan->vn4_sock->sock);
- kfree(vxlan->vn4_sock);
+ if (__vxlan_sock_release_prep(sock4)) {
+ udp_tunnel_sock_release(sock4->sock);
+ kfree(sock4);
}
#if IS_ENABLED(CONFIG_IPV6)
- if (ipv6) {
- udp_tunnel_sock_release(vxlan->vn6_sock->sock);
- kfree(vxlan->vn6_sock);
+ if (__vxlan_sock_release_prep(sock6)) {
+ udp_tunnel_sock_release(sock6->sock);
+ kfree(sock6);
}
#endif
}
@@ -1036,18 +1044,21 @@ static int vxlan_igmp_join(struct vxlan_dev *vxlan)
int ret = -EINVAL;
if (ip->sa.sa_family == AF_INET) {
+ struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
struct ip_mreqn mreq = {
.imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
.imr_ifindex = ifindex,
};
- sk = vxlan->vn4_sock->sock->sk;
+ sk = sock4->sock->sk;
lock_sock(sk);
ret = ip_mc_join_group(sk, &mreq);
release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
} else {
- sk = vxlan->vn6_sock->sock->sk;
+ struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
+
+ sk = sock6->sock->sk;
lock_sock(sk);
ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
&ip->sin6.sin6_addr);
@@ -1067,18 +1078,21 @@ static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
int ret = -EINVAL;
if (ip->sa.sa_family == AF_INET) {
+ struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
struct ip_mreqn mreq = {
.imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
.imr_ifindex = ifindex,
};
- sk = vxlan->vn4_sock->sock->sk;
+ sk = sock4->sock->sk;
lock_sock(sk);
ret = ip_mc_leave_group(sk, &mreq);
release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
} else {
- sk = vxlan->vn6_sock->sock->sk;
+ struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
+
+ sk = sock6->sock->sk;
lock_sock(sk);
ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
&ip->sin6.sin6_addr);
@@ -1828,11 +1842,15 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
struct dst_cache *dst_cache,
const struct ip_tunnel_info *info)
{
+ struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct dst_entry *ndst;
struct flowi6 fl6;
int err;
+ if (!sock6)
+ return ERR_PTR(-EIO);
+
if (tos && !info)
use_cache = false;
if (use_cache) {
@@ -1850,7 +1868,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
fl6.flowi6_proto = IPPROTO_UDP;
err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
- vxlan->vn6_sock->sock->sk,
+ sock6->sock->sk,
&ndst, &fl6);
if (err < 0)
return ERR_PTR(err);
@@ -1995,9 +2013,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
if (dst->sa.sa_family == AF_INET) {
- if (!vxlan->vn4_sock)
+ struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
+
+ if (!sock4)
goto drop;
- sk = vxlan->vn4_sock->sock->sk;
+ sk = sock4->sock->sk;
rt = vxlan_get_route(vxlan, skb,
rdst ? rdst->remote_ifindex : 0, tos,
@@ -2050,12 +2070,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
src_port, dst_port, xnet, !udp_sum);
#if IS_ENABLED(CONFIG_IPV6)
} else {
+ struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
struct dst_entry *ndst;
u32 rt6i_flags;
- if (!vxlan->vn6_sock)
+ if (!sock6)
goto drop;
- sk = vxlan->vn6_sock->sock->sk;
+ sk = sock6->sock->sk;
ndst = vxlan6_get_route(vxlan, skb,
rdst ? rdst->remote_ifindex : 0, tos,
@@ -2415,9 +2436,10 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
if (ip_tunnel_info_af(info) == AF_INET) {
+ struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
struct rtable *rt;
- if (!vxlan->vn4_sock)
+ if (!sock4)
return -EINVAL;
rt = vxlan_get_route(vxlan, skb, 0, info->key.tos,
info->key.u.ipv4.dst,
@@ -2429,8 +2451,6 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
#if IS_ENABLED(CONFIG_IPV6)
struct dst_entry *ndst;
- if (!vxlan->vn6_sock)
- return -EINVAL;
ndst = vxlan6_get_route(vxlan, skb, 0, info->key.tos,
info->key.label, &info->key.u.ipv6.dst,
&info->key.u.ipv6.src, NULL, info);
@@ -2740,10 +2760,10 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
return PTR_ERR(vs);
#if IS_ENABLED(CONFIG_IPV6)
if (ipv6)
- vxlan->vn6_sock = vs;
+ rcu_assign_pointer(vxlan->vn6_sock, vs);
else
#endif
- vxlan->vn4_sock = vs;
+ rcu_assign_pointer(vxlan->vn4_sock, vs);
vxlan_vs_add_dev(vs, vxlan);
return 0;
}
@@ -2754,9 +2774,9 @@ static int vxlan_sock_add(struct vxlan_dev *vxlan)
bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA;
int ret = 0;
- vxlan->vn4_sock = NULL;
+ RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
#if IS_ENABLED(CONFIG_IPV6)
- vxlan->vn6_sock = NULL;
+ RCU_INIT_POINTER(vxlan->vn6_sock, NULL);
if (ipv6 || metadata)
ret = __vxlan_sock_add(vxlan, true);
#endif
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 33ab3345d333..4e9fe75d7067 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -294,7 +294,7 @@ config FSL_UCC_HDLC
config SLIC_DS26522
tristate "Slic Maxim ds26522 card support"
depends on SPI
- depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
+ depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST
help
This module initializes and configures the slic maxim card
in T1 or E1 mode.
diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c
index d06a887a2352..b776a0ab106c 100644
--- a/drivers/net/wan/slic_ds26522.c
+++ b/drivers/net/wan/slic_ds26522.c
@@ -223,12 +223,19 @@ static int slic_ds26522_probe(struct spi_device *spi)
return ret;
}
+static const struct spi_device_id slic_ds26522_id[] = {
+ { .name = "ds26522" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(spi, slic_ds26522_id);
+
static const struct of_device_id slic_ds26522_match[] = {
{
.compatible = "maxim,ds26522",
},
{},
};
+MODULE_DEVICE_TABLE(of, slic_ds26522_match);
static struct spi_driver slic_ds26522_driver = {
.driver = {
@@ -239,6 +246,7 @@ static struct spi_driver slic_ds26522_driver = {
},
.probe = slic_ds26522_probe,
.remove = slic_ds26522_remove,
+ .id_table = slic_ds26522_id,
};
static int __init slic_ds26522_init(void)
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index dda49af1eb74..521f1c55c19e 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -450,6 +450,7 @@ struct ath10k_debug {
u32 pktlog_filter;
u32 reg_addr;
u32 nf_cal_period;
+ void *cal_data;
struct ath10k_fw_crash_data *fw_crash_data;
};
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 832da6ed9f13..82a4c67f3672 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -30,6 +30,8 @@
/* ms */
#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
+#define ATH10K_DEBUG_CAL_DATA_LEN 12064
+
#define ATH10K_FW_CRASH_DUMP_VERSION 1
/**
@@ -1451,56 +1453,51 @@ static const struct file_operations fops_fw_dbglog = {
.llseek = default_llseek,
};
-static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
+static int ath10k_debug_cal_data_fetch(struct ath10k *ar)
{
- struct ath10k *ar = inode->i_private;
- void *buf;
u32 hi_addr;
__le32 addr;
int ret;
- mutex_lock(&ar->conf_mutex);
-
- if (ar->state != ATH10K_STATE_ON &&
- ar->state != ATH10K_STATE_UTF) {
- ret = -ENETDOWN;
- goto err;
- }
+ lockdep_assert_held(&ar->conf_mutex);
- buf = vmalloc(ar->hw_params.cal_data_len);
- if (!buf) {
- ret = -ENOMEM;
- goto err;
- }
+ if (WARN_ON(ar->hw_params.cal_data_len > ATH10K_DEBUG_CAL_DATA_LEN))
+ return -EINVAL;
hi_addr = host_interest_item_address(HI_ITEM(hi_board_data));
ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr));
if (ret) {
- ath10k_warn(ar, "failed to read hi_board_data address: %d\n", ret);
- goto err_vfree;
+ ath10k_warn(ar, "failed to read hi_board_data address: %d\n",
+ ret);
+ return ret;
}
- ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf,
+ ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), ar->debug.cal_data,
ar->hw_params.cal_data_len);
if (ret) {
ath10k_warn(ar, "failed to read calibration data: %d\n", ret);
- goto err_vfree;
+ return ret;
}
- file->private_data = buf;
+ return 0;
+}
- mutex_unlock(&ar->conf_mutex);
+static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
+{
+ struct ath10k *ar = inode->i_private;
- return 0;
+ mutex_lock(&ar->conf_mutex);
-err_vfree:
- vfree(buf);
+ if (ar->state == ATH10K_STATE_ON ||
+ ar->state == ATH10K_STATE_UTF) {
+ ath10k_debug_cal_data_fetch(ar);
+ }
-err:
+ file->private_data = ar;
mutex_unlock(&ar->conf_mutex);
- return ret;
+ return 0;
}
static ssize_t ath10k_debug_cal_data_read(struct file *file,
@@ -1508,18 +1505,16 @@ static ssize_t ath10k_debug_cal_data_read(struct file *file,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
- void *buf = file->private_data;
- return simple_read_from_buffer(user_buf, count, ppos,
- buf, ar->hw_params.cal_data_len);
-}
+ mutex_lock(&ar->conf_mutex);
-static int ath10k_debug_cal_data_release(struct inode *inode,
- struct file *file)
-{
- vfree(file->private_data);
+ count = simple_read_from_buffer(user_buf, count, ppos,
+ ar->debug.cal_data,
+ ar->hw_params.cal_data_len);
- return 0;
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
}
static ssize_t ath10k_write_ani_enable(struct file *file,
@@ -1580,7 +1575,6 @@ static const struct file_operations fops_ani_enable = {
static const struct file_operations fops_cal_data = {
.open = ath10k_debug_cal_data_open,
.read = ath10k_debug_cal_data_read,
- .release = ath10k_debug_cal_data_release,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
@@ -1932,6 +1926,8 @@ void ath10k_debug_stop(struct ath10k *ar)
{
lockdep_assert_held(&ar->conf_mutex);
+ ath10k_debug_cal_data_fetch(ar);
+
/* Must not use _sync to avoid deadlock, we do that in
* ath10k_debug_destroy(). The check for htt_stats_mask is to avoid
* warning from del_timer(). */
@@ -2344,6 +2340,10 @@ int ath10k_debug_create(struct ath10k *ar)
if (!ar->debug.fw_crash_data)
return -ENOMEM;
+ ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
+ if (!ar->debug.cal_data)
+ return -ENOMEM;
+
INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
INIT_LIST_HEAD(&ar->debug.fw_stats.peers);
@@ -2357,6 +2357,9 @@ void ath10k_debug_destroy(struct ath10k *ar)
vfree(ar->debug.fw_crash_data);
ar->debug.fw_crash_data = NULL;
+ vfree(ar->debug.cal_data);
+ ar->debug.cal_data = NULL;
+
ath10k_debug_fw_stats_reset(ar);
kfree(ar->debug.tpc_stats);
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index eab0ab976af2..76eb33679d4b 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -1401,6 +1401,7 @@ static const struct sdio_device_id ath6kl_sdio_devices[] = {
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))},
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))},
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x2))},
+ {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x18))},
{},
};
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index b6f064a8d264..7e27a06e5df1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -33,7 +33,6 @@ struct coeff {
enum ar9003_cal_types {
IQ_MISMATCH_CAL = BIT(0),
- TEMP_COMP_CAL = BIT(1),
};
static void ar9003_hw_setup_calibration(struct ath_hw *ah,
@@ -59,12 +58,6 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah,
/* Kick-off cal */
REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL);
break;
- case TEMP_COMP_CAL:
- ath_dbg(common, CALIBRATE,
- "starting Temperature Compensation Calibration\n");
- REG_SET_BIT(ah, AR_CH0_THERM, AR_CH0_THERM_LOCAL);
- REG_SET_BIT(ah, AR_CH0_THERM, AR_CH0_THERM_START);
- break;
default:
ath_err(common, "Invalid calibration type\n");
break;
@@ -93,8 +86,7 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah,
/*
* Accumulate cal measures for active chains
*/
- if (cur_caldata->calCollect)
- cur_caldata->calCollect(ah);
+ cur_caldata->calCollect(ah);
ah->cal_samples++;
if (ah->cal_samples >= cur_caldata->calNumSamples) {
@@ -107,8 +99,7 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah,
/*
* Process accumulated data
*/
- if (cur_caldata->calPostProc)
- cur_caldata->calPostProc(ah, numChains);
+ cur_caldata->calPostProc(ah, numChains);
/* Calibration has finished. */
caldata->CalValid |= cur_caldata->calType;
@@ -323,16 +314,9 @@ static const struct ath9k_percal_data iq_cal_single_sample = {
ar9003_hw_iqcalibrate
};
-static const struct ath9k_percal_data temp_cal_single_sample = {
- TEMP_COMP_CAL,
- MIN_CAL_SAMPLES,
- PER_MAX_LOG_COUNT,
-};
-
static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
{
ah->iq_caldata.calData = &iq_cal_single_sample;
- ah->temp_caldata.calData = &temp_cal_single_sample;
if (AR_SREV_9300_20_OR_LATER(ah)) {
ah->enabled_cals |= TX_IQ_CAL;
@@ -340,7 +324,7 @@ static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
}
- ah->supp_cals = IQ_MISMATCH_CAL | TEMP_COMP_CAL;
+ ah->supp_cals = IQ_MISMATCH_CAL;
}
#define OFF_UPPER_LT 24
@@ -1399,9 +1383,6 @@ static void ar9003_hw_init_cal_common(struct ath_hw *ah)
INIT_CAL(&ah->iq_caldata);
INSERT_CAL(ah, &ah->iq_caldata);
- INIT_CAL(&ah->temp_caldata);
- INSERT_CAL(ah, &ah->temp_caldata);
-
/* Initialize current pointer to first element in list */
ah->cal_list_curr = ah->cal_list;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 2a5d3ad1169c..9cbca1229bac 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -830,7 +830,6 @@ struct ath_hw {
/* Calibration */
u32 supp_cals;
struct ath9k_cal_list iq_caldata;
- struct ath9k_cal_list temp_caldata;
struct ath9k_cal_list adcgain_caldata;
struct ath9k_cal_list adcdc_caldata;
struct ath9k_cal_list *cal_list;
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 94480123efa3..274dd5a1574a 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -45,7 +45,7 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
skb_trim(skb, le16_to_cpu(local_rx_pd->rx_pkt_length));
ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
- priv->wdev.iftype, 0, false);
+ priv->wdev.iftype, 0, NULL, NULL);
while (!skb_queue_empty(&list)) {
struct rx_packet_hdr *rx_hdr;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 1016628926d2..08d587a342d3 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -238,7 +238,7 @@ struct rtl8xxxu_rxdesc16 {
u32 pattern1match:1;
u32 pattern0match:1;
#endif
- __le32 tsfl;
+ u32 tsfl;
#if 0
u32 bassn:12;
u32 bavld:1;
@@ -368,7 +368,7 @@ struct rtl8xxxu_rxdesc24 {
u32 ldcp:1;
u32 splcp:1;
#endif
- __le32 tsfl;
+ u32 tsfl;
};
struct rtl8xxxu_txdesc32 {
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index df54d27e7851..a793fedc3654 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -1461,7 +1461,9 @@ static int rtl8192eu_active_to_emu(struct rtl8xxxu_priv *priv)
int count, ret = 0;
/* Turn off RF */
- rtl8xxxu_write8(priv, REG_RF_CTRL, 0);
+ val8 = rtl8xxxu_read8(priv, REG_RF_CTRL);
+ val8 &= ~RF_ENABLE;
+ rtl8xxxu_write8(priv, REG_RF_CTRL, val8);
/* Switch DPDT_SEL_P output from register 0x65[2] */
val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
@@ -1593,6 +1595,10 @@ static void rtl8192e_enable_rf(struct rtl8xxxu_priv *priv)
u32 val32;
u8 val8;
+ val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA);
+ val32 |= (BIT(22) | BIT(23));
+ rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32);
+
val8 = rtl8xxxu_read8(priv, REG_GPIO_MUXCFG);
val8 |= BIT(5);
rtl8xxxu_write8(priv, REG_GPIO_MUXCFG, val8);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
index 6c086b5657e9..02b8ddd98a95 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
@@ -1498,6 +1498,10 @@ static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv)
u32 val32;
u8 val8;
+ val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA);
+ val32 |= (BIT(22) | BIT(23));
+ rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32);
+
/*
* No indication anywhere as to what 0x0790 does. The 2 antenna
* vendor code preserves bits 6-7 here.
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index b2d7f6e69667..a5e6ec2152bf 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -5197,7 +5197,12 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
pkt_offset = roundup(pkt_len + drvinfo_sz + desc_shift +
sizeof(struct rtl8xxxu_rxdesc16), 128);
- if (pkt_cnt > 1)
+ /*
+ * Only clone the skb if there's enough data at the end to
+ * at least cover the rx descriptor
+ */
+ if (pkt_cnt > 1 &&
+ urb_len > (pkt_offset + sizeof(struct rtl8xxxu_rxdesc16)))
next_skb = skb_clone(skb, GFP_ATOMIC);
rx_status = IEEE80211_SKB_RXCB(skb);
@@ -5215,7 +5220,7 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats,
rx_desc->rxmcs);
- rx_status->mactime = le32_to_cpu(rx_desc->tsfl);
+ rx_status->mactime = rx_desc->tsfl;
rx_status->flag |= RX_FLAG_MACTIME_START;
if (!rx_desc->swdec)
@@ -5285,7 +5290,7 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats,
rx_desc->rxmcs);
- rx_status->mactime = le32_to_cpu(rx_desc->tsfl);
+ rx_status->mactime = rx_desc->tsfl;
rx_status->flag |= RX_FLAG_MACTIME_START;
if (!rx_desc->swdec)
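
Dropping le32_to_cpu() on tsfl and declaring the field u32 is consistent with the descriptor words being byte-swapped to CPU order in place when the descriptor is first parsed; swapping again would corrupt the timestamp on big-endian hosts. That reading is inferred from the shape of the change, so the sketch below only illustrates the general in-place conversion pattern with made-up names.

#include <linux/kernel.h>
#include <linux/types.h>

/* Hypothetical descriptor: fields are plain u32 because the raw
 * little-endian words get byte-swapped in place before any use. */
struct demo_rxdesc {
	u32 pktlen;
	u32 tsfl;
};

static void demo_parse_rxdesc(struct demo_rxdesc *desc, const void *raw)
{
	const __le32 *src = raw;
	u32 *dst = (u32 *)desc;
	int i;

	/* Convert every 32-bit word exactly once; afterwards desc->tsfl is
	 * already CPU order and must not go through le32_to_cpu() again. */
	for (i = 0; i < sizeof(*desc) / sizeof(u32); i++)
		dst[i] = le32_to_cpu(src[i]);
}
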
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index f95760c13c56..8e7f23c11680 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -111,7 +111,7 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
if (!err)
goto found_alt;
}
- pr_err("Firmware %s not available\n", rtlpriv->cfg->fw_name);
+ pr_err("Selected firmware is not available\n");
rtlpriv->max_fw_size = 0;
return;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
index e7b11b40e68d..f361808def47 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
@@ -86,6 +86,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
u8 tid;
+ char *fw_name;
rtl8188ee_bt_reg_init(hw);
rtlpriv->dm.dm_initialgain_enable = 1;
@@ -169,10 +170,10 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
return 1;
}
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8188efw.bin";
+ fw_name = "rtlwifi/rtl8188efw.bin";
rtlpriv->max_fw_size = 0x8000;
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -284,7 +285,6 @@ static const struct rtl_hal_cfg rtl88ee_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl88e_pci",
- .fw_name = "rtlwifi/rtl8188efw.bin",
.ops = &rtl8188ee_hal_ops,
.mod_params = &rtl88ee_mod_params,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
index 87aa209ae325..8b6e37ce3f66 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
@@ -96,6 +96,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ char *fw_name = "rtlwifi/rtl8192cfwU.bin";
rtl8192ce_bt_reg_init(hw);
@@ -167,15 +168,12 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
}
/* request fw */
- if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
- !IS_92C_SERIAL(rtlhal->version))
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU.bin";
- else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU_B.bin";
+ if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
+ fw_name = "rtlwifi/rtl8192cfwU_B.bin";
rtlpriv->max_fw_size = 0x4000;
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -262,7 +260,6 @@ static const struct rtl_hal_cfg rtl92ce_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl92c_pci",
- .fw_name = "rtlwifi/rtl8192cfw.bin",
.ops = &rtl8192ce_hal_ops,
.mod_params = &rtl92ce_mod_params,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
index 7c6f7f0d18c6..f953320f0e23 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
@@ -59,6 +59,7 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
int err;
+ char *fw_name;
rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
@@ -77,18 +78,18 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
}
if (IS_VENDOR_UMC_A_CUT(rtlpriv->rtlhal.version) &&
!IS_92C_SERIAL(rtlpriv->rtlhal.version)) {
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_A.bin";
+ fw_name = "rtlwifi/rtl8192cufw_A.bin";
} else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlpriv->rtlhal.version)) {
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_B.bin";
+ fw_name = "rtlwifi/rtl8192cufw_B.bin";
} else {
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_TMSC.bin";
+ fw_name = "rtlwifi/rtl8192cufw_TMSC.bin";
}
/* provide name of alternative file */
rtlpriv->cfg->alt_fw_name = "rtlwifi/rtl8192cufw.bin";
- pr_info("Loading firmware %s\n", rtlpriv->cfg->fw_name);
+ pr_info("Loading firmware %s\n", fw_name);
rtlpriv->max_fw_size = 0x4000;
err = request_firmware_nowait(THIS_MODULE, 1,
- rtlpriv->cfg->fw_name, rtlpriv->io.dev,
+ fw_name, rtlpriv->io.dev,
GFP_KERNEL, hw, rtl_fw_cb);
return err;
}
@@ -187,7 +188,6 @@ static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = {
static struct rtl_hal_cfg rtl92cu_hal_cfg = {
.name = "rtl92c_usb",
- .fw_name = "rtlwifi/rtl8192cufw.bin",
.ops = &rtl8192cu_hal_ops,
.mod_params = &rtl92cu_mod_params,
.usb_interface_cfg = &rtl92cu_interface_cfg,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
index 0538a4d09568..1ebfee18882f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
@@ -92,6 +92,7 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
u8 tid;
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ char *fw_name = "rtlwifi/rtl8192defw.bin";
rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
@@ -181,10 +182,10 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->max_fw_size = 0x8000;
pr_info("Driver for Realtek RTL8192DE WLAN interface\n");
- pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
+ pr_info("Loading firmware file %s\n", fw_name);
/* request fw */
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -266,7 +267,6 @@ static const struct rtl_hal_cfg rtl92de_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8192de",
- .fw_name = "rtlwifi/rtl8192defw.bin",
.ops = &rtl8192de_hal_ops,
.mod_params = &rtl92de_mod_params,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
index ac299cbe59b0..46b605de36e7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
@@ -91,6 +91,7 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
int err = 0;
+ char *fw_name;
rtl92ee_bt_reg_init(hw);
rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
@@ -170,11 +171,11 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
}
/* request fw */
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192eefw.bin";
+ fw_name = "rtlwifi/rtl8192eefw.bin";
rtlpriv->max_fw_size = 0x8000;
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -266,7 +267,6 @@ static const struct rtl_hal_cfg rtl92ee_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl92ee_pci",
- .fw_name = "rtlwifi/rtl8192eefw.bin",
.ops = &rtl8192ee_hal_ops,
.mod_params = &rtl92ee_mod_params,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
index 5e8e02d5de8a..3e1eaeac4fdc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
@@ -89,12 +89,13 @@ static void rtl92se_fw_cb(const struct firmware *firmware, void *context)
struct ieee80211_hw *hw = context;
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rt_firmware *pfirmware = NULL;
+ char *fw_name = "rtlwifi/rtl8192sefw.bin";
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
"Firmware callback routine entered!\n");
complete(&rtlpriv->firmware_loading_complete);
if (!firmware) {
- pr_err("Firmware %s not available\n", rtlpriv->cfg->fw_name);
+ pr_err("Firmware %s not available\n", fw_name);
rtlpriv->max_fw_size = 0;
return;
}
@@ -117,6 +118,7 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
int err = 0;
u16 earlyrxthreshold = 7;
+ char *fw_name = "rtlwifi/rtl8192sefw.bin";
rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
@@ -214,9 +216,9 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->max_fw_size = RTL8190_MAX_FIRMWARE_CODE_SIZE*2 +
sizeof(struct fw_hdr);
pr_info("Driver for Realtek RTL8192SE/RTL8191SE\n"
- "Loading firmware %s\n", rtlpriv->cfg->fw_name);
+ "Loading firmware %s\n", fw_name);
/* request fw */
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl92se_fw_cb);
if (err) {
@@ -310,7 +312,6 @@ static const struct rtl_hal_cfg rtl92se_hal_cfg = {
.bar_id = 1,
.write_readback = false,
.name = "rtl92s_pci",
- .fw_name = "rtlwifi/rtl8192sefw.bin",
.ops = &rtl8192se_hal_ops,
.mod_params = &rtl92se_mod_params,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
index 89c828ad89f4..c51a9e8234e9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
@@ -94,6 +94,7 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
int err = 0;
+ char *fw_name = "rtlwifi/rtl8723fw.bin";
rtl8723e_bt_reg_init(hw);
@@ -176,14 +177,12 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
return 1;
}
- if (IS_VENDOR_8723_A_CUT(rtlhal->version))
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8723fw.bin";
- else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version))
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8723fw_B.bin";
+ if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version))
+ fw_name = "rtlwifi/rtl8723fw_B.bin";
rtlpriv->max_fw_size = 0x6000;
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -280,7 +279,6 @@ static const struct rtl_hal_cfg rtl8723e_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8723e_pci",
- .fw_name = "rtlwifi/rtl8723efw.bin",
.ops = &rtl8723e_hal_ops,
.mod_params = &rtl8723e_mod_params,
.maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
index 20b53f035483..847644d1f5f5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
@@ -91,6 +91,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ char *fw_name = "rtlwifi/rtl8723befw.bin";
rtl8723be_bt_reg_init(hw);
rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
@@ -184,8 +185,8 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
}
rtlpriv->max_fw_size = 0x8000;
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -280,7 +281,6 @@ static const struct rtl_hal_cfg rtl8723be_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8723be_pci",
- .fw_name = "rtlwifi/rtl8723befw.bin",
.ops = &rtl8723be_hal_ops,
.mod_params = &rtl8723be_mod_params,
.maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index 22f687b1f133..297938e0effd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -93,6 +93,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ char *fw_name, *wowlan_fw_name;
rtl8821ae_bt_reg_init(hw);
rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
@@ -203,17 +204,17 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
}
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8812aefw.bin";
- rtlpriv->cfg->wowlan_fw_name = "rtlwifi/rtl8812aefw_wowlan.bin";
+ fw_name = "rtlwifi/rtl8812aefw.bin";
+ wowlan_fw_name = "rtlwifi/rtl8812aefw_wowlan.bin";
} else {
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8821aefw.bin";
- rtlpriv->cfg->wowlan_fw_name = "rtlwifi/rtl8821aefw_wowlan.bin";
+ fw_name = "rtlwifi/rtl8821aefw.bin";
+ wowlan_fw_name = "rtlwifi/rtl8821aefw_wowlan.bin";
}
rtlpriv->max_fw_size = 0x8000;
/*load normal firmware*/
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -222,9 +223,9 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
return 1;
}
/*load wowlan firmware*/
- pr_info("Using firmware %s\n", rtlpriv->cfg->wowlan_fw_name);
+ pr_info("Using firmware %s\n", wowlan_fw_name);
err = request_firmware_nowait(THIS_MODULE, 1,
- rtlpriv->cfg->wowlan_fw_name,
+ wowlan_fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_wowlan_fw_cb);
if (err) {
@@ -320,7 +321,6 @@ static const struct rtl_hal_cfg rtl8821ae_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8821ae_pci",
- .fw_name = "rtlwifi/rtl8821aefw.bin",
.ops = &rtl8821ae_hal_ops,
.mod_params = &rtl8821ae_mod_params,
.maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 595f7d5d091a..dafe486f8448 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -2278,9 +2278,7 @@ struct rtl_hal_cfg {
u8 bar_id;
bool write_readback;
char *name;
- char *fw_name;
char *alt_fw_name;
- char *wowlan_fw_name;
struct rtl_hal_ops *ops;
struct rtl_mod_params *mod_params;
struct rtl_hal_usbint_cfg *usb_interface_cfg;
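
The rtlwifi series above moves the firmware file names out of the shared rtl_hal_cfg and into locals handed straight to request_firmware_nowait(), so probing one device no longer rewrites a structure that other devices reference and the fw_name/wowlan_fw_name fields can be dropped; that rationale is inferred from the change itself. A minimal sketch of the call pattern, with hypothetical names and paths:

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/gfp.h>
#include <linux/module.h>

static void demo_fw_cb(const struct firmware *fw, void *context)
{
	struct device *dev = context;

	if (!fw) {
		dev_err(dev, "firmware not available\n");
		return;
	}
	/* ... consume fw->data / fw->size ... */
	release_firmware(fw);
}

static int demo_request_fw(struct device *dev, bool b_cut)
{
	/* Pick the file locally instead of mutating a shared cfg struct. */
	const char *fw_name = b_cut ? "vendor/demo_fw_B.bin"
				    : "vendor/demo_fw.bin";

	dev_info(dev, "Using firmware %s\n", fw_name);
	return request_firmware_nowait(THIS_MODULE, true, fw_name, dev,
				       GFP_KERNEL, dev, demo_fw_cb);
}
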
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index a6e94b1a12cb..47fe7f96a242 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -391,7 +391,6 @@ static void wl1271_remove(struct sdio_func *func)
pm_runtime_get_noresume(&func->dev);
platform_device_unregister(glue->core);
- kfree(glue);
}
#ifdef CONFIG_PM
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 8b2b740d6679..124c2432ac9c 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -89,7 +89,7 @@ config NVDIMM_PFN
Select Y if unsure
config NVDIMM_DAX
- tristate "NVDIMM DAX: Raw access to persistent memory"
+ bool "NVDIMM DAX: Raw access to persistent memory"
default LIBNVDIMM
depends on NVDIMM_PFN
help
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 3509cff68ef9..abe5c6bc756c 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -2176,12 +2176,14 @@ static struct device **scan_labels(struct nd_region *nd_region)
return devs;
err:
- for (i = 0; devs[i]; i++)
- if (is_nd_blk(&nd_region->dev))
- namespace_blk_release(devs[i]);
- else
- namespace_pmem_release(devs[i]);
- kfree(devs);
+ if (devs) {
+ for (i = 0; devs[i]; i++)
+ if (is_nd_blk(&nd_region->dev))
+ namespace_blk_release(devs[i]);
+ else
+ namespace_pmem_release(devs[i]);
+ kfree(devs);
+ }
return NULL;
}
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 42b3a8217073..24618431a14b 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -47,7 +47,7 @@ static struct nd_region *to_region(struct pmem_device *pmem)
return to_nd_region(to_dev(pmem)->parent);
}
-static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
+static int pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
unsigned int len)
{
struct device *dev = to_dev(pmem);
@@ -62,8 +62,12 @@ static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
__func__, (unsigned long long) sector,
cleared / 512, cleared / 512 > 1 ? "s" : "");
badblocks_clear(&pmem->bb, sector, cleared / 512);
+ } else {
+ return -EIO;
}
+
invalidate_pmem(pmem->virt_addr + offset, len);
+ return 0;
}
static void write_pmem(void *pmem_addr, struct page *page,
@@ -123,7 +127,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
flush_dcache_page(page);
write_pmem(pmem_addr, page, off, len);
if (unlikely(bad_pmem)) {
- pmem_clear_poison(pmem, pmem_off, len);
+ rc = pmem_clear_poison(pmem, pmem_off, len);
write_pmem(pmem_addr, page, off, len);
}
}
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 035f50c03281..bed19994c1e9 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -637,8 +637,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
}
}
- pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
-
if (pp->ops->host_init)
pp->ops->host_init(pp);
@@ -809,6 +807,11 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
{
u32 val;
+ /* get iATU unroll support */
+ pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
+ dev_dbg(pp->dev, "iATU unroll: %s\n",
+ pp->iatu_unroll_enabled ? "enabled" : "disabled");
+
/* set the number of lanes */
val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL);
val &= ~PORT_LINK_MODE_MASK;
diff --git a/drivers/pci/host/pcie-qcom.c b/drivers/pci/host/pcie-qcom.c
index ef0a84c7a588..35936409b2d4 100644
--- a/drivers/pci/host/pcie-qcom.c
+++ b/drivers/pci/host/pcie-qcom.c
@@ -533,11 +533,11 @@ static int qcom_pcie_probe(struct platform_device *pdev)
if (IS_ERR(pcie->phy))
return PTR_ERR(pcie->phy);
+ pp->dev = dev;
ret = pcie->ops->get_resources(pcie);
if (ret)
return ret;
- pp->dev = dev;
pp->root_bus_nr = -1;
pp->ops = &qcom_pcie_dw_ops;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index bfdd0744b686..ad70507cfb56 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -610,6 +610,7 @@ static int msi_verify_entries(struct pci_dev *dev)
* msi_capability_init - configure device's MSI capability structure
* @dev: pointer to the pci_dev data structure of MSI device function
* @nvec: number of interrupts to allocate
+ * @affinity: flag to indicate cpu irq affinity mask should be set
*
* Setup the MSI capability structure of the device with the requested
* number of interrupts. A return value of zero indicates the successful
@@ -752,6 +753,7 @@ static void msix_program_entries(struct pci_dev *dev,
* @dev: pointer to the pci_dev data structure of MSI-X device function
* @entries: pointer to an array of struct msix_entry entries
* @nvec: number of @entries
+ * @affinity: flag to indicate cpu irq affinity mask should be set
*
* Setup the MSI-X capability structure of device function with a
* single MSI-X irq. A return of zero indicates the successful setup of
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 67426c0477d3..5c1519b229e0 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2754,7 +2754,7 @@ static int _regulator_set_voltage_time(struct regulator_dev *rdev,
ramp_delay = rdev->desc->ramp_delay;
if (ramp_delay == 0) {
- rdev_warn(rdev, "ramp_delay not set\n");
+ rdev_dbg(rdev, "ramp_delay not set\n");
return 0;
}
diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c
index 8b2558e7363e..968c3ae4535c 100644
--- a/drivers/reset/reset-uniphier.c
+++ b/drivers/reset/reset-uniphier.c
@@ -154,7 +154,7 @@ const struct uniphier_reset_data uniphier_sld3_mio_reset_data[] = {
UNIPHIER_RESET_END,
};
-const struct uniphier_reset_data uniphier_pro5_mio_reset_data[] = {
+const struct uniphier_reset_data uniphier_pro5_sd_reset_data[] = {
UNIPHIER_MIO_RESET_SD(0, 0),
UNIPHIER_MIO_RESET_SD(1, 1),
UNIPHIER_MIO_RESET_EMMC_HW_RESET(6, 1),
@@ -360,7 +360,7 @@ static const struct of_device_id uniphier_reset_match[] = {
.compatible = "socionext,uniphier-ld20-reset",
.data = uniphier_ld20_sys_reset_data,
},
- /* Media I/O reset */
+ /* Media I/O reset, SD reset */
{
.compatible = "socionext,uniphier-sld3-mio-reset",
.data = uniphier_sld3_mio_reset_data,
@@ -378,20 +378,20 @@ static const struct of_device_id uniphier_reset_match[] = {
.data = uniphier_sld3_mio_reset_data,
},
{
- .compatible = "socionext,uniphier-pro5-mio-reset",
- .data = uniphier_pro5_mio_reset_data,
+ .compatible = "socionext,uniphier-pro5-sd-reset",
+ .data = uniphier_pro5_sd_reset_data,
},
{
- .compatible = "socionext,uniphier-pxs2-mio-reset",
- .data = uniphier_pro5_mio_reset_data,
+ .compatible = "socionext,uniphier-pxs2-sd-reset",
+ .data = uniphier_pro5_sd_reset_data,
},
{
.compatible = "socionext,uniphier-ld11-mio-reset",
.data = uniphier_sld3_mio_reset_data,
},
{
- .compatible = "socionext,uniphier-ld20-mio-reset",
- .data = uniphier_pro5_mio_reset_data,
+ .compatible = "socionext,uniphier-ld20-sd-reset",
+ .data = uniphier_pro5_sd_reset_data,
},
/* Peripheral reset */
{
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 831935af7389..a7a88476e215 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1205,7 +1205,7 @@ static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
mdc, lpm);
return mdc;
}
- fcx_max_data = mdc * FCX_MAX_DATA_FACTOR;
+ fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
if (fcx_max_data < private->fcx_max_data) {
dev_warn(&device->cdev->dev,
"The maximum data size for zHPF requests %u "
@@ -1675,7 +1675,7 @@ static u32 get_fcx_max_data(struct dasd_device *device)
" data size for zHPF requests failed\n");
return 0;
} else
- return mdc * FCX_MAX_DATA_FACTOR;
+ return (u32)mdc * FCX_MAX_DATA_FACTOR;
}
/*
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 46be25c7461e..876c7e6e3a99 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -780,7 +780,7 @@ static int cfg_wait_idle(void)
static int __init chp_init(void)
{
struct chp_id chpid;
- int ret;
+ int state, ret;
ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw);
if (ret)
@@ -791,7 +791,9 @@ static int __init chp_init(void)
return 0;
/* Register available channel-paths. */
chp_id_for_each(&chpid) {
- if (chp_info_get_status(chpid) != CHP_STATUS_NOT_RECOGNIZED)
+ state = chp_info_get_status(chpid);
+ if (state == CHP_STATUS_CONFIGURED ||
+ state == CHP_STATUS_STANDBY)
chp_new(chpid);
}
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index db2739079cbb..790babc5ef66 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -353,7 +353,7 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
#endif
-static int probe_irq __initdata;
+static int probe_irq;
/**
* probe_intr - helper for IRQ autoprobe
@@ -365,7 +365,7 @@ static int probe_irq __initdata;
* used by the IRQ probe code.
*/
-static irqreturn_t __init probe_intr(int irq, void *dev_id)
+static irqreturn_t probe_intr(int irq, void *dev_id)
{
probe_irq = irq;
return IRQ_HANDLED;
@@ -380,7 +380,7 @@ static irqreturn_t __init probe_intr(int irq, void *dev_id)
* and then looking to see what interrupt actually turned up.
*/
-static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
+static int __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
int possible)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 3d53d636b17b..f0cfb0451757 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2636,18 +2636,9 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
struct CommandControlBlock *ccb;
int target = cmd->device->id;
- int lun = cmd->device->lun;
- uint8_t scsicmd = cmd->cmnd[0];
cmd->scsi_done = done;
cmd->host_scribble = NULL;
cmd->result = 0;
- if ((scsicmd == SYNCHRONIZE_CACHE) ||(scsicmd == SEND_DIAGNOSTIC)){
- if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
- cmd->result = (DID_NO_CONNECT << 16);
- }
- cmd->scsi_done(cmd);
- return 0;
- }
if (target == 16) {
/* virtual device for iop message transfer */
arcmsr_handle_virtual_command(acb, cmd);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 68138a647dfc..d9239c2d49b1 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -900,8 +900,9 @@ void hwi_ring_cq_db(struct beiscsi_hba *phba,
static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
{
struct sgl_handle *psgl_handle;
+ unsigned long flags;
- spin_lock_bh(&phba->io_sgl_lock);
+ spin_lock_irqsave(&phba->io_sgl_lock, flags);
if (phba->io_sgl_hndl_avbl) {
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
"BM_%d : In alloc_io_sgl_handle,"
@@ -919,14 +920,16 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
phba->io_sgl_alloc_index++;
} else
psgl_handle = NULL;
- spin_unlock_bh(&phba->io_sgl_lock);
+ spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
return psgl_handle;
}
static void
free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
- spin_lock_bh(&phba->io_sgl_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&phba->io_sgl_lock, flags);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
"BM_%d : In free_,io_sgl_free_index=%d\n",
phba->io_sgl_free_index);
@@ -941,7 +944,7 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
"value there=%p\n", phba->io_sgl_free_index,
phba->io_sgl_hndl_base
[phba->io_sgl_free_index]);
- spin_unlock_bh(&phba->io_sgl_lock);
+ spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
return;
}
phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
@@ -950,7 +953,7 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
phba->io_sgl_free_index = 0;
else
phba->io_sgl_free_index++;
- spin_unlock_bh(&phba->io_sgl_lock);
+ spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
}
static inline struct wrb_handle *
@@ -958,15 +961,16 @@ beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context,
unsigned int wrbs_per_cxn)
{
struct wrb_handle *pwrb_handle;
+ unsigned long flags;
- spin_lock_bh(&pwrb_context->wrb_lock);
+ spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index];
pwrb_context->wrb_handles_available--;
if (pwrb_context->alloc_index == (wrbs_per_cxn - 1))
pwrb_context->alloc_index = 0;
else
pwrb_context->alloc_index++;
- spin_unlock_bh(&pwrb_context->wrb_lock);
+ spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
if (pwrb_handle)
memset(pwrb_handle->pwrb, 0, sizeof(*pwrb_handle->pwrb));
@@ -1001,14 +1005,16 @@ beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context,
struct wrb_handle *pwrb_handle,
unsigned int wrbs_per_cxn)
{
- spin_lock_bh(&pwrb_context->wrb_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
pwrb_context->wrb_handles_available++;
if (pwrb_context->free_index == (wrbs_per_cxn - 1))
pwrb_context->free_index = 0;
else
pwrb_context->free_index++;
- spin_unlock_bh(&pwrb_context->wrb_lock);
+ spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
}
/**
@@ -1037,8 +1043,9 @@ free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
{
struct sgl_handle *psgl_handle;
+ unsigned long flags;
- spin_lock_bh(&phba->mgmt_sgl_lock);
+ spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
if (phba->eh_sgl_hndl_avbl) {
psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
@@ -1056,14 +1063,16 @@ static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
phba->eh_sgl_alloc_index++;
} else
psgl_handle = NULL;
- spin_unlock_bh(&phba->mgmt_sgl_lock);
+ spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
return psgl_handle;
}
void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
- spin_lock_bh(&phba->mgmt_sgl_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BM_%d : In free_mgmt_sgl_handle,"
"eh_sgl_free_index=%d\n",
@@ -1078,7 +1087,7 @@ free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
"BM_%d : Double Free in eh SGL ,"
"eh_sgl_free_index=%d\n",
phba->eh_sgl_free_index);
- spin_unlock_bh(&phba->mgmt_sgl_lock);
+ spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
return;
}
phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
@@ -1088,7 +1097,7 @@ free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
phba->eh_sgl_free_index = 0;
else
phba->eh_sgl_free_index++;
- spin_unlock_bh(&phba->mgmt_sgl_lock);
+ spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
}
static void
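
The be2iscsi hunks convert the SGL and WRB locks from spin_lock_bh() to spin_lock_irqsave(), the form needed once a lock may be taken with interrupts disabled or from hard-irq context; the motivating call path is not visible in this diff. A minimal sketch of the idiom:

#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_pool {
	spinlock_t lock;
	unsigned int avail;
};

static bool demo_pool_get(struct demo_pool *p)
{
	unsigned long flags;
	bool ok = false;

	/* irqsave/irqrestore preserves the caller's interrupt state, so
	 * this is safe from process, softirq or hard-irq context; the
	 * _bh() variant only masks softirqs. */
	spin_lock_irqsave(&p->lock, flags);
	if (p->avail) {
		p->avail--;
		ok = true;
	}
	spin_unlock_irqrestore(&p->lock, flags);

	return ok;
}
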
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index c051694bfcb0..f9b6fba689ff 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -791,9 +791,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
free_task:
/* regular RX path uses back_lock */
- spin_lock_bh(&session->back_lock);
+ spin_lock(&session->back_lock);
__iscsi_put_task(task);
- spin_unlock_bh(&session->back_lock);
+ spin_unlock(&session->back_lock);
return NULL;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 9ff57dee72d7..d8b1fbd4c8aa 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1700,16 +1700,13 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
goto out_done;
}
- switch (scmd->cmnd[0]) {
- case SYNCHRONIZE_CACHE:
- /*
- * FW takes care of flush cache on its own
- * No need to send it down
- */
+ /*
+ * FW takes care of flush cache on its own for Virtual Disk.
+ * No need to send it down for VD. For JBOD send SYNCHRONIZE_CACHE to FW.
+ */
+ if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) {
scmd->result = DID_OK << 16;
goto out_done;
- default:
- break;
}
return instance->instancet->build_and_issue_cmd(instance, scmd);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index c905709707f0..cf04a364fd8b 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -5134,6 +5134,7 @@ static void __exit scsi_debug_exit(void)
bus_unregister(&pseudo_lld_bus);
root_device_unregister(pseudo_primary);
+ vfree(map_storep);
vfree(dif_storep);
vfree(fake_storep);
kfree(sdebug_q_arr);
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 35c0dd945668..a67b0ff6a362 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -70,6 +70,7 @@
#define SPI_SR 0x2c
#define SPI_SR_EOQF 0x10000000
#define SPI_SR_TCFQF 0x80000000
+#define SPI_SR_CLEAR 0xdaad0000
#define SPI_RSER 0x30
#define SPI_RSER_EOQFE 0x10000000
@@ -646,6 +647,11 @@ static const struct regmap_config dspi_regmap_config = {
.max_register = 0x88,
};
+static void dspi_init(struct fsl_dspi *dspi)
+{
+ regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
+}
+
static int dspi_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -709,6 +715,7 @@ static int dspi_probe(struct platform_device *pdev)
return PTR_ERR(dspi->regmap);
}
+ dspi_init(dspi);
dspi->irq = platform_get_irq(pdev, 0);
if (dspi->irq < 0) {
dev_err(&pdev->dev, "can't get platform irq\n");
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 7451585a080e..2c175b9495f7 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -458,7 +458,7 @@ static void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
mspi->len -= rx_nr_bytes;
- if (mspi->rx)
+ if (rx_nr_bytes && mspi->rx)
mspi->get_rx(rx_data, mspi);
}
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 5787b723b593..838783c3fed0 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1618,9 +1618,11 @@ static void of_register_spi_devices(struct spi_master *master)
if (of_node_test_and_set_flag(nc, OF_POPULATED))
continue;
spi = of_register_spi_device(master, nc);
- if (IS_ERR(spi))
+ if (IS_ERR(spi)) {
dev_warn(&master->dev, "Failed to create SPI device for %s\n",
nc->full_name);
+ of_node_clear_flag(nc, OF_POPULATED);
+ }
}
}
#else
@@ -3131,6 +3133,7 @@ static int of_spi_notify(struct notifier_block *nb, unsigned long action,
if (IS_ERR(spi)) {
pr_err("%s: failed to create for '%s'\n",
__func__, rd->dn->full_name);
+ of_node_clear_flag(rd->dn, OF_POPULATED);
return notifier_from_errno(PTR_ERR(spi));
}
break;
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 396ded52ab70..209a8f7ef02b 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1187,8 +1187,10 @@ int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
hdata.type = heap->type;
hdata.heap_id = heap->id;
- ret = copy_to_user(&buffer[cnt],
- &hdata, sizeof(hdata));
+ if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
+ ret = -EFAULT;
+ goto out;
+ }
cnt++;
if (cnt >= max_cnt)
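
The ion change stops treating the raw copy_to_user() return value as a status code: it returns the number of uncopied bytes, so a short copy should be turned into -EFAULT rather than stored as-is. A small sketch of the standard handling, with hypothetical types:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_record {
	u32 id;
	u32 type;
};

static int demo_export_one(struct demo_record __user *dst,
			   const struct demo_record *src)
{
	/* copy_to_user() returns the number of bytes NOT copied; any
	 * non-zero result means the user buffer was bad or too short. */
	if (copy_to_user(dst, src, sizeof(*src)))
		return -EFAULT;

	return 0;
}
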
diff --git a/drivers/staging/android/ion/ion_of.c b/drivers/staging/android/ion/ion_of.c
index 15bac92b7f04..46b2bb99bfd6 100644
--- a/drivers/staging/android/ion/ion_of.c
+++ b/drivers/staging/android/ion/ion_of.c
@@ -107,7 +107,7 @@ struct ion_platform_data *ion_parse_dt(struct platform_device *pdev,
heap_pdev = of_platform_device_create(node, heaps[i].name,
&pdev->dev);
- if (!pdev)
+ if (!heap_pdev)
return ERR_PTR(-ENOMEM);
heap_pdev->dev.platform_data = &heaps[i];
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
index e36ee984485b..34307ac3f255 100644
--- a/drivers/staging/greybus/arche-platform.c
+++ b/drivers/staging/greybus/arche-platform.c
@@ -128,6 +128,7 @@ int arche_platform_change_state(enum arche_platform_state state,
pdev = of_find_device_by_node(np);
if (!pdev) {
pr_err("arche-platform device not found\n");
+ of_node_put(np);
return -ENODEV;
}
diff --git a/drivers/staging/greybus/es2.c b/drivers/staging/greybus/es2.c
index 071bb1cfd3ae..baab460eeaa3 100644
--- a/drivers/staging/greybus/es2.c
+++ b/drivers/staging/greybus/es2.c
@@ -1548,7 +1548,8 @@ static int ap_probe(struct usb_interface *interface,
INIT_LIST_HEAD(&es2->arpcs);
spin_lock_init(&es2->arpc_lock);
- if (es2_arpc_in_enable(es2))
+ retval = es2_arpc_in_enable(es2);
+ if (retval)
goto error;
retval = gb_hd_add(hd);
diff --git a/drivers/staging/greybus/gpio.c b/drivers/staging/greybus/gpio.c
index 5e06e4229e42..250caa00de5e 100644
--- a/drivers/staging/greybus/gpio.c
+++ b/drivers/staging/greybus/gpio.c
@@ -702,15 +702,13 @@ static int gb_gpio_probe(struct gbphy_device *gbphy_dev,
ret = gb_gpio_irqchip_add(gpio, irqc, 0,
handle_level_irq, IRQ_TYPE_NONE);
if (ret) {
- dev_err(&connection->bundle->dev,
- "failed to add irq chip: %d\n", ret);
+ dev_err(&gbphy_dev->dev, "failed to add irq chip: %d\n", ret);
goto exit_line_free;
}
ret = gpiochip_add(gpio);
if (ret) {
- dev_err(&connection->bundle->dev,
- "failed to add gpio chip: %d\n", ret);
+ dev_err(&gbphy_dev->dev, "failed to add gpio chip: %d\n", ret);
goto exit_gpio_irqchip_remove;
}
diff --git a/drivers/staging/greybus/module.c b/drivers/staging/greybus/module.c
index 69f67ddbd4a3..660b4674a76f 100644
--- a/drivers/staging/greybus/module.c
+++ b/drivers/staging/greybus/module.c
@@ -127,7 +127,7 @@ struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id,
return module;
err_put_interfaces:
- for (--i; i > 0; --i)
+ for (--i; i >= 0; --i)
gb_interface_put(module->interfaces[i]);
put_device(&module->dev);
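
The module.c fix changes the unwind bound from i > 0 to i >= 0 so that the first successfully acquired interface (index 0) is also released on error. A minimal sketch of that unwind pattern, with stubbed helpers standing in for the real get/put calls:

#include <linux/errno.h>

#define DEMO_NR	4

static int demo_get(int idx)  { return idx == 2 ? -ENODEV : 0; }	/* stub */
static void demo_put(int idx) { }					/* stub */

static int demo_get_all(void)
{
	int i, ret;

	for (i = 0; i < DEMO_NR; i++) {
		ret = demo_get(i);
		if (ret)
			goto err_put;
	}
	return 0;

err_put:
	/* Release everything acquired so far, including index 0. */
	for (--i; i >= 0; --i)
		demo_put(i);
	return ret;
}
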
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
index 5ee7954bd9f9..2633d2bfb1b4 100644
--- a/drivers/staging/greybus/uart.c
+++ b/drivers/staging/greybus/uart.c
@@ -888,7 +888,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
minor = alloc_minor(gb_tty);
if (minor < 0) {
if (minor == -ENOSPC) {
- dev_err(&connection->bundle->dev,
+ dev_err(&gbphy_dev->dev,
"no more free minor numbers\n");
retval = -ENODEV;
} else {
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index d626125d7af9..564b36d4f648 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -468,6 +468,8 @@ static inline int __sca3000_get_base_freq(struct sca3000_state *st,
case SCA3000_MEAS_MODE_OP_2:
*base_freq = info->option_mode_2_freq;
break;
+ default:
+ ret = -EINVAL;
}
error_ret:
return ret;
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index 6eae60595905..23fda9d98bff 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -871,12 +871,10 @@ static ssize_t xattr_cache_store(struct kobject *kobj,
}
LUSTRE_RW_ATTR(xattr_cache);
-static ssize_t unstable_stats_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
+static int ll_unstable_stats_seq_show(struct seq_file *m, void *v)
{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
+ struct super_block *sb = m->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
struct cl_client_cache *cache = sbi->ll_cache;
long pages;
int mb;
@@ -884,19 +882,21 @@ static ssize_t unstable_stats_show(struct kobject *kobj,
pages = atomic_long_read(&cache->ccc_unstable_nr);
mb = (pages * PAGE_SIZE) >> 20;
- return sprintf(buf, "unstable_check: %8d\n"
- "unstable_pages: %12ld\n"
- "unstable_mb: %8d\n",
- cache->ccc_unstable_check, pages, mb);
+ seq_printf(m,
+ "unstable_check: %8d\n"
+ "unstable_pages: %12ld\n"
+ "unstable_mb: %8d\n",
+ cache->ccc_unstable_check, pages, mb);
+
+ return 0;
}
-static ssize_t unstable_stats_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
+static ssize_t ll_unstable_stats_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
+ struct super_block *sb = ((struct seq_file *)file->private_data)->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
char kernbuf[128];
int val, rc;
@@ -922,7 +922,7 @@ static ssize_t unstable_stats_store(struct kobject *kobj,
return count;
}
-LUSTRE_RW_ATTR(unstable_stats);
+LPROC_SEQ_FOPS(ll_unstable_stats);
static ssize_t root_squash_show(struct kobject *kobj, struct attribute *attr,
char *buf)
@@ -995,6 +995,7 @@ static struct lprocfs_vars lprocfs_llite_obd_vars[] = {
/* { "filegroups", lprocfs_rd_filegroups, 0, 0 }, */
{ "max_cached_mb", &ll_max_cached_mb_fops, NULL },
{ "statahead_stats", &ll_statahead_stats_fops, NULL, 0 },
+ { "unstable_stats", &ll_unstable_stats_fops, NULL },
{ "sbi_flags", &ll_sbi_flags_fops, NULL, 0 },
{ .name = "nosquash_nids",
.fops = &ll_nosquash_nids_fops },
@@ -1026,7 +1027,6 @@ static struct attribute *llite_attrs[] = {
&lustre_attr_max_easize.attr,
&lustre_attr_default_easize.attr,
&lustre_attr_xattr_cache.attr,
- &lustre_attr_unstable_stats.attr,
&lustre_attr_root_squash.attr,
NULL,
};
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index ea15cc638097..4d9bd02ede47 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -482,6 +482,8 @@ static int bcm2048_set_rds_no_lock(struct bcm2048_device *bdev, u8 rds_on)
flags);
memset(&bdev->rds_info, 0, sizeof(bdev->rds_info));
}
+ if (err)
+ return err;
return bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM,
bdev->cache_fm_rds_system);
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 78f5613e9467..6ab7443eabde 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -3388,7 +3388,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
clients_count++;
- destroy_workqueue(hif_workqueue);
_fail_:
return result;
}
diff --git a/drivers/thermal/intel_pch_thermal.c b/drivers/thermal/intel_pch_thermal.c
index 9b4815e81b0d..19bf2028e508 100644
--- a/drivers/thermal/intel_pch_thermal.c
+++ b/drivers/thermal/intel_pch_thermal.c
@@ -20,10 +20,13 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
+#include <linux/acpi.h>
#include <linux/thermal.h>
#include <linux/pm.h>
/* Intel PCH thermal Device IDs */
+#define PCH_THERMAL_DID_HSW_1 0x9C24 /* Haswell PCH */
+#define PCH_THERMAL_DID_HSW_2 0x8C24 /* Haswell PCH */
#define PCH_THERMAL_DID_WPT 0x9CA4 /* Wildcat Point */
#define PCH_THERMAL_DID_SKL 0x9D31 /* Skylake PCH */
@@ -66,9 +69,53 @@ struct pch_thermal_device {
unsigned long crt_temp;
int hot_trip_id;
unsigned long hot_temp;
+ int psv_trip_id;
+ unsigned long psv_temp;
bool bios_enabled;
};
+#ifdef CONFIG_ACPI
+
+/*
+ * On some platforms, there is a companion ACPI device, which adds
+ * passive trip temperature using _PSV method. There is no specific
+ * passive temperature setting in MMIO interface of this PCI device.
+ */
+static void pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd,
+ int *nr_trips)
+{
+ struct acpi_device *adev;
+
+ ptd->psv_trip_id = -1;
+
+ adev = ACPI_COMPANION(&ptd->pdev->dev);
+ if (adev) {
+ unsigned long long r;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(adev->handle, "_PSV", NULL,
+ &r);
+ if (ACPI_SUCCESS(status)) {
+ unsigned long trip_temp;
+
+ trip_temp = DECI_KELVIN_TO_MILLICELSIUS(r);
+ if (trip_temp) {
+ ptd->psv_temp = trip_temp;
+ ptd->psv_trip_id = *nr_trips;
+ ++(*nr_trips);
+ }
+ }
+ }
+}
+#else
+static void pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd,
+ int *nr_trips)
+{
+ ptd->psv_trip_id = -1;
+
+}
+#endif
+
static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips)
{
u8 tsel;
@@ -119,6 +166,8 @@ read_trips:
++(*nr_trips);
}
+ pch_wpt_add_acpi_psv_trip(ptd, nr_trips);
+
return 0;
}
@@ -194,6 +243,8 @@ static int pch_get_trip_type(struct thermal_zone_device *tzd, int trip,
*type = THERMAL_TRIP_CRITICAL;
else if (ptd->hot_trip_id == trip)
*type = THERMAL_TRIP_HOT;
+ else if (ptd->psv_trip_id == trip)
+ *type = THERMAL_TRIP_PASSIVE;
else
return -EINVAL;
@@ -208,6 +259,8 @@ static int pch_get_trip_temp(struct thermal_zone_device *tzd, int trip, int *tem
*temp = ptd->crt_temp;
else if (ptd->hot_trip_id == trip)
*temp = ptd->hot_temp;
+ else if (ptd->psv_trip_id == trip)
+ *temp = ptd->psv_temp;
else
return -EINVAL;
@@ -242,6 +295,11 @@ static int intel_pch_thermal_probe(struct pci_dev *pdev,
ptd->ops = &pch_dev_ops_wpt;
dev_name = "pch_skylake";
break;
+ case PCH_THERMAL_DID_HSW_1:
+ case PCH_THERMAL_DID_HSW_2:
+ ptd->ops = &pch_dev_ops_wpt;
+ dev_name = "pch_haswell";
+ break;
default:
dev_err(&pdev->dev, "unknown pch thermal device\n");
return -ENODEV;
@@ -324,6 +382,8 @@ static int intel_pch_thermal_resume(struct device *device)
static struct pci_device_id intel_pch_thermal_id[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_2) },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id);
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 0e4dc0afcfd2..7a223074df3d 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -669,20 +669,10 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
.set_cur_state = powerclamp_set_cur_state,
};
-static const struct x86_cpu_id intel_powerclamp_ids[] __initconst = {
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT },
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_ARAT },
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_NONSTOP_TSC },
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_CONSTANT_TSC},
- {}
-};
-MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
-
static int __init powerclamp_probe(void)
{
- if (!x86_match_cpu(intel_powerclamp_ids)) {
- pr_err("Intel powerclamp does not run on family %d model %d\n",
- boot_cpu_data.x86, boot_cpu_data.x86_model);
+ if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
+ pr_err("CPU does not support MWAIT");
return -ENODEV;
}
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index 886fcf37f291..b9923464599f 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -213,7 +213,7 @@ static int qrk_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
struct pci_dev *pdev = to_pci_dev(port->dev);
int ret;
- ret = pci_alloc_irq_vectors(pdev, 1, 1, 0);
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
if (ret < 0)
return ret;
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 1bfb6fdbaa20..1731b98d2471 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -83,7 +83,8 @@ static const struct serial8250_config uart_config[] = {
.name = "16550A",
.fifo_size = 16,
.tx_loadsz = 16,
- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
+ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
.rxtrig_bytes = {1, 4, 8, 14},
.flags = UART_CAP_FIFO,
},
diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
index b8d9c8c9d02a..417d9e7038e1 100644
--- a/drivers/tty/serial/8250/8250_uniphier.c
+++ b/drivers/tty/serial/8250/8250_uniphier.c
@@ -99,7 +99,7 @@ static void uniphier_serial_out(struct uart_port *p, int offset, int value)
case UART_LCR:
valshift = UNIPHIER_UART_LCR_SHIFT;
/* Divisor latch access bit does not exist. */
- value &= ~(UART_LCR_DLAB << valshift);
+ value &= ~UART_LCR_DLAB;
/* fall through */
case UART_MCR:
offset = UNIPHIER_UART_LCR_MCR;
@@ -199,7 +199,7 @@ static int uniphier_uart_probe(struct platform_device *pdev)
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs) {
- dev_err(dev, "failed to get memory resource");
+ dev_err(dev, "failed to get memory resource\n");
return -EINVAL;
}
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index c7831407a882..25c1d7bc0100 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1625,6 +1625,7 @@ config SERIAL_SPRD_CONSOLE
config SERIAL_STM32
tristate "STMicroelectronics STM32 serial port support"
select SERIAL_CORE
+ depends on HAS_DMA
depends on ARM || COMPILE_TEST
help
This driver is for the on-chip Serial Controller on
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index fd8aa1f4ba78..168b10cad47b 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -2132,11 +2132,29 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
mode |= ATMEL_US_USMODE_RS485;
} else if (termios->c_cflag & CRTSCTS) {
/* RS232 with hardware handshake (RTS/CTS) */
- if (atmel_use_dma_rx(port) && !atmel_use_fifo(port)) {
- dev_info(port->dev, "not enabling hardware flow control because DMA is used");
- termios->c_cflag &= ~CRTSCTS;
- } else {
+ if (atmel_use_fifo(port) &&
+ !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) {
+ /*
+ * with ATMEL_US_USMODE_HWHS set, the controller will
+ * be able to drive the RTS pin high/low when the RX
+ * FIFO is above RXFTHRES/below RXFTHRES2.
+ * It will also disable the transmitter when the CTS
+ * pin is high.
+ * This mode is not activated if CTS pin is a GPIO
+ * because in this case, the transmitter is always
+ * disabled (there must be an internal pull-up
+ * responsible for this behaviour).
+ * If the RTS pin is a GPIO, the controller won't be
+ * able to drive it according to the FIFO thresholds,
+ * but it will be handled by the driver.
+ */
mode |= ATMEL_US_USMODE_HWHS;
+ } else {
+ /*
+ * For platforms without FIFO, the flow control is
+ * handled by the driver.
+ */
+ mode |= ATMEL_US_USMODE_NORMAL;
}
} else {
/* RS232 without hardware handshake */
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index de9d5107c00a..76103f2c4a80 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -328,7 +328,7 @@ static void lpuart_dma_tx(struct lpuart_port *sport)
sport->dma_tx_bytes = uart_circ_chars_pending(xmit);
- if (xmit->tail < xmit->head) {
+ if (xmit->tail < xmit->head || xmit->head == 0) {
sport->dma_tx_nents = 1;
sg_init_one(sgl, xmit->buf + xmit->tail, sport->dma_tx_bytes);
} else {
@@ -359,7 +359,6 @@ static void lpuart_dma_tx(struct lpuart_port *sport)
sport->dma_tx_in_progress = true;
sport->dma_tx_cookie = dmaengine_submit(sport->dma_tx_desc);
dma_async_issue_pending(sport->dma_tx_chan);
-
}
static void lpuart_dma_tx_complete(void *arg)
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index d391650b82e7..42caccb5e87e 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -419,6 +419,7 @@ static struct dmi_system_id pch_uart_dmi_table[] = {
},
(void *)MINNOW_UARTCLK,
},
+ { }
};
/* Return UART clock, checking for board specific clocks. */
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 2675792a8f59..fb0672554123 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1130,9 +1130,13 @@ static int sc16is7xx_gpio_direction_output(struct gpio_chip *chip,
{
struct sc16is7xx_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[0].port;
+ u8 state = sc16is7xx_port_read(port, SC16IS7XX_IOSTATE_REG);
- sc16is7xx_port_update(port, SC16IS7XX_IOSTATE_REG, BIT(offset),
- val ? BIT(offset) : 0);
+ if (val)
+ state |= BIT(offset);
+ else
+ state &= ~BIT(offset);
+ sc16is7xx_port_write(port, SC16IS7XX_IOSTATE_REG, state);
sc16is7xx_port_update(port, SC16IS7XX_IODIR_REG, BIT(offset),
BIT(offset));
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 6e4f63627479..f2303f390345 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -111,7 +111,7 @@ void uart_write_wakeup(struct uart_port *port)
* closed. No cookie for you.
*/
BUG_ON(!state);
- tty_wakeup(state->port.tty);
+ tty_port_tty_wakeup(&state->port);
}
static void uart_stop(struct tty_struct *tty)
@@ -632,7 +632,7 @@ static void uart_flush_buffer(struct tty_struct *tty)
if (port->ops->flush_buffer)
port->ops->flush_buffer(port);
uart_port_unlock(port, flags);
- tty_wakeup(tty);
+ tty_port_tty_wakeup(&state->port);
}
/*
@@ -2746,8 +2746,6 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
uport->cons = drv->cons;
uport->minor = drv->tty_driver->minor_start + uport->line;
- port->console = uart_console(uport);
-
/*
* If this port is a console, then the spinlock is already
* initialised.
@@ -2761,6 +2759,8 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
uart_configure_port(drv, state, uport);
+ port->console = uart_console(uport);
+
num_groups = 2;
if (uport->attr_group)
num_groups++;
diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
index 41d974923102..cd97ceb76e4f 100644
--- a/drivers/tty/serial/stm32-usart.h
+++ b/drivers/tty/serial/stm32-usart.h
@@ -31,7 +31,7 @@ struct stm32_usart_info {
struct stm32_usart_config cfg;
};
-#define UNDEF_REG ~0
+#define UNDEF_REG 0xff
/* Register offsets */
struct stm32_usart_info stm32f4_info = {
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index f37edaa5ac75..dd4c02fa4820 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1200,6 +1200,7 @@ static int __init cdns_early_console_setup(struct earlycon_device *device,
OF_EARLYCON_DECLARE(cdns, "xlnx,xuartps", cdns_early_console_setup);
OF_EARLYCON_DECLARE(cdns, "cdns,uart-r1p8", cdns_early_console_setup);
OF_EARLYCON_DECLARE(cdns, "cdns,uart-r1p12", cdns_early_console_setup);
+OF_EARLYCON_DECLARE(cdns, "xlnx,zynqmp-uart", cdns_early_console_setup);
/**
* cdns_uart_console_write - perform write operation
@@ -1438,6 +1439,7 @@ static const struct of_device_id cdns_uart_of_match[] = {
{ .compatible = "xlnx,xuartps", },
{ .compatible = "cdns,uart-r1p8", },
{ .compatible = "cdns,uart-r1p12", .data = &zynqmp_uart_def },
+ { .compatible = "xlnx,zynqmp-uart", .data = &zynqmp_uart_def },
{}
};
MODULE_DEVICE_TABLE(of, cdns_uart_of_match);
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 06fb39c1d6dd..8c3bf3d613c0 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -870,10 +870,15 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
return 0;
+ if (new_screen_size > (4 << 20))
+ return -EINVAL;
newscreen = kmalloc(new_screen_size, GFP_USER);
if (!newscreen)
return -ENOMEM;
+ if (vc == sel_cons)
+ clear_selection();
+
old_rows = vc->vc_rows;
old_row_size = vc->vc_size_row;
@@ -1176,7 +1181,7 @@ static void csi_J(struct vc_data *vc, int vpar)
break;
case 3: /* erase scroll-back buffer (and whole display) */
scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
- vc->vc_screenbuf_size >> 1);
+ vc->vc_screenbuf_size);
set_origin(vc);
if (con_is_visible(vc))
update_screen(vc);
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 96ae69502c86..111b0e0b8698 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -188,6 +188,8 @@ static void host_stop(struct ci_hdrc *ci)
if (hcd) {
usb_remove_hcd(hcd);
+ ci->role = CI_ROLE_END;
+ synchronize_irq(ci->irq);
usb_put_hcd(hcd);
if (ci->platdata->reg_vbus && !ci_otg_is_fsm_mode(ci) &&
(ci->platdata->flags & CI_HDRC_TURN_VBUS_EARLY_ON))
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index fa9b26b91507..4c0fa0b17353 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -463,9 +463,18 @@ static void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg)
*/
void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg)
{
+ bool ret;
+
switch (hsotg->dr_mode) {
case USB_DR_MODE_HOST:
- dwc2_force_mode(hsotg, true);
+ ret = dwc2_force_mode(hsotg, true);
+ /*
+ * NOTE: This is required for some rockchip soc based
+ * platforms on their host-only dwc2.
+ */
+ if (!ret)
+ msleep(50);
+
break;
case USB_DR_MODE_PERIPHERAL:
dwc2_force_mode(hsotg, false);
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index aad4107ef927..2a21a0414b1d 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -259,6 +259,13 @@ enum dwc2_lx_state {
DWC2_L3, /* Off state */
};
+/*
+ * Gadget periodic tx fifo sizes as used by legacy driver
+ * EP0 is not included
+ */
+#define DWC2_G_P_LEGACY_TX_FIFO_SIZE {256, 256, 256, 256, 768, 768, 768, \
+ 768, 0, 0, 0, 0, 0, 0, 0}
+
/* Gadget ep0 states */
enum dwc2_ep0_state {
DWC2_EP0_SETUP,
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 4cd6403a7566..24fbebc9b409 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -186,10 +186,9 @@ static void dwc2_hsotg_ctrl_epint(struct dwc2_hsotg *hsotg,
*/
static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
{
- unsigned int fifo;
+ unsigned int ep;
unsigned int addr;
int timeout;
- u32 dptxfsizn;
u32 val;
/* Reset fifo map if not correctly cleared during previous session */
@@ -217,16 +216,16 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
* them to endpoints dynamically according to maxpacket size value of
* given endpoint.
*/
- for (fifo = 1; fifo < MAX_EPS_CHANNELS; fifo++) {
- dptxfsizn = dwc2_readl(hsotg->regs + DPTXFSIZN(fifo));
-
- val = (dptxfsizn & FIFOSIZE_DEPTH_MASK) | addr;
- addr += dptxfsizn >> FIFOSIZE_DEPTH_SHIFT;
-
- if (addr > hsotg->fifo_mem)
- break;
+ for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) {
+ if (!hsotg->g_tx_fifo_sz[ep])
+ continue;
+ val = addr;
+ val |= hsotg->g_tx_fifo_sz[ep] << FIFOSIZE_DEPTH_SHIFT;
+ WARN_ONCE(addr + hsotg->g_tx_fifo_sz[ep] > hsotg->fifo_mem,
+ "insufficient fifo memory");
+ addr += hsotg->g_tx_fifo_sz[ep];
- dwc2_writel(val, hsotg->regs + DPTXFSIZN(fifo));
+ dwc2_writel(val, hsotg->regs + DPTXFSIZN(ep));
}
/*
@@ -3807,10 +3806,36 @@ static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg)
static void dwc2_hsotg_of_probe(struct dwc2_hsotg *hsotg)
{
struct device_node *np = hsotg->dev->of_node;
+ u32 len = 0;
+ u32 i = 0;
/* Enable dma if requested in device tree */
hsotg->g_using_dma = of_property_read_bool(np, "g-use-dma");
+ /*
+ * Register TX periodic fifo size per endpoint.
+ * EP0 is excluded since it has no fifo configuration.
+ */
+ if (!of_find_property(np, "g-tx-fifo-size", &len))
+ goto rx_fifo;
+
+ len /= sizeof(u32);
+
+ /* Read tx fifo sizes other than ep0 */
+ if (of_property_read_u32_array(np, "g-tx-fifo-size",
+ &hsotg->g_tx_fifo_sz[1], len))
+ goto rx_fifo;
+
+ /* Add ep0 */
+ len++;
+
+ /* Make remaining TX fifos unavailable */
+ if (len < MAX_EPS_CHANNELS) {
+ for (i = len; i < MAX_EPS_CHANNELS; i++)
+ hsotg->g_tx_fifo_sz[i] = 0;
+ }
+
+rx_fifo:
/* Register RX fifo size */
of_property_read_u32(np, "g-rx-fifo-size", &hsotg->g_rx_fifo_sz);
@@ -3832,10 +3857,13 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
struct device *dev = hsotg->dev;
int epnum;
int ret;
+ int i;
+ u32 p_tx_fifo[] = DWC2_G_P_LEGACY_TX_FIFO_SIZE;
/* Initialize to legacy fifo configuration values */
hsotg->g_rx_fifo_sz = 2048;
hsotg->g_np_g_tx_fifo_sz = 1024;
+ memcpy(&hsotg->g_tx_fifo_sz[1], p_tx_fifo, sizeof(p_tx_fifo));
/* Device tree specific probe */
dwc2_hsotg_of_probe(hsotg);
@@ -3853,6 +3881,9 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n",
hsotg->g_np_g_tx_fifo_sz);
dev_dbg(dev, "RXFIFO size: %d\n", hsotg->g_rx_fifo_sz);
+ for (i = 0; i < MAX_EPS_CHANNELS; i++)
+ dev_dbg(dev, "Periodic TXFIFO%2d size: %d\n", i,
+ hsotg->g_tx_fifo_sz[i]);
hsotg->gadget.max_speed = USB_SPEED_HIGH;
hsotg->gadget.ops = &dwc2_hsotg_gadget_ops;
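The loop above lays out the device-mode periodic TX FIFOs from a per-endpoint size table instead of reading back the hardware defaults. A rough standalone sketch of that layout arithmetic follows; the size table, the FIFOSIZE-style shift, the starting address and the total FIFO memory used here are assumptions for illustration, not the driver's real values.

/*
 * Sketch of the FIFO layout above: walk a per-endpoint size table, pack
 * each register value as (depth << shift) | start_address, advance the
 * running address, and flag a layout that would overrun FIFO memory.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_EPS        16
#define DEPTH_SHIFT    16     /* assumed, stands in for FIFOSIZE_DEPTH_SHIFT */
#define FIFO_MEM_WORDS 4096   /* assumed total FIFO memory, in words */

int main(void)
{
	/* legacy-style per-endpoint sizes, EP0 excluded (index 0 unused) */
	static const uint16_t tx_fifo_sz[MAX_EPS] = {
		0, 256, 256, 256, 256, 768, 768, 768, 768,
	};
	uint32_t addr = 1024;     /* assumed start after RX/non-periodic FIFOs */
	unsigned int ep;

	for (ep = 1; ep < MAX_EPS; ep++) {
		uint32_t val;

		if (!tx_fifo_sz[ep])
			continue;

		val = addr | ((uint32_t)tx_fifo_sz[ep] << DEPTH_SHIFT);
		if (addr + tx_fifo_sz[ep] > FIFO_MEM_WORDS)
			printf("EP%u: insufficient fifo memory\n", ep);

		printf("EP%2u: DPTXFSIZ = 0x%08x (start %u, depth %u)\n",
		       ep, (unsigned)val, (unsigned)addr,
		       (unsigned)tx_fifo_sz[ep]);
		addr += tx_fifo_sz[ep];
	}
	return 0;
}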
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 07cc8929f271..1dfa56a5f1c5 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -783,6 +783,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
req->trb = trb;
req->trb_dma = dwc3_trb_dma_offset(dep, trb);
req->first_trb_index = dep->trb_enqueue;
+ dep->queued_requests++;
}
dwc3_ep_inc_enq(dep);
@@ -833,8 +834,6 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
trb->ctrl |= DWC3_TRB_CTRL_HWO;
- dep->queued_requests++;
-
trace_dwc3_prepare_trb(dep, trb);
}
@@ -1074,9 +1073,17 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
list_add_tail(&req->list, &dep->pending_list);
- if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
- dep->flags & DWC3_EP_PENDING_REQUEST) {
- if (list_empty(&dep->started_list)) {
+ /*
+ * NOTICE: Isochronous endpoints should NEVER be prestarted. We must
+ * wait for a XferNotReady event so we will know what's the current
+ * (micro-)frame number.
+ *
+ * Without this trick, we are very likely to get Bus Expiry
+ * errors, which will force us to issue an EndTransfer command.
+ */
+ if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
+ if ((dep->flags & DWC3_EP_PENDING_REQUEST) &&
+ list_empty(&dep->started_list)) {
dwc3_stop_active_transfer(dwc, dep->number, true);
dep->flags = DWC3_EP_ENABLED;
}
@@ -1861,8 +1868,11 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
unsigned int s_pkt = 0;
unsigned int trb_status;
- dep->queued_requests--;
dwc3_ep_inc_deq(dep);
+
+ if (req->trb == trb)
+ dep->queued_requests--;
+
trace_dwc3_complete_trb(dep, trb);
/*
@@ -2980,7 +2990,7 @@ err3:
kfree(dwc->setup_buf);
err2:
- dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
+ dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
dwc->ep0_trb, dwc->ep0_trb_addr);
err1:
@@ -3005,7 +3015,7 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
kfree(dwc->setup_buf);
kfree(dwc->zlp_buf);
- dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
+ dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
dwc->ep0_trb, dwc->ep0_trb_addr);
dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 54ad100af35b..e40d47d47d82 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -136,8 +136,60 @@ struct ffs_epfile {
/*
* Buffer for holding data from partial reads which may happen since
* we’re rounding user read requests to a multiple of a max packet size.
+ *
+ * The pointer is initialised with NULL value and may be set by
+ * __ffs_epfile_read_data function to point to a temporary buffer.
+ *
+ * In normal operation, calls to __ffs_epfile_read_buffered will consume
+ * data from said buffer and eventually free it. Importantly, while the
+ * function is using the buffer, it sets the pointer to NULL. This is
+ * all right since __ffs_epfile_read_data and __ffs_epfile_read_buffered
+ * can never run concurrently (they are synchronised by epfile->mutex)
+ * so the latter will not assign a new value to the pointer.
+ *
+ * Meanwhile ffs_func_eps_disable frees the buffer (if the pointer is
+ * valid) and sets the pointer to READ_BUFFER_DROP value. This special
+ * value is the crux of the synchronisation between ffs_func_eps_disable and
+ * __ffs_epfile_read_data.
+ *
+ * Once __ffs_epfile_read_data is about to finish it will try to set the
+ * pointer back to its old value (as described above), but seeing as the
+ * pointer is not-NULL (namely READ_BUFFER_DROP) it will instead free
+ * the buffer.
+ *
+ * == State transitions ==
+ *
+ * • ptr == NULL: (initial state)
+ * ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP
+ * ◦ __ffs_epfile_read_buffered: nop
+ * ◦ __ffs_epfile_read_data allocates temp buffer: go to ptr == buf
+ * ◦ reading finishes: n/a, not in ‘and reading’ state
+ * • ptr == DROP:
+ * ◦ __ffs_epfile_read_buffer_free: nop
+ * ◦ __ffs_epfile_read_buffered: go to ptr == NULL
+ * ◦ __ffs_epfile_read_data allocates temp buffer: free buf, nop
+ * ◦ reading finishes: n/a, not in ‘and reading’ state
+ * • ptr == buf:
+ * ◦ __ffs_epfile_read_buffer_free: free buf, go to ptr == DROP
+ * ◦ __ffs_epfile_read_buffered: go to ptr == NULL and reading
+ * ◦ __ffs_epfile_read_data: n/a, __ffs_epfile_read_buffered
+ * is always called first
+ * ◦ reading finishes: n/a, not in ‘and reading’ state
+ * • ptr == NULL and reading:
+ * ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP and reading
+ * ◦ __ffs_epfile_read_buffered: n/a, mutex is held
+ * ◦ __ffs_epfile_read_data: n/a, mutex is held
+ * ◦ reading finishes and …
+ * … all data read: free buf, go to ptr == NULL
+ * … otherwise: go to ptr == buf and reading
+ * • ptr == DROP and reading:
+ * ◦ __ffs_epfile_read_buffer_free: nop
+ * ◦ __ffs_epfile_read_buffered: n/a, mutex is held
+ * ◦ __ffs_epfile_read_data: n/a, mutex is held
+ * ◦ reading finishes: free buf, go to ptr == DROP
*/
- struct ffs_buffer *read_buffer; /* P: epfile->mutex */
+ struct ffs_buffer *read_buffer;
+#define READ_BUFFER_DROP ((struct ffs_buffer *)ERR_PTR(-ESHUTDOWN))
char name[5];
@@ -736,25 +788,47 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
schedule_work(&io_data->work);
}
+static void __ffs_epfile_read_buffer_free(struct ffs_epfile *epfile)
+{
+ /*
+ * See comment in struct ffs_epfile for full read_buffer pointer
+ * synchronisation story.
+ */
+ struct ffs_buffer *buf = xchg(&epfile->read_buffer, READ_BUFFER_DROP);
+ if (buf && buf != READ_BUFFER_DROP)
+ kfree(buf);
+}
+
/* Assumes epfile->mutex is held. */
static ssize_t __ffs_epfile_read_buffered(struct ffs_epfile *epfile,
struct iov_iter *iter)
{
- struct ffs_buffer *buf = epfile->read_buffer;
+ /*
+ * Null out epfile->read_buffer so ffs_func_eps_disable does not free
+ * the buffer while we are using it. See comment in struct ffs_epfile
+ * for full read_buffer pointer synchronisation story.
+ */
+ struct ffs_buffer *buf = xchg(&epfile->read_buffer, NULL);
ssize_t ret;
- if (!buf)
+ if (!buf || buf == READ_BUFFER_DROP)
return 0;
ret = copy_to_iter(buf->data, buf->length, iter);
if (buf->length == ret) {
kfree(buf);
- epfile->read_buffer = NULL;
- } else if (unlikely(iov_iter_count(iter))) {
+ return ret;
+ }
+
+ if (unlikely(iov_iter_count(iter))) {
ret = -EFAULT;
} else {
buf->length -= ret;
buf->data += ret;
}
+
+ if (cmpxchg(&epfile->read_buffer, NULL, buf))
+ kfree(buf);
+
return ret;
}
@@ -783,7 +857,15 @@ static ssize_t __ffs_epfile_read_data(struct ffs_epfile *epfile,
buf->length = data_len;
buf->data = buf->storage;
memcpy(buf->storage, data + ret, data_len);
- epfile->read_buffer = buf;
+
+ /*
+ * At this point read_buffer is NULL or READ_BUFFER_DROP (if
+ * ffs_func_eps_disable has been called in the meanwhile). See comment
+ * in struct ffs_epfile for full read_buffer pointer synchronisation
+ * story.
+ */
+ if (unlikely(cmpxchg(&epfile->read_buffer, NULL, buf)))
+ kfree(buf);
return ret;
}
@@ -1097,8 +1179,7 @@ ffs_epfile_release(struct inode *inode, struct file *file)
ENTER();
- kfree(epfile->read_buffer);
- epfile->read_buffer = NULL;
+ __ffs_epfile_read_buffer_free(epfile);
ffs_data_closed(epfile->ffs);
return 0;
@@ -1724,24 +1805,20 @@ static void ffs_func_eps_disable(struct ffs_function *func)
unsigned count = func->ffs->eps_count;
unsigned long flags;
+ spin_lock_irqsave(&func->ffs->eps_lock, flags);
do {
- if (epfile)
- mutex_lock(&epfile->mutex);
- spin_lock_irqsave(&func->ffs->eps_lock, flags);
/* pending requests get nuked */
if (likely(ep->ep))
usb_ep_disable(ep->ep);
++ep;
- spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
if (epfile) {
epfile->ep = NULL;
- kfree(epfile->read_buffer);
- epfile->read_buffer = NULL;
- mutex_unlock(&epfile->mutex);
+ __ffs_epfile_read_buffer_free(epfile);
++epfile;
}
} while (--count);
+ spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
}
static int ffs_func_eps_enable(struct ffs_function *func)
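The long comment above describes a lock-free handoff of epfile->read_buffer between the read path and ffs_func_eps_disable. A rough userspace sketch of that handoff pattern follows, using C11 atomics in place of the kernel's xchg()/cmpxchg(); the buffer layout and function names are illustrative only, not the driver's code.

/*
 * Sketch of the handoff: the consumer steals the pointer with an exchange,
 * works on the buffer with no lock held, and only puts it back if the slot
 * is still empty; a "drop" sentinel left behind by the teardown path tells
 * the consumer to free the buffer instead of restoring it.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct buffer { size_t length; char data[64]; };

#define BUFFER_DROP ((struct buffer *)(uintptr_t)1) /* like READ_BUFFER_DROP */

static _Atomic(struct buffer *) slot;               /* like epfile->read_buffer */

/* teardown path: free whatever is there and leave the DROP marker behind */
static void buffer_free_and_drop(void)
{
	struct buffer *buf = atomic_exchange(&slot, BUFFER_DROP);

	if (buf && buf != BUFFER_DROP)
		free(buf);
}

/* consumer: take the buffer, use it unlocked, try to give it back */
static void consume_some(size_t n)
{
	struct buffer *buf = atomic_exchange(&slot, NULL);
	struct buffer *expected = NULL;

	if (!buf || buf == BUFFER_DROP)
		return;

	buf->length = buf->length > n ? buf->length - n : 0;
	if (buf->length == 0) {
		free(buf);
		return;
	}

	/* put it back only if nobody set DROP meanwhile; otherwise free it */
	if (!atomic_compare_exchange_strong(&slot, &expected, buf))
		free(buf);
}

int main(void)
{
	struct buffer *buf = calloc(1, sizeof(*buf));

	buf->length = 10;
	atomic_store(&slot, buf);

	consume_some(4);            /* leaves 6 bytes buffered */
	buffer_free_and_drop();     /* teardown wins the race from now on */
	consume_some(4);            /* sees DROP, does nothing */
	return 0;
}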
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 9c8c9ed1dc9e..fe1811650dbc 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -590,8 +590,9 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
/* throttle high/super speed IRQ rate back slightly */
if (gadget_is_dualspeed(dev->gadget))
- req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
- dev->gadget->speed == USB_SPEED_SUPER)
+ req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH ||
+ dev->gadget->speed == USB_SPEED_SUPER)) &&
+ !list_empty(&dev->tx_reqs))
? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
: 0;
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index bb1f6c8f0f01..45bc997d0711 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1978,7 +1978,7 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret);
goto err;
}
- ep->ep.name = name;
+ ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", ep->index);
ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 876dca4fc216..a268d9e8d6cf 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -39,7 +39,7 @@
#define DRIVER_DESC "EHCI generic platform driver"
#define EHCI_MAX_CLKS 4
-#define EHCI_MAX_RSTS 3
+#define EHCI_MAX_RSTS 4
#define hcd_to_ehci_priv(h) ((struct ehci_platform_priv *)hcd_to_ehci(h)->priv)
struct ehci_platform_priv {
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 5b5880c0ae19..b38a228134df 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -221,6 +221,12 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
ohci->num_ports = board->ports;
at91_start_hc(pdev);
+ /*
+ * The RemoteWakeupConnected bit has to be set explicitly
+ * before calling ohci_run. The reset value of this bit is 0.
+ */
+ ohci->hc_control = OHCI_CTRL_RWC;
+
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval == 0) {
device_wakeup_enable(hcd->self.controller);
@@ -677,9 +683,6 @@ ohci_hcd_at91_drv_suspend(struct device *dev)
* REVISIT: some boards will be able to turn VBUS off...
*/
if (!ohci_at91->wakeup) {
- ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);
- ohci->hc_control &= OHCI_CTRL_RWC;
- ohci_writel(ohci, ohci->hc_control, &ohci->regs->control);
ohci->rh_state = OHCI_RH_HALTED;
/* flush the writes */
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 1700908b84ef..86612ac3fda2 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -72,7 +72,7 @@
static const char hcd_name [] = "ohci_hcd";
#define STATECHANGE_DELAY msecs_to_jiffies(300)
-#define IO_WATCHDOG_DELAY msecs_to_jiffies(250)
+#define IO_WATCHDOG_DELAY msecs_to_jiffies(275)
#include "ohci.h"
#include "pci-quirks.h"
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 730b9fd26685..0ef16900efed 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1166,7 +1166,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_set_link_state(xhci, port_array, wIndex,
XDEV_RESUME);
spin_unlock_irqrestore(&xhci->lock, flags);
- msleep(20);
+ msleep(USB_RESUME_TIMEOUT);
spin_lock_irqsave(&xhci->lock, flags);
xhci_set_link_state(xhci, port_array, wIndex,
XDEV_U0);
@@ -1355,6 +1355,35 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
return 0;
}
+/*
+ * Workaround for missing Cold Attach Status (CAS) if a device is re-plugged in S3:
+ * warm-reset a USB3 device stuck in polling or compliance mode after resume.
+ * See Intel 100/c230 series PCH specification update Doc #332692-006 Errata #8
+ */
+static bool xhci_port_missing_cas_quirk(int port_index,
+ __le32 __iomem **port_array)
+{
+ u32 portsc;
+
+ portsc = readl(port_array[port_index]);
+
+ /* if any of these are set we are not stuck */
+ if (portsc & (PORT_CONNECT | PORT_CAS))
+ return false;
+
+ if (((portsc & PORT_PLS_MASK) != XDEV_POLLING) &&
+ ((portsc & PORT_PLS_MASK) != XDEV_COMP_MODE))
+ return false;
+
+ /* clear wakeup/change bits, and do a warm port reset */
+ portsc &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
+ portsc |= PORT_WR;
+ writel(portsc, port_array[port_index]);
+ /* flush write */
+ readl(port_array[port_index]);
+ return true;
+}
+
int xhci_bus_resume(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -1392,6 +1421,14 @@ int xhci_bus_resume(struct usb_hcd *hcd)
u32 temp;
temp = readl(port_array[port_index]);
+
+ /* warm reset CAS limited ports stuck in polling/compliance */
+ if ((xhci->quirks & XHCI_MISSING_CAS) &&
+ (hcd->speed >= HCD_USB3) &&
+ xhci_port_missing_cas_quirk(port_index, port_array)) {
+ xhci_dbg(xhci, "reset stuck port %d\n", port_index);
+ continue;
+ }
if (DEV_SUPERSPEED_ANY(temp))
temp &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
else
@@ -1410,7 +1447,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
if (need_usb2_u3_exit) {
spin_unlock_irqrestore(&xhci->lock, flags);
- msleep(20);
+ msleep(USB_RESUME_TIMEOUT);
spin_lock_irqsave(&xhci->lock, flags);
}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index d7b0f97abbad..e96ae80d107e 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -45,11 +45,13 @@
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
+#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI 0x9cb1
#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
+#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
static const char hcd_name[] = "xhci_hcd";
@@ -153,7 +155,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_SPURIOUS_REBOOT;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
- pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
+ (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI)) {
xhci->quirks |= XHCI_SPURIOUS_REBOOT;
xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
}
@@ -169,6 +172,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
}
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
+ xhci->quirks |= XHCI_MISSING_CAS;
+
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
pdev->device == PCI_DEVICE_ID_EJ168) {
xhci->quirks |= XHCI_RESET_ON_RESUME;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index b2c1dc5dc0f3..f945380035d0 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -314,6 +314,8 @@ struct xhci_op_regs {
#define XDEV_U2 (0x2 << 5)
#define XDEV_U3 (0x3 << 5)
#define XDEV_INACTIVE (0x6 << 5)
+#define XDEV_POLLING (0x7 << 5)
+#define XDEV_COMP_MODE (0xa << 5)
#define XDEV_RESUME (0xf << 5)
/* true: port has power (see HCC_PPC) */
#define PORT_POWER (1 << 9)
@@ -1653,6 +1655,7 @@ struct xhci_hcd {
#define XHCI_MTK_HOST (1 << 21)
#define XHCI_SSIC_PORT_UNUSED (1 << 22)
#define XHCI_NO_64BIT_SUPPORT (1 << 23)
+#define XHCI_MISSING_CAS (1 << 24)
unsigned int num_active_eps;
unsigned int limit_active_eps;
/* There are two roothubs to keep track of bus suspend info for */
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index bff4869a57cd..4042ea017985 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1255,6 +1255,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
map_dma_buffer(request, musb, musb_ep);
+ pm_runtime_get_sync(musb->controller);
spin_lock_irqsave(&musb->lock, lockflags);
/* don't queue if the ep is down */
@@ -1275,6 +1276,9 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
unlock:
spin_unlock_irqrestore(&musb->lock, lockflags);
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+
return status;
}
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 1ab6973d4f61..cc1225485509 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -287,6 +287,7 @@ static int omap2430_musb_init(struct musb *musb)
}
musb->isr = omap2430_musb_interrupt;
phy_init(musb->phy);
+ phy_power_on(musb->phy);
l = musb_readl(musb->mregs, OTG_INTERFSEL);
@@ -323,8 +324,6 @@ static void omap2430_musb_enable(struct musb *musb)
struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
struct omap_musb_board_data *data = pdata->board_data;
- if (!WARN_ON(!musb->phy))
- phy_power_on(musb->phy);
switch (glue->status) {
@@ -361,9 +360,6 @@ static void omap2430_musb_disable(struct musb *musb)
struct device *dev = musb->controller;
struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
- if (!WARN_ON(!musb->phy))
- phy_power_off(musb->phy);
-
if (glue->status != MUSB_UNKNOWN)
omap_control_usb_set_mode(glue->control_otghs,
USB_MODE_DISCONNECT);
@@ -375,6 +371,7 @@ static int omap2430_musb_exit(struct musb *musb)
struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
omap2430_low_level_exit(musb);
+ phy_power_off(musb->phy);
phy_exit(musb->phy);
musb->phy = NULL;
cancel_work_sync(&glue->omap_musb_mailbox_work);
diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c
index 1d70add926f0..d544b331c9f2 100644
--- a/drivers/usb/renesas_usbhs/rcar3.c
+++ b/drivers/usb/renesas_usbhs/rcar3.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/delay.h>
#include <linux/io.h>
#include "common.h"
#include "rcar3.h"
@@ -35,10 +36,13 @@ static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG);
- if (enable)
+ if (enable) {
usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
- else
+ /* The controller on R-Car Gen3 needs to wait up to 45 usec */
+ udelay(45);
+ } else {
usbhs_bset(priv, LPSTS, LPSTS_SUSPM, 0);
+ }
return 0;
}
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 54a4de0efdba..f61477bed3a8 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -1077,7 +1077,9 @@ static int cp210x_tiocmget(struct tty_struct *tty)
u8 control;
int result;
- cp210x_read_u8_reg(port, CP210X_GET_MDMSTS, &control);
+ result = cp210x_read_u8_reg(port, CP210X_GET_MDMSTS, &control);
+ if (result)
+ return result;
result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0)
|((control & CONTROL_RTS) ? TIOCM_RTS : 0)
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index b2d767e743fc..0ff7f38d7800 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -986,7 +986,8 @@ static const struct usb_device_id id_table_combined[] = {
/* ekey Devices */
{ USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
/* Infineon Devices */
- { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC1798_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC2X7_PID, 1) },
/* GE Healthcare devices */
{ USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
/* Active Research (Actisense) devices */
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index f87a938cf005..21011c0a4c64 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -626,8 +626,9 @@
/*
* Infineon Technologies
*/
-#define INFINEON_VID 0x058b
-#define INFINEON_TRIBOARD_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
+#define INFINEON_VID 0x058b
+#define INFINEON_TRIBOARD_TC1798_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
+#define INFINEON_TRIBOARD_TC2X7_PID 0x0043 /* DAS JTAG TriBoard TC2X7 V1.0 */
/*
* Acton Research Corp.
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index d213cf44a7e4..4a037b4a79cf 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1078,7 +1078,8 @@ static int usb_serial_probe(struct usb_interface *interface,
serial->disconnected = 0;
- usb_serial_console_init(serial->port[0]->minor);
+ if (num_ports > 0)
+ usb_serial_console_init(serial->port[0]->minor);
exit:
module_put(type->driver.owner);
return 0;
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c
index 79b2b628066d..79451f7ef1b7 100644
--- a/drivers/usb/wusbcore/crypto.c
+++ b/drivers/usb/wusbcore/crypto.c
@@ -133,6 +133,13 @@ static void bytewise_xor(void *_bo, const void *_bi1, const void *_bi2,
bo[itr] = bi1[itr] ^ bi2[itr];
}
+/* Scratch space for MAC calculations. */
+struct wusb_mac_scratch {
+ struct aes_ccm_b0 b0;
+ struct aes_ccm_b1 b1;
+ struct aes_ccm_a ax;
+};
+
/*
* CC-MAC function WUSB1.0[6.5]
*
@@ -197,16 +204,15 @@ static void bytewise_xor(void *_bo, const void *_bi1, const void *_bi2,
* what sg[4] is for. Maybe there is a smarter way to do this.
*/
static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
- struct crypto_cipher *tfm_aes, void *mic,
+ struct crypto_cipher *tfm_aes,
+ struct wusb_mac_scratch *scratch,
+ void *mic,
const struct aes_ccm_nonce *n,
const struct aes_ccm_label *a, const void *b,
size_t blen)
{
int result = 0;
SKCIPHER_REQUEST_ON_STACK(req, tfm_cbc);
- struct aes_ccm_b0 b0;
- struct aes_ccm_b1 b1;
- struct aes_ccm_a ax;
struct scatterlist sg[4], sg_dst;
void *dst_buf;
size_t dst_size;
@@ -218,16 +224,17 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
* These checks should be compile time optimized out
* ensure @a fills b1's mac_header and following fields
*/
- WARN_ON(sizeof(*a) != sizeof(b1) - sizeof(b1.la));
- WARN_ON(sizeof(b0) != sizeof(struct aes_ccm_block));
- WARN_ON(sizeof(b1) != sizeof(struct aes_ccm_block));
- WARN_ON(sizeof(ax) != sizeof(struct aes_ccm_block));
+ WARN_ON(sizeof(*a) != sizeof(scratch->b1) - sizeof(scratch->b1.la));
+ WARN_ON(sizeof(scratch->b0) != sizeof(struct aes_ccm_block));
+ WARN_ON(sizeof(scratch->b1) != sizeof(struct aes_ccm_block));
+ WARN_ON(sizeof(scratch->ax) != sizeof(struct aes_ccm_block));
result = -ENOMEM;
zero_padding = blen % sizeof(struct aes_ccm_block);
if (zero_padding)
zero_padding = sizeof(struct aes_ccm_block) - zero_padding;
- dst_size = blen + sizeof(b0) + sizeof(b1) + zero_padding;
+ dst_size = blen + sizeof(scratch->b0) + sizeof(scratch->b1) +
+ zero_padding;
dst_buf = kzalloc(dst_size, GFP_KERNEL);
if (!dst_buf)
goto error_dst_buf;
@@ -235,9 +242,9 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
memset(iv, 0, sizeof(iv));
/* Setup B0 */
- b0.flags = 0x59; /* Format B0 */
- b0.ccm_nonce = *n;
- b0.lm = cpu_to_be16(0); /* WUSB1.0[6.5] sez l(m) is 0 */
+ scratch->b0.flags = 0x59; /* Format B0 */
+ scratch->b0.ccm_nonce = *n;
+ scratch->b0.lm = cpu_to_be16(0); /* WUSB1.0[6.5] sez l(m) is 0 */
/* Setup B1
*
@@ -246,12 +253,12 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
* 14'--after clarification, it means to use A's contents
* for MAC Header, EO, sec reserved and padding.
*/
- b1.la = cpu_to_be16(blen + 14);
- memcpy(&b1.mac_header, a, sizeof(*a));
+ scratch->b1.la = cpu_to_be16(blen + 14);
+ memcpy(&scratch->b1.mac_header, a, sizeof(*a));
sg_init_table(sg, ARRAY_SIZE(sg));
- sg_set_buf(&sg[0], &b0, sizeof(b0));
- sg_set_buf(&sg[1], &b1, sizeof(b1));
+ sg_set_buf(&sg[0], &scratch->b0, sizeof(scratch->b0));
+ sg_set_buf(&sg[1], &scratch->b1, sizeof(scratch->b1));
sg_set_buf(&sg[2], b, blen);
/* 0 if well behaved :) */
sg_set_buf(&sg[3], bzero, zero_padding);
@@ -276,11 +283,12 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
* POS Crypto API: size is assumed to be AES's block size.
* Thanks for documenting it -- tip taken from airo.c
*/
- ax.flags = 0x01; /* as per WUSB 1.0 spec */
- ax.ccm_nonce = *n;
- ax.counter = 0;
- crypto_cipher_encrypt_one(tfm_aes, (void *)&ax, (void *)&ax);
- bytewise_xor(mic, &ax, iv, 8);
+ scratch->ax.flags = 0x01; /* as per WUSB 1.0 spec */
+ scratch->ax.ccm_nonce = *n;
+ scratch->ax.counter = 0;
+ crypto_cipher_encrypt_one(tfm_aes, (void *)&scratch->ax,
+ (void *)&scratch->ax);
+ bytewise_xor(mic, &scratch->ax, iv, 8);
result = 8;
error_cbc_crypt:
kfree(dst_buf);
@@ -303,6 +311,7 @@ ssize_t wusb_prf(void *out, size_t out_size,
struct aes_ccm_nonce n = *_n;
struct crypto_skcipher *tfm_cbc;
struct crypto_cipher *tfm_aes;
+ struct wusb_mac_scratch *scratch;
u64 sfn = 0;
__le64 sfn_le;
@@ -329,17 +338,25 @@ ssize_t wusb_prf(void *out, size_t out_size,
printk(KERN_ERR "E: can't set AES key: %d\n", (int)result);
goto error_setkey_aes;
}
+ scratch = kmalloc(sizeof(*scratch), GFP_KERNEL);
+ if (!scratch) {
+ result = -ENOMEM;
+ goto error_alloc_scratch;
+ }
for (bitr = 0; bitr < (len + 63) / 64; bitr++) {
sfn_le = cpu_to_le64(sfn++);
memcpy(&n.sfn, &sfn_le, sizeof(n.sfn)); /* n.sfn++... */
- result = wusb_ccm_mac(tfm_cbc, tfm_aes, out + bytes,
+ result = wusb_ccm_mac(tfm_cbc, tfm_aes, scratch, out + bytes,
&n, a, b, blen);
if (result < 0)
goto error_ccm_mac;
bytes += result;
}
result = bytes;
+
+ kfree(scratch);
+error_alloc_scratch:
error_ccm_mac:
error_setkey_aes:
crypto_free_cipher(tfm_aes);
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index d624a527777f..031bc08d000d 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -829,8 +829,9 @@ static long vfio_pci_ioctl(void *device_data,
} else if (cmd == VFIO_DEVICE_SET_IRQS) {
struct vfio_irq_set hdr;
+ size_t size;
u8 *data = NULL;
- int ret = 0;
+ int max, ret = 0;
minsz = offsetofend(struct vfio_irq_set, count);
@@ -838,23 +839,31 @@ static long vfio_pci_ioctl(void *device_data,
return -EFAULT;
if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS ||
+ hdr.count >= (U32_MAX - hdr.start) ||
hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
VFIO_IRQ_SET_ACTION_TYPE_MASK))
return -EINVAL;
- if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
- size_t size;
- int max = vfio_pci_get_irq_count(vdev, hdr.index);
+ max = vfio_pci_get_irq_count(vdev, hdr.index);
+ if (hdr.start >= max || hdr.start + hdr.count > max)
+ return -EINVAL;
- if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
- size = sizeof(uint8_t);
- else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
- size = sizeof(int32_t);
- else
- return -EINVAL;
+ switch (hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
+ case VFIO_IRQ_SET_DATA_NONE:
+ size = 0;
+ break;
+ case VFIO_IRQ_SET_DATA_BOOL:
+ size = sizeof(uint8_t);
+ break;
+ case VFIO_IRQ_SET_DATA_EVENTFD:
+ size = sizeof(int32_t);
+ break;
+ default:
+ return -EINVAL;
+ }
- if (hdr.argsz - minsz < hdr.count * size ||
- hdr.start >= max || hdr.start + hdr.count > max)
+ if (size) {
+ if (hdr.argsz - minsz < hdr.count * size)
return -EINVAL;
data = memdup_user((void __user *)(arg + minsz),
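The hunk above tightens validation of the user-supplied (start, count) window before any size arithmetic is done with it. A minimal standalone sketch of that overflow-safe range check follows; the struct and limit names are made up for illustration and are not the vfio uapi.

/*
 * Sketch of the range check: reject the window if start + count would wrap
 * a u32, or if it runs past the number of available vectors.
 */
#include <stdint.h>
#include <stdio.h>

struct irq_set_hdr { uint32_t start; uint32_t count; };

static int validate_window(const struct irq_set_hdr *hdr, uint32_t max)
{
	/* start + count must not wrap around UINT32_MAX */
	if (hdr->count >= UINT32_MAX - hdr->start)
		return -1;
	/* and the whole window must fit below max */
	if (hdr->start >= max || hdr->start + hdr->count > max)
		return -1;
	return 0;
}

int main(void)
{
	struct irq_set_hdr ok = { .start = 2, .count = 4 };
	struct irq_set_hdr wraps = { .start = 10, .count = UINT32_MAX - 5 };

	printf("ok: %d\n", validate_window(&ok, 8));       /* 0  */
	printf("wraps: %d\n", validate_window(&wraps, 8)); /* -1 */
	return 0;
}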
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index c2e60893cd09..1c46045b0e7f 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -256,7 +256,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
if (!is_irq_none(vdev))
return -EINVAL;
- vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
+ vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
if (!vdev->ctx)
return -ENOMEM;
diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c
index 8a1076beecd3..32b0a7543433 100644
--- a/drivers/video/of_display_timing.c
+++ b/drivers/video/of_display_timing.c
@@ -88,6 +88,15 @@ static int of_parse_display_timing(const struct device_node *np,
dt->flags |= val ? DISPLAY_FLAGS_PIXDATA_POSEDGE :
DISPLAY_FLAGS_PIXDATA_NEGEDGE;
+ if (!of_property_read_u32(np, "syncclk-active", &val))
+ dt->flags |= val ? DISPLAY_FLAGS_SYNC_POSEDGE :
+ DISPLAY_FLAGS_SYNC_NEGEDGE;
+ else if (dt->flags & (DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_NEGEDGE))
+ dt->flags |= dt->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE ?
+ DISPLAY_FLAGS_SYNC_POSEDGE :
+ DISPLAY_FLAGS_SYNC_NEGEDGE;
+
if (of_property_read_bool(np, "interlaced"))
dt->flags |= DISPLAY_FLAGS_INTERLACED;
if (of_property_read_bool(np, "doublescan"))
@@ -110,7 +119,7 @@ static int of_parse_display_timing(const struct device_node *np,
* @name: name of the timing node
* @dt: display_timing struct to fill
**/
-int of_get_display_timing(struct device_node *np, const char *name,
+int of_get_display_timing(const struct device_node *np, const char *name,
struct display_timing *dt)
{
struct device_node *timing_np;
@@ -133,7 +142,7 @@ EXPORT_SYMBOL_GPL(of_get_display_timing);
* of_get_display_timings - parse all display_timing entries from a device_node
* @np: device_node with the subnodes
**/
-struct display_timings *of_get_display_timings(struct device_node *np)
+struct display_timings *of_get_display_timings(const struct device_node *np)
{
struct device_node *timings_np;
struct device_node *entry;
@@ -249,7 +258,7 @@ EXPORT_SYMBOL_GPL(of_get_display_timings);
* of_display_timings_exist - check if a display-timings node is provided
* @np: device_node with the timing
**/
-int of_display_timings_exist(struct device_node *np)
+int of_display_timings_exist(const struct device_node *np)
{
struct device_node *timings_np;
diff --git a/drivers/virtio/config.c b/drivers/virtio/config.c
deleted file mode 100644
index f70bcd2ff98f..000000000000
--- a/drivers/virtio/config.c
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Configuration space parsing helpers for virtio.
- *
- * The configuration is [type][len][... len bytes ...] fields.
- *
- * Copyright 2007 Rusty Russell, IBM Corporation.
- * GPL v2 or later.
- */
-#include <linux/err.h>
-#include <linux/virtio.h>
-#include <linux/virtio_config.h>
-#include <linux/bug.h>
-
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 4e7003db12c4..181793f07852 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -577,6 +577,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
virtio_device_ready(vdev);
+ if (towards_target(vb))
+ virtballoon_changed(vdev);
return 0;
out_del_vqs:
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index 8c4e61783441..6d9e5173d5fa 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -212,10 +212,18 @@ int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
return -ENODEV;
}
- rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
- if (rc)
- rc = dma_set_mask_and_coherent(&pci_dev->dev,
- DMA_BIT_MASK(32));
+ rc = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+ rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
+ } else {
+ /*
+ * The virtio ring base address is expressed as a 32-bit PFN,
+ * with a page size of 1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT.
+ */
+ dma_set_coherent_mask(&pci_dev->dev,
+ DMA_BIT_MASK(32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT));
+ }
+
if (rc)
dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
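The comment above notes that the legacy virtio ring base is stored as a 32-bit PFN, so the coherent mask only needs to cover 32 bits plus the page-offset bits. A small worked example of that arithmetic follows; the shift value of 12 (4 KiB ring alignment) is an assumption for illustration.

/*
 * Worked example: with a 32-bit PFN and 12 offset bits, coherent memory may
 * live anywhere below 1ULL << 44, i.e. a 16 TiB window.
 */
#include <inttypes.h>
#include <stdio.h>

#define DMA_BIT_MASK(n)              (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
#define VIRTIO_PCI_QUEUE_ADDR_SHIFT  12   /* assumed here */

int main(void)
{
	uint64_t mask = DMA_BIT_MASK(32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT);

	/* prints 0xfffffffffff, i.e. 2^44 - 1 */
	printf("coherent mask: 0x%" PRIx64 " (%d bits)\n",
	       mask, 32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT);
	return 0;
}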
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index ed9c9eeedfe5..489bfc61cf30 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -167,7 +167,7 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
* making all of the arch DMA ops work on the vring device itself
* is a mess. For now, we use the parent device for DMA ops.
*/
-static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
+static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
return vq->vq.vdev->dev.parent;
}
@@ -732,7 +732,8 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
- vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+ if (!vq->event)
+ vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
}
}
@@ -764,7 +765,8 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
* entry. Always do both to keep code simple. */
if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
- vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+ if (!vq->event)
+ vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
}
vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
END_USE(vq);
@@ -832,10 +834,11 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
* more to do. */
/* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
* either clear the flags bit or point the event index at the next
- * entry. Always do both to keep code simple. */
+ * entry. Always update the event index to keep code simple. */
if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
- vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+ if (!vq->event)
+ vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
}
/* TODO: tune this threshold */
bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
@@ -953,7 +956,8 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
/* No callback? Tell other side not to bother us. */
if (!callback) {
vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
- vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
+ if (!vq->event)
+ vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
}
/* Put everything in free lists. */
diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c
index 15b64076bc26..bdbadaa47ef3 100644
--- a/drivers/vme/vme.c
+++ b/drivers/vme/vme.c
@@ -156,12 +156,16 @@ size_t vme_get_size(struct vme_resource *resource)
case VME_MASTER:
retval = vme_master_get(resource, &enabled, &base, &size,
&aspace, &cycle, &dwidth);
+ if (retval)
+ return 0;
return size;
break;
case VME_SLAVE:
retval = vme_slave_get(resource, &enabled, &base, &size,
&buf_base, &aspace, &cycle);
+ if (retval)
+ return 0;
return size;
break;
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index e12bd3635f83..26e5e8507f03 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -168,7 +168,9 @@ out:
#endif /* CONFIG_HIBERNATE_CALLBACKS */
struct shutdown_handler {
- const char *command;
+#define SHUTDOWN_CMD_SIZE 11
+ const char command[SHUTDOWN_CMD_SIZE];
+ bool flag;
void (*cb)(void);
};
@@ -206,22 +208,22 @@ static void do_reboot(void)
ctrl_alt_del();
}
+static struct shutdown_handler shutdown_handlers[] = {
+ { "poweroff", true, do_poweroff },
+ { "halt", false, do_poweroff },
+ { "reboot", true, do_reboot },
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+ { "suspend", true, do_suspend },
+#endif
+};
+
static void shutdown_handler(struct xenbus_watch *watch,
const char **vec, unsigned int len)
{
char *str;
struct xenbus_transaction xbt;
int err;
- static struct shutdown_handler handlers[] = {
- { "poweroff", do_poweroff },
- { "halt", do_poweroff },
- { "reboot", do_reboot },
-#ifdef CONFIG_HIBERNATE_CALLBACKS
- { "suspend", do_suspend },
-#endif
- {NULL, NULL},
- };
- static struct shutdown_handler *handler;
+ int idx;
if (shutting_down != SHUTDOWN_INVALID)
return;
@@ -238,13 +240,13 @@ static void shutdown_handler(struct xenbus_watch *watch,
return;
}
- for (handler = &handlers[0]; handler->command; handler++) {
- if (strcmp(str, handler->command) == 0)
+ for (idx = 0; idx < ARRAY_SIZE(shutdown_handlers); idx++) {
+ if (strcmp(str, shutdown_handlers[idx].command) == 0)
break;
}
/* Only acknowledge commands which we are prepared to handle. */
- if (handler->cb)
+ if (idx < ARRAY_SIZE(shutdown_handlers))
xenbus_write(xbt, "control", "shutdown", "");
err = xenbus_transaction_end(xbt, 0);
@@ -253,8 +255,8 @@ static void shutdown_handler(struct xenbus_watch *watch,
goto again;
}
- if (handler->cb) {
- handler->cb();
+ if (idx < ARRAY_SIZE(shutdown_handlers)) {
+ shutdown_handlers[idx].cb();
} else {
pr_info("Ignoring shutdown request: %s\n", str);
shutting_down = SHUTDOWN_INVALID;
@@ -310,6 +312,9 @@ static struct notifier_block xen_reboot_nb = {
static int setup_shutdown_watcher(void)
{
int err;
+ int idx;
+#define FEATURE_PATH_SIZE (SHUTDOWN_CMD_SIZE + sizeof("feature-"))
+ char node[FEATURE_PATH_SIZE];
err = register_xenbus_watch(&shutdown_watch);
if (err) {
@@ -326,6 +331,14 @@ static int setup_shutdown_watcher(void)
}
#endif
+ for (idx = 0; idx < ARRAY_SIZE(shutdown_handlers); idx++) {
+ if (!shutdown_handlers[idx].flag)
+ continue;
+ snprintf(node, FEATURE_PATH_SIZE, "feature-%s",
+ shutdown_handlers[idx].command);
+ xenbus_printf(XBT_NIL, "control", node, "%u", 1);
+ }
+
return 0;
}
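The rework above replaces a NULL-terminated handler walk with a fixed-size, index-bounded table and uses a per-entry flag to advertise feature-<command> nodes. A small standalone sketch of that table-driven dispatch pattern follows, with illustrative names and printf in place of the xenbus calls.

/*
 * Sketch of the dispatch: fixed-size command strings in a static array,
 * a bounded index lookup, and a per-entry flag that decides whether a
 * "feature-<command>" node is advertised.
 */
#include <stdio.h>
#include <string.h>

#define CMD_SIZE 11
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct handler {
	char command[CMD_SIZE];
	int advertise;
	void (*cb)(void);
};

static void do_poweroff(void) { puts("-> poweroff"); }
static void do_reboot(void)   { puts("-> reboot"); }

static const struct handler handlers[] = {
	{ "poweroff", 1, do_poweroff },
	{ "halt",     0, do_poweroff },
	{ "reboot",   1, do_reboot },
};

static void dispatch(const char *cmd)
{
	size_t idx;

	for (idx = 0; idx < ARRAY_SIZE(handlers); idx++)
		if (strcmp(cmd, handlers[idx].command) == 0)
			break;

	if (idx < ARRAY_SIZE(handlers))
		handlers[idx].cb();
	else
		printf("ignoring unknown command: %s\n", cmd);
}

int main(void)
{
	char node[CMD_SIZE + sizeof("feature-")];
	size_t idx;

	/* advertise only the entries whose flag is set */
	for (idx = 0; idx < ARRAY_SIZE(handlers); idx++) {
		if (!handlers[idx].advertise)
			continue;
		snprintf(node, sizeof(node), "feature-%s", handlers[idx].command);
		printf("advertising %s\n", node);
	}

	dispatch("reboot");
	dispatch("hibernate");
	return 0;
}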
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index c1010f018bd8..1e8be12ebb55 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -364,7 +364,7 @@ out:
static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
{
- struct watch_adapter *watch, *tmp_watch;
+ struct watch_adapter *watch;
char *path, *token;
int err, rc;
LIST_HEAD(staging_q);
@@ -399,7 +399,7 @@ static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
}
list_add(&watch->list, &u->watches);
} else {
- list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
+ list_for_each_entry(watch, &u->watches, list) {
if (!strcmp(watch->token, token) &&
!strcmp(watch->watch.node, path)) {
unregister_xenbus_watch(&watch->watch);
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 611a23119675..6d40a972ffb2 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -335,7 +335,9 @@ static int backend_state;
static void xenbus_reset_backend_state_changed(struct xenbus_watch *w,
const char **v, unsigned int l)
{
- xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i", &backend_state);
+ if (xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i",
+ &backend_state) != 1)
+ backend_state = XenbusStateUnknown;
printk(KERN_DEBUG "XENBUS: backend %s %s\n",
v[XS_WATCH_PATH], xenbus_strstate(backend_state));
wake_up(&backend_state_wq);
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 2037e7a77a37..d764236072b1 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -91,11 +91,9 @@ static const struct afs_call_type afs_SRXCBTellMeAboutYourself = {
*/
bool afs_cm_incoming_call(struct afs_call *call)
{
- u32 operation_id = ntohl(call->operation_ID);
+ _enter("{CB.OP %u}", call->operation_ID);
- _enter("{CB.OP %u}", operation_id);
-
- switch (operation_id) {
+ switch (call->operation_ID) {
case CBCallBack:
call->type = &afs_SRXCBCallBack;
return true;
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 96f4d764d1a6..31c616ab9b40 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -364,7 +364,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
buffer = kmap(page);
ret = afs_extract_data(call, buffer,
call->count, true);
- kunmap(buffer);
+ kunmap(page);
if (ret < 0)
return ret;
}
@@ -397,7 +397,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
page = call->reply3;
buffer = kmap(page);
memset(buffer + call->count, 0, PAGE_SIZE - call->count);
- kunmap(buffer);
+ kunmap(page);
}
_leave(" = 0 [done]");
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 5497c8496055..535a38d2c1d0 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -112,7 +112,7 @@ struct afs_call {
bool need_attention; /* T if RxRPC poked us */
u16 service_id; /* RxRPC service ID to call */
__be16 port; /* target UDP port */
- __be32 operation_ID; /* operation ID for an incoming call */
+ u32 operation_ID; /* operation ID for an incoming call */
u32 count; /* count for use in unmarshalling */
__be32 tmp; /* place to extract temporary data */
afs_dataversion_t store_version; /* updated version expected from store */
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 477928b25940..25f05a8d21b1 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -676,10 +676,11 @@ static int afs_deliver_cm_op_id(struct afs_call *call)
ASSERTCMP(call->offset, <, 4);
/* the operation ID forms the first four bytes of the request data */
- ret = afs_extract_data(call, &call->operation_ID, 4, true);
+ ret = afs_extract_data(call, &call->tmp, 4, true);
if (ret < 0)
return ret;
+ call->operation_ID = ntohl(call->tmp);
call->state = AFS_CALL_AWAIT_REQUEST;
call->offset = 0;
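The afs hunks above stop storing the operation ID in network byte order: the wire bytes land in a temporary and are converted once with ntohl(), so later comparisons use plain host-order integers. A minimal userspace sketch of that extraction follows; the field names are illustrative.

/*
 * Sketch: copy the first four wire bytes into a scratch field, convert once
 * with ntohl(), and keep the stored ID in host order for dispatch.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct call {
	uint32_t tmp;          /* raw big-endian scratch, like call->tmp */
	uint32_t operation_id; /* host order, like call->operation_ID */
};

static void deliver_op_id(struct call *call, const unsigned char *wire)
{
	/* the operation ID forms the first four bytes of the request data */
	memcpy(&call->tmp, wire, sizeof(call->tmp));
	call->operation_id = ntohl(call->tmp);
}

int main(void)
{
	/* 0x000000cc on the wire, i.e. operation 204 */
	const unsigned char wire[4] = { 0x00, 0x00, 0x00, 0xcc };
	struct call call = { 0 };

	deliver_op_id(&call, wire);
	printf("operation ID: %u\n", call.operation_id); /* 204 */
	return 0;
}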
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 210c94ac8818..4607af38c72e 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2647,7 +2647,10 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
btrfs_free_delayed_extent_op(extent_op);
if (ret) {
+ spin_lock(&delayed_refs->lock);
locked_ref->processing = 0;
+ delayed_refs->num_heads_ready++;
+ spin_unlock(&delayed_refs->lock);
btrfs_delayed_ref_unlock(locked_ref);
btrfs_put_delayed_ref(ref);
btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 66a755150056..8ed05d95584a 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -5569,7 +5569,7 @@ void le_bitmap_set(u8 *map, unsigned int start, int len)
*p |= mask_to_set;
len -= bits_to_set;
bits_to_set = BITS_PER_BYTE;
- mask_to_set = ~(u8)0;
+ mask_to_set = ~0;
p++;
}
if (len) {
@@ -5589,7 +5589,7 @@ void le_bitmap_clear(u8 *map, unsigned int start, int len)
*p &= ~mask_to_clear;
len -= bits_to_clear;
bits_to_clear = BITS_PER_BYTE;
- mask_to_clear = ~(u8)0;
+ mask_to_clear = ~0;
p++;
}
if (len) {
@@ -5679,7 +5679,7 @@ void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
kaddr[offset] |= mask_to_set;
len -= bits_to_set;
bits_to_set = BITS_PER_BYTE;
- mask_to_set = ~(u8)0;
+ mask_to_set = ~0;
if (++offset >= PAGE_SIZE && len > 0) {
offset = 0;
page = eb->pages[++i];
@@ -5721,7 +5721,7 @@ void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
kaddr[offset] &= ~mask_to_clear;
len -= bits_to_clear;
bits_to_clear = BITS_PER_BYTE;
- mask_to_clear = ~(u8)0;
+ mask_to_clear = ~0;
if (++offset >= PAGE_SIZE && len > 0) {
offset = 0;
page = eb->pages[++i];
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 2b790bda7998..8e3a5a266917 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4605,8 +4605,8 @@ delete:
BUG_ON(ret);
if (btrfs_should_throttle_delayed_refs(trans, root))
btrfs_async_run_delayed_refs(root,
- trans->transid,
- trans->delayed_ref_updates * 2, 0);
+ trans->delayed_ref_updates * 2,
+ trans->transid, 0);
if (be_nice) {
if (truncate_space_check(trans, root,
extent_num_bytes)) {
@@ -8931,9 +8931,14 @@ again:
* So even we call qgroup_free_data(), it won't decrease reserved
* space.
* 2) Not written to disk
- * This means the reserved space should be freed here.
+ * This means the reserved space should be freed here. However,
+ * if a truncate invalidates the page (by clearing PageDirty)
+ * and the page is accounted for while allocating extent
+ * in btrfs_check_data_free_space(), we let the delayed ref
+ * free the entire extent.
*/
- btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE);
+ if (PageDirty(page))
+ btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE);
if (!inode_evicting) {
clear_extent_bit(tree, page_start, page_end,
EXTENT_LOCKED | EXTENT_DIRTY |
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 18e1aa0f85f5..7acbd2cf6192 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3814,6 +3814,11 @@ process_slot:
}
btrfs_release_path(path);
key.offset = next_key_min_offset;
+
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ goto out;
+ }
}
ret = 0;
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 0ec8ffa37ab0..c4af0cdb783d 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2728,7 +2728,14 @@ static int do_relocation(struct btrfs_trans_handle *trans,
bytenr = btrfs_node_blockptr(upper->eb, slot);
if (lowest) {
- BUG_ON(bytenr != node->bytenr);
+ if (bytenr != node->bytenr) {
+ btrfs_err(root->fs_info,
+ "lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
+ bytenr, node->bytenr, slot,
+ upper->eb->start);
+ err = -EIO;
+ goto next;
+ }
} else {
if (node->eb->start == bytenr)
goto next;
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 01bc36cec26e..71261b459863 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -5805,6 +5805,64 @@ static int changed_extent(struct send_ctx *sctx,
int ret = 0;
if (sctx->cur_ino != sctx->cmp_key->objectid) {
+
+ if (result == BTRFS_COMPARE_TREE_CHANGED) {
+ struct extent_buffer *leaf_l;
+ struct extent_buffer *leaf_r;
+ struct btrfs_file_extent_item *ei_l;
+ struct btrfs_file_extent_item *ei_r;
+
+ leaf_l = sctx->left_path->nodes[0];
+ leaf_r = sctx->right_path->nodes[0];
+ ei_l = btrfs_item_ptr(leaf_l,
+ sctx->left_path->slots[0],
+ struct btrfs_file_extent_item);
+ ei_r = btrfs_item_ptr(leaf_r,
+ sctx->right_path->slots[0],
+ struct btrfs_file_extent_item);
+
+ /*
+ * We may have found an extent item that has changed
+ * only its disk_bytenr field and the corresponding
+ * inode item was not updated. This case happens due to
+ * very specific timings during relocation when a leaf
+ * that contains file extent items is COWed while
+ * relocation is ongoing and it's in the stage where it
+ * updates data pointers. So when this happens we can
+ * safely ignore it since we know it's the same extent,
+ * but just at different logical and physical locations
+ * (when an extent is fully replaced with a new one, we
+ * know the generation number must have changed too,
+ * since snapshot creation implies committing the current
+ * transaction, and the inode item must have been updated
+ * as well).
+ * This replacement of the disk_bytenr happens at
+ * relocation.c:replace_file_extents() through
+ * relocation.c:btrfs_reloc_cow_block().
+ */
+ if (btrfs_file_extent_generation(leaf_l, ei_l) ==
+ btrfs_file_extent_generation(leaf_r, ei_r) &&
+ btrfs_file_extent_ram_bytes(leaf_l, ei_l) ==
+ btrfs_file_extent_ram_bytes(leaf_r, ei_r) &&
+ btrfs_file_extent_compression(leaf_l, ei_l) ==
+ btrfs_file_extent_compression(leaf_r, ei_r) &&
+ btrfs_file_extent_encryption(leaf_l, ei_l) ==
+ btrfs_file_extent_encryption(leaf_r, ei_r) &&
+ btrfs_file_extent_other_encoding(leaf_l, ei_l) ==
+ btrfs_file_extent_other_encoding(leaf_r, ei_r) &&
+ btrfs_file_extent_type(leaf_l, ei_l) ==
+ btrfs_file_extent_type(leaf_r, ei_r) &&
+ btrfs_file_extent_disk_bytenr(leaf_l, ei_l) !=
+ btrfs_file_extent_disk_bytenr(leaf_r, ei_r) &&
+ btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) ==
+ btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) &&
+ btrfs_file_extent_offset(leaf_l, ei_l) ==
+ btrfs_file_extent_offset(leaf_r, ei_r) &&
+ btrfs_file_extent_num_bytes(leaf_l, ei_l) ==
+ btrfs_file_extent_num_bytes(leaf_r, ei_r))
+ return 0;
+ }
+
inconsistent_snapshot_error(sctx, result, "extent");
return -EIO;
}
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 528cae123dc9..3d33c4e41e5f 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2713,14 +2713,12 @@ static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
int index, int error)
{
struct btrfs_log_ctx *ctx;
+ struct btrfs_log_ctx *safe;
- if (!error) {
- INIT_LIST_HEAD(&root->log_ctxs[index]);
- return;
- }
-
- list_for_each_entry(ctx, &root->log_ctxs[index], list)
+ list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
+ list_del_init(&ctx->list);
ctx->log_ret = error;
+ }
INIT_LIST_HEAD(&root->log_ctxs[index]);
}
@@ -2961,13 +2959,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
mutex_unlock(&root->log_mutex);
out_wake_log_root:
- /*
- * We needn't get log_mutex here because we are sure all
- * the other tasks are blocked.
- */
+ mutex_lock(&log_root_tree->log_mutex);
btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
- mutex_lock(&log_root_tree->log_mutex);
log_root_tree->log_transid_committed++;
atomic_set(&log_root_tree->log_commit[index2], 0);
mutex_unlock(&log_root_tree->log_mutex);
@@ -2978,10 +2972,8 @@ out_wake_log_root:
if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
wake_up(&log_root_tree->log_commit_wait[index2]);
out:
- /* See above. */
- btrfs_remove_all_log_ctxs(root, index1, ret);
-
mutex_lock(&root->log_mutex);
+ btrfs_remove_all_log_ctxs(root, index1, ret);
root->log_transid_committed++;
atomic_set(&root->log_commit[index1], 0);
mutex_unlock(&root->log_mutex);
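/*
 * Editor's sketch, not part of the patch above: the iteration pattern the
 * tree-log.c hunk switches to.  Entries are unlinked while walking the
 * list, so the _safe variant (which caches the next node up front) is
 * required; the caller holds the mutex that serializes against the
 * waiters, as the second hunk arranges.  struct waiter is hypothetical.
 */
struct waiter {
	struct list_head list;
	int ret;
};

static void fail_all_waiters(struct list_head *head, int error)
{
	struct waiter *w, *tmp;

	list_for_each_entry_safe(w, tmp, head, list) {
		list_del_init(&w->list);
		w->ret = error;
	}
}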
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index 79101651fe9e..42f9a0a0c4ca 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -137,7 +137,7 @@ Espan:
bad_entry:
EXOFS_ERR(
"ERROR [exofs_check_page]: bad entry in directory(0x%lx): %s - "
- "offset=%lu, inode=0x%llu, rec_len=%d, name_len=%d\n",
+ "offset=%lu, inode=0x%llx, rec_len=%d, name_len=%d\n",
dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
_LLU(le64_to_cpu(p->inode_no)),
rec_len, p->name_len);
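/*
 * Editor's sketch, not part of the patch above: what the one-character
 * format fix in exofs buys.  With a literal "0x" prefix the conversion
 * must be %llx; %llu prints the value in decimal behind a hex-looking
 * prefix.  Plain userspace C for illustration:
 */
#include <stdio.h>

int main(void)
{
	unsigned long long ino = 255;

	printf("inode=0x%llu\n", ino);	/* misleading: prints inode=0x255 */
	printf("inode=0x%llx\n", ino);	/* correct:    prints inode=0xff  */
	return 0;
}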
diff --git a/fs/iomap.c b/fs/iomap.c
index 013d1d36fbbf..a8ee8c33ca78 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -433,8 +433,7 @@ iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
struct page *page = data;
int ret;
- ret = __block_write_begin_int(page, pos & ~PAGE_MASK, length,
- NULL, iomap);
+ ret = __block_write_begin_int(page, pos, length, NULL, iomap);
if (ret)
return ret;
@@ -561,7 +560,7 @@ int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
}
while (len > 0) {
- ret = iomap_apply(inode, start, len, 0, ops, &ctx,
+ ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
iomap_fiemap_actor);
/* inode with no (attribute) mapping will give ENOENT */
if (ret == -ENOENT)
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 2bcb86e6e6ca..78219d5644e9 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -911,6 +911,7 @@ const struct file_operations kernfs_file_fops = {
.open = kernfs_fop_open,
.release = kernfs_fop_release,
.poll = kernfs_fop_poll,
+ .fsync = noop_fsync,
};
/**
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index b10d557f9c9e..ee36efd5aece 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -84,6 +84,8 @@ struct nfsd_net {
struct list_head client_lru;
struct list_head close_lru;
struct list_head del_recall_lru;
+
+ /* protected by blocked_locks_lock */
struct list_head blocked_locks_lru;
struct delayed_work laundromat_work;
@@ -91,6 +93,9 @@ struct nfsd_net {
/* client_lock protects the client lru list and session hash table */
spinlock_t client_lock;
+ /* protects blocked_locks_lru */
+ spinlock_t blocked_locks_lock;
+
struct file *rec_file;
bool in_grace;
const struct nfsd4_client_tracking_ops *client_tracking_ops;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 9752beb78659..4b4beaaa4eaa 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -217,7 +217,7 @@ find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
{
struct nfsd4_blocked_lock *cur, *found = NULL;
- spin_lock(&nn->client_lock);
+ spin_lock(&nn->blocked_locks_lock);
list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
if (fh_match(fh, &cur->nbl_fh)) {
list_del_init(&cur->nbl_list);
@@ -226,7 +226,7 @@ find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
break;
}
}
- spin_unlock(&nn->client_lock);
+ spin_unlock(&nn->blocked_locks_lock);
if (found)
posix_unblock_lock(&found->nbl_lock);
return found;
@@ -1227,9 +1227,7 @@ static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
- struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
-
- lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
+ lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
list_del_init(&stp->st_locks);
nfs4_unhash_stid(&stp->st_stid);
@@ -1238,12 +1236,12 @@ static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
- struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
+ struct nfs4_client *clp = stp->st_stid.sc_client;
bool unhashed;
- spin_lock(&oo->oo_owner.so_client->cl_lock);
+ spin_lock(&clp->cl_lock);
unhashed = unhash_lock_stateid(stp);
- spin_unlock(&oo->oo_owner.so_client->cl_lock);
+ spin_unlock(&clp->cl_lock);
if (unhashed)
nfs4_put_stid(&stp->st_stid);
}
@@ -4665,7 +4663,7 @@ nfs4_laundromat(struct nfsd_net *nn)
* indefinitely once the lock does become free.
*/
BUG_ON(!list_empty(&reaplist));
- spin_lock(&nn->client_lock);
+ spin_lock(&nn->blocked_locks_lock);
while (!list_empty(&nn->blocked_locks_lru)) {
nbl = list_first_entry(&nn->blocked_locks_lru,
struct nfsd4_blocked_lock, nbl_lru);
@@ -4678,7 +4676,7 @@ nfs4_laundromat(struct nfsd_net *nn)
list_move(&nbl->nbl_lru, &reaplist);
list_del_init(&nbl->nbl_list);
}
- spin_unlock(&nn->client_lock);
+ spin_unlock(&nn->blocked_locks_lock);
while (!list_empty(&reaplist)) {
nbl = list_first_entry(&nn->blocked_locks_lru,
@@ -5439,13 +5437,13 @@ nfsd4_lm_notify(struct file_lock *fl)
bool queue = false;
/* An empty list means that something else is going to be using it */
- spin_lock(&nn->client_lock);
+ spin_lock(&nn->blocked_locks_lock);
if (!list_empty(&nbl->nbl_list)) {
list_del_init(&nbl->nbl_list);
list_del_init(&nbl->nbl_lru);
queue = true;
}
- spin_unlock(&nn->client_lock);
+ spin_unlock(&nn->blocked_locks_lock);
if (queue)
nfsd4_run_cb(&nbl->nbl_cb);
@@ -5868,10 +5866,10 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (fl_flags & FL_SLEEP) {
nbl->nbl_time = jiffies;
- spin_lock(&nn->client_lock);
+ spin_lock(&nn->blocked_locks_lock);
list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
- spin_unlock(&nn->client_lock);
+ spin_unlock(&nn->blocked_locks_lock);
}
err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
@@ -5900,10 +5898,10 @@ out:
if (nbl) {
/* dequeue it if we queued it before */
if (fl_flags & FL_SLEEP) {
- spin_lock(&nn->client_lock);
+ spin_lock(&nn->blocked_locks_lock);
list_del_init(&nbl->nbl_list);
list_del_init(&nbl->nbl_lru);
- spin_unlock(&nn->client_lock);
+ spin_unlock(&nn->blocked_locks_lock);
}
free_blocked_lock(nbl);
}
@@ -6943,9 +6941,11 @@ static int nfs4_state_create_net(struct net *net)
INIT_LIST_HEAD(&nn->client_lru);
INIT_LIST_HEAD(&nn->close_lru);
INIT_LIST_HEAD(&nn->del_recall_lru);
- INIT_LIST_HEAD(&nn->blocked_locks_lru);
spin_lock_init(&nn->client_lock);
+ spin_lock_init(&nn->blocked_locks_lock);
+ INIT_LIST_HEAD(&nn->blocked_locks_lru);
+
INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
get_net(net);
@@ -7063,14 +7063,14 @@ nfs4_state_shutdown_net(struct net *net)
}
BUG_ON(!list_empty(&reaplist));
- spin_lock(&nn->client_lock);
+ spin_lock(&nn->blocked_locks_lock);
while (!list_empty(&nn->blocked_locks_lru)) {
nbl = list_first_entry(&nn->blocked_locks_lru,
struct nfsd4_blocked_lock, nbl_lru);
list_move(&nbl->nbl_lru, &reaplist);
list_del_init(&nbl->nbl_list);
}
- spin_unlock(&nn->client_lock);
+ spin_unlock(&nn->blocked_locks_lock);
while (!list_empty(&reaplist)) {
nbl = list_first_entry(&nn->blocked_locks_lru,
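/*
 * Editor's sketch, not part of the patch above: the locking split the
 * nfsd hunks apply.  The blocked-locks LRU gets its own spinlock instead
 * of piggybacking on nn->client_lock, so list manipulation no longer
 * contends with everything else client_lock covers.  Names below are
 * simplified stand-ins for the nfsd_net fields.
 */
struct blocked_locks {
	spinlock_t lock;		/* protects lru */
	struct list_head lru;
};

static void blocked_locks_add(struct blocked_locks *b, struct list_head *entry)
{
	spin_lock(&b->lock);
	list_add_tail(entry, &b->lru);
	spin_unlock(&b->lock);
}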
diff --git a/fs/orangefs/dcache.c b/fs/orangefs/dcache.c
index 1e8fe844e69f..5355efba4bc8 100644
--- a/fs/orangefs/dcache.c
+++ b/fs/orangefs/dcache.c
@@ -73,7 +73,7 @@ static int orangefs_revalidate_lookup(struct dentry *dentry)
}
}
- dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
+ orangefs_set_timeout(dentry);
ret = 1;
out_release_op:
op_release(new_op);
@@ -94,8 +94,9 @@ out_drop:
static int orangefs_d_revalidate(struct dentry *dentry, unsigned int flags)
{
int ret;
+ unsigned long time = (unsigned long) dentry->d_fsdata;
- if (time_before(jiffies, dentry->d_time))
+ if (time_before(jiffies, time))
return 1;
if (flags & LOOKUP_RCU)
diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
index 66ea0cc37b18..02cc6139ec90 100644
--- a/fs/orangefs/file.c
+++ b/fs/orangefs/file.c
@@ -621,9 +621,9 @@ static int orangefs_file_release(struct inode *inode, struct file *file)
* readahead cache (if any); this forces an expensive refresh of
* data for the next caller of mmap (or 'get_block' accesses)
*/
- if (file->f_path.dentry->d_inode &&
- file->f_path.dentry->d_inode->i_mapping &&
- mapping_nrpages(&file->f_path.dentry->d_inode->i_data)) {
+ if (file_inode(file) &&
+ file_inode(file)->i_mapping &&
+ mapping_nrpages(&file_inode(file)->i_data)) {
if (orangefs_features & ORANGEFS_FEATURE_READAHEAD) {
gossip_debug(GOSSIP_INODE_DEBUG,
"calling flush_racache on %pU\n",
@@ -632,7 +632,7 @@ static int orangefs_file_release(struct inode *inode, struct file *file)
gossip_debug(GOSSIP_INODE_DEBUG,
"flush_racache finished\n");
}
- truncate_inode_pages(file->f_path.dentry->d_inode->i_mapping,
+ truncate_inode_pages(file_inode(file)->i_mapping,
0);
}
return 0;
@@ -648,7 +648,7 @@ static int orangefs_fsync(struct file *file,
{
int ret = -EINVAL;
struct orangefs_inode_s *orangefs_inode =
- ORANGEFS_I(file->f_path.dentry->d_inode);
+ ORANGEFS_I(file_inode(file));
struct orangefs_kernel_op_s *new_op = NULL;
/* required call */
@@ -661,7 +661,7 @@ static int orangefs_fsync(struct file *file,
ret = service_operation(new_op,
"orangefs_fsync",
- get_interruptible_flag(file->f_path.dentry->d_inode));
+ get_interruptible_flag(file_inode(file)));
gossip_debug(GOSSIP_FILE_DEBUG,
"orangefs_fsync got return value of %d\n",
@@ -669,7 +669,7 @@ static int orangefs_fsync(struct file *file,
op_release(new_op);
- orangefs_flush_inode(file->f_path.dentry->d_inode);
+ orangefs_flush_inode(file_inode(file));
return ret;
}
diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
index d15d3d2dba62..a290ff6ec756 100644
--- a/fs/orangefs/namei.c
+++ b/fs/orangefs/namei.c
@@ -72,7 +72,7 @@ static int orangefs_create(struct inode *dir,
d_instantiate(dentry, inode);
unlock_new_inode(inode);
- dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
+ orangefs_set_timeout(dentry);
ORANGEFS_I(inode)->getattr_time = jiffies - 1;
gossip_debug(GOSSIP_NAME_DEBUG,
@@ -183,7 +183,7 @@ static struct dentry *orangefs_lookup(struct inode *dir, struct dentry *dentry,
goto out;
}
- dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
+ orangefs_set_timeout(dentry);
inode = orangefs_iget(dir->i_sb, &new_op->downcall.resp.lookup.refn);
if (IS_ERR(inode)) {
@@ -322,7 +322,7 @@ static int orangefs_symlink(struct inode *dir,
d_instantiate(dentry, inode);
unlock_new_inode(inode);
- dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
+ orangefs_set_timeout(dentry);
ORANGEFS_I(inode)->getattr_time = jiffies - 1;
gossip_debug(GOSSIP_NAME_DEBUG,
@@ -386,7 +386,7 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
d_instantiate(dentry, inode);
unlock_new_inode(inode);
- dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
+ orangefs_set_timeout(dentry);
ORANGEFS_I(inode)->getattr_time = jiffies - 1;
gossip_debug(GOSSIP_NAME_DEBUG,
diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
index 0a82048f3aaf..3bf803d732c5 100644
--- a/fs/orangefs/orangefs-kernel.h
+++ b/fs/orangefs/orangefs-kernel.h
@@ -580,4 +580,11 @@ static inline void orangefs_i_size_write(struct inode *inode, loff_t i_size)
#endif
}
+static inline void orangefs_set_timeout(struct dentry *dentry)
+{
+ unsigned long time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
+
+ dentry->d_fsdata = (void *) time;
+}
+
#endif /* __ORANGEFSKERNEL_H */
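/*
 * Editor's sketch, not part of the patch above: how the two halves of the
 * orangefs change fit together.  The expiry time is stashed in
 * dentry->d_fsdata as a plain unsigned long (instead of d_time) by
 * orangefs_set_timeout() and checked with time_before() in
 * ->d_revalidate(), roughly:
 */
static inline bool orangefs_dentry_still_valid(struct dentry *dentry)
{
	unsigned long expires = (unsigned long) dentry->d_fsdata;

	return time_before(jiffies, expires);
}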
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index aeb60f791418..36795eed40b0 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -178,6 +178,8 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
len -= bytes;
}
+ if (!error)
+ error = vfs_fsync(new_file, 0);
fput(new_file);
out_fput:
fput(old_file);
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index c58f01babf30..7fb53d055537 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -270,9 +270,6 @@ struct posix_acl *ovl_get_acl(struct inode *inode, int type)
if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !IS_POSIXACL(realinode))
return NULL;
- if (!realinode->i_op->get_acl)
- return NULL;
-
old_cred = ovl_override_creds(inode->i_sb);
acl = get_acl(realinode, type);
revert_creds(old_cred);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index bcf3965be819..edd46a0e951d 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -1037,6 +1037,21 @@ ovl_posix_acl_xattr_set(const struct xattr_handler *handler,
posix_acl_release(acl);
+ /*
+ * Check if sgid bit needs to be cleared (actual setacl operation will
+ * be done with the mounter's capabilities, and so it won't do it for us).
+ */
+ if (unlikely(inode->i_mode & S_ISGID) &&
+ handler->flags == ACL_TYPE_ACCESS &&
+ !in_group_p(inode->i_gid) &&
+ !capable_wrt_inode_uidgid(inode, CAP_FSETID)) {
+ struct iattr iattr = { .ia_valid = ATTR_KILL_SGID };
+
+ err = ovl_setattr(dentry, &iattr);
+ if (err)
+ return err;
+ }
+
err = ovl_xattr_set(dentry, handler->name, value, size, flags);
if (!err)
ovl_copyattr(ovl_inode_real(inode, NULL), inode);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 8e654468ab67..ca651ac00660 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -252,7 +252,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
* Inherently racy -- command line shares address space
* with code and data.
*/
- rv = access_remote_vm(mm, arg_end - 1, &c, 1, FOLL_FORCE);
+ rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0);
if (rv <= 0)
goto out_free_page;
@@ -270,8 +270,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
int nr_read;
_count = min3(count, len, PAGE_SIZE);
- nr_read = access_remote_vm(mm, p, page, _count,
- FOLL_FORCE);
+ nr_read = access_remote_vm(mm, p, page, _count, 0);
if (nr_read < 0)
rv = nr_read;
if (nr_read <= 0)
@@ -306,8 +305,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
bool final;
_count = min3(count, len, PAGE_SIZE);
- nr_read = access_remote_vm(mm, p, page, _count,
- FOLL_FORCE);
+ nr_read = access_remote_vm(mm, p, page, _count, 0);
if (nr_read < 0)
rv = nr_read;
if (nr_read <= 0)
@@ -356,8 +354,7 @@ skip_argv:
bool final;
_count = min3(count, len, PAGE_SIZE);
- nr_read = access_remote_vm(mm, p, page, _count,
- FOLL_FORCE);
+ nr_read = access_remote_vm(mm, p, page, _count, 0);
if (nr_read < 0)
rv = nr_read;
if (nr_read <= 0)
@@ -835,7 +832,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
unsigned long addr = *ppos;
ssize_t copied;
char *page;
- unsigned int flags = FOLL_FORCE;
+ unsigned int flags;
if (!mm)
return 0;
@@ -848,6 +845,8 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
if (!atomic_inc_not_zero(&mm->mm_users))
goto free;
+ /* Maybe we should limit FOLL_FORCE to actual ptrace users? */
+ flags = FOLL_FORCE;
if (write)
flags |= FOLL_WRITE;
@@ -971,8 +970,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
max_len = min_t(size_t, PAGE_SIZE, count);
this_len = min(max_len, this_len);
- retval = access_remote_vm(mm, (env_start + src),
- page, this_len, FOLL_FORCE);
+ retval = access_remote_vm(mm, (env_start + src), page, this_len, 0);
if (retval <= 0) {
ret = retval;
@@ -1014,6 +1012,9 @@ static ssize_t auxv_read(struct file *file, char __user *buf,
{
struct mm_struct *mm = file->private_data;
unsigned int nwords = 0;
+
+ if (!mm)
+ return 0;
do {
nwords += 2;
} while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index bd4a5e8ce441..ca16c5d7bab1 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -543,6 +543,14 @@ out:
if (err != -ENOENT)
ubifs_err(c, "cannot find next direntry, error %d", err);
+ else
+ /*
+ * -ENOENT is a non-fatal error in this context; the TNC uses
+ * it to indicate that the cursor moved past the current directory
+ * and readdir() has to stop.
+ */
+ err = 0;
+
/* 2 is a special value indicating that there are no more direntries */
ctx->pos = 2;
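/*
 * Editor's sketch, not part of the patch above: the error mapping the
 * ubifs hunk performs.  The lookup helper reports "no further entries"
 * as -ENOENT; for readdir() that is the normal end condition, so it is
 * folded into a successful return rather than propagated to userspace.
 * next_entry() is a hypothetical iterator.
 */
static int emit_all_entries(struct dir_context *ctx, void *cursor)
{
	int err;

	while ((err = next_entry(cursor, ctx)) == 0)
		;	/* keep emitting entries */

	if (err == -ENOENT)	/* cursor ran off the end: not an error */
		err = 0;
	return err;
}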
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index c27344cf38e1..c6eb21940783 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -3974,9 +3974,6 @@ xfs_bmap_remap_alloc(
* allocating, so skip that check by pretending to be freeing.
*/
error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
- if (error)
- goto error0;
-error0:
xfs_perag_put(args.pag);
if (error)
trace_xfs_bmap_remap_alloc_error(ap->ip, error, _RET_IP_);
@@ -3999,6 +3996,39 @@ xfs_bmap_alloc(
return xfs_bmap_btalloc(ap);
}
+/* Trim extent to fit a logical block range. */
+void
+xfs_trim_extent(
+ struct xfs_bmbt_irec *irec,
+ xfs_fileoff_t bno,
+ xfs_filblks_t len)
+{
+ xfs_fileoff_t distance;
+ xfs_fileoff_t end = bno + len;
+
+ if (irec->br_startoff + irec->br_blockcount <= bno ||
+ irec->br_startoff >= end) {
+ irec->br_blockcount = 0;
+ return;
+ }
+
+ if (irec->br_startoff < bno) {
+ distance = bno - irec->br_startoff;
+ if (isnullstartblock(irec->br_startblock))
+ irec->br_startblock = DELAYSTARTBLOCK;
+ if (irec->br_startblock != DELAYSTARTBLOCK &&
+ irec->br_startblock != HOLESTARTBLOCK)
+ irec->br_startblock += distance;
+ irec->br_startoff += distance;
+ irec->br_blockcount -= distance;
+ }
+
+ if (end < irec->br_startoff + irec->br_blockcount) {
+ distance = irec->br_startoff + irec->br_blockcount - end;
+ irec->br_blockcount -= distance;
+ }
+}
+
/*
* Trim the returned map to the required bounds
*/
@@ -4829,6 +4859,219 @@ xfs_bmap_split_indlen(
return stolen;
}
+int
+xfs_bmap_del_extent_delay(
+ struct xfs_inode *ip,
+ int whichfork,
+ xfs_extnum_t *idx,
+ struct xfs_bmbt_irec *got,
+ struct xfs_bmbt_irec *del)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_bmbt_irec new;
+ int64_t da_old, da_new, da_diff = 0;
+ xfs_fileoff_t del_endoff, got_endoff;
+ xfs_filblks_t got_indlen, new_indlen, stolen;
+ int error = 0, state = 0;
+ bool isrt;
+
+ XFS_STATS_INC(mp, xs_del_exlist);
+
+ isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
+ del_endoff = del->br_startoff + del->br_blockcount;
+ got_endoff = got->br_startoff + got->br_blockcount;
+ da_old = startblockval(got->br_startblock);
+ da_new = 0;
+
+ ASSERT(*idx >= 0);
+ ASSERT(*idx < ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
+ ASSERT(del->br_blockcount > 0);
+ ASSERT(got->br_startoff <= del->br_startoff);
+ ASSERT(got_endoff >= del_endoff);
+
+ if (isrt) {
+ int64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);
+
+ do_div(rtexts, mp->m_sb.sb_rextsize);
+ xfs_mod_frextents(mp, rtexts);
+ }
+
+ /*
+ * Update the inode delalloc counter now and wait to update the
+ * sb counters as we might have to borrow some blocks for the
+ * indirect block accounting.
+ */
+ xfs_trans_reserve_quota_nblks(NULL, ip, -((long)del->br_blockcount), 0,
+ isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
+ ip->i_delayed_blks -= del->br_blockcount;
+
+ if (whichfork == XFS_COW_FORK)
+ state |= BMAP_COWFORK;
+
+ if (got->br_startoff == del->br_startoff)
+ state |= BMAP_LEFT_CONTIG;
+ if (got_endoff == del_endoff)
+ state |= BMAP_RIGHT_CONTIG;
+
+ switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
+ case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+ /*
+ * Matches the whole extent. Delete the entry.
+ */
+ xfs_iext_remove(ip, *idx, 1, state);
+ --*idx;
+ break;
+ case BMAP_LEFT_CONTIG:
+ /*
+ * Deleting the first part of the extent.
+ */
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+ got->br_startoff = del_endoff;
+ got->br_blockcount -= del->br_blockcount;
+ da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
+ got->br_blockcount), da_old);
+ got->br_startblock = nullstartblock((int)da_new);
+ xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ break;
+ case BMAP_RIGHT_CONTIG:
+ /*
+ * Deleting the last part of the extent.
+ */
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+ got->br_blockcount = got->br_blockcount - del->br_blockcount;
+ da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
+ got->br_blockcount), da_old);
+ got->br_startblock = nullstartblock((int)da_new);
+ xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ break;
+ case 0:
+ /*
+ * Deleting the middle of the extent.
+ *
+ * Distribute the original indlen reservation across the two new
+ * extents. Steal blocks from the deleted extent if necessary.
+ * Stealing blocks simply fudges the fdblocks accounting below.
+ * Warn if either of the new indlen reservations is zero as this
+ * can lead to delalloc problems.
+ */
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+
+ got->br_blockcount = del->br_startoff - got->br_startoff;
+ got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
+
+ new.br_blockcount = got_endoff - del_endoff;
+ new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
+
+ WARN_ON_ONCE(!got_indlen || !new_indlen);
+ stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
+ del->br_blockcount);
+
+ got->br_startblock = nullstartblock((int)got_indlen);
+ xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
+ trace_xfs_bmap_post_update(ip, *idx, 0, _THIS_IP_);
+
+ new.br_startoff = del_endoff;
+ new.br_state = got->br_state;
+ new.br_startblock = nullstartblock((int)new_indlen);
+
+ ++*idx;
+ xfs_iext_insert(ip, *idx, 1, &new, state);
+
+ da_new = got_indlen + new_indlen - stolen;
+ del->br_blockcount -= stolen;
+ break;
+ }
+
+ ASSERT(da_old >= da_new);
+ da_diff = da_old - da_new;
+ if (!isrt)
+ da_diff += del->br_blockcount;
+ if (da_diff)
+ xfs_mod_fdblocks(mp, da_diff, false);
+ return error;
+}
+
+void
+xfs_bmap_del_extent_cow(
+ struct xfs_inode *ip,
+ xfs_extnum_t *idx,
+ struct xfs_bmbt_irec *got,
+ struct xfs_bmbt_irec *del)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+ struct xfs_bmbt_irec new;
+ xfs_fileoff_t del_endoff, got_endoff;
+ int state = BMAP_COWFORK;
+
+ XFS_STATS_INC(mp, xs_del_exlist);
+
+ del_endoff = del->br_startoff + del->br_blockcount;
+ got_endoff = got->br_startoff + got->br_blockcount;
+
+ ASSERT(*idx >= 0);
+ ASSERT(*idx < ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
+ ASSERT(del->br_blockcount > 0);
+ ASSERT(got->br_startoff <= del->br_startoff);
+ ASSERT(got_endoff >= del_endoff);
+ ASSERT(!isnullstartblock(got->br_startblock));
+
+ if (got->br_startoff == del->br_startoff)
+ state |= BMAP_LEFT_CONTIG;
+ if (got_endoff == del_endoff)
+ state |= BMAP_RIGHT_CONTIG;
+
+ switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
+ case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+ /*
+ * Matches the whole extent. Delete the entry.
+ */
+ xfs_iext_remove(ip, *idx, 1, state);
+ --*idx;
+ break;
+ case BMAP_LEFT_CONTIG:
+ /*
+ * Deleting the first part of the extent.
+ */
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+ got->br_startoff = del_endoff;
+ got->br_blockcount -= del->br_blockcount;
+ got->br_startblock = del->br_startblock + del->br_blockcount;
+ xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ break;
+ case BMAP_RIGHT_CONTIG:
+ /*
+ * Deleting the last part of the extent.
+ */
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+ got->br_blockcount -= del->br_blockcount;
+ xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ break;
+ case 0:
+ /*
+ * Deleting the middle of the extent.
+ */
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+ got->br_blockcount = del->br_startoff - got->br_startoff;
+ xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+
+ new.br_startoff = del_endoff;
+ new.br_blockcount = got_endoff - del_endoff;
+ new.br_state = got->br_state;
+ new.br_startblock = del->br_startblock + del->br_blockcount;
+
+ ++*idx;
+ xfs_iext_insert(ip, *idx, 1, &new, state);
+ break;
+ }
+}
+
/*
* Called by xfs_bmapi to update file extent records and the btree
* after removing space (or undoing a delayed allocation).
@@ -5171,175 +5414,6 @@ done:
return error;
}
-/* Remove an extent from the CoW fork. Similar to xfs_bmap_del_extent. */
-int
-xfs_bunmapi_cow(
- struct xfs_inode *ip,
- struct xfs_bmbt_irec *del)
-{
- xfs_filblks_t da_new;
- xfs_filblks_t da_old;
- xfs_fsblock_t del_endblock = 0;
- xfs_fileoff_t del_endoff;
- int delay;
- struct xfs_bmbt_rec_host *ep;
- int error;
- struct xfs_bmbt_irec got;
- xfs_fileoff_t got_endoff;
- struct xfs_ifork *ifp;
- struct xfs_mount *mp;
- xfs_filblks_t nblks;
- struct xfs_bmbt_irec new;
- /* REFERENCED */
- uint qfield;
- xfs_filblks_t temp;
- xfs_filblks_t temp2;
- int state = BMAP_COWFORK;
- int eof;
- xfs_extnum_t eidx;
-
- mp = ip->i_mount;
- XFS_STATS_INC(mp, xs_del_exlist);
-
- ep = xfs_bmap_search_extents(ip, del->br_startoff, XFS_COW_FORK, &eof,
- &eidx, &got, &new);
-
- ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); ifp = ifp;
- ASSERT((eidx >= 0) && (eidx < ifp->if_bytes /
- (uint)sizeof(xfs_bmbt_rec_t)));
- ASSERT(del->br_blockcount > 0);
- ASSERT(got.br_startoff <= del->br_startoff);
- del_endoff = del->br_startoff + del->br_blockcount;
- got_endoff = got.br_startoff + got.br_blockcount;
- ASSERT(got_endoff >= del_endoff);
- delay = isnullstartblock(got.br_startblock);
- ASSERT(isnullstartblock(del->br_startblock) == delay);
- qfield = 0;
- error = 0;
- /*
- * If deleting a real allocation, must free up the disk space.
- */
- if (!delay) {
- nblks = del->br_blockcount;
- qfield = XFS_TRANS_DQ_BCOUNT;
- /*
- * Set up del_endblock and cur for later.
- */
- del_endblock = del->br_startblock + del->br_blockcount;
- da_old = da_new = 0;
- } else {
- da_old = startblockval(got.br_startblock);
- da_new = 0;
- nblks = 0;
- }
- qfield = qfield;
- nblks = nblks;
-
- /*
- * Set flag value to use in switch statement.
- * Left-contig is 2, right-contig is 1.
- */
- switch (((got.br_startoff == del->br_startoff) << 1) |
- (got_endoff == del_endoff)) {
- case 3:
- /*
- * Matches the whole extent. Delete the entry.
- */
- xfs_iext_remove(ip, eidx, 1, BMAP_COWFORK);
- --eidx;
- break;
-
- case 2:
- /*
- * Deleting the first part of the extent.
- */
- trace_xfs_bmap_pre_update(ip, eidx, state, _THIS_IP_);
- xfs_bmbt_set_startoff(ep, del_endoff);
- temp = got.br_blockcount - del->br_blockcount;
- xfs_bmbt_set_blockcount(ep, temp);
- if (delay) {
- temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
- da_old);
- xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
- trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
- da_new = temp;
- break;
- }
- xfs_bmbt_set_startblock(ep, del_endblock);
- trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
- break;
-
- case 1:
- /*
- * Deleting the last part of the extent.
- */
- temp = got.br_blockcount - del->br_blockcount;
- trace_xfs_bmap_pre_update(ip, eidx, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(ep, temp);
- if (delay) {
- temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
- da_old);
- xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
- trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
- da_new = temp;
- break;
- }
- trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
- break;
-
- case 0:
- /*
- * Deleting the middle of the extent.
- */
- temp = del->br_startoff - got.br_startoff;
- trace_xfs_bmap_pre_update(ip, eidx, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(ep, temp);
- new.br_startoff = del_endoff;
- temp2 = got_endoff - del_endoff;
- new.br_blockcount = temp2;
- new.br_state = got.br_state;
- if (!delay) {
- new.br_startblock = del_endblock;
- } else {
- temp = xfs_bmap_worst_indlen(ip, temp);
- xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
- temp2 = xfs_bmap_worst_indlen(ip, temp2);
- new.br_startblock = nullstartblock((int)temp2);
- da_new = temp + temp2;
- while (da_new > da_old) {
- if (temp) {
- temp--;
- da_new--;
- xfs_bmbt_set_startblock(ep,
- nullstartblock((int)temp));
- }
- if (da_new == da_old)
- break;
- if (temp2) {
- temp2--;
- da_new--;
- new.br_startblock =
- nullstartblock((int)temp2);
- }
- }
- }
- trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
- xfs_iext_insert(ip, eidx + 1, 1, &new, state);
- ++eidx;
- break;
- }
-
- /*
- * Account for change in delayed indirect blocks.
- * Nothing to do for disk quota accounting here.
- */
- ASSERT(da_old >= da_new);
- if (da_old > da_new)
- xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);
-
- return error;
-}
-
/*
* Unmap (remove) blocks from a file.
* If nexts is nonzero then the number of extents to remove is limited to
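/*
 * Editor's worked example, not part of the patch above, for the
 * xfs_trim_extent() helper added earlier in this file's hunks.  Trimming
 * a mapping that covers file blocks [10, 30) with disk start 100 down to
 * the range [15, 25):
 *
 *	struct xfs_bmbt_irec irec = {
 *		.br_startoff   = 10,
 *		.br_blockcount = 20,
 *		.br_startblock = 100,
 *	};
 *	xfs_trim_extent(&irec, 15, 10);
 *
 * leaves br_startoff = 15, br_blockcount = 10, br_startblock = 105: five
 * blocks are cut from the front (advancing the disk address as well) and
 * another five from the tail, so the mapping exactly covers the requested
 * range.  Delayed-allocation and hole mappings keep their sentinel
 * br_startblock values instead of being advanced.
 */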
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index f97db7132564..7cae6ec27fa6 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -190,6 +190,8 @@ void xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt,
#define XFS_BMAP_TRACE_EXLIST(ip,c,w)
#endif
+void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
+ xfs_filblks_t len);
int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
void xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
@@ -221,7 +223,11 @@ int xfs_bunmapi(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t bno, xfs_filblks_t len, int flags,
xfs_extnum_t nexts, xfs_fsblock_t *firstblock,
struct xfs_defer_ops *dfops, int *done);
-int xfs_bunmapi_cow(struct xfs_inode *ip, struct xfs_bmbt_irec *del);
+int xfs_bmap_del_extent_delay(struct xfs_inode *ip, int whichfork,
+ xfs_extnum_t *idx, struct xfs_bmbt_irec *got,
+ struct xfs_bmbt_irec *del);
+void xfs_bmap_del_extent_cow(struct xfs_inode *ip, xfs_extnum_t *idx,
+ struct xfs_bmbt_irec *got, struct xfs_bmbt_irec *del);
int xfs_check_nostate_extents(struct xfs_ifork *ifp, xfs_extnum_t idx,
xfs_extnum_t num);
uint xfs_default_attroffset(struct xfs_inode *ip);
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 5c8e6f2ce44f..0e80993c8a59 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -4826,7 +4826,7 @@ xfs_btree_calc_size(
return rval;
}
-int
+static int
xfs_btree_count_blocks_helper(
struct xfs_btree_cur *cur,
int level,
diff --git a/fs/xfs/libxfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c
index 3cc3cf767474..ac9a003dd29a 100644
--- a/fs/xfs/libxfs/xfs_dquot_buf.c
+++ b/fs/xfs/libxfs/xfs_dquot_buf.c
@@ -191,8 +191,7 @@ xfs_dquot_buf_verify_crc(
if (mp->m_quotainfo)
ndquots = mp->m_quotainfo->qi_dqperchunk;
else
- ndquots = xfs_calc_dquots_per_chunk(
- XFS_BB_TO_FSB(mp, bp->b_length));
+ ndquots = xfs_calc_dquots_per_chunk(bp->b_length);
for (i = 0; i < ndquots; i++, d++) {
if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index f6547fc5e016..6b7579e7b60a 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -865,7 +865,6 @@ typedef struct xfs_timestamp {
* padding field for v3 inodes.
*/
#define XFS_DINODE_MAGIC 0x494e /* 'IN' */
-#define XFS_DINODE_GOOD_VERSION(v) ((v) >= 1 && (v) <= 3)
typedef struct xfs_dinode {
__be16 di_magic; /* inode magic # = XFS_DINODE_MAGIC */
__be16 di_mode; /* mode and type of file */
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 8de9a3a29589..134424fac434 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -57,6 +57,17 @@ xfs_inobp_check(
}
#endif
+bool
+xfs_dinode_good_version(
+ struct xfs_mount *mp,
+ __u8 version)
+{
+ if (xfs_sb_version_hascrc(&mp->m_sb))
+ return version == 3;
+
+ return version == 1 || version == 2;
+}
+
/*
* If we are doing readahead on an inode buffer, we might be in log recovery
* reading an inode allocation buffer that hasn't yet been replayed, and hence
@@ -91,7 +102,7 @@ xfs_inode_buf_verify(
dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
- XFS_DINODE_GOOD_VERSION(dip->di_version);
+ xfs_dinode_good_version(mp, dip->di_version);
if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
XFS_ERRTAG_ITOBP_INOTOBP,
XFS_RANDOM_ITOBP_INOTOBP))) {
diff --git a/fs/xfs/libxfs/xfs_inode_buf.h b/fs/xfs/libxfs/xfs_inode_buf.h
index 62d9d4681c8c..3cfe12a4f58a 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.h
+++ b/fs/xfs/libxfs/xfs_inode_buf.h
@@ -74,6 +74,8 @@ void xfs_inode_from_disk(struct xfs_inode *ip, struct xfs_dinode *from);
void xfs_log_dinode_to_disk(struct xfs_log_dinode *from,
struct xfs_dinode *to);
+bool xfs_dinode_good_version(struct xfs_mount *mp, __u8 version);
+
#if defined(DEBUG)
void xfs_inobp_check(struct xfs_mount *, struct xfs_buf *);
#else
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index a314fc7b56fa..6e4f7f900fea 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -249,6 +249,7 @@ xfs_file_dio_aio_read(
struct xfs_inode *ip = XFS_I(inode);
loff_t isize = i_size_read(inode);
size_t count = iov_iter_count(to);
+ loff_t end = iocb->ki_pos + count - 1;
struct iov_iter data;
struct xfs_buftarg *target;
ssize_t ret = 0;
@@ -272,49 +273,21 @@ xfs_file_dio_aio_read(
file_accessed(iocb->ki_filp);
- /*
- * Locking is a bit tricky here. If we take an exclusive lock for direct
- * IO, we effectively serialise all new concurrent read IO to this file
- * and block it behind IO that is currently in progress because IO in
- * progress holds the IO lock shared. We only need to hold the lock
- * exclusive to blow away the page cache, so only take lock exclusively
- * if the page cache needs invalidation. This allows the normal direct
- * IO case of no page cache pages to proceeed concurrently without
- * serialisation.
- */
xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
if (mapping->nrpages) {
- xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
- xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
+ ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
+ if (ret)
+ goto out_unlock;
/*
- * The generic dio code only flushes the range of the particular
- * I/O. Because we take an exclusive lock here, this whole
- * sequence is considerably more expensive for us. This has a
- * noticeable performance impact for any file with cached pages,
- * even when outside of the range of the particular I/O.
- *
- * Hence, amortize the cost of the lock against a full file
- * flush and reduce the chances of repeated iolock cycles going
- * forward.
+ * Invalidate whole pages. This can return an error if we fail
+ * to invalidate a page, but this should never happen on XFS.
+ * Warn if it does fail.
*/
- if (mapping->nrpages) {
- ret = filemap_write_and_wait(mapping);
- if (ret) {
- xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
- return ret;
- }
-
- /*
- * Invalidate whole pages. This can return an error if
- * we fail to invalidate a page, but this should never
- * happen on XFS. Warn if it does fail.
- */
- ret = invalidate_inode_pages2(mapping);
- WARN_ON_ONCE(ret);
- ret = 0;
- }
- xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
+ ret = invalidate_inode_pages2_range(mapping,
+ iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
+ WARN_ON_ONCE(ret);
+ ret = 0;
}
data = *to;
@@ -324,8 +297,9 @@ xfs_file_dio_aio_read(
iocb->ki_pos += ret;
iov_iter_advance(to, ret);
}
- xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
+out_unlock:
+ xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
return ret;
}
@@ -570,61 +544,49 @@ xfs_file_dio_aio_write(
if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
return -EINVAL;
- /* "unaligned" here means not aligned to a filesystem block */
- if ((iocb->ki_pos & mp->m_blockmask) ||
- ((iocb->ki_pos + count) & mp->m_blockmask))
- unaligned_io = 1;
-
/*
- * We don't need to take an exclusive lock unless there page cache needs
- * to be invalidated or unaligned IO is being executed. We don't need to
- * consider the EOF extension case here because
- * xfs_file_aio_write_checks() will relock the inode as necessary for
- * EOF zeroing cases and fill out the new inode size as appropriate.
+ * Don't take the exclusive iolock here unless the I/O is unaligned to
+ * the file system block size. We don't need to consider the EOF
+ * extension case here because xfs_file_aio_write_checks() will relock
+ * the inode as necessary for EOF zeroing cases and fill out the new
+ * inode size as appropriate.
*/
- if (unaligned_io || mapping->nrpages)
+ if ((iocb->ki_pos & mp->m_blockmask) ||
+ ((iocb->ki_pos + count) & mp->m_blockmask)) {
+ unaligned_io = 1;
iolock = XFS_IOLOCK_EXCL;
- else
+ } else {
iolock = XFS_IOLOCK_SHARED;
- xfs_rw_ilock(ip, iolock);
-
- /*
- * Recheck if there are cached pages that need invalidate after we got
- * the iolock to protect against other threads adding new pages while
- * we were waiting for the iolock.
- */
- if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
- xfs_rw_iunlock(ip, iolock);
- iolock = XFS_IOLOCK_EXCL;
- xfs_rw_ilock(ip, iolock);
}
+ xfs_rw_ilock(ip, iolock);
+
ret = xfs_file_aio_write_checks(iocb, from, &iolock);
if (ret)
goto out;
count = iov_iter_count(from);
end = iocb->ki_pos + count - 1;
- /*
- * See xfs_file_dio_aio_read() for why we do a full-file flush here.
- */
if (mapping->nrpages) {
- ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
+ ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
if (ret)
goto out;
+
/*
* Invalidate whole pages. This can return an error if we fail
* to invalidate a page, but this should never happen on XFS.
* Warn if it does fail.
*/
- ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);
+ ret = invalidate_inode_pages2_range(mapping,
+ iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
WARN_ON_ONCE(ret);
ret = 0;
}
/*
* If we are doing unaligned IO, wait for all other IO to drain,
- * otherwise demote the lock if we had to flush cached pages
+ * otherwise demote the lock if we had to take the exclusive lock
+ * for other reasons in xfs_file_aio_write_checks.
*/
if (unaligned_io)
inode_dio_wait(inode);
@@ -947,134 +909,6 @@ out_unlock:
return error;
}
-/*
- * Flush all file writes out to disk.
- */
-static int
-xfs_file_wait_for_io(
- struct inode *inode,
- loff_t offset,
- size_t len)
-{
- loff_t rounding;
- loff_t ioffset;
- loff_t iendoffset;
- loff_t bs;
- int ret;
-
- bs = inode->i_sb->s_blocksize;
- inode_dio_wait(inode);
-
- rounding = max_t(xfs_off_t, bs, PAGE_SIZE);
- ioffset = round_down(offset, rounding);
- iendoffset = round_up(offset + len, rounding) - 1;
- ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
- iendoffset);
- return ret;
-}
-
-/* Hook up to the VFS reflink function */
-STATIC int
-xfs_file_share_range(
- struct file *file_in,
- loff_t pos_in,
- struct file *file_out,
- loff_t pos_out,
- u64 len,
- bool is_dedupe)
-{
- struct inode *inode_in;
- struct inode *inode_out;
- ssize_t ret;
- loff_t bs;
- loff_t isize;
- int same_inode;
- loff_t blen;
- unsigned int flags = 0;
-
- inode_in = file_inode(file_in);
- inode_out = file_inode(file_out);
- bs = inode_out->i_sb->s_blocksize;
-
- /* Don't touch certain kinds of inodes */
- if (IS_IMMUTABLE(inode_out))
- return -EPERM;
- if (IS_SWAPFILE(inode_in) ||
- IS_SWAPFILE(inode_out))
- return -ETXTBSY;
-
- /* Reflink only works within this filesystem. */
- if (inode_in->i_sb != inode_out->i_sb)
- return -EXDEV;
- same_inode = (inode_in->i_ino == inode_out->i_ino);
-
- /* Don't reflink dirs, pipes, sockets... */
- if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
- return -EISDIR;
- if (S_ISFIFO(inode_in->i_mode) || S_ISFIFO(inode_out->i_mode))
- return -EINVAL;
- if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
- return -EINVAL;
-
- /* Don't share DAX file data for now. */
- if (IS_DAX(inode_in) || IS_DAX(inode_out))
- return -EINVAL;
-
- /* Are we going all the way to the end? */
- isize = i_size_read(inode_in);
- if (isize == 0)
- return 0;
- if (len == 0)
- len = isize - pos_in;
-
- /* Ensure offsets don't wrap and the input is inside i_size */
- if (pos_in + len < pos_in || pos_out + len < pos_out ||
- pos_in + len > isize)
- return -EINVAL;
-
- /* Don't allow dedupe past EOF in the dest file */
- if (is_dedupe) {
- loff_t disize;
-
- disize = i_size_read(inode_out);
- if (pos_out >= disize || pos_out + len > disize)
- return -EINVAL;
- }
-
- /* If we're linking to EOF, continue to the block boundary. */
- if (pos_in + len == isize)
- blen = ALIGN(isize, bs) - pos_in;
- else
- blen = len;
-
- /* Only reflink if we're aligned to block boundaries */
- if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_in + blen, bs) ||
- !IS_ALIGNED(pos_out, bs) || !IS_ALIGNED(pos_out + blen, bs))
- return -EINVAL;
-
- /* Don't allow overlapped reflink within the same file */
- if (same_inode && pos_out + blen > pos_in && pos_out < pos_in + blen)
- return -EINVAL;
-
- /* Wait for the completion of any pending IOs on srcfile */
- ret = xfs_file_wait_for_io(inode_in, pos_in, len);
- if (ret)
- goto out;
- ret = xfs_file_wait_for_io(inode_out, pos_out, len);
- if (ret)
- goto out;
-
- if (is_dedupe)
- flags |= XFS_REFLINK_DEDUPE;
- ret = xfs_reflink_remap_range(XFS_I(inode_in), pos_in, XFS_I(inode_out),
- pos_out, len, flags);
- if (ret < 0)
- goto out;
-
-out:
- return ret;
-}
-
STATIC ssize_t
xfs_file_copy_range(
struct file *file_in,
@@ -1086,7 +920,7 @@ xfs_file_copy_range(
{
int error;
- error = xfs_file_share_range(file_in, pos_in, file_out, pos_out,
+ error = xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
len, false);
if (error)
return error;
@@ -1101,7 +935,7 @@ xfs_file_clone_range(
loff_t pos_out,
u64 len)
{
- return xfs_file_share_range(file_in, pos_in, file_out, pos_out,
+ return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
len, false);
}
@@ -1124,7 +958,7 @@ xfs_file_dedupe_range(
if (len > XFS_MAX_DEDUPE_LEN)
len = XFS_MAX_DEDUPE_LEN;
- error = xfs_file_share_range(src_file, loff, dst_file, dst_loff,
+ error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff,
len, true);
if (error)
return error;
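/*
 * Editor's sketch, not part of the patch above: the range-limited flush
 * both direct I/O paths in xfs_file.c switch to.  Only the byte range of
 * the current request is written back and its page-cache pages
 * invalidated, instead of flushing and invalidating the whole file.
 * dio_flush_range() is a hypothetical helper name.
 */
static int dio_flush_range(struct address_space *mapping, loff_t pos,
			   size_t count)
{
	loff_t end = pos + count - 1;
	int ret;

	ret = filemap_write_and_wait_range(mapping, pos, end);
	if (ret)
		return ret;

	/* Should not fail on XFS; warn (as the patch does) if it ever does. */
	ret = invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
					    end >> PAGE_SHIFT);
	WARN_ON_ONCE(ret);
	return 0;
}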
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 14796b744e0a..f295049db681 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -1656,9 +1656,9 @@ void
xfs_inode_set_cowblocks_tag(
xfs_inode_t *ip)
{
- trace_xfs_inode_set_eofblocks_tag(ip);
+ trace_xfs_inode_set_cowblocks_tag(ip);
return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_cowblocks,
- trace_xfs_perag_set_eofblocks,
+ trace_xfs_perag_set_cowblocks,
XFS_ICI_COWBLOCKS_TAG);
}
@@ -1666,7 +1666,7 @@ void
xfs_inode_clear_cowblocks_tag(
xfs_inode_t *ip)
{
- trace_xfs_inode_clear_eofblocks_tag(ip);
+ trace_xfs_inode_clear_cowblocks_tag(ip);
return __xfs_inode_clear_eofblocks_tag(ip,
- trace_xfs_perag_clear_eofblocks, XFS_ICI_COWBLOCKS_TAG);
+ trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
}
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index d907eb9f8ef3..436e109bb01e 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -566,6 +566,17 @@ xfs_file_iomap_begin_delay(
xfs_bmap_search_extents(ip, offset_fsb, XFS_DATA_FORK, &eof, &idx,
&got, &prev);
if (!eof && got.br_startoff <= offset_fsb) {
+ if (xfs_is_reflink_inode(ip)) {
+ bool shared;
+
+ end_fsb = min(XFS_B_TO_FSB(mp, offset + count),
+ maxbytes_fsb);
+ xfs_trim_extent(&got, offset_fsb, end_fsb - offset_fsb);
+ error = xfs_reflink_reserve_cow(ip, &got, &shared);
+ if (error)
+ goto out_unlock;
+ }
+
trace_xfs_iomap_found(ip, offset, count, 0, &got);
goto done;
}
@@ -961,19 +972,13 @@ xfs_file_iomap_begin(
struct xfs_mount *mp = ip->i_mount;
struct xfs_bmbt_irec imap;
xfs_fileoff_t offset_fsb, end_fsb;
- bool shared, trimmed;
int nimaps = 1, error = 0;
+ bool shared = false, trimmed = false;
unsigned lockmode;
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
- if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
- error = xfs_reflink_reserve_cow_range(ip, offset, length);
- if (error < 0)
- return error;
- }
-
if ((flags & IOMAP_WRITE) && !IS_DAX(inode) &&
!xfs_get_extsz_hint(ip)) {
/* Reserve delalloc blocks for regular writeback. */
@@ -981,7 +986,16 @@ xfs_file_iomap_begin(
iomap);
}
- lockmode = xfs_ilock_data_map_shared(ip);
+ /*
+ * COW writes will allocate delalloc space, so we need to make sure
+ * to take the lock exclusively here.
+ */
+ if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
+ lockmode = XFS_ILOCK_EXCL;
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ } else {
+ lockmode = xfs_ilock_data_map_shared(ip);
+ }
ASSERT(offset <= mp->m_super->s_maxbytes);
if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes)
@@ -991,16 +1005,24 @@ xfs_file_iomap_begin(
error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
&nimaps, 0);
- if (error) {
- xfs_iunlock(ip, lockmode);
- return error;
+ if (error)
+ goto out_unlock;
+
+ if (flags & IOMAP_REPORT) {
+ /* Trim the mapping to the nearest shared extent boundary. */
+ error = xfs_reflink_trim_around_shared(ip, &imap, &shared,
+ &trimmed);
+ if (error)
+ goto out_unlock;
}
- /* Trim the mapping to the nearest shared extent boundary. */
- error = xfs_reflink_trim_around_shared(ip, &imap, &shared, &trimmed);
- if (error) {
- xfs_iunlock(ip, lockmode);
- return error;
+ if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
+ error = xfs_reflink_reserve_cow(ip, &imap, &shared);
+ if (error)
+ goto out_unlock;
+
+ end_fsb = imap.br_startoff + imap.br_blockcount;
+ length = XFS_FSB_TO_B(mp, end_fsb) - offset;
}
if ((flags & IOMAP_WRITE) && imap_needs_alloc(inode, &imap, nimaps)) {
@@ -1039,6 +1061,9 @@ xfs_file_iomap_begin(
if (shared)
iomap->flags |= IOMAP_F_SHARED;
return 0;
+out_unlock:
+ xfs_iunlock(ip, lockmode);
+ return error;
}
static int
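/*
 * Editor's sketch, not part of the patch above: the lock-mode decision
 * the xfs_file_iomap_begin() hunk introduces.  Reflink COW writes create
 * delalloc reservations in the COW fork, which needs the ILOCK held
 * exclusively; everything else keeps the cheaper shared data-map lock.
 * lock_for_iomap() is a hypothetical helper name.
 */
static unsigned lock_for_iomap(struct xfs_inode *ip, unsigned flags)
{
	if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		return XFS_ILOCK_EXCL;
	}
	return xfs_ilock_data_map_shared(ip);
}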
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index fc7873942bea..b341f10cf481 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1009,6 +1009,7 @@ xfs_mountfs(
out_quota:
xfs_qm_unmount_quotas(mp);
out_rtunmount:
+ mp->m_super->s_flags &= ~MS_ACTIVE;
xfs_rtunmount_inodes(mp);
out_rele_rip:
IRELE(rip);
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 5965e9455d91..a279b4e7f5fe 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -182,7 +182,8 @@ xfs_reflink_trim_around_shared(
if (!xfs_is_reflink_inode(ip) ||
ISUNWRITTEN(irec) ||
irec->br_startblock == HOLESTARTBLOCK ||
- irec->br_startblock == DELAYSTARTBLOCK) {
+ irec->br_startblock == DELAYSTARTBLOCK ||
+ isnullstartblock(irec->br_startblock)) {
*shared = false;
return 0;
}
@@ -227,50 +228,54 @@ xfs_reflink_trim_around_shared(
}
}
-/* Create a CoW reservation for a range of blocks within a file. */
-static int
-__xfs_reflink_reserve_cow(
+/*
+ * Trim the passed-in imap to the next shared/unshared extent boundary, and
+ * if imap->br_startoff points to a shared extent, reserve space for it in the
+ * COW fork. In this case *shared is set to true, else to false.
+ *
+ * Note that imap will always contain the block numbers for the existing blocks
+ * in the data fork, as the upper layers need them for read-modify-write
+ * operations.
+ */
+int
+xfs_reflink_reserve_cow(
struct xfs_inode *ip,
- xfs_fileoff_t *offset_fsb,
- xfs_fileoff_t end_fsb,
- bool *skipped)
+ struct xfs_bmbt_irec *imap,
+ bool *shared)
{
- struct xfs_bmbt_irec got, prev, imap;
- xfs_fileoff_t orig_end_fsb;
- int nimaps, eof = 0, error = 0;
- bool shared = false, trimmed = false;
+ struct xfs_bmbt_irec got, prev;
+ xfs_fileoff_t end_fsb, orig_end_fsb;
+ int eof = 0, error = 0;
+ bool trimmed;
xfs_extnum_t idx;
xfs_extlen_t align;
- /* Already reserved? Skip the refcount btree access. */
- xfs_bmap_search_extents(ip, *offset_fsb, XFS_COW_FORK, &eof, &idx,
+ /*
+ * Search the COW fork extent list first. This serves two purposes:
+ * first this implements the speculative preallocation using cowextsize,
+ * so that we also unshare blocks adjacent to shared blocks instead
+ * of just the shared blocks themselves. Second, the lookup in the
+ * extent list is generally faster than going out to the shared extent
+ * tree.
+ */
+ xfs_bmap_search_extents(ip, imap->br_startoff, XFS_COW_FORK, &eof, &idx,
&got, &prev);
- if (!eof && got.br_startoff <= *offset_fsb) {
- end_fsb = orig_end_fsb = got.br_startoff + got.br_blockcount;
- trace_xfs_reflink_cow_found(ip, &got);
- goto done;
- }
+ if (!eof && got.br_startoff <= imap->br_startoff) {
+ trace_xfs_reflink_cow_found(ip, imap);
+ xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
- /* Read extent from the source file. */
- nimaps = 1;
- error = xfs_bmapi_read(ip, *offset_fsb, end_fsb - *offset_fsb,
- &imap, &nimaps, 0);
- if (error)
- goto out_unlock;
- ASSERT(nimaps == 1);
+ *shared = true;
+ return 0;
+ }
/* Trim the mapping to the nearest shared extent boundary. */
- error = xfs_reflink_trim_around_shared(ip, &imap, &shared, &trimmed);
+ error = xfs_reflink_trim_around_shared(ip, imap, shared, &trimmed);
if (error)
- goto out_unlock;
-
- end_fsb = orig_end_fsb = imap.br_startoff + imap.br_blockcount;
+ return error;
/* Not shared? Just report the (potentially capped) extent. */
- if (!shared) {
- *skipped = true;
- goto done;
- }
+ if (!*shared)
+ return 0;
/*
* Fork all the shared blocks from our write offset until the end of
@@ -278,72 +283,38 @@ __xfs_reflink_reserve_cow(
*/
error = xfs_qm_dqattach_locked(ip, 0);
if (error)
- goto out_unlock;
+ return error;
+
+ end_fsb = orig_end_fsb = imap->br_startoff + imap->br_blockcount;
align = xfs_eof_alignment(ip, xfs_get_cowextsz_hint(ip));
if (align)
end_fsb = roundup_64(end_fsb, align);
retry:
- error = xfs_bmapi_reserve_delalloc(ip, XFS_COW_FORK, *offset_fsb,
- end_fsb - *offset_fsb, &got,
- &prev, &idx, eof);
+ error = xfs_bmapi_reserve_delalloc(ip, XFS_COW_FORK, imap->br_startoff,
+ end_fsb - imap->br_startoff, &got, &prev, &idx, eof);
switch (error) {
case 0:
break;
case -ENOSPC:
case -EDQUOT:
/* retry without any preallocation */
- trace_xfs_reflink_cow_enospc(ip, &imap);
+ trace_xfs_reflink_cow_enospc(ip, imap);
if (end_fsb != orig_end_fsb) {
end_fsb = orig_end_fsb;
goto retry;
}
/*FALLTHRU*/
default:
- goto out_unlock;
+ return error;
}
if (end_fsb != orig_end_fsb)
xfs_inode_set_cowblocks_tag(ip);
trace_xfs_reflink_cow_alloc(ip, &got);
-done:
- *offset_fsb = end_fsb;
-out_unlock:
- return error;
-}
-
-/* Create a CoW reservation for part of a file. */
-int
-xfs_reflink_reserve_cow_range(
- struct xfs_inode *ip,
- xfs_off_t offset,
- xfs_off_t count)
-{
- struct xfs_mount *mp = ip->i_mount;
- xfs_fileoff_t offset_fsb, end_fsb;
- bool skipped = false;
- int error;
-
- trace_xfs_reflink_reserve_cow_range(ip, offset, count);
-
- offset_fsb = XFS_B_TO_FSBT(mp, offset);
- end_fsb = XFS_B_TO_FSB(mp, offset + count);
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- while (offset_fsb < end_fsb) {
- error = __xfs_reflink_reserve_cow(ip, &offset_fsb, end_fsb,
- &skipped);
- if (error) {
- trace_xfs_reflink_reserve_cow_range_error(ip, error,
- _RET_IP_);
- break;
- }
- }
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
- return error;
+ return 0;
}
/* Allocate all CoW reservations covering a range of blocks in a file. */
@@ -358,9 +329,8 @@ __xfs_reflink_allocate_cow(
struct xfs_defer_ops dfops;
struct xfs_trans *tp;
xfs_fsblock_t first_block;
- xfs_fileoff_t next_fsb;
int nimaps = 1, error;
- bool skipped = false;
+ bool shared;
xfs_defer_init(&dfops, &first_block);
@@ -371,33 +341,38 @@ __xfs_reflink_allocate_cow(
xfs_ilock(ip, XFS_ILOCK_EXCL);
- next_fsb = *offset_fsb;
- error = __xfs_reflink_reserve_cow(ip, &next_fsb, end_fsb, &skipped);
+ /* Read extent from the source file. */
+ nimaps = 1;
+ error = xfs_bmapi_read(ip, *offset_fsb, end_fsb - *offset_fsb,
+ &imap, &nimaps, 0);
+ if (error)
+ goto out_unlock;
+ ASSERT(nimaps == 1);
+
+ error = xfs_reflink_reserve_cow(ip, &imap, &shared);
if (error)
goto out_trans_cancel;
- if (skipped) {
- *offset_fsb = next_fsb;
+ if (!shared) {
+ *offset_fsb = imap.br_startoff + imap.br_blockcount;
goto out_trans_cancel;
}
xfs_trans_ijoin(tp, ip, 0);
- error = xfs_bmapi_write(tp, ip, *offset_fsb, next_fsb - *offset_fsb,
+ error = xfs_bmapi_write(tp, ip, imap.br_startoff, imap.br_blockcount,
XFS_BMAPI_COWFORK, &first_block,
XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK),
&imap, &nimaps, &dfops);
if (error)
goto out_trans_cancel;
- /* We might not have been able to map the whole delalloc extent */
- *offset_fsb = min(*offset_fsb + imap.br_blockcount, next_fsb);
-
error = xfs_defer_finish(&tp, &dfops, NULL);
if (error)
goto out_trans_cancel;
error = xfs_trans_commit(tp);
+ *offset_fsb = imap.br_startoff + imap.br_blockcount;
out_unlock:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
@@ -536,58 +511,49 @@ xfs_reflink_cancel_cow_blocks(
xfs_fileoff_t offset_fsb,
xfs_fileoff_t end_fsb)
{
- struct xfs_bmbt_irec irec;
- xfs_filblks_t count_fsb;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+ struct xfs_bmbt_irec got, prev, del;
+ xfs_extnum_t idx;
xfs_fsblock_t firstfsb;
struct xfs_defer_ops dfops;
- int error = 0;
- int nimaps;
+ int error = 0, eof = 0;
if (!xfs_is_reflink_inode(ip))
return 0;
- /* Go find the old extent in the CoW fork. */
- while (offset_fsb < end_fsb) {
- nimaps = 1;
- count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
- error = xfs_bmapi_read(ip, offset_fsb, count_fsb, &irec,
- &nimaps, XFS_BMAPI_COWFORK);
- if (error)
- break;
- ASSERT(nimaps == 1);
-
- trace_xfs_reflink_cancel_cow(ip, &irec);
+ xfs_bmap_search_extents(ip, offset_fsb, XFS_COW_FORK, &eof, &idx,
+ &got, &prev);
+ if (eof)
+ return 0;
- if (irec.br_startblock == DELAYSTARTBLOCK) {
- /* Free a delayed allocation. */
- xfs_mod_fdblocks(ip->i_mount, irec.br_blockcount,
- false);
- ip->i_delayed_blks -= irec.br_blockcount;
+ while (got.br_startoff < end_fsb) {
+ del = got;
+ xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb);
+ trace_xfs_reflink_cancel_cow(ip, &del);
- /* Remove the mapping from the CoW fork. */
- error = xfs_bunmapi_cow(ip, &irec);
+ if (isnullstartblock(del.br_startblock)) {
+ error = xfs_bmap_del_extent_delay(ip, XFS_COW_FORK,
+ &idx, &got, &del);
if (error)
break;
- } else if (irec.br_startblock == HOLESTARTBLOCK) {
- /* empty */
} else {
xfs_trans_ijoin(*tpp, ip, 0);
xfs_defer_init(&dfops, &firstfsb);
/* Free the CoW orphan record. */
error = xfs_refcount_free_cow_extent(ip->i_mount,
- &dfops, irec.br_startblock,
- irec.br_blockcount);
+ &dfops, del.br_startblock,
+ del.br_blockcount);
if (error)
break;
xfs_bmap_add_free(ip->i_mount, &dfops,
- irec.br_startblock, irec.br_blockcount,
+ del.br_startblock, del.br_blockcount,
NULL);
/* Update quota accounting */
xfs_trans_mod_dquot_byino(*tpp, ip, XFS_TRANS_DQ_BCOUNT,
- -(long)irec.br_blockcount);
+ -(long)del.br_blockcount);
/* Roll the transaction */
error = xfs_defer_finish(tpp, &dfops, ip);
@@ -597,15 +563,18 @@ xfs_reflink_cancel_cow_blocks(
}
/* Remove the mapping from the CoW fork. */
- error = xfs_bunmapi_cow(ip, &irec);
- if (error)
- break;
+ xfs_bmap_del_extent_cow(ip, &idx, &got, &del);
}
- /* Roll on... */
- offset_fsb = irec.br_startoff + irec.br_blockcount;
+ if (++idx >= ifp->if_bytes / sizeof(struct xfs_bmbt_rec))
+ break;
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &got);
}
+ /* clear tag if cow fork is emptied */
+ if (!ifp->if_bytes)
+ xfs_inode_clear_cowblocks_tag(ip);
+
return error;
}
@@ -668,25 +637,26 @@ xfs_reflink_end_cow(
xfs_off_t offset,
xfs_off_t count)
{
- struct xfs_bmbt_irec irec;
- struct xfs_bmbt_irec uirec;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+ struct xfs_bmbt_irec got, prev, del;
struct xfs_trans *tp;
xfs_fileoff_t offset_fsb;
xfs_fileoff_t end_fsb;
- xfs_filblks_t count_fsb;
xfs_fsblock_t firstfsb;
struct xfs_defer_ops dfops;
- int error;
+ int error, eof = 0;
unsigned int resblks;
- xfs_filblks_t ilen;
xfs_filblks_t rlen;
- int nimaps;
+ xfs_extnum_t idx;
trace_xfs_reflink_end_cow(ip, offset, count);
+ /* No COW extents? That's easy! */
+ if (ifp->if_bytes == 0)
+ return 0;
+
offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);
- count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
/* Start a rolling transaction to switch the mappings */
resblks = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK);
@@ -698,72 +668,65 @@ xfs_reflink_end_cow(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
- /* Go find the old extent in the CoW fork. */
- while (offset_fsb < end_fsb) {
- /* Read extent from the source file */
- nimaps = 1;
- count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
- error = xfs_bmapi_read(ip, offset_fsb, count_fsb, &irec,
- &nimaps, XFS_BMAPI_COWFORK);
- if (error)
- goto out_cancel;
- ASSERT(nimaps == 1);
+ xfs_bmap_search_extents(ip, end_fsb - 1, XFS_COW_FORK, &eof, &idx,
+ &got, &prev);
- ASSERT(irec.br_startblock != DELAYSTARTBLOCK);
- trace_xfs_reflink_cow_remap(ip, &irec);
+ /* If there is a hole at end_fsb - 1 go to the previous extent */
+ if (eof || got.br_startoff > end_fsb) {
+ ASSERT(idx > 0);
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, --idx), &got);
+ }
- /*
- * We can have a hole in the CoW fork if part of a directio
- * write is CoW but part of it isn't.
- */
- rlen = ilen = irec.br_blockcount;
- if (irec.br_startblock == HOLESTARTBLOCK)
+ /* Walk backwards until we're out of the I/O range... */
+ while (got.br_startoff + got.br_blockcount > offset_fsb) {
+ del = got;
+ xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb);
+
+ /* Extent delete may have bumped idx forward */
+ if (!del.br_blockcount) {
+ idx--;
goto next_extent;
+ }
+
+ ASSERT(!isnullstartblock(got.br_startblock));
/* Unmap the old blocks in the data fork. */
- while (rlen) {
- xfs_defer_init(&dfops, &firstfsb);
- error = __xfs_bunmapi(tp, ip, irec.br_startoff,
- &rlen, 0, 1, &firstfsb, &dfops);
- if (error)
- goto out_defer;
-
- /*
- * Trim the extent to whatever got unmapped.
- * Remember, bunmapi works backwards.
- */
- uirec.br_startblock = irec.br_startblock + rlen;
- uirec.br_startoff = irec.br_startoff + rlen;
- uirec.br_blockcount = irec.br_blockcount - rlen;
- irec.br_blockcount = rlen;
- trace_xfs_reflink_cow_remap_piece(ip, &uirec);
+ xfs_defer_init(&dfops, &firstfsb);
+ rlen = del.br_blockcount;
+ error = __xfs_bunmapi(tp, ip, del.br_startoff, &rlen, 0, 1,
+ &firstfsb, &dfops);
+ if (error)
+ goto out_defer;
- /* Free the CoW orphan record. */
- error = xfs_refcount_free_cow_extent(tp->t_mountp,
- &dfops, uirec.br_startblock,
- uirec.br_blockcount);
- if (error)
- goto out_defer;
+ /* Trim the extent to whatever got unmapped. */
+ if (rlen) {
+ xfs_trim_extent(&del, del.br_startoff + rlen,
+ del.br_blockcount - rlen);
+ }
+ trace_xfs_reflink_cow_remap(ip, &del);
- /* Map the new blocks into the data fork. */
- error = xfs_bmap_map_extent(tp->t_mountp, &dfops,
- ip, &uirec);
- if (error)
- goto out_defer;
+ /* Free the CoW orphan record. */
+ error = xfs_refcount_free_cow_extent(tp->t_mountp, &dfops,
+ del.br_startblock, del.br_blockcount);
+ if (error)
+ goto out_defer;
- /* Remove the mapping from the CoW fork. */
- error = xfs_bunmapi_cow(ip, &uirec);
- if (error)
- goto out_defer;
+ /* Map the new blocks into the data fork. */
+ error = xfs_bmap_map_extent(tp->t_mountp, &dfops, ip, &del);
+ if (error)
+ goto out_defer;
- error = xfs_defer_finish(&tp, &dfops, ip);
- if (error)
- goto out_defer;
- }
+ /* Remove the mapping from the CoW fork. */
+ xfs_bmap_del_extent_cow(ip, &idx, &got, &del);
+
+ error = xfs_defer_finish(&tp, &dfops, ip);
+ if (error)
+ goto out_defer;
next_extent:
- /* Roll on... */
- offset_fsb = irec.br_startoff + ilen;
+ if (idx < 0)
+ break;
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &got);
}
error = xfs_trans_commit(tp);
@@ -774,7 +737,6 @@ next_extent:
out_defer:
xfs_defer_cancel(&dfops);
-out_cancel:
xfs_trans_cancel(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
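
One subtlety in the reworked xfs_reflink_end_cow() above: __xfs_bunmapi() unmaps from the end of the range towards the front and returns, in rlen, the length it did not get to. Only the tail that actually came off the data fork may be remapped from the CoW fork, which is what the xfs_trim_extent() call after the unmap enforces. Restated with a comment, assuming the same locals as the hunk:

	rlen = del.br_blockcount;
	error = __xfs_bunmapi(tp, ip, del.br_startoff, &rlen, 0, 1,
			&firstfsb, &dfops);
	if (error)
		goto out_defer;

	if (rlen) {
		/*
		 * rlen blocks at the front are still mapped; keep only the
		 * unmapped tail [startoff + rlen, startoff + blockcount).
		 */
		xfs_trim_extent(&del, del.br_startoff + rlen,
				del.br_blockcount - rlen);
	}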
@@ -1312,19 +1274,26 @@ out_error:
*/
int
xfs_reflink_remap_range(
- struct xfs_inode *src,
- xfs_off_t srcoff,
- struct xfs_inode *dest,
- xfs_off_t destoff,
- xfs_off_t len,
- unsigned int flags)
+ struct file *file_in,
+ loff_t pos_in,
+ struct file *file_out,
+ loff_t pos_out,
+ u64 len,
+ bool is_dedupe)
{
+ struct inode *inode_in = file_inode(file_in);
+ struct xfs_inode *src = XFS_I(inode_in);
+ struct inode *inode_out = file_inode(file_out);
+ struct xfs_inode *dest = XFS_I(inode_out);
struct xfs_mount *mp = src->i_mount;
+ loff_t bs = inode_out->i_sb->s_blocksize;
+ bool same_inode = (inode_in == inode_out);
xfs_fileoff_t sfsbno, dfsbno;
xfs_filblks_t fsblen;
- int error;
xfs_extlen_t cowextsize;
- bool is_same;
+ loff_t isize;
+ ssize_t ret;
+ loff_t blen;
if (!xfs_sb_version_hasreflink(&mp->m_sb))
return -EOPNOTSUPP;
@@ -1332,17 +1301,8 @@ xfs_reflink_remap_range(
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
- /* Don't reflink realtime inodes */
- if (XFS_IS_REALTIME_INODE(src) || XFS_IS_REALTIME_INODE(dest))
- return -EINVAL;
-
- if (flags & ~XFS_REFLINK_ALL)
- return -EINVAL;
-
- trace_xfs_reflink_remap_range(src, srcoff, len, dest, destoff);
-
/* Lock both files against IO */
- if (src->i_ino == dest->i_ino) {
+ if (same_inode) {
xfs_ilock(src, XFS_IOLOCK_EXCL);
xfs_ilock(src, XFS_MMAPLOCK_EXCL);
} else {
@@ -1350,39 +1310,126 @@ xfs_reflink_remap_range(
xfs_lock_two_inodes(src, dest, XFS_MMAPLOCK_EXCL);
}
+ /* Don't touch certain kinds of inodes */
+ ret = -EPERM;
+ if (IS_IMMUTABLE(inode_out))
+ goto out_unlock;
+
+ ret = -ETXTBSY;
+ if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out))
+ goto out_unlock;
+
+ /* Don't reflink dirs, pipes, sockets... */
+ ret = -EISDIR;
+ if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
+ goto out_unlock;
+ ret = -EINVAL;
+ if (S_ISFIFO(inode_in->i_mode) || S_ISFIFO(inode_out->i_mode))
+ goto out_unlock;
+ if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
+ goto out_unlock;
+
+ /* Don't reflink realtime inodes */
+ if (XFS_IS_REALTIME_INODE(src) || XFS_IS_REALTIME_INODE(dest))
+ goto out_unlock;
+
+ /* Don't share DAX file data for now. */
+ if (IS_DAX(inode_in) || IS_DAX(inode_out))
+ goto out_unlock;
+
+ /* Are we going all the way to the end? */
+ isize = i_size_read(inode_in);
+ if (isize == 0) {
+ ret = 0;
+ goto out_unlock;
+ }
+
+ if (len == 0)
+ len = isize - pos_in;
+
+ /* Ensure offsets don't wrap and the input is inside i_size */
+ if (pos_in + len < pos_in || pos_out + len < pos_out ||
+ pos_in + len > isize)
+ goto out_unlock;
+
+ /* Don't allow dedupe past EOF in the dest file */
+ if (is_dedupe) {
+ loff_t disize;
+
+ disize = i_size_read(inode_out);
+ if (pos_out >= disize || pos_out + len > disize)
+ goto out_unlock;
+ }
+
+ /* If we're linking to EOF, continue to the block boundary. */
+ if (pos_in + len == isize)
+ blen = ALIGN(isize, bs) - pos_in;
+ else
+ blen = len;
+
+ /* Only reflink if we're aligned to block boundaries */
+ if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_in + blen, bs) ||
+ !IS_ALIGNED(pos_out, bs) || !IS_ALIGNED(pos_out + blen, bs))
+ goto out_unlock;
+
+ /* Don't allow overlapped reflink within the same file */
+ if (same_inode) {
+ if (pos_out + blen > pos_in && pos_out < pos_in + blen)
+ goto out_unlock;
+ }
+
+ /* Wait for the completion of any pending IOs on both files */
+ inode_dio_wait(inode_in);
+ if (!same_inode)
+ inode_dio_wait(inode_out);
+
+ ret = filemap_write_and_wait_range(inode_in->i_mapping,
+ pos_in, pos_in + len - 1);
+ if (ret)
+ goto out_unlock;
+
+ ret = filemap_write_and_wait_range(inode_out->i_mapping,
+ pos_out, pos_out + len - 1);
+ if (ret)
+ goto out_unlock;
+
+ trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
+
/*
* Check that the extents are the same.
*/
- if (flags & XFS_REFLINK_DEDUPE) {
- is_same = false;
- error = xfs_compare_extents(VFS_I(src), srcoff, VFS_I(dest),
- destoff, len, &is_same);
- if (error)
- goto out_error;
+ if (is_dedupe) {
+ bool is_same = false;
+
+ ret = xfs_compare_extents(inode_in, pos_in, inode_out, pos_out,
+ len, &is_same);
+ if (ret)
+ goto out_unlock;
if (!is_same) {
- error = -EBADE;
- goto out_error;
+ ret = -EBADE;
+ goto out_unlock;
}
}
- error = xfs_reflink_set_inode_flag(src, dest);
- if (error)
- goto out_error;
+ ret = xfs_reflink_set_inode_flag(src, dest);
+ if (ret)
+ goto out_unlock;
/*
* Invalidate the page cache so that we can clear any CoW mappings
* in the destination file.
*/
- truncate_inode_pages_range(&VFS_I(dest)->i_data, destoff,
- PAGE_ALIGN(destoff + len) - 1);
+ truncate_inode_pages_range(&inode_out->i_data, pos_out,
+ PAGE_ALIGN(pos_out + len) - 1);
- dfsbno = XFS_B_TO_FSBT(mp, destoff);
- sfsbno = XFS_B_TO_FSBT(mp, srcoff);
+ dfsbno = XFS_B_TO_FSBT(mp, pos_out);
+ sfsbno = XFS_B_TO_FSBT(mp, pos_in);
fsblen = XFS_B_TO_FSB(mp, len);
- error = xfs_reflink_remap_blocks(src, sfsbno, dest, dfsbno, fsblen,
- destoff + len);
- if (error)
- goto out_error;
+ ret = xfs_reflink_remap_blocks(src, sfsbno, dest, dfsbno, fsblen,
+ pos_out + len);
+ if (ret)
+ goto out_unlock;
/*
* Carry the cowextsize hint from src to dest if we're sharing the
@@ -1390,26 +1437,24 @@ xfs_reflink_remap_range(
* has a cowextsize hint, and the destination file does not.
*/
cowextsize = 0;
- if (srcoff == 0 && len == i_size_read(VFS_I(src)) &&
+ if (pos_in == 0 && len == i_size_read(inode_in) &&
(src->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) &&
- destoff == 0 && len >= i_size_read(VFS_I(dest)) &&
+ pos_out == 0 && len >= i_size_read(inode_out) &&
!(dest->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
cowextsize = src->i_d.di_cowextsize;
- error = xfs_reflink_update_dest(dest, destoff + len, cowextsize);
- if (error)
- goto out_error;
+ ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize);
-out_error:
+out_unlock:
xfs_iunlock(src, XFS_MMAPLOCK_EXCL);
xfs_iunlock(src, XFS_IOLOCK_EXCL);
if (src->i_ino != dest->i_ino) {
xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
xfs_iunlock(dest, XFS_IOLOCK_EXCL);
}
- if (error)
- trace_xfs_reflink_remap_range_error(dest, error, _RET_IP_);
- return error;
+ if (ret)
+ trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
+ return ret;
}
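
The reshaped xfs_reflink_remap_range() now performs the generic clone/dedupe range validation itself: offset wrap-around, staying inside i_size, block alignment (with the special case of linking up to EOF), and rejecting overlapping ranges within one file. Below is a standalone, userspace-style sketch of just those rules, written purely to illustrate what the hunk enforces; the helper name, simplified types and boolean return are not part of the kernel interface:

#include <stdbool.h>
#include <stdint.h>

#define IS_ALIGNED(x, a)	(((x) & ((uint64_t)(a) - 1)) == 0)
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

/* Return true if cloning len bytes from pos_in to pos_out is permitted. */
static bool remap_range_ok(uint64_t pos_in, uint64_t pos_out, uint64_t len,
			   uint64_t isize, uint64_t blocksize, bool same_inode)
{
	uint64_t blen;

	/* Offsets must not wrap and the source must lie inside i_size. */
	if (pos_in + len < pos_in || pos_out + len < pos_out ||
	    pos_in + len > isize)
		return false;

	/* Linking up to EOF extends to the containing block boundary. */
	if (pos_in + len == isize)
		blen = ALIGN_UP(isize, blocksize) - pos_in;
	else
		blen = len;

	/* Both ends of both ranges must sit on block boundaries. */
	if (!IS_ALIGNED(pos_in, blocksize) || !IS_ALIGNED(pos_in + blen, blocksize) ||
	    !IS_ALIGNED(pos_out, blocksize) || !IS_ALIGNED(pos_out + blen, blocksize))
		return false;

	/* Within one file the source and destination must not overlap. */
	if (same_inode && pos_out + blen > pos_in && pos_out < pos_in + blen)
		return false;

	return true;
}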
/*
diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h
index 5dc3c8ac12aa..fad11607c9ad 100644
--- a/fs/xfs/xfs_reflink.h
+++ b/fs/xfs/xfs_reflink.h
@@ -26,8 +26,8 @@ extern int xfs_reflink_find_shared(struct xfs_mount *mp, xfs_agnumber_t agno,
extern int xfs_reflink_trim_around_shared(struct xfs_inode *ip,
struct xfs_bmbt_irec *irec, bool *shared, bool *trimmed);
-extern int xfs_reflink_reserve_cow_range(struct xfs_inode *ip,
- xfs_off_t offset, xfs_off_t count);
+extern int xfs_reflink_reserve_cow(struct xfs_inode *ip,
+ struct xfs_bmbt_irec *imap, bool *shared);
extern int xfs_reflink_allocate_cow_range(struct xfs_inode *ip,
xfs_off_t offset, xfs_off_t count);
extern bool xfs_reflink_find_cow_mapping(struct xfs_inode *ip, xfs_off_t offset,
@@ -43,11 +43,8 @@ extern int xfs_reflink_cancel_cow_range(struct xfs_inode *ip, xfs_off_t offset,
extern int xfs_reflink_end_cow(struct xfs_inode *ip, xfs_off_t offset,
xfs_off_t count);
extern int xfs_reflink_recover_cow(struct xfs_mount *mp);
-#define XFS_REFLINK_DEDUPE 1 /* only reflink if contents match */
-#define XFS_REFLINK_ALL (XFS_REFLINK_DEDUPE)
-extern int xfs_reflink_remap_range(struct xfs_inode *src, xfs_off_t srcoff,
- struct xfs_inode *dest, xfs_off_t destoff, xfs_off_t len,
- unsigned int flags);
+extern int xfs_reflink_remap_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out, u64 len, bool is_dedupe);
extern int xfs_reflink_clear_inode_flag(struct xfs_inode *ip,
struct xfs_trans **tpp);
extern int xfs_reflink_unshare(struct xfs_inode *ip, xfs_off_t offset,
diff --git a/fs/xfs/xfs_sysfs.c b/fs/xfs/xfs_sysfs.c
index 5f8d55d29a11..276d3023d60f 100644
--- a/fs/xfs/xfs_sysfs.c
+++ b/fs/xfs/xfs_sysfs.c
@@ -512,13 +512,13 @@ static struct attribute *xfs_error_attrs[] = {
};
-struct kobj_type xfs_error_cfg_ktype = {
+static struct kobj_type xfs_error_cfg_ktype = {
.release = xfs_sysfs_release,
.sysfs_ops = &xfs_sysfs_ops,
.default_attrs = xfs_error_attrs,
};
-struct kobj_type xfs_error_ktype = {
+static struct kobj_type xfs_error_ktype = {
.release = xfs_sysfs_release,
.sysfs_ops = &xfs_sysfs_ops,
};
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index ad188d3a83f3..0907752be62d 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -3346,7 +3346,7 @@ DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_alloc);
DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_found);
DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_enospc);
-DEFINE_RW_EVENT(xfs_reflink_reserve_cow_range);
+DEFINE_RW_EVENT(xfs_reflink_reserve_cow);
DEFINE_RW_EVENT(xfs_reflink_allocate_cow_range);
DEFINE_INODE_IREC_EVENT(xfs_reflink_bounce_dio_write);
@@ -3356,9 +3356,7 @@ DEFINE_INODE_IREC_EVENT(xfs_reflink_trim_irec);
DEFINE_SIMPLE_IO_EVENT(xfs_reflink_cancel_cow_range);
DEFINE_SIMPLE_IO_EVENT(xfs_reflink_end_cow);
DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_remap);
-DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_remap_piece);
-DEFINE_INODE_ERROR_EVENT(xfs_reflink_reserve_cow_range_error);
DEFINE_INODE_ERROR_EVENT(xfs_reflink_allocate_cow_range_error);
DEFINE_INODE_ERROR_EVENT(xfs_reflink_cancel_cow_range_error);
DEFINE_INODE_ERROR_EVENT(xfs_reflink_end_cow_error);
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
index 43199a049da5..63554e9f6e0c 100644
--- a/include/asm-generic/export.h
+++ b/include/asm-generic/export.h
@@ -70,7 +70,7 @@ KSYM(__kcrctab_\name):
#include <generated/autoksyms.h>
#define __EXPORT_SYMBOL(sym, val, sec) \
- __cond_export_sym(sym, val, sec, config_enabled(__KSYM_##sym))
+ __cond_export_sym(sym, val, sec, __is_defined(__KSYM_##sym))
#define __cond_export_sym(sym, val, sec, conf) \
___cond_export_sym(sym, val, sec, conf)
#define ___cond_export_sym(sym, val, sec, enabled) \
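
The config_enabled() to __is_defined() switch above relies on the usual kconfig.h preprocessor trick: if the tested macro is defined to 1, token pasting produces __ARG_PLACEHOLDER_1, whose trailing comma shifts a literal 1 into the argument slot that gets selected; otherwise the 0 fallback is selected. A hedged userspace reconstruction of that mechanism follows; the helper names loosely mirror kconfig.h but are reproduced here only as a sketch:

#include <stdio.h>

#define __ARG_PLACEHOLDER_1		0,
#define __take_second_arg(__ignored, val, ...)	val
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x)			___is_defined(x)

#define __KSYM_foo 1			/* stands in for an autoksyms entry */
/* __KSYM_bar intentionally left undefined */

int main(void)
{
	printf("foo: %d\n", __is_defined(__KSYM_foo));	/* prints 1 */
	printf("bar: %d\n", __is_defined(__KSYM_bar));	/* prints 0 */
	return 0;
}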
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index e336e3901876..b352a7b812e6 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -76,6 +76,7 @@
#include <drm/drm_os_linux.h>
#include <drm/drm_sarea.h>
#include <drm/drm_vma_manager.h>
+#include <drm/drm_drv.h>
struct module;
@@ -135,35 +136,12 @@ struct dma_buf_attachment;
#define DRM_UT_PRIME 0x08
#define DRM_UT_ATOMIC 0x10
#define DRM_UT_VBL 0x20
-
-extern __printf(6, 7)
-void drm_dev_printk(const struct device *dev, const char *level,
- unsigned int category, const char *function_name,
- const char *prefix, const char *format, ...);
-
-extern __printf(3, 4)
-void drm_printk(const char *level, unsigned int category,
- const char *format, ...);
+#define DRM_UT_STATE 0x40
/***********************************************************************/
/** \name DRM template customization defaults */
/*@{*/
-/* driver capabilities and requirements mask */
-#define DRIVER_USE_AGP 0x1
-#define DRIVER_LEGACY 0x2
-#define DRIVER_PCI_DMA 0x8
-#define DRIVER_SG 0x10
-#define DRIVER_HAVE_DMA 0x20
-#define DRIVER_HAVE_IRQ 0x40
-#define DRIVER_IRQ_SHARED 0x80
-#define DRIVER_GEM 0x1000
-#define DRIVER_MODESET 0x2000
-#define DRIVER_PRIME 0x4000
-#define DRIVER_RENDER 0x8000
-#define DRIVER_ATOMIC 0x10000
-#define DRIVER_KMS_LEGACY_CONTEXT 0x20000
-
/***********************************************************************/
/** \name Macros to make printk easier */
/*@{*/
@@ -306,6 +284,27 @@ void drm_printk(const char *level, unsigned int category,
#define DRM_DEBUG_PRIME_RATELIMITED(fmt, args...) \
DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##args)
+/* Format strings and argument splitters to simplify printing
+ * various "complex" objects
+ */
+#define DRM_MODE_FMT "%d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x"
+#define DRM_MODE_ARG(m) \
+ (m)->base.id, (m)->name, (m)->vrefresh, (m)->clock, \
+ (m)->hdisplay, (m)->hsync_start, (m)->hsync_end, (m)->htotal, \
+ (m)->vdisplay, (m)->vsync_start, (m)->vsync_end, (m)->vtotal, \
+ (m)->type, (m)->flags
+
+#define DRM_RECT_FMT "%dx%d%+d%+d"
+#define DRM_RECT_ARG(r) drm_rect_width(r), drm_rect_height(r), (r)->x1, (r)->y1
+
+/* for rect's in fixed-point format: */
+#define DRM_RECT_FP_FMT "%d.%06ux%d.%06u%+d.%06u%+d.%06u"
+#define DRM_RECT_FP_ARG(r) \
+ drm_rect_width(r) >> 16, ((drm_rect_width(r) & 0xffff) * 15625) >> 10, \
+ drm_rect_height(r) >> 16, ((drm_rect_height(r) & 0xffff) * 15625) >> 10, \
+ (r)->x1 >> 16, (((r)->x1 & 0xffff) * 15625) >> 10, \
+ (r)->y1 >> 16, (((r)->y1 & 0xffff) * 15625) >> 10
+
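
The DRM_RECT_FP_* helpers added above print drm_rect coordinates stored in 16.16 fixed point: the integer part is the value shifted right by 16, and the fractional 16 bits become six decimal digits via (frac * 15625) >> 10, which equals frac * 1000000 / 65536 without overflowing 32-bit arithmetic. A small standalone check of that conversion, independent of the DRM headers:

#include <stdio.h>
#include <stdint.h>

/* Low 16 fractional bits of a 16.16 value scaled to millionths, the same
 * arithmetic DRM_RECT_FP_ARG() feeds into the %06u conversion. */
static unsigned int fp16_frac_to_micro(uint32_t v)
{
	return ((v & 0xffff) * 15625) >> 10;
}

int main(void)
{
	uint32_t w = (1920u << 16) | 0x8000;	/* 1920.5 in 16.16 */

	printf("%u.%06u\n", w >> 16, fp16_frac_to_micro(w));	/* 1920.500000 */
	return 0;
}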
/*@}*/
/***********************************************************************/
@@ -458,263 +457,6 @@ struct drm_lock_data {
#define DRM_SCANOUTPOS_IN_VBLANK (1 << 1)
#define DRM_SCANOUTPOS_ACCURATE (1 << 2)
-/**
- * DRM driver structure. This structure represent the common code for
- * a family of cards. There will one drm_device for each card present
- * in this family
- */
-struct drm_driver {
- int (*load) (struct drm_device *, unsigned long flags);
- int (*firstopen) (struct drm_device *);
- int (*open) (struct drm_device *, struct drm_file *);
- void (*preclose) (struct drm_device *, struct drm_file *file_priv);
- void (*postclose) (struct drm_device *, struct drm_file *);
- void (*lastclose) (struct drm_device *);
- int (*unload) (struct drm_device *);
- int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
- int (*dma_quiescent) (struct drm_device *);
- int (*context_dtor) (struct drm_device *dev, int context);
- int (*set_busid)(struct drm_device *dev, struct drm_master *master);
-
- /**
- * get_vblank_counter - get raw hardware vblank counter
- * @dev: DRM device
- * @pipe: counter to fetch
- *
- * Driver callback for fetching a raw hardware vblank counter for @crtc.
- * If a device doesn't have a hardware counter, the driver can simply
- * use drm_vblank_no_hw_counter() function. The DRM core will account for
- * missed vblank events while interrupts where disabled based on system
- * timestamps.
- *
- * Wraparound handling and loss of events due to modesetting is dealt
- * with in the DRM core code.
- *
- * RETURNS
- * Raw vblank counter value.
- */
- u32 (*get_vblank_counter) (struct drm_device *dev, unsigned int pipe);
-
- /**
- * enable_vblank - enable vblank interrupt events
- * @dev: DRM device
- * @pipe: which irq to enable
- *
- * Enable vblank interrupts for @crtc. If the device doesn't have
- * a hardware vblank counter, the driver should use the
- * drm_vblank_no_hw_counter() function that keeps a virtual counter.
- *
- * RETURNS
- * Zero on success, appropriate errno if the given @crtc's vblank
- * interrupt cannot be enabled.
- */
- int (*enable_vblank) (struct drm_device *dev, unsigned int pipe);
-
- /**
- * disable_vblank - disable vblank interrupt events
- * @dev: DRM device
- * @pipe: which irq to enable
- *
- * Disable vblank interrupts for @crtc. If the device doesn't have
- * a hardware vblank counter, the driver should use the
- * drm_vblank_no_hw_counter() function that keeps a virtual counter.
- */
- void (*disable_vblank) (struct drm_device *dev, unsigned int pipe);
-
- /**
- * Called by \c drm_device_is_agp. Typically used to determine if a
- * card is really attached to AGP or not.
- *
- * \param dev DRM device handle
- *
- * \returns
- * One of three values is returned depending on whether or not the
- * card is absolutely \b not AGP (return of 0), absolutely \b is AGP
- * (return of 1), or may or may not be AGP (return of 2).
- */
- int (*device_is_agp) (struct drm_device *dev);
-
- /**
- * Called by vblank timestamping code.
- *
- * Return the current display scanout position from a crtc, and an
- * optional accurate ktime_get timestamp of when position was measured.
- *
- * \param dev DRM device.
- * \param pipe Id of the crtc to query.
- * \param flags Flags from the caller (DRM_CALLED_FROM_VBLIRQ or 0).
- * \param *vpos Target location for current vertical scanout position.
- * \param *hpos Target location for current horizontal scanout position.
- * \param *stime Target location for timestamp taken immediately before
- * scanout position query. Can be NULL to skip timestamp.
- * \param *etime Target location for timestamp taken immediately after
- * scanout position query. Can be NULL to skip timestamp.
- * \param mode Current display timings.
- *
- * Returns vpos as a positive number while in active scanout area.
- * Returns vpos as a negative number inside vblank, counting the number
- * of scanlines to go until end of vblank, e.g., -1 means "one scanline
- * until start of active scanout / end of vblank."
- *
- * \return Flags, or'ed together as follows:
- *
- * DRM_SCANOUTPOS_VALID = Query successful.
- * DRM_SCANOUTPOS_INVBL = Inside vblank.
- * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
- * this flag means that returned position may be offset by a constant
- * but unknown small number of scanlines wrt. real scanout position.
- *
- */
- int (*get_scanout_position) (struct drm_device *dev, unsigned int pipe,
- unsigned int flags, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode);
-
- /**
- * Called by \c drm_get_last_vbltimestamp. Should return a precise
- * timestamp when the most recent VBLANK interval ended or will end.
- *
- * Specifically, the timestamp in @vblank_time should correspond as
- * closely as possible to the time when the first video scanline of
- * the video frame after the end of VBLANK will start scanning out,
- * the time immediately after end of the VBLANK interval. If the
- * @crtc is currently inside VBLANK, this will be a time in the future.
- * If the @crtc is currently scanning out a frame, this will be the
- * past start time of the current scanout. This is meant to adhere
- * to the OpenML OML_sync_control extension specification.
- *
- * \param dev dev DRM device handle.
- * \param pipe crtc for which timestamp should be returned.
- * \param *max_error Maximum allowable timestamp error in nanoseconds.
- * Implementation should strive to provide timestamp
- * with an error of at most *max_error nanoseconds.
- * Returns true upper bound on error for timestamp.
- * \param *vblank_time Target location for returned vblank timestamp.
- * \param flags 0 = Defaults, no special treatment needed.
- * \param DRM_CALLED_FROM_VBLIRQ = Function is called from vblank
- * irq handler. Some drivers need to apply some workarounds
- * for gpu-specific vblank irq quirks if flag is set.
- *
- * \returns
- * Zero if timestamping isn't supported in current display mode or a
- * negative number on failure. A positive status code on success,
- * which describes how the vblank_time timestamp was computed.
- */
- int (*get_vblank_timestamp) (struct drm_device *dev, unsigned int pipe,
- int *max_error,
- struct timeval *vblank_time,
- unsigned flags);
-
- /* these have to be filled in */
-
- irqreturn_t(*irq_handler) (int irq, void *arg);
- void (*irq_preinstall) (struct drm_device *dev);
- int (*irq_postinstall) (struct drm_device *dev);
- void (*irq_uninstall) (struct drm_device *dev);
-
- /* Master routines */
- int (*master_create)(struct drm_device *dev, struct drm_master *master);
- void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
- /**
- * master_set is called whenever the minor master is set.
- * master_drop is called whenever the minor master is dropped.
- */
-
- int (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
- bool from_open);
- void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv);
-
- int (*debugfs_init)(struct drm_minor *minor);
- void (*debugfs_cleanup)(struct drm_minor *minor);
-
- /**
- * @gem_free_object: deconstructor for drm_gem_objects
- *
- * This is deprecated and should not be used by new drivers. Use
- * @gem_free_object_unlocked instead.
- */
- void (*gem_free_object) (struct drm_gem_object *obj);
-
- /**
- * @gem_free_object_unlocked: deconstructor for drm_gem_objects
- *
- * This is for drivers which are not encumbered with dev->struct_mutex
- * legacy locking schemes. Use this hook instead of @gem_free_object.
- */
- void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
-
- int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
- void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
-
- /**
- * Hook for allocating the GEM object struct, for use by core
- * helpers.
- */
- struct drm_gem_object *(*gem_create_object)(struct drm_device *dev,
- size_t size);
-
- /* prime: */
- /* export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */
- int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
- uint32_t handle, uint32_t flags, int *prime_fd);
- /* import fd -> handle (see drm_gem_prime_fd_to_handle() helper) */
- int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
- int prime_fd, uint32_t *handle);
- /* export GEM -> dmabuf */
- struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
- struct drm_gem_object *obj, int flags);
- /* import dmabuf -> GEM */
- struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
- struct dma_buf *dma_buf);
- /* low-level interface used by drm_gem_prime_{import,export} */
- int (*gem_prime_pin)(struct drm_gem_object *obj);
- void (*gem_prime_unpin)(struct drm_gem_object *obj);
- struct reservation_object * (*gem_prime_res_obj)(
- struct drm_gem_object *obj);
- struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
- struct drm_gem_object *(*gem_prime_import_sg_table)(
- struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sgt);
- void *(*gem_prime_vmap)(struct drm_gem_object *obj);
- void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr);
- int (*gem_prime_mmap)(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
-
- /* vga arb irq handler */
- void (*vgaarb_irq)(struct drm_device *dev, bool state);
-
- /* dumb alloc support */
- int (*dumb_create)(struct drm_file *file_priv,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args);
- int (*dumb_map_offset)(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle,
- uint64_t *offset);
- int (*dumb_destroy)(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle);
-
- /* Driver private ops for this object */
- const struct vm_operations_struct *gem_vm_ops;
-
- int major;
- int minor;
- int patchlevel;
- char *name;
- char *desc;
- char *date;
-
- u32 driver_features;
- int dev_priv_size;
- const struct drm_ioctl_desc *ioctls;
- int num_ioctls;
- const struct file_operations *fops;
-
- /* List of devices hanging off this driver with stealth attach. */
- struct list_head legacy_dev_list;
-};
-
enum drm_minor_type {
DRM_MINOR_PRIMARY,
DRM_MINOR_CONTROL,
@@ -941,8 +683,13 @@ static inline bool drm_is_primary_client(const struct drm_file *file_priv)
extern int drm_ioctl_permit(u32 flags, struct drm_file *file_priv);
extern long drm_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
+#ifdef CONFIG_COMPAT
extern long drm_compat_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
+#else
+/* Let drm_compat_ioctl be assigned to .compat_ioctl unconditionally */
+#define drm_compat_ioctl NULL
+#endif
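
With the !CONFIG_COMPAT stub above, drivers can drop their own #ifdef CONFIG_COMPAT around .compat_ioctl and assign drm_compat_ioctl unconditionally; on kernels without compat support the field simply ends up NULL. A hedged sketch of the resulting file_operations for a typical GEM driver (the foo_ prefix is a placeholder, not something introduced by this patch):

static const struct file_operations foo_drm_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.compat_ioctl	= drm_compat_ioctl,	/* NULL when !CONFIG_COMPAT */
	.poll		= drm_poll,
	.read		= drm_read,
	.mmap		= drm_gem_mmap,
	.llseek		= noop_llseek,
};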
extern bool drm_ioctl_flags(unsigned int nr, unsigned int *flags);
/* File Operations (drm_fops.c) */
@@ -980,15 +727,6 @@ void drm_clflush_virt_range(void *addr, unsigned long length);
* DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
*/
-/* Modesetting support */
-extern void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe);
-extern void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe);
-
-/* drm_drv.c */
-void drm_put_dev(struct drm_device *dev);
-void drm_unplug_dev(struct drm_device *dev);
-extern unsigned int drm_debug;
-
/* Debugfs support */
#if defined(CONFIG_DEBUG_FS)
extern int drm_debugfs_create_files(const struct drm_info_list *files,
@@ -1041,18 +779,6 @@ extern void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah);
extern void drm_sysfs_hotplug_event(struct drm_device *dev);
-struct drm_device *drm_dev_alloc(struct drm_driver *driver,
- struct device *parent);
-int drm_dev_init(struct drm_device *dev,
- struct drm_driver *driver,
- struct device *parent);
-void drm_dev_ref(struct drm_device *dev);
-void drm_dev_unref(struct drm_device *dev);
-int drm_dev_register(struct drm_device *dev, unsigned long flags);
-void drm_dev_unregister(struct drm_device *dev);
-
-struct drm_minor *drm_minor_acquire(unsigned int minor_id);
-void drm_minor_release(struct drm_minor *minor);
/*@}*/
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index fc8af53b18aa..c0eaec70a203 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -144,6 +144,7 @@ struct __drm_crtcs_state {
struct drm_crtc *ptr;
struct drm_crtc_state *state;
struct drm_crtc_commit *commit;
+ s64 __user *out_fence_ptr;
};
struct __drm_connnectors_state {
@@ -345,6 +346,8 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
struct drm_crtc *crtc);
void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
struct drm_framebuffer *fb);
+void drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
+ struct dma_fence *fence);
int __must_check
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
struct drm_crtc *crtc);
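
drm_atomic_set_fence_for_plane(), declared in the hunk above, is how an explicit in-fence gets attached to a plane update so the commit machinery waits on it before programming the flip. A minimal, hedged sketch of a driver-side call site; how the dma_fence is obtained is driver specific and only assumed here:

/* Illustrative only: hand an in-fence over to a plane update. */
static void foo_plane_attach_fence(struct drm_plane_state *new_state,
				   struct dma_fence *fence)
{
	/* Consumes the fence reference; the atomic helpers will wait on it. */
	drm_atomic_set_fence_for_plane(new_state, fence);
}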
@@ -364,6 +367,13 @@ int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
int __must_check drm_atomic_commit(struct drm_atomic_state *state);
int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);
+void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
+
+#ifdef CONFIG_DEBUG_FS
+struct drm_minor;
+int drm_atomic_debugfs_init(struct drm_minor *minor);
+#endif
+
#define for_each_connector_in_state(__state, connector, connector_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->num_connector && \
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index ac9d7d8e0e43..34f9741ebb5b 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -37,6 +37,7 @@ struct drm_crtc;
struct drm_encoder;
struct drm_property;
struct drm_property_blob;
+struct drm_printer;
struct edid;
enum drm_connector_force {
@@ -481,6 +482,18 @@ struct drm_connector_funcs {
const struct drm_connector_state *state,
struct drm_property *property,
uint64_t *val);
+
+ /**
+ * @atomic_print_state:
+ *
+ * If driver subclasses struct &drm_connector_state, it should implement
+ * this optional hook for printing additional driver specific state.
+ *
+ * Do not call this directly, use drm_atomic_connector_print_state()
+ * instead.
+ */
+ void (*atomic_print_state)(struct drm_printer *p,
+ const struct drm_connector_state *state);
};
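
For the new @atomic_print_state hook above, a driver that subclasses struct drm_connector_state prints only its private fields through the supplied drm_printer, since the core state is dumped by the caller. A hedged sketch; foo_connector_state and its self_refresh member are invented for illustration:

struct foo_connector_state {
	struct drm_connector_state base;
	bool self_refresh;			/* driver-private example */
};

static void foo_connector_atomic_print_state(struct drm_printer *p,
					const struct drm_connector_state *state)
{
	const struct foo_connector_state *foo_state =
		container_of(state, const struct foo_connector_state, base);

	drm_printf(p, "\tself_refresh=%d\n", foo_state->self_refresh);
}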
/* mode specified on the command line */
@@ -762,6 +775,30 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
const struct edid *edid);
/**
+ * struct drm_tile_group - Tile group metadata
+ * @refcount: reference count
+ * @dev: DRM device
+ * @id: tile group id exposed to userspace
+ * @group_data: Sink-private data identifying this group
+ *
+ * @group_data corresponds to displayid vend/prod/serial for external screens
+ * with an EDID.
+ */
+struct drm_tile_group {
+ struct kref refcount;
+ struct drm_device *dev;
+ int id;
+ u8 group_data[8];
+};
+
+struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
+ char topology[8]);
+struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
+ char topology[8]);
+void drm_mode_put_tile_group(struct drm_device *dev,
+ struct drm_tile_group *tg);
+
+/**
* drm_for_each_connector - iterate over all connectors
* @connector: the loop cursor
* @dev: the DRM device
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index fa1aa214c8ea..946672f97e1e 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -28,7 +28,6 @@
#include <linux/i2c.h>
#include <linux/spinlock.h>
#include <linux/types.h>
-#include <linux/idr.h>
#include <linux/fb.h>
#include <linux/hdmi.h>
#include <linux/media-bus-format.h>
@@ -48,11 +47,13 @@
#include <drm/drm_blend.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_debugfs_crc.h>
+#include <drm/drm_mode_config.h>
struct drm_device;
struct drm_mode_set;
struct drm_file;
struct drm_clip_rect;
+struct drm_printer;
struct device_node;
struct dma_fence;
struct edid;
@@ -66,14 +67,6 @@ static inline uint64_t I642U64(int64_t val)
return (uint64_t)*((uint64_t *)&val);
}
-/* data corresponds to displayid vend/prod/serial */
-struct drm_tile_group {
- struct kref refcount;
- struct drm_device *dev;
- int id;
- u8 group_data[8];
-};
-
struct drm_crtc;
struct drm_encoder;
struct drm_pending_vblank_event;
@@ -594,6 +587,18 @@ struct drm_crtc_funcs {
*/
int (*set_crc_source)(struct drm_crtc *crtc, const char *source,
size_t *values_cnt);
+
+ /**
+ * @atomic_print_state:
+ *
+ * If driver subclasses struct &drm_crtc_state, it should implement
+ * this optional hook for printing additional driver specific state.
+ *
+ * Do not call this directly, use drm_atomic_crtc_print_state()
+ * instead.
+ */
+ void (*atomic_print_state)(struct drm_printer *p,
+ const struct drm_crtc_state *state);
};
/**
@@ -726,6 +731,35 @@ struct drm_crtc {
*/
struct drm_crtc_crc crc;
#endif
+
+ /**
+ * @fence_context:
+ *
+ * timeline context used for fence operations.
+ */
+ unsigned int fence_context;
+
+ /**
+ * @fence_lock:
+ *
+ * spinlock to protect the fences in the fence_context.
+ */
+ spinlock_t fence_lock;
+
+ /**
+ * @fence_seqno:
+ *
+ * Seqno variable used as monotonic counter for the fences
+ * created on the CRTC's timeline.
+ */
+ unsigned long fence_seqno;
+
+ /**
+ * @timeline_name:
+ *
+ * The name of the CRTC's fence timeline.
+ */
+ char timeline_name[32];
};
/**
@@ -755,626 +789,16 @@ struct drm_mode_set {
size_t num_connectors;
};
-/**
- * struct drm_mode_config_funcs - basic driver provided mode setting functions
- *
- * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
- * involve drivers.
- */
-struct drm_mode_config_funcs {
- /**
- * @fb_create:
- *
- * Create a new framebuffer object. The core does basic checks on the
- * requested metadata, but most of that is left to the driver. See
- * struct &drm_mode_fb_cmd2 for details.
- *
- * If the parameters are deemed valid and the backing storage objects in
- * the underlying memory manager all exist, then the driver allocates
- * a new &drm_framebuffer structure, subclassed to contain
- * driver-specific information (like the internal native buffer object
- * references). It also needs to fill out all relevant metadata, which
- * should be done by calling drm_helper_mode_fill_fb_struct().
- *
- * The initialization is finalized by calling drm_framebuffer_init(),
- * which registers the framebuffer and makes it accessible to other
- * threads.
- *
- * RETURNS:
- *
- * A new framebuffer with an initial reference count of 1 or a negative
- * error code encoded with ERR_PTR().
- */
- struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
- struct drm_file *file_priv,
- const struct drm_mode_fb_cmd2 *mode_cmd);
-
- /**
- * @output_poll_changed:
- *
- * Callback used by helpers to inform the driver of output configuration
- * changes.
- *
- * Drivers implementing fbdev emulation with the helpers can call
- * drm_fb_helper_hotplug_changed from this hook to inform the fbdev
- * helper of output changes.
- *
- * FIXME:
- *
- * Except that there's no vtable for device-level helper callbacks
- * there's no reason this is a core function.
- */
- void (*output_poll_changed)(struct drm_device *dev);
-
- /**
- * @atomic_check:
- *
- * This is the only hook to validate an atomic modeset update. This
- * function must reject any modeset and state changes which the hardware
- * or driver doesn't support. This includes but is of course not limited
- * to:
- *
- * - Checking that the modes, framebuffers, scaling and placement
- * requirements and so on are within the limits of the hardware.
- *
- * - Checking that any hidden shared resources are not oversubscribed.
- * This can be shared PLLs, shared lanes, overall memory bandwidth,
- * display fifo space (where shared between planes or maybe even
- * CRTCs).
- *
- * - Checking that virtualized resources exported to userspace are not
- * oversubscribed. For various reasons it can make sense to expose
- * more planes, crtcs or encoders than which are physically there. One
- * example is dual-pipe operations (which generally should be hidden
- * from userspace if when lockstepped in hardware, exposed otherwise),
- * where a plane might need 1 hardware plane (if it's just on one
- * pipe), 2 hardware planes (when it spans both pipes) or maybe even
- * shared a hardware plane with a 2nd plane (if there's a compatible
- * plane requested on the area handled by the other pipe).
- *
- * - Check that any transitional state is possible and that if
- * requested, the update can indeed be done in the vblank period
- * without temporarily disabling some functions.
- *
- * - Check any other constraints the driver or hardware might have.
- *
- * - This callback also needs to correctly fill out the &drm_crtc_state
- * in this update to make sure that drm_atomic_crtc_needs_modeset()
- * reflects the nature of the possible update and returns true if and
- * only if the update cannot be applied without tearing within one
- * vblank on that CRTC. The core uses that information to reject
- * updates which require a full modeset (i.e. blanking the screen, or
- * at least pausing updates for a substantial amount of time) if
- * userspace has disallowed that in its request.
- *
- * - The driver also does not need to repeat basic input validation
- * like done for the corresponding legacy entry points. The core does
- * that before calling this hook.
- *
- * See the documentation of @atomic_commit for an exhaustive list of
- * error conditions which don't have to be checked at the
- * ->atomic_check() stage?
- *
- * See the documentation for struct &drm_atomic_state for how exactly
- * an atomic modeset update is described.
- *
- * Drivers using the atomic helpers can implement this hook using
- * drm_atomic_helper_check(), or one of the exported sub-functions of
- * it.
- *
- * RETURNS:
- *
- * 0 on success or one of the below negative error codes:
- *
- * - -EINVAL, if any of the above constraints are violated.
- *
- * - -EDEADLK, when returned from an attempt to acquire an additional
- * &drm_modeset_lock through drm_modeset_lock().
- *
- * - -ENOMEM, if allocating additional state sub-structures failed due
- * to lack of memory.
- *
- * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted.
- * This can either be due to a pending signal, or because the driver
- * needs to completely bail out to recover from an exceptional
- * situation like a GPU hang. From a userspace point all errors are
- * treated equally.
- */
- int (*atomic_check)(struct drm_device *dev,
- struct drm_atomic_state *state);
-
- /**
- * @atomic_commit:
- *
- * This is the only hook to commit an atomic modeset update. The core
- * guarantees that @atomic_check has been called successfully before
- * calling this function, and that nothing has been changed in the
- * interim.
- *
- * See the documentation for struct &drm_atomic_state for how exactly
- * an atomic modeset update is described.
- *
- * Drivers using the atomic helpers can implement this hook using
- * drm_atomic_helper_commit(), or one of the exported sub-functions of
- * it.
- *
- * Nonblocking commits (as indicated with the nonblock parameter) must
- * do any preparatory work which might result in an unsuccessful commit
- * in the context of this callback. The only exceptions are hardware
- * errors resulting in -EIO. But even in that case the driver must
- * ensure that the display pipe is at least running, to avoid
- * compositors crashing when pageflips don't work. Anything else,
- * specifically committing the update to the hardware, should be done
- * without blocking the caller. For updates which do not require a
- * modeset this must be guaranteed.
- *
- * The driver must wait for any pending rendering to the new
- * framebuffers to complete before executing the flip. It should also
- * wait for any pending rendering from other drivers if the underlying
- * buffer is a shared dma-buf. Nonblocking commits must not wait for
- * rendering in the context of this callback.
- *
- * An application can request to be notified when the atomic commit has
- * completed. These events are per-CRTC and can be distinguished by the
- * CRTC index supplied in &drm_event to userspace.
- *
- * The drm core will supply a struct &drm_event in the event
- * member of each CRTC's &drm_crtc_state structure. See the
- * documentation for &drm_crtc_state for more details about the precise
- * semantics of this event.
- *
- * NOTE:
- *
- * Drivers are not allowed to shut down any display pipe successfully
- * enabled through an atomic commit on their own. Doing so can result in
- * compositors crashing if a page flip is suddenly rejected because the
- * pipe is off.
- *
- * RETURNS:
- *
- * 0 on success or one of the below negative error codes:
- *
- * - -EBUSY, if a nonblocking updated is requested and there is
- * an earlier updated pending. Drivers are allowed to support a queue
- * of outstanding updates, but currently no driver supports that.
- * Note that drivers must wait for preceding updates to complete if a
- * synchronous update is requested, they are not allowed to fail the
- * commit in that case.
- *
- * - -ENOMEM, if the driver failed to allocate memory. Specifically
- * this can happen when trying to pin framebuffers, which must only
- * be done when committing the state.
- *
- * - -ENOSPC, as a refinement of the more generic -ENOMEM to indicate
- * that the driver has run out of vram, iommu space or similar GPU
- * address space needed for framebuffer.
- *
- * - -EIO, if the hardware completely died.
- *
- * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted.
- * This can either be due to a pending signal, or because the driver
- * needs to completely bail out to recover from an exceptional
- * situation like a GPU hang. From a userspace point of view all errors are
- * treated equally.
- *
- * This list is exhaustive. Specifically this hook is not allowed to
- * return -EINVAL (any invalid requests should be caught in
- * @atomic_check) or -EDEADLK (this function must not acquire
- * additional modeset locks).
- */
- int (*atomic_commit)(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock);
-
- /**
- * @atomic_state_alloc:
- *
- * This optional hook can be used by drivers that want to subclass struct
- * &drm_atomic_state to be able to track their own driver-private global
- * state easily. If this hook is implemented, drivers must also
- * implement @atomic_state_clear and @atomic_state_free.
- *
- * RETURNS:
- *
- * A new &drm_atomic_state on success or NULL on failure.
- */
- struct drm_atomic_state *(*atomic_state_alloc)(struct drm_device *dev);
-
- /**
- * @atomic_state_clear:
- *
- * This hook must clear any driver private state duplicated into the
- * passed-in &drm_atomic_state. This hook is called when the caller
- * encountered a &drm_modeset_lock deadlock and needs to drop all
- * already acquired locks as part of the deadlock avoidance dance
- * implemented in drm_modeset_lock_backoff().
- *
- * Any duplicated state must be invalidated since a concurrent atomic
- * update might change it, and the drm atomic interfaces always apply
- * updates as relative changes to the current state.
- *
- * Drivers that implement this must call drm_atomic_state_default_clear()
- * to clear common state.
- */
- void (*atomic_state_clear)(struct drm_atomic_state *state);
-
- /**
- * @atomic_state_free:
- *
- * This hook needs driver private resources and the &drm_atomic_state
- * itself. Note that the core first calls drm_atomic_state_clear() to
- * avoid code duplicate between the clear and free hooks.
- *
- * Drivers that implement this must call drm_atomic_state_default_free()
- * to release common resources.
- */
- void (*atomic_state_free)(struct drm_atomic_state *state);
-};
-
-/**
- * struct drm_mode_config - Mode configuration control structure
- * @mutex: mutex protecting KMS related lists and structures
- * @connection_mutex: ww mutex protecting connector state and routing
- * @acquire_ctx: global implicit acquire context used by atomic drivers for
- * legacy IOCTLs
- * @fb_lock: mutex to protect fb state and lists
- * @num_fb: number of fbs available
- * @fb_list: list of framebuffers available
- * @num_encoder: number of encoders on this device
- * @encoder_list: list of encoder objects
- * @num_overlay_plane: number of overlay planes on this device
- * @num_total_plane: number of universal (i.e. with primary/curso) planes on this device
- * @plane_list: list of plane objects
- * @num_crtc: number of CRTCs on this device
- * @crtc_list: list of CRTC objects
- * @property_list: list of property objects
- * @min_width: minimum pixel width on this device
- * @min_height: minimum pixel height on this device
- * @max_width: maximum pixel width on this device
- * @max_height: maximum pixel height on this device
- * @funcs: core driver provided mode setting functions
- * @fb_base: base address of the framebuffer
- * @poll_enabled: track polling support for this device
- * @poll_running: track polling status for this device
- * @delayed_event: track delayed poll uevent deliver for this device
- * @output_poll_work: delayed work for polling in process context
- * @property_blob_list: list of all the blob property objects
- * @blob_lock: mutex for blob property allocation and management
- * @*_property: core property tracking
- * @preferred_depth: preferred RBG pixel depth, used by fb helpers
- * @prefer_shadow: hint to userspace to prefer shadow-fb rendering
- * @cursor_width: hint to userspace for max cursor width
- * @cursor_height: hint to userspace for max cursor height
- * @helper_private: mid-layer private data
- *
- * Core mode resource tracking structure. All CRTC, encoders, and connectors
- * enumerated by the driver are added here, as are global properties. Some
- * global restrictions are also here, e.g. dimension restrictions.
- */
-struct drm_mode_config {
- struct mutex mutex; /* protects configuration (mode lists etc.) */
- struct drm_modeset_lock connection_mutex; /* protects connector->encoder and encoder->crtc links */
- struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */
-
- /**
- * @idr_mutex:
- *
- * Mutex for KMS ID allocation and management. Protects both @crtc_idr
- * and @tile_idr.
- */
- struct mutex idr_mutex;
-
- /**
- * @crtc_idr:
- *
- * Main KMS ID tracking object. Use this idr for all IDs, fb, crtc,
- * connector, modes - just makes life easier to have only one.
- */
- struct idr crtc_idr;
-
- /**
- * @tile_idr:
- *
- * Use this idr for allocating new IDs for tiled sinks like use in some
- * high-res DP MST screens.
- */
- struct idr tile_idr;
-
- struct mutex fb_lock; /* proctects global and per-file fb lists */
- int num_fb;
- struct list_head fb_list;
-
- /**
- * @num_connector: Number of connectors on this device.
- */
- int num_connector;
- /**
- * @connector_ida: ID allocator for connector indices.
- */
- struct ida connector_ida;
- /**
- * @connector_list: List of connector objects.
- */
- struct list_head connector_list;
- int num_encoder;
- struct list_head encoder_list;
-
- /*
- * Track # of overlay planes separately from # of total planes. By
- * default we only advertise overlay planes to userspace; if userspace
- * sets the "universal plane" capability bit, we'll go ahead and
- * expose all planes.
- */
- int num_overlay_plane;
- int num_total_plane;
- struct list_head plane_list;
-
- int num_crtc;
- struct list_head crtc_list;
-
- struct list_head property_list;
-
- int min_width, min_height;
- int max_width, max_height;
- const struct drm_mode_config_funcs *funcs;
- resource_size_t fb_base;
-
- /* output poll support */
- bool poll_enabled;
- bool poll_running;
- bool delayed_event;
- struct delayed_work output_poll_work;
-
- struct mutex blob_lock;
-
- /* pointers to standard properties */
- struct list_head property_blob_list;
- /**
- * @edid_property: Default connector property to hold the EDID of the
- * currently connected sink, if any.
- */
- struct drm_property *edid_property;
- /**
- * @dpms_property: Default connector property to control the
- * connector's DPMS state.
- */
- struct drm_property *dpms_property;
- /**
- * @path_property: Default connector property to hold the DP MST path
- * for the port.
- */
- struct drm_property *path_property;
- /**
- * @tile_property: Default connector property to store the tile
- * position of a tiled screen, for sinks which need to be driven with
- * multiple CRTCs.
- */
- struct drm_property *tile_property;
- /**
- * @plane_type_property: Default plane property to differentiate
- * CURSOR, PRIMARY and OVERLAY legacy uses of planes.
- */
- struct drm_property *plane_type_property;
- /**
- * @prop_src_x: Default atomic plane property for the plane source
- * position in the connected &drm_framebuffer.
- */
- struct drm_property *prop_src_x;
- /**
- * @prop_src_y: Default atomic plane property for the plane source
- * position in the connected &drm_framebuffer.
- */
- struct drm_property *prop_src_y;
- /**
- * @prop_src_w: Default atomic plane property for the plane source
- * position in the connected &drm_framebuffer.
- */
- struct drm_property *prop_src_w;
- /**
- * @prop_src_h: Default atomic plane property for the plane source
- * position in the connected &drm_framebuffer.
- */
- struct drm_property *prop_src_h;
- /**
- * @prop_crtc_x: Default atomic plane property for the plane destination
- * position in the &drm_crtc is is being shown on.
- */
- struct drm_property *prop_crtc_x;
- /**
- * @prop_crtc_y: Default atomic plane property for the plane destination
- * position in the &drm_crtc is is being shown on.
- */
- struct drm_property *prop_crtc_y;
- /**
- * @prop_crtc_w: Default atomic plane property for the plane destination
- * position in the &drm_crtc is is being shown on.
- */
- struct drm_property *prop_crtc_w;
- /**
- * @prop_crtc_h: Default atomic plane property for the plane destination
- * position in the &drm_crtc is is being shown on.
- */
- struct drm_property *prop_crtc_h;
- /**
- * @prop_fb_id: Default atomic plane property to specify the
- * &drm_framebuffer.
- */
- struct drm_property *prop_fb_id;
- /**
- * @prop_crtc_id: Default atomic plane property to specify the
- * &drm_crtc.
- */
- struct drm_property *prop_crtc_id;
- /**
- * @prop_active: Default atomic CRTC property to control the active
- * state, which is the simplified implementation for DPMS in atomic
- * drivers.
- */
- struct drm_property *prop_active;
- /**
- * @prop_mode_id: Default atomic CRTC property to set the mode for a
- * CRTC. A 0 mode implies that the CRTC is entirely disabled - all
- * connectors must be of and active must be set to disabled, too.
- */
- struct drm_property *prop_mode_id;
-
- /**
- * @dvi_i_subconnector_property: Optional DVI-I property to
- * differentiate between analog or digital mode.
- */
- struct drm_property *dvi_i_subconnector_property;
- /**
- * @dvi_i_select_subconnector_property: Optional DVI-I property to
- * select between analog or digital mode.
- */
- struct drm_property *dvi_i_select_subconnector_property;
-
- /**
- * @tv_subconnector_property: Optional TV property to differentiate
- * between different TV connector types.
- */
- struct drm_property *tv_subconnector_property;
- /**
- * @tv_select_subconnector_property: Optional TV property to select
- * between different TV connector types.
- */
- struct drm_property *tv_select_subconnector_property;
- /**
- * @tv_mode_property: Optional TV property to select
- * the output TV mode.
- */
- struct drm_property *tv_mode_property;
- /**
- * @tv_left_margin_property: Optional TV property to set the left
- * margin.
- */
- struct drm_property *tv_left_margin_property;
- /**
- * @tv_right_margin_property: Optional TV property to set the right
- * margin.
- */
- struct drm_property *tv_right_margin_property;
- /**
- * @tv_top_margin_property: Optional TV property to set the right
- * margin.
- */
- struct drm_property *tv_top_margin_property;
- /**
- * @tv_bottom_margin_property: Optional TV property to set the right
- * margin.
- */
- struct drm_property *tv_bottom_margin_property;
- /**
- * @tv_brightness_property: Optional TV property to set the
- * brightness.
- */
- struct drm_property *tv_brightness_property;
- /**
- * @tv_contrast_property: Optional TV property to set the
- * contrast.
- */
- struct drm_property *tv_contrast_property;
- /**
- * @tv_flicker_reduction_property: Optional TV property to control the
- * flicker reduction mode.
- */
- struct drm_property *tv_flicker_reduction_property;
- /**
- * @tv_overscan_property: Optional TV property to control the overscan
- * setting.
- */
- struct drm_property *tv_overscan_property;
- /**
- * @tv_saturation_property: Optional TV property to set the
- * saturation.
- */
- struct drm_property *tv_saturation_property;
- /**
- * @tv_hue_property: Optional TV property to set the hue.
- */
- struct drm_property *tv_hue_property;
-
- /**
- * @scaling_mode_property: Optional connector property to control the
- * upscaling, mostly used for built-in panels.
- */
- struct drm_property *scaling_mode_property;
- /**
- * @aspect_ratio_property: Optional connector property to control the
- * HDMI infoframe aspect ratio setting.
- */
- struct drm_property *aspect_ratio_property;
- /**
- * @degamma_lut_property: Optional CRTC property to set the LUT used to
- * convert the framebuffer's colors to linear gamma.
- */
- struct drm_property *degamma_lut_property;
- /**
- * @degamma_lut_size_property: Optional CRTC property for the size of
- * the degamma LUT as supported by the driver (read-only).
- */
- struct drm_property *degamma_lut_size_property;
- /**
- * @ctm_property: Optional CRTC property to set the
- * matrix used to convert colors after the lookup in the
- * degamma LUT.
- */
- struct drm_property *ctm_property;
- /**
- * @gamma_lut_property: Optional CRTC property to set the LUT used to
- * convert the colors, after the CTM matrix, to the gamma space of the
- * connected screen.
- */
- struct drm_property *gamma_lut_property;
- /**
- * @gamma_lut_size_property: Optional CRTC property for the size of the
- * gamma LUT as supported by the driver (read-only).
- */
- struct drm_property *gamma_lut_size_property;
-
- /**
- * @suggested_x_property: Optional connector property with a hint for
- * the position of the output on the host's screen.
- */
- struct drm_property *suggested_x_property;
- /**
- * @suggested_y_property: Optional connector property with a hint for
- * the position of the output on the host's screen.
- */
- struct drm_property *suggested_y_property;
-
- /* dumb ioctl parameters */
- uint32_t preferred_depth, prefer_shadow;
-
- /**
- * @async_page_flip: Does this device support async flips on the primary
- * plane?
- */
- bool async_page_flip;
-
- /**
- * @allow_fb_modifiers:
- *
- * Whether the driver supports fb modifiers in the ADDFB2.1 ioctl call.
- */
- bool allow_fb_modifiers;
-
- /* cursor size */
- uint32_t cursor_width, cursor_height;
-
- struct drm_mode_config_helper_funcs *helper_private;
-};
-
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
-extern __printf(6, 7)
+__printf(6, 7)
int drm_crtc_init_with_planes(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_plane *primary,
struct drm_plane *cursor,
const struct drm_crtc_funcs *funcs,
const char *name, ...);
-extern void drm_crtc_cleanup(struct drm_crtc *crtc);
+void drm_crtc_cleanup(struct drm_crtc *crtc);
/**
* drm_crtc_index - find the index of a registered CRTC
@@ -1400,23 +824,12 @@ static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc)
return 1 << drm_crtc_index(crtc);
}
-extern void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
- int *hdisplay, int *vdisplay);
-extern int drm_crtc_force_disable(struct drm_crtc *crtc);
-extern int drm_crtc_force_disable_all(struct drm_device *dev);
-
-extern void drm_mode_config_init(struct drm_device *dev);
-extern void drm_mode_config_reset(struct drm_device *dev);
-extern void drm_mode_config_cleanup(struct drm_device *dev);
-
-extern int drm_mode_set_config_internal(struct drm_mode_set *set);
+void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
+ int *hdisplay, int *vdisplay);
+int drm_crtc_force_disable(struct drm_crtc *crtc);
+int drm_crtc_force_disable_all(struct drm_device *dev);
-extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
- char topology[8]);
-extern struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
- char topology[8]);
-extern void drm_mode_put_tile_group(struct drm_device *dev,
- struct drm_tile_group *tg);
+int drm_mode_set_config_internal(struct drm_mode_set *set);
/* Helpers */
static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
new file mode 100644
index 000000000000..aad8bbacd1f0
--- /dev/null
+++ b/include/drm/drm_drv.h
@@ -0,0 +1,430 @@
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009-2010, Code Aurora Forum.
+ * Copyright 2016 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_DRV_H_
+#define _DRM_DRV_H_
+
+#include <linux/list.h>
+#include <linux/irqreturn.h>
+
+struct drm_device;
+struct drm_file;
+struct drm_gem_object;
+struct drm_master;
+struct drm_minor;
+struct dma_buf_attachment;
+struct drm_display_mode;
+struct drm_mode_create_dumb;
+
+/* driver capabilities and requirements mask */
+#define DRIVER_USE_AGP 0x1
+#define DRIVER_LEGACY 0x2
+#define DRIVER_PCI_DMA 0x8
+#define DRIVER_SG 0x10
+#define DRIVER_HAVE_DMA 0x20
+#define DRIVER_HAVE_IRQ 0x40
+#define DRIVER_IRQ_SHARED 0x80
+#define DRIVER_GEM 0x1000
+#define DRIVER_MODESET 0x2000
+#define DRIVER_PRIME 0x4000
+#define DRIVER_RENDER 0x8000
+#define DRIVER_ATOMIC 0x10000
+#define DRIVER_KMS_LEGACY_CONTEXT 0x20000
+
+/**
+ * struct drm_driver - DRM driver structure
+ *
+ * This structure represents the common code for a family of cards. There will
+ * be one drm_device for each card present in this family. It contains lots of
+ * vfunc entries, and a pile of those probably should be moved to more
+ * appropriate places like &drm_mode_config_funcs or into a new operations
+ * structure for GEM drivers.
+ */
+struct drm_driver {
+ int (*load) (struct drm_device *, unsigned long flags);
+ int (*firstopen) (struct drm_device *);
+ int (*open) (struct drm_device *, struct drm_file *);
+ void (*preclose) (struct drm_device *, struct drm_file *file_priv);
+ void (*postclose) (struct drm_device *, struct drm_file *);
+ void (*lastclose) (struct drm_device *);
+ int (*unload) (struct drm_device *);
+ int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
+ int (*dma_quiescent) (struct drm_device *);
+ int (*context_dtor) (struct drm_device *dev, int context);
+ int (*set_busid)(struct drm_device *dev, struct drm_master *master);
+
+ /**
+ * @get_vblank_counter:
+ *
+ * Driver callback for fetching a raw hardware vblank counter for the
+ * CRTC specified with the pipe argument. If a device doesn't have a
+ * hardware counter, the driver can simply use
+ * drm_vblank_no_hw_counter() function. The DRM core will account for
+ * missed vblank events while interrupts were disabled based on system
+ * timestamps.
+ *
+ * Wraparound handling and loss of events due to modesetting are dealt
+ * with in the DRM core code, as long as drivers call
+ * drm_crtc_vblank_off() and drm_crtc_vblank_on() when disabling or
+ * enabling a CRTC.
+ *
+ * Returns:
+ *
+ * Raw vblank counter value.
+ */
+ u32 (*get_vblank_counter) (struct drm_device *dev, unsigned int pipe);
+
+ /**
+ * @enable_vblank:
+ *
+ * Enable vblank interrupts for the CRTC specified with the pipe
+ * argument.
+ *
+ * Returns:
+ *
+ * Zero on success, appropriate errno if the given @crtc's vblank
+ * interrupt cannot be enabled.
+ */
+ int (*enable_vblank) (struct drm_device *dev, unsigned int pipe);
+
+ /**
+ * @disable_vblank:
+ *
+ * Disable vblank interrupts for the CRTC specified with the pipe
+ * argument.
+ */
+ void (*disable_vblank) (struct drm_device *dev, unsigned int pipe);
+
+ /**
+ * @device_is_agp:
+ *
+ * Called by drm_device_is_agp(). Typically used to determine if a card
+ * is really attached to AGP or not.
+ *
+ * Returns:
+ *
+ * One of three values is returned depending on whether or not the
+ * card is absolutely not AGP (return of 0), absolutely is AGP
+ * (return of 1), or may or may not be AGP (return of 2).
+ */
+ int (*device_is_agp) (struct drm_device *dev);
+
+ /**
+ * @get_scanout_position:
+ *
+ * Called by vblank timestamping code.
+ *
+ * Returns the current display scanout position from a crtc, and an
+ * optional accurate ktime_get() timestamp of when position was
+ * measured. Note that this is a helper callback which is only used if a
+ * driver uses drm_calc_vbltimestamp_from_scanoutpos() for the
+ * @get_vblank_timestamp callback.
+ *
+ * Parameters:
+ *
+ * dev:
+ * DRM device.
+ * pipe:
+ * Id of the crtc to query.
+ * flags:
+ * Flags from the caller (DRM_CALLED_FROM_VBLIRQ or 0).
+ * vpos:
+ * Target location for current vertical scanout position.
+ * hpos:
+ * Target location for current horizontal scanout position.
+ * stime:
+ * Target location for timestamp taken immediately before
+ * scanout position query. Can be NULL to skip timestamp.
+ * etime:
+ * Target location for timestamp taken immediately after
+ * scanout position query. Can be NULL to skip timestamp.
+ * mode:
+ * Current display timings.
+ *
+ * Returns vpos as a positive number while in active scanout area.
+ * Returns vpos as a negative number inside vblank, counting the number
+ * of scanlines to go until end of vblank, e.g., -1 means "one scanline
+ * until start of active scanout / end of vblank."
+ *
+ * Returns:
+ *
+ * Flags, or'ed together as follows:
+ *
+ * DRM_SCANOUTPOS_VALID:
+ * Query successful.
+ * DRM_SCANOUTPOS_INVBL:
+ * Inside vblank.
+ * DRM_SCANOUTPOS_ACCURATE:
+ * Returned position is accurate. A lack of this flag means that
+ * returned position may be offset by a constant but unknown small
+ * number of scanlines wrt. real scanout position.
+ *
+ */
+ int (*get_scanout_position) (struct drm_device *dev, unsigned int pipe,
+ unsigned int flags, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
+
+ /**
+ * @get_vblank_timestamp:
+ *
+ * Called by drm_get_last_vbltimestamp(). Should return a precise
+ * timestamp when the most recent VBLANK interval ended or will end.
+ *
+ * Specifically, the timestamp in @vblank_time should correspond as
+ * closely as possible to the time when the first video scanline of
+ * the video frame after the end of VBLANK will start scanning out,
+ * the time immediately after end of the VBLANK interval. If the
+ * @crtc is currently inside VBLANK, this will be a time in the future.
+ * If the @crtc is currently scanning out a frame, this will be the
+ * past start time of the current scanout. This is meant to adhere
+ * to the OpenML OML_sync_control extension specification.
+ *
+ * Parameters:
+ *
+ * dev:
+ * DRM device handle.
+ * pipe:
+ * crtc for which timestamp should be returned.
+ * max_error:
+ * Maximum allowable timestamp error in nanoseconds.
+ * Implementation should strive to provide timestamp
+ * with an error of at most max_error nanoseconds.
+ * Returns true upper bound on error for timestamp.
+ * vblank_time:
+ * Target location for returned vblank timestamp.
+ * flags:
+ * 0 = Defaults, no special treatment needed.
+ * DRM_CALLED_FROM_VBLIRQ = Function is called from vblank
+ * irq handler. Some drivers need to apply some workarounds
+ * for gpu-specific vblank irq quirks if flag is set.
+ *
+ * Returns:
+ *
+ * Zero if timestamping isn't supported in current display mode or a
+ * negative number on failure. A positive status code on success,
+ * which describes how the vblank_time timestamp was computed.
+ */
+ int (*get_vblank_timestamp) (struct drm_device *dev, unsigned int pipe,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags);
+
+ /* these have to be filled in */
+
+ irqreturn_t(*irq_handler) (int irq, void *arg);
+ void (*irq_preinstall) (struct drm_device *dev);
+ int (*irq_postinstall) (struct drm_device *dev);
+ void (*irq_uninstall) (struct drm_device *dev);
+
+ /**
+ * @master_create:
+ *
+ * Called whenever a new master is created. Only used by vmwgfx.
+ */
+ int (*master_create)(struct drm_device *dev, struct drm_master *master);
+
+ /**
+ * @master_destroy:
+ *
+ * Called whenever a master is destroyed. Only used by vmwgfx.
+ */
+ void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
+
+ /**
+ * @master_set:
+ *
+ * Called whenever the minor master is set. Only used by vmwgfx.
+ */
+ int (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
+ bool from_open);
+ /**
+ * @master_drop:
+ *
+ * Called whenever the minor master is dropped. Only used by vmwgfx.
+ */
+ void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv);
+
+ int (*debugfs_init)(struct drm_minor *minor);
+ void (*debugfs_cleanup)(struct drm_minor *minor);
+
+ /**
+ * @gem_free_object: destructor for drm_gem_objects
+ *
+ * This is deprecated and should not be used by new drivers. Use
+ * @gem_free_object_unlocked instead.
+ */
+ void (*gem_free_object) (struct drm_gem_object *obj);
+
+ /**
+ * @gem_free_object_unlocked: destructor for drm_gem_objects
+ *
+ * This is for drivers which are not encumbered with dev->struct_mutex
+ * legacy locking schemes. Use this hook instead of @gem_free_object.
+ */
+ void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
+
+ int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
+ void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
+
+ /**
+ * Hook for allocating the GEM object struct, for use by core
+ * helpers.
+ */
+ struct drm_gem_object *(*gem_create_object)(struct drm_device *dev,
+ size_t size);
+
+ /* prime: */
+ /* export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */
+ int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
+ uint32_t handle, uint32_t flags, int *prime_fd);
+ /* import fd -> handle (see drm_gem_prime_fd_to_handle() helper) */
+ int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
+ int prime_fd, uint32_t *handle);
+ /* export GEM -> dmabuf */
+ struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags);
+ /* import dmabuf -> GEM */
+ struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+ /* low-level interface used by drm_gem_prime_{import,export} */
+ int (*gem_prime_pin)(struct drm_gem_object *obj);
+ void (*gem_prime_unpin)(struct drm_gem_object *obj);
+ struct reservation_object * (*gem_prime_res_obj)(
+ struct drm_gem_object *obj);
+ struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
+ struct drm_gem_object *(*gem_prime_import_sg_table)(
+ struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+ void *(*gem_prime_vmap)(struct drm_gem_object *obj);
+ void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr);
+ int (*gem_prime_mmap)(struct drm_gem_object *obj,
+ struct vm_area_struct *vma);
+
+ /* vga arb irq handler */
+ void (*vgaarb_irq)(struct drm_device *dev, bool state);
+
+ /**
+ * @dumb_create:
+ *
+ * This creates a new dumb buffer in the driver's backing storage manager (GEM,
+ * TTM or something else entirely) and returns the resulting buffer handle. This
+ * handle can then be wrapped up into a framebuffer modeset object.
+ *
+ * Note that userspace is not allowed to use such objects for render
+ * acceleration - drivers must create their own private ioctls for such a use
+ * case.
+ *
+ * Width, height and depth are specified in the &drm_mode_create_dumb
+ * argument. The callback needs to fill the handle, pitch and size for
+ * the created buffer.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ *
+ * Zero on success, negative errno on failure.
+ */
+ int (*dumb_create)(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+ /**
+ * @dumb_map_offset:
+ *
+ * Allocate an offset in the drm device node's address space to be able to
+ * memory map a dumb buffer. GEM-based drivers must use
+ * drm_gem_create_mmap_offset() to implement this.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ *
+ * Zero on success, negative errno on failure.
+ */
+ int (*dumb_map_offset)(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset);
+ /**
+ * @dumb_destroy:
+ *
+ * This destroys the userspace handle for the given dumb backing storage buffer.
+ * Since buffer objects must be reference counted in the kernel a buffer object
+ * won't be immediately freed if a framebuffer modeset object still uses it.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ *
+ * Zero on success, negative errno on failure.
+ */
+ int (*dumb_destroy)(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle);
+
+ /* Driver private ops for this object */
+ const struct vm_operations_struct *gem_vm_ops;
+
+ int major;
+ int minor;
+ int patchlevel;
+ char *name;
+ char *desc;
+ char *date;
+
+ u32 driver_features;
+ int dev_priv_size;
+ const struct drm_ioctl_desc *ioctls;
+ int num_ioctls;
+ const struct file_operations *fops;
+
+ /* List of devices hanging off this driver with stealth attach. */
+ struct list_head legacy_dev_list;
+};
+
+extern __printf(6, 7)
+void drm_dev_printk(const struct device *dev, const char *level,
+ unsigned int category, const char *function_name,
+ const char *prefix, const char *format, ...);
+extern __printf(3, 4)
+void drm_printk(const char *level, unsigned int category,
+ const char *format, ...);
+extern unsigned int drm_debug;
+
+int drm_dev_init(struct drm_device *dev,
+ struct drm_driver *driver,
+ struct device *parent);
+struct drm_device *drm_dev_alloc(struct drm_driver *driver,
+ struct device *parent);
+int drm_dev_register(struct drm_device *dev, unsigned long flags);
+void drm_dev_unregister(struct drm_device *dev);
+
+void drm_dev_ref(struct drm_device *dev);
+void drm_dev_unref(struct drm_device *dev);
+void drm_put_dev(struct drm_device *dev);
+void drm_unplug_dev(struct drm_device *dev);
+
+#endif
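For orientation, here is a minimal sketch (not part of this patch) of how a KMS driver might use the interfaces declared above. All "foo_" names are hypothetical placeholders, and drm_dev_alloc() is assumed to return an ERR_PTR on failure.

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <drm/drm_drv.h>

static const struct file_operations foo_fops = {
	.owner = THIS_MODULE,
	/* .open, .release, .unlocked_ioctl etc. as required by the driver */
};

static struct drm_driver foo_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.fops = &foo_fops,
	.name = "foo",
	.desc = "hypothetical example driver",
	.date = "20161117",
	.major = 1,
};

static int foo_bind(struct device *parent)
{
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&foo_driver, parent);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		drm_dev_unref(drm);

	return ret;
}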
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index f313211f8ed5..3b00f6480b83 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -12,6 +12,8 @@ struct drm_fb_helper;
struct drm_device;
struct drm_file;
struct drm_mode_fb_cmd2;
+struct drm_plane;
+struct drm_plane_state;
struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
unsigned int preferred_bpp, unsigned int num_crtc,
@@ -41,6 +43,9 @@ struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
unsigned int plane);
+int drm_fb_cma_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state);
+
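A hedged sketch of how a CMA-based driver might consume the new helper; the hook signature is assumed to match struct drm_plane_helper_funcs, and the remaining hooks are placeholders to be filled in by the driver.

#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_modeset_helper_vtables.h>

static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
	/* Wait for fences attached to the framebuffer before scanout. */
	.prepare_fb = drm_fb_cma_prepare_fb,
	/* .atomic_check and .atomic_update supplied by the driver. */
};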
#ifdef CONFIG_DEBUG_FS
struct seq_file;
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index ed8edfef75b2..975deedd593e 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -228,7 +228,9 @@ struct drm_fb_helper {
.fb_set_par = drm_fb_helper_set_par, \
.fb_setcmap = drm_fb_helper_setcmap, \
.fb_blank = drm_fb_helper_blank, \
- .fb_pan_display = drm_fb_helper_pan_display
+ .fb_pan_display = drm_fb_helper_pan_display, \
+ .fb_debug_enter = drm_fb_helper_debug_enter, \
+ .fb_debug_leave = drm_fb_helper_debug_leave
#ifdef CONFIG_DRM_FBDEV_EMULATION
void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index dc0aafab9ffd..fcc08da850c8 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -45,6 +45,14 @@ struct drm_format_info {
u8 vsub;
};
+/**
+ * struct drm_format_name_buf - name of a DRM format
+ * @str: string buffer containing the format name
+ */
+struct drm_format_name_buf {
+ char str[32];
+};
+
const struct drm_format_info *__drm_format_info(u32 format);
const struct drm_format_info *drm_format_info(u32 format);
uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
@@ -54,6 +62,6 @@ int drm_format_horz_chroma_subsampling(uint32_t format);
int drm_format_vert_chroma_subsampling(uint32_t format);
int drm_format_plane_width(int width, uint32_t format, int plane);
int drm_format_plane_height(int height, uint32_t format, int plane);
-char *drm_get_format_name(uint32_t format) __malloc;
+const char *drm_get_format_name(uint32_t format, struct drm_format_name_buf *buf);
#endif /* __DRM_FOURCC_H__ */
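With the reworked signature the caller keeps the name buffer on the stack, so nothing has to be freed afterwards. A minimal usage sketch, assuming DRM_DEBUG_KMS() from drmP.h and a hypothetical foo_ helper:

#include <drm/drmP.h>
#include <drm/drm_fourcc.h>

static void foo_debug_format(u32 format)
{
	struct drm_format_name_buf buf;

	/* The buffer lives on the stack; no kfree() needed any more. */
	DRM_DEBUG_KMS("scanning out format %s\n",
		      drm_get_format_name(format, &buf));
}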
diff --git a/include/drm/drm_irq.h b/include/drm/drm_irq.h
index 2401b14d301f..293d08caab60 100644
--- a/include/drm/drm_irq.h
+++ b/include/drm/drm_irq.h
@@ -130,42 +130,37 @@ struct drm_vblank_crtc {
bool enabled;
};
-extern int drm_irq_install(struct drm_device *dev, int irq);
-extern int drm_irq_uninstall(struct drm_device *dev);
+int drm_irq_install(struct drm_device *dev, int irq);
+int drm_irq_uninstall(struct drm_device *dev);
-extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
-extern int drm_wait_vblank(struct drm_device *dev, void *data,
- struct drm_file *filp);
-extern u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe);
-extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
-extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
- struct timeval *vblanktime);
-extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
- struct drm_pending_vblank_event *e);
-extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
- struct drm_pending_vblank_event *e);
-extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
-extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
-extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
-extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
-extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
-extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
-extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe);
-extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe);
-extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
-extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
-extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
-extern void drm_vblank_cleanup(struct drm_device *dev);
-extern u32 drm_accurate_vblank_count(struct drm_crtc *crtc);
-extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
+int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
+u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
+u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
+ struct timeval *vblanktime);
+void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *e);
+void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *e);
+bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
+bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
+int drm_crtc_vblank_get(struct drm_crtc *crtc);
+void drm_crtc_vblank_put(struct drm_crtc *crtc);
+void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
+void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
+void drm_crtc_vblank_off(struct drm_crtc *crtc);
+void drm_crtc_vblank_reset(struct drm_crtc *crtc);
+void drm_crtc_vblank_on(struct drm_crtc *crtc);
+void drm_vblank_cleanup(struct drm_device *dev);
+u32 drm_accurate_vblank_count(struct drm_crtc *crtc);
+u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
-extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
- unsigned int pipe, int *max_error,
- struct timeval *vblank_time,
- unsigned flags,
- const struct drm_display_mode *mode);
-extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
- const struct drm_display_mode *mode);
+int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
+ unsigned int pipe, int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags,
+ const struct drm_display_mode *mode);
+void drm_calc_timestamping_constants(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode);
/**
* drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 205ddcf6d55d..41ddafe92b2f 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -44,6 +44,9 @@
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif
+#ifdef CONFIG_DRM_DEBUG_MM
+#include <linux/stackdepot.h>
+#endif
enum drm_mm_search_flags {
DRM_MM_SEARCH_DEFAULT = 0,
@@ -74,6 +77,9 @@ struct drm_mm_node {
u64 size;
u64 __subtree_last;
struct drm_mm *mm;
+#ifdef CONFIG_DRM_DEBUG_MM
+ depot_stack_handle_t stack;
+#endif
};
struct drm_mm {
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
new file mode 100644
index 000000000000..bf9991b20611
--- /dev/null
+++ b/include/drm/drm_mode_config.h
@@ -0,0 +1,663 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_MODE_CONFIG_H__
+#define __DRM_MODE_CONFIG_H__
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/idr.h>
+#include <linux/workqueue.h>
+
+#include <drm/drm_modeset_lock.h>
+
+struct drm_file;
+struct drm_device;
+struct drm_atomic_state;
+struct drm_mode_fb_cmd2;
+
+/**
+ * struct drm_mode_config_funcs - basic driver provided mode setting functions
+ *
+ * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
+ * involve drivers.
+ */
+struct drm_mode_config_funcs {
+ /**
+ * @fb_create:
+ *
+ * Create a new framebuffer object. The core does basic checks on the
+ * requested metadata, but most of that is left to the driver. See
+ * struct &drm_mode_fb_cmd2 for details.
+ *
+ * If the parameters are deemed valid and the backing storage objects in
+ * the underlying memory manager all exist, then the driver allocates
+ * a new &drm_framebuffer structure, subclassed to contain
+ * driver-specific information (like the internal native buffer object
+ * references). It also needs to fill out all relevant metadata, which
+ * should be done by calling drm_helper_mode_fill_fb_struct().
+ *
+ * The initialization is finalized by calling drm_framebuffer_init(),
+ * which registers the framebuffer and makes it accessible to other
+ * threads.
+ *
+ * RETURNS:
+ *
+ * A new framebuffer with an initial reference count of 1 or a negative
+ * error code encoded with ERR_PTR().
+ */
+ struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
+ struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd);
+
+ /**
+ * @output_poll_changed:
+ *
+ * Callback used by helpers to inform the driver of output configuration
+ * changes.
+ *
+ * Drivers implementing fbdev emulation with the helpers can call
+ * drm_fb_helper_hotplug_event() from this hook to inform the fbdev
+ * helper of output changes.
+ *
+ * FIXME:
+ *
+ * Except that there's no vtable for device-level helper callbacks,
+ * there's no reason this is a core function.
+ */
+ void (*output_poll_changed)(struct drm_device *dev);
+
+ /**
+ * @atomic_check:
+ *
+ * This is the only hook to validate an atomic modeset update. This
+ * function must reject any modeset and state changes which the hardware
+ * or driver doesn't support. This includes but is of course not limited
+ * to:
+ *
+ * - Checking that the modes, framebuffers, scaling and placement
+ * requirements and so on are within the limits of the hardware.
+ *
+ * - Checking that any hidden shared resources are not oversubscribed.
+ * This can be shared PLLs, shared lanes, overall memory bandwidth,
+ * display fifo space (where shared between planes or maybe even
+ * CRTCs).
+ *
+ * - Checking that virtualized resources exported to userspace are not
+ * oversubscribed. For various reasons it can make sense to expose
+ * more planes, crtcs or encoders than are physically there. One
+ * example is dual-pipe operations (which generally should be hidden
+ * from userspace when lockstepped in hardware, exposed otherwise),
+ * where a plane might need 1 hardware plane (if it's just on one
+ * pipe), 2 hardware planes (when it spans both pipes) or maybe even
+ * shared a hardware plane with a 2nd plane (if there's a compatible
+ * plane requested on the area handled by the other pipe).
+ *
+ * - Check that any transitional state is possible and that if
+ * requested, the update can indeed be done in the vblank period
+ * without temporarily disabling some functions.
+ *
+ * - Check any other constraints the driver or hardware might have.
+ *
+ * - This callback also needs to correctly fill out the &drm_crtc_state
+ * in this update to make sure that drm_atomic_crtc_needs_modeset()
+ * reflects the nature of the possible update and returns true if and
+ * only if the update cannot be applied without tearing within one
+ * vblank on that CRTC. The core uses that information to reject
+ * updates which require a full modeset (i.e. blanking the screen, or
+ * at least pausing updates for a substantial amount of time) if
+ * userspace has disallowed that in its request.
+ *
+ * - The driver also does not need to repeat basic input validation
+ * like done for the corresponding legacy entry points. The core does
+ * that before calling this hook.
+ *
+ * See the documentation of @atomic_commit for an exhaustive list of
+ * error conditions which don't have to be checked at the
+ * ->atomic_check() stage.
+ *
+ * See the documentation for struct &drm_atomic_state for how exactly
+ * an atomic modeset update is described.
+ *
+ * Drivers using the atomic helpers can implement this hook using
+ * drm_atomic_helper_check(), or one of the exported sub-functions of
+ * it.
+ *
+ * RETURNS:
+ *
+ * 0 on success or one of the below negative error codes:
+ *
+ * - -EINVAL, if any of the above constraints are violated.
+ *
+ * - -EDEADLK, when returned from an attempt to acquire an additional
+ * &drm_modeset_lock through drm_modeset_lock().
+ *
+ * - -ENOMEM, if allocating additional state sub-structures failed due
+ * to lack of memory.
+ *
+ * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted.
+ * This can either be due to a pending signal, or because the driver
+ * needs to completely bail out to recover from an exceptional
+ * situation like a GPU hang. From a userspace point of view all errors are
+ * treated equally.
+ */
+ int (*atomic_check)(struct drm_device *dev,
+ struct drm_atomic_state *state);
+
+ /**
+ * @atomic_commit:
+ *
+ * This is the only hook to commit an atomic modeset update. The core
+ * guarantees that @atomic_check has been called successfully before
+ * calling this function, and that nothing has been changed in the
+ * interim.
+ *
+ * See the documentation for struct &drm_atomic_state for how exactly
+ * an atomic modeset update is described.
+ *
+ * Drivers using the atomic helpers can implement this hook using
+ * drm_atomic_helper_commit(), or one of the exported sub-functions of
+ * it.
+ *
+ * Nonblocking commits (as indicated with the nonblock parameter) must
+ * do any preparatory work which might result in an unsuccessful commit
+ * in the context of this callback. The only exceptions are hardware
+ * errors resulting in -EIO. But even in that case the driver must
+ * ensure that the display pipe is at least running, to avoid
+ * compositors crashing when pageflips don't work. Anything else,
+ * specifically committing the update to the hardware, should be done
+ * without blocking the caller. For updates which do not require a
+ * modeset this must be guaranteed.
+ *
+ * The driver must wait for any pending rendering to the new
+ * framebuffers to complete before executing the flip. It should also
+ * wait for any pending rendering from other drivers if the underlying
+ * buffer is a shared dma-buf. Nonblocking commits must not wait for
+ * rendering in the context of this callback.
+ *
+ * An application can request to be notified when the atomic commit has
+ * completed. These events are per-CRTC and can be distinguished by the
+ * CRTC index supplied in &drm_event to userspace.
+ *
+ * The drm core will supply a struct &drm_event in the event
+ * member of each CRTC's &drm_crtc_state structure. See the
+ * documentation for &drm_crtc_state for more details about the precise
+ * semantics of this event.
+ *
+ * NOTE:
+ *
+ * Drivers are not allowed to shut down any display pipe successfully
+ * enabled through an atomic commit on their own. Doing so can result in
+ * compositors crashing if a page flip is suddenly rejected because the
+ * pipe is off.
+ *
+ * RETURNS:
+ *
+ * 0 on success or one of the below negative error codes:
+ *
+ * - -EBUSY, if a nonblocking update is requested and there is
+ * an earlier update pending. Drivers are allowed to support a queue
+ * of outstanding updates, but currently no driver supports that.
+ * Note that drivers must wait for preceding updates to complete if a
+ * synchronous update is requested, they are not allowed to fail the
+ * commit in that case.
+ *
+ * - -ENOMEM, if the driver failed to allocate memory. Specifically
+ * this can happen when trying to pin framebuffers, which must only
+ * be done when committing the state.
+ *
+ * - -ENOSPC, as a refinement of the more generic -ENOMEM to indicate
+ * that the driver has run out of vram, iommu space or similar GPU
+ * address space needed for the framebuffer.
+ *
+ * - -EIO, if the hardware completely died.
+ *
+ * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted.
+ * This can either be due to a pending signal, or because the driver
+ * needs to completely bail out to recover from an exceptional
+ * situation like a GPU hang. From a userspace point of view all errors are
+ * treated equally.
+ *
+ * This list is exhaustive. Specifically this hook is not allowed to
+ * return -EINVAL (any invalid requests should be caught in
+ * @atomic_check) or -EDEADLK (this function must not acquire
+ * additional modeset locks).
+ */
+ int (*atomic_commit)(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool nonblock);
+
+ /**
+ * @atomic_state_alloc:
+ *
+ * This optional hook can be used by drivers that want to subclass struct
+ * &drm_atomic_state to be able to track their own driver-private global
+ * state easily. If this hook is implemented, drivers must also
+ * implement @atomic_state_clear and @atomic_state_free.
+ *
+ * RETURNS:
+ *
+ * A new &drm_atomic_state on success or NULL on failure.
+ */
+ struct drm_atomic_state *(*atomic_state_alloc)(struct drm_device *dev);
+
+ /**
+ * @atomic_state_clear:
+ *
+ * This hook must clear any driver private state duplicated into the
+ * passed-in &drm_atomic_state. This hook is called when the caller
+ * encountered a &drm_modeset_lock deadlock and needs to drop all
+ * already acquired locks as part of the deadlock avoidance dance
+ * implemented in drm_modeset_backoff().
+ *
+ * Any duplicated state must be invalidated since a concurrent atomic
+ * update might change it, and the drm atomic interfaces always apply
+ * updates as relative changes to the current state.
+ *
+ * Drivers that implement this must call drm_atomic_state_default_clear()
+ * to clear common state.
+ */
+ void (*atomic_state_clear)(struct drm_atomic_state *state);
+
+ /**
+ * @atomic_state_free:
+ *
+ * This hook needs to free driver private resources and the
+ * &drm_atomic_state itself. Note that the core first calls
+ * drm_atomic_state_clear() to avoid code duplication between the clear
+ * and free hooks.
+ *
+ * Drivers that implement this must call drm_atomic_state_default_free()
+ * to release common resources.
+ */
+ void (*atomic_state_free)(struct drm_atomic_state *state);
+};
+
+/**
+ * struct drm_mode_config - Mode configuration control structure
+ * @mutex: mutex protecting KMS related lists and structures
+ * @connection_mutex: ww mutex protecting connector state and routing
+ * @acquire_ctx: global implicit acquire context used by atomic drivers for
+ * legacy IOCTLs
+ * @fb_lock: mutex to protect fb state and lists
+ * @num_fb: number of fbs available
+ * @fb_list: list of framebuffers available
+ * @num_encoder: number of encoders on this device
+ * @encoder_list: list of encoder objects
+ * @num_overlay_plane: number of overlay planes on this device
+ * @num_total_plane: number of universal (i.e. with primary/cursor) planes on this device
+ * @plane_list: list of plane objects
+ * @num_crtc: number of CRTCs on this device
+ * @crtc_list: list of CRTC objects
+ * @property_list: list of property objects
+ * @min_width: minimum pixel width on this device
+ * @min_height: minimum pixel height on this device
+ * @max_width: maximum pixel width on this device
+ * @max_height: maximum pixel height on this device
+ * @funcs: core driver provided mode setting functions
+ * @fb_base: base address of the framebuffer
+ * @poll_enabled: track polling support for this device
+ * @poll_running: track polling status for this device
+ * @delayed_event: track delayed poll uevent delivery for this device
+ * @output_poll_work: delayed work for polling in process context
+ * @property_blob_list: list of all the blob property objects
+ * @blob_lock: mutex for blob property allocation and management
+ * @*_property: core property tracking
+ * @preferred_depth: preferred RGB pixel depth, used by fb helpers
+ * @prefer_shadow: hint to userspace to prefer shadow-fb rendering
+ * @cursor_width: hint to userspace for max cursor width
+ * @cursor_height: hint to userspace for max cursor height
+ * @helper_private: mid-layer private data
+ *
+ * Core mode resource tracking structure. All CRTC, encoders, and connectors
+ * enumerated by the driver are added here, as are global properties. Some
+ * global restrictions are also here, e.g. dimension restrictions.
+ */
+struct drm_mode_config {
+ struct mutex mutex; /* protects configuration (mode lists etc.) */
+ struct drm_modeset_lock connection_mutex; /* protects connector->encoder and encoder->crtc links */
+ struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */
+
+ /**
+ * @idr_mutex:
+ *
+ * Mutex for KMS ID allocation and management. Protects both @crtc_idr
+ * and @tile_idr.
+ */
+ struct mutex idr_mutex;
+
+ /**
+ * @crtc_idr:
+ *
+ * Main KMS ID tracking object. Use this idr for all IDs, fb, crtc,
+ * connector, modes - just makes life easier to have only one.
+ */
+ struct idr crtc_idr;
+
+ /**
+ * @tile_idr:
+ *
+ * Use this idr for allocating new IDs for tiled sinks, as used in some
+ * high-res DP MST screens.
+ */
+ struct idr tile_idr;
+
+ struct mutex fb_lock; /* protects global and per-file fb lists */
+ int num_fb;
+ struct list_head fb_list;
+
+ /**
+ * @num_connector: Number of connectors on this device.
+ */
+ int num_connector;
+ /**
+ * @connector_ida: ID allocator for connector indices.
+ */
+ struct ida connector_ida;
+ /**
+ * @connector_list: List of connector objects.
+ */
+ struct list_head connector_list;
+ int num_encoder;
+ struct list_head encoder_list;
+
+ /*
+ * Track # of overlay planes separately from # of total planes. By
+ * default we only advertise overlay planes to userspace; if userspace
+ * sets the "universal plane" capability bit, we'll go ahead and
+ * expose all planes.
+ */
+ int num_overlay_plane;
+ int num_total_plane;
+ struct list_head plane_list;
+
+ int num_crtc;
+ struct list_head crtc_list;
+
+ struct list_head property_list;
+
+ int min_width, min_height;
+ int max_width, max_height;
+ const struct drm_mode_config_funcs *funcs;
+ resource_size_t fb_base;
+
+ /* output poll support */
+ bool poll_enabled;
+ bool poll_running;
+ bool delayed_event;
+ struct delayed_work output_poll_work;
+
+ struct mutex blob_lock;
+
+ /* pointers to standard properties */
+ struct list_head property_blob_list;
+ /**
+ * @edid_property: Default connector property to hold the EDID of the
+ * currently connected sink, if any.
+ */
+ struct drm_property *edid_property;
+ /**
+ * @dpms_property: Default connector property to control the
+ * connector's DPMS state.
+ */
+ struct drm_property *dpms_property;
+ /**
+ * @path_property: Default connector property to hold the DP MST path
+ * for the port.
+ */
+ struct drm_property *path_property;
+ /**
+ * @tile_property: Default connector property to store the tile
+ * position of a tiled screen, for sinks which need to be driven with
+ * multiple CRTCs.
+ */
+ struct drm_property *tile_property;
+ /**
+ * @plane_type_property: Default plane property to differentiate
+ * CURSOR, PRIMARY and OVERLAY legacy uses of planes.
+ */
+ struct drm_property *plane_type_property;
+ /**
+ * @prop_src_x: Default atomic plane property for the plane source
+ * position in the connected &drm_framebuffer.
+ */
+ struct drm_property *prop_src_x;
+ /**
+ * @prop_src_y: Default atomic plane property for the plane source
+ * position in the connected &drm_framebuffer.
+ */
+ struct drm_property *prop_src_y;
+ /**
+ * @prop_src_w: Default atomic plane property for the plane source
+ * position in the connected &drm_framebuffer.
+ */
+ struct drm_property *prop_src_w;
+ /**
+ * @prop_src_h: Default atomic plane property for the plane source
+ * position in the connected &drm_framebuffer.
+ */
+ struct drm_property *prop_src_h;
+ /**
+ * @prop_crtc_x: Default atomic plane property for the plane destination
+ * position in the &drm_crtc it is being shown on.
+ */
+ struct drm_property *prop_crtc_x;
+ /**
+ * @prop_crtc_y: Default atomic plane property for the plane destination
+ * position in the &drm_crtc it is being shown on.
+ */
+ struct drm_property *prop_crtc_y;
+ /**
+ * @prop_crtc_w: Default atomic plane property for the plane destination
+ * position in the &drm_crtc it is being shown on.
+ */
+ struct drm_property *prop_crtc_w;
+ /**
+ * @prop_crtc_h: Default atomic plane property for the plane destination
+ * position in the &drm_crtc it is being shown on.
+ */
+ struct drm_property *prop_crtc_h;
+ /**
+ * @prop_fb_id: Default atomic plane property to specify the
+ * &drm_framebuffer.
+ */
+ struct drm_property *prop_fb_id;
+ /**
+ * @prop_in_fence_fd: Sync File fd representing the incoming fences
+ * for a Plane.
+ */
+ struct drm_property *prop_in_fence_fd;
+ /**
+ * @prop_out_fence_ptr: Sync File fd pointer representing the
+ * outgoing fences for a CRTC. Userspace should provide a pointer to a
+ * value of type s64, and then cast that pointer to u64.
+ */
+ struct drm_property *prop_out_fence_ptr;
+ /**
+ * @prop_crtc_id: Default atomic plane property to specify the
+ * &drm_crtc.
+ */
+ struct drm_property *prop_crtc_id;
+ /**
+ * @prop_active: Default atomic CRTC property to control the active
+ * state, which is the simplified implementation for DPMS in atomic
+ * drivers.
+ */
+ struct drm_property *prop_active;
+ /**
+ * @prop_mode_id: Default atomic CRTC property to set the mode for a
+ * CRTC. A 0 mode implies that the CRTC is entirely disabled - all
+ * connectors must be of and active must be set to disabled, too.
+ */
+ struct drm_property *prop_mode_id;
+
+ /**
+ * @dvi_i_subconnector_property: Optional DVI-I property to
+ * differentiate between analog or digital mode.
+ */
+ struct drm_property *dvi_i_subconnector_property;
+ /**
+ * @dvi_i_select_subconnector_property: Optional DVI-I property to
+ * select between analog or digital mode.
+ */
+ struct drm_property *dvi_i_select_subconnector_property;
+
+ /**
+ * @tv_subconnector_property: Optional TV property to differentiate
+ * between different TV connector types.
+ */
+ struct drm_property *tv_subconnector_property;
+ /**
+ * @tv_select_subconnector_property: Optional TV property to select
+ * between different TV connector types.
+ */
+ struct drm_property *tv_select_subconnector_property;
+ /**
+ * @tv_mode_property: Optional TV property to select
+ * the output TV mode.
+ */
+ struct drm_property *tv_mode_property;
+ /**
+ * @tv_left_margin_property: Optional TV property to set the left
+ * margin.
+ */
+ struct drm_property *tv_left_margin_property;
+ /**
+ * @tv_right_margin_property: Optional TV property to set the right
+ * margin.
+ */
+ struct drm_property *tv_right_margin_property;
+ /**
+ * @tv_top_margin_property: Optional TV property to set the top
+ * margin.
+ */
+ struct drm_property *tv_top_margin_property;
+ /**
+ * @tv_bottom_margin_property: Optional TV property to set the bottom
+ * margin.
+ */
+ struct drm_property *tv_bottom_margin_property;
+ /**
+ * @tv_brightness_property: Optional TV property to set the
+ * brightness.
+ */
+ struct drm_property *tv_brightness_property;
+ /**
+ * @tv_contrast_property: Optional TV property to set the
+ * contrast.
+ */
+ struct drm_property *tv_contrast_property;
+ /**
+ * @tv_flicker_reduction_property: Optional TV property to control the
+ * flicker reduction mode.
+ */
+ struct drm_property *tv_flicker_reduction_property;
+ /**
+ * @tv_overscan_property: Optional TV property to control the overscan
+ * setting.
+ */
+ struct drm_property *tv_overscan_property;
+ /**
+ * @tv_saturation_property: Optional TV property to set the
+ * saturation.
+ */
+ struct drm_property *tv_saturation_property;
+ /**
+ * @tv_hue_property: Optional TV property to set the hue.
+ */
+ struct drm_property *tv_hue_property;
+
+ /**
+ * @scaling_mode_property: Optional connector property to control the
+ * upscaling, mostly used for built-in panels.
+ */
+ struct drm_property *scaling_mode_property;
+ /**
+ * @aspect_ratio_property: Optional connector property to control the
+ * HDMI infoframe aspect ratio setting.
+ */
+ struct drm_property *aspect_ratio_property;
+ /**
+ * @degamma_lut_property: Optional CRTC property to set the LUT used to
+ * convert the framebuffer's colors to linear gamma.
+ */
+ struct drm_property *degamma_lut_property;
+ /**
+ * @degamma_lut_size_property: Optional CRTC property for the size of
+ * the degamma LUT as supported by the driver (read-only).
+ */
+ struct drm_property *degamma_lut_size_property;
+ /**
+ * @ctm_property: Optional CRTC property to set the
+ * matrix used to convert colors after the lookup in the
+ * degamma LUT.
+ */
+ struct drm_property *ctm_property;
+ /**
+ * @gamma_lut_property: Optional CRTC property to set the LUT used to
+ * convert the colors, after the CTM matrix, to the gamma space of the
+ * connected screen.
+ */
+ struct drm_property *gamma_lut_property;
+ /**
+ * @gamma_lut_size_property: Optional CRTC property for the size of the
+ * gamma LUT as supported by the driver (read-only).
+ */
+ struct drm_property *gamma_lut_size_property;
+
+ /**
+ * @suggested_x_property: Optional connector property with a hint for
+ * the position of the output on the host's screen.
+ */
+ struct drm_property *suggested_x_property;
+ /**
+ * @suggested_y_property: Optional connector property with a hint for
+ * the position of the output on the host's screen.
+ */
+ struct drm_property *suggested_y_property;
+
+ /* dumb ioctl parameters */
+ uint32_t preferred_depth, prefer_shadow;
+
+ /**
+ * @async_page_flip: Does this device support async flips on the primary
+ * plane?
+ */
+ bool async_page_flip;
+
+ /**
+ * @allow_fb_modifiers:
+ *
+ * Whether the driver supports fb modifiers in the ADDFB2.1 ioctl call.
+ */
+ bool allow_fb_modifiers;
+
+ /* cursor size */
+ uint32_t cursor_width, cursor_height;
+
+ struct drm_mode_config_helper_funcs *helper_private;
+};
+
+void drm_mode_config_init(struct drm_device *dev);
+void drm_mode_config_reset(struct drm_device *dev);
+void drm_mode_config_cleanup(struct drm_device *dev);
+
+#endif
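To show how the pieces above fit together, here is a hedged sketch of a driver wiring struct drm_mode_config_funcs to the atomic helpers and filling in the global limits tracked in struct drm_mode_config; the foo_ names and the limits are placeholders, and the helpers are assumed to come from drm_atomic_helper.h and drm_fb_cma_helper.h.

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_mode_config.h>

static const struct drm_mode_config_funcs foo_mode_config_funcs = {
	.fb_create = drm_fb_cma_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static void foo_mode_config_init(struct drm_device *drm)
{
	drm_mode_config_init(drm);

	/* Placeholder limits; real drivers use their hardware maximums. */
	drm->mode_config.min_width = 0;
	drm->mode_config.min_height = 0;
	drm->mode_config.max_width = 4096;
	drm->mode_config.max_height = 4096;
	drm->mode_config.funcs = &foo_mode_config_funcs;
}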
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 10e449c86dbd..72478cf82147 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -361,8 +361,8 @@ struct drm_crtc_helper_funcs {
*
* Note that the power state of the display pipe when this function is
* called depends upon the exact helpers and calling sequence the driver
- * has picked. See drm_atomic_commit_planes() for a discussion of the
- * tradeoffs and variants of plane commit helpers.
+ * has picked. See drm_atomic_helper_commit_planes() for a discussion of
+ * the tradeoffs and variants of plane commit helpers.
*
* This callback is used by the atomic modeset helpers and by the
* transitional plane helpers, but it is optional.
@@ -385,8 +385,8 @@ struct drm_crtc_helper_funcs {
*
* Note that the power state of the display pipe when this function is
* called depends upon the exact helpers and calling sequence the driver
- * has picked. See drm_atomic_commit_planes() for a discussion of the
- * tradeoffs and variants of plane commit helpers.
+ * has picked. See drm_atomic_helper_commit_planes() for a discussion of
+ * the tradeoffs and variants of plane commit helpers.
*
* This callback is used by the atomic modeset helpers and by the
* transitional plane helpers, but it is optional.
@@ -940,8 +940,8 @@ struct drm_plane_helper_funcs {
*
* Note that the power state of the display pipe when this function is
* called depends upon the exact helpers and calling sequence the driver
- * has picked. See drm_atomic_commit_planes() for a discussion of the
- * tradeoffs and variants of plane commit helpers.
+ * has picked. See drm_atomic_helper_commit_planes() for a discussion of
+ * the tradeoffs and variants of plane commit helpers.
*
* This callback is used by the atomic modeset helpers and by the
* transitional plane helpers, but it is optional.
@@ -963,8 +963,8 @@ struct drm_plane_helper_funcs {
*
* Note that the power state of the display pipe when this function is
* called depends upon the exact helpers and calling sequence the driver
- * has picked. See drm_atomic_commit_planes() for a discussion of the
- * tradeoffs and variants of plane commit helpers.
+ * has picked. See drm_atomic_helper_commit_planes() for a discussion of
+ * the tradeoffs and variants of plane commit helpers.
*
* This callback is used by the atomic modeset helpers and by the
* transitional plane helpers, but it is optional.
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index c5576fbcb909..d918ce45ec2c 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -82,8 +82,6 @@ struct drm_modeset_lock {
struct list_head head;
};
-extern struct ww_class crtc_ww_class;
-
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
uint32_t flags);
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx);
@@ -91,15 +89,7 @@ void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx);
void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx);
int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx);
-/**
- * drm_modeset_lock_init - initialize lock
- * @lock: lock to init
- */
-static inline void drm_modeset_lock_init(struct drm_modeset_lock *lock)
-{
- ww_mutex_init(&lock->mutex, &crtc_ww_class);
- INIT_LIST_HEAD(&lock->head);
-}
+void drm_modeset_lock_init(struct drm_modeset_lock *lock);
/**
* drm_modeset_lock_fini - cleanup lock
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index c5e8a0df1623..5b38eb94783b 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -28,15 +28,11 @@
#include <drm/drm_mode_object.h>
struct drm_crtc;
+struct drm_printer;
/**
* struct drm_plane_state - mutable plane state
* @plane: backpointer to the plane
- * @crtc: currently bound CRTC, NULL if disabled
- * @fb: currently bound framebuffer
- * @fence: optional fence to wait for before scanning out @fb
- * @crtc_x: left position of visible portion of plane on crtc
- * @crtc_y: upper position of visible portion of plane on crtc
* @crtc_w: width of visible portion of plane on crtc
* @crtc_h: height of visible portion of plane on crtc
* @src_x: left position of visible portion of plane within
@@ -47,22 +43,61 @@ struct drm_crtc;
* @src_h: height of visible portion of plane (in 16.16)
* @rotation: rotation of the plane
* @zpos: priority of the given plane on crtc (optional)
+ * Note that multiple active planes on the same crtc can have an identical
+ * zpos value. The rule for solving the conflict is to compare the plane
+ * object IDs; the plane with a higher ID must be stacked on top of a
+ * plane with a lower ID.
* @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1
- * where N is the number of active planes for given crtc
+ * where N is the number of active planes for given crtc. Note that
+ * the driver must call drm_atomic_normalize_zpos() to update this before
+ * it can be trusted.
* @src: clipped source coordinates of the plane (in 16.16)
* @dst: clipped destination coordinates of the plane
- * @visible: visibility of the plane
* @state: backpointer to global drm_atomic_state
*/
struct drm_plane_state {
struct drm_plane *plane;
- struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_plane() */
- struct drm_framebuffer *fb; /* do not write directly, use drm_atomic_set_fb_for_plane() */
+ /**
+ * @crtc:
+ *
+ * Currently bound CRTC, NULL if disabled. Do not write this directly,
+ * use drm_atomic_set_crtc_for_plane()
+ */
+ struct drm_crtc *crtc;
+
+ /**
+ * @fb:
+ *
+ * Currently bound framebuffer. Do not write this directly, use
+ * drm_atomic_set_fb_for_plane()
+ */
+ struct drm_framebuffer *fb;
+
+ /**
+ * @fence:
+ *
+ * Optional fence to wait for before scanning out @fb. Do not write this
+ * directly, use drm_atomic_set_fence_for_plane()
+ */
struct dma_fence *fence;
- /* Signed dest location allows it to be partially off screen */
- int32_t crtc_x, crtc_y;
+ /**
+ * @crtc_x:
+ *
+ * Left position of visible portion of plane on crtc, signed dest
+ * location allows it to be partially off screen.
+ */
+ int32_t crtc_x;
+ /**
+ * @crtc_y:
+ *
+ * Upper position of visible portion of plane on crtc, signed dest
+ * location allows it to be partially off screen.
+ */
+ int32_t crtc_y;
+
uint32_t crtc_w, crtc_h;
/* Source values are 16.16 fixed point */
@@ -79,15 +114,41 @@ struct drm_plane_state {
/* Clipped coordinates */
struct drm_rect src, dst;
- /*
- * Is the plane actually visible? Can be false even
- * if fb!=NULL and crtc!=NULL, due to clipping.
+ /**
+ * @visible:
+ *
+ * Visibility of the plane. This can be false even if fb!=NULL and
+ * crtc!=NULL, due to clipping.
*/
bool visible;
struct drm_atomic_state *state;
};
+static inline struct drm_rect
+drm_plane_state_src(const struct drm_plane_state *state)
+{
+ struct drm_rect src = {
+ .x1 = state->src_x,
+ .y1 = state->src_y,
+ .x2 = state->src_x + state->src_w,
+ .y2 = state->src_y + state->src_h,
+ };
+ return src;
+}
+
+static inline struct drm_rect
+drm_plane_state_dest(const struct drm_plane_state *state)
+{
+ struct drm_rect dest = {
+ .x1 = state->crtc_x,
+ .y1 = state->crtc_y,
+ .x2 = state->crtc_x + state->crtc_w,
+ .y2 = state->crtc_y + state->crtc_h,
+ };
+ return dest;
+}
+
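A small, hypothetical use of the two inline helpers above together with the drm_rect printing macros; DRM_RECT_* and DRM_DEBUG_KMS() are assumed to come from drm_rect.h and drmP.h respectively.

#include <drm/drmP.h>
#include <drm/drm_plane.h>
#include <drm/drm_rect.h>

static void foo_log_plane_rects(const struct drm_plane_state *state)
{
	struct drm_rect src = drm_plane_state_src(state);	/* 16.16 fixed point */
	struct drm_rect dst = drm_plane_state_dest(state);	/* whole pixels */

	DRM_DEBUG_KMS("src " DRM_RECT_FP_FMT " -> dst " DRM_RECT_FMT "\n",
		      DRM_RECT_FP_ARG(&src), DRM_RECT_ARG(&dst));
}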
/**
* struct drm_plane_funcs - driver plane control functions
*/
@@ -316,6 +377,18 @@ struct drm_plane_funcs {
* before data structures are torndown.
*/
void (*early_unregister)(struct drm_plane *plane);
+
+ /**
+ * @atomic_print_state:
+ *
+ * If driver subclasses struct &drm_plane_state, it should implement
+ * this optional hook for printing additional driver specific state.
+ *
+ * Do not call this directly, use drm_atomic_plane_print_state()
+ * instead.
+ */
+ void (*atomic_print_state)(struct drm_printer *p,
+ const struct drm_plane_state *state);
};
/**
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
new file mode 100644
index 000000000000..1adf84aea622
--- /dev/null
+++ b/include/drm/drm_print.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef DRM_PRINT_H_
+#define DRM_PRINT_H_
+
+#include <linux/seq_file.h>
+#include <linux/device.h>
+
+/**
+ * DOC: print
+ *
+ * A simple wrapper for dev_printk(), seq_printf(), etc. Allows same
+ * debug code to be used for both debugfs and printk logging.
+ *
+ * For example::
+ *
+ * void log_some_info(struct drm_printer *p)
+ * {
+ * drm_printf(p, "foo=%d\n", foo);
+ * drm_printf(p, "bar=%d\n", bar);
+ * }
+ *
+ * #ifdef CONFIG_DEBUG_FS
+ * void debugfs_show(struct seq_file *f)
+ * {
+ * struct drm_printer p = drm_seq_file_printer(f);
+ * log_some_info(&p);
+ * }
+ * #endif
+ *
+ * void some_other_function(...)
+ * {
+ * struct drm_printer p = drm_info_printer(drm->dev);
+ * log_some_info(&p);
+ * }
+ */
+
+/**
+ * struct drm_printer - drm output "stream"
+ * @printfn: actual output function
+ * @arg: output function specific data
+ *
+ * Do not use struct members directly. Use drm_seq_file_printer(),
+ * drm_info_printer(), etc. to initialize, and drm_printf() for output.
+ */
+struct drm_printer {
+ void (*printfn)(struct drm_printer *p, struct va_format *vaf);
+ void *arg;
+};
+
+void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf);
+void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf);
+
+void drm_printf(struct drm_printer *p, const char *f, ...);
+
+
+/**
+ * drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file
+ * @f: the struct &seq_file to output to
+ *
+ * RETURNS:
+ * The &drm_printer object
+ */
+static inline struct drm_printer drm_seq_file_printer(struct seq_file *f)
+{
+ struct drm_printer p = {
+ .printfn = __drm_printfn_seq_file,
+ .arg = f,
+ };
+ return p;
+}
+
+/**
+ * drm_info_printer - construct a &drm_printer that outputs to dev_printk()
+ * @dev: the struct &device pointer
+ *
+ * RETURNS:
+ * The &drm_printer object
+ */
+static inline struct drm_printer drm_info_printer(struct device *dev)
+{
+ struct drm_printer p = {
+ .printfn = __drm_printfn_info,
+ .arg = dev,
+ };
+ return p;
+}
+
+#endif /* DRM_PRINT_H_ */
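As a hedged sketch of how this ties into the new &drm_plane_funcs.atomic_print_state hook added above, a driver subclassing drm_plane_state could print its private fields through a drm_printer; struct foo_plane_state and its field are hypothetical.

#include <linux/kernel.h>
#include <drm/drm_plane.h>
#include <drm/drm_print.h>

struct foo_plane_state {
	struct drm_plane_state base;
	u32 pixel_alpha;	/* hypothetical driver-private field */
};

static void foo_plane_atomic_print_state(struct drm_printer *p,
					 const struct drm_plane_state *state)
{
	const struct foo_plane_state *fstate =
		container_of(state, struct foo_plane_state, base);

	drm_printf(p, "\tpixel_alpha=%u\n", fstate->pixel_alpha);
}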
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index ddbeda6dbdc8..689a8b9b9c8f 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -326,6 +326,7 @@ struct pci_dev;
int acpi_pci_irq_enable (struct pci_dev *dev);
void acpi_penalize_isa_irq(int irq, int active);
bool acpi_isa_irq_available(int irq);
+void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
void acpi_pci_irq_disable (struct pci_dev *dev);
extern int ec_read(u8 addr, u8 *val);
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index af596381fa0f..a428aec36ace 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -785,7 +785,7 @@ extern struct of_device_id __clk_of_table;
* routines, one at of_clk_init(), and one at platform device probe
*/
#define CLK_OF_DECLARE_DRIVER(name, compat, fn) \
- static void name##_of_clk_init_driver(struct device_node *np) \
+ static void __init name##_of_clk_init_driver(struct device_node *np) \
{ \
of_node_clear_flag(np, OF_POPULATED); \
fn(np); \
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index ba60c043a5d3..d51a7d23c358 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -225,7 +225,7 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
/**
* dma_fence_get_rcu_safe - acquire a reference to an RCU tracked fence
- * @fence: [in] pointer to fence to increase refcount of
+ * @fencep: [in] pointer to fence to increase refcount of
*
* Function returns NULL if no refcount could be obtained, or the fence.
* This function handles acquiring a reference to a fence that may be
@@ -382,7 +382,8 @@ signed long dma_fence_wait_timeout(struct dma_fence *,
bool intr, signed long timeout);
signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
uint32_t count,
- bool intr, signed long timeout);
+ bool intr, signed long timeout,
+ uint32_t *idx);
/**
* dma_fence_wait - sleep until the fence gets signaled
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 6824556d37ed..cd184bdca58f 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1169,13 +1169,6 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);
-static inline const char *vmbus_dev_name(const struct hv_device *device_obj)
-{
- const struct kobject *kobj = &device_obj->device.kobj;
-
- return kobj->name;
-}
-
void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
diff --git a/include/linux/io.h b/include/linux/io.h
index e2c8419278c1..82ef36eac8a1 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -141,4 +141,26 @@ enum {
void *memremap(resource_size_t offset, size_t size, unsigned long flags);
void memunmap(void *addr);
+/*
+ * On x86 PAT systems we have memory tracking that keeps track of
+ * the allowed mappings on memory ranges. This tracking works for
+ * all the in-kernel mapping APIs (ioremap*), but where the user
+ * wishes to map a range from a physical device into user memory
+ * the tracking won't be updated. This API is to be used by
+ * drivers which remap physical device pages into userspace,
+ * and want to make sure they are mapped WC and not UC.
+ */
+#ifndef arch_io_reserve_memtype_wc
+static inline int arch_io_reserve_memtype_wc(resource_size_t base,
+ resource_size_t size)
+{
+ return 0;
+}
+
+static inline void arch_io_free_memtype_wc(resource_size_t base,
+ resource_size_t size)
+{
+}
+#endif
+
#endif /* _LINUX_IO_H */
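As a hedged illustration of the intended calling convention (the driver, BAR index and error handling below are hypothetical, not part of this patch), a driver mapping a PCI BAR write-combined would bracket the mapping like this:

static void __iomem *demo_map_vram_wc(struct pci_dev *pdev, int bar)
{
	resource_size_t base = pci_resource_start(pdev, bar);
	resource_size_t size = pci_resource_len(pdev, bar);

	/* Record the range as WC in the PAT tracking first, so later
	 * userspace mappings of the same pages agree with the kernel one. */
	if (arch_io_reserve_memtype_wc(base, size))
		return NULL;

	return ioremap_wc(base, size);
}

static void demo_unmap_vram_wc(struct pci_dev *pdev, int bar, void __iomem *mmio)
{
	iounmap(mmio);
	/* Drop the WC reservation taken in demo_map_vram_wc() */
	arch_io_free_memtype_wc(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
}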
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index e63e288dee83..7892f55a1866 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -19,11 +19,15 @@ struct vm_fault;
#define IOMAP_UNWRITTEN 0x04 /* blocks allocated @blkno in unwritten state */
/*
- * Flags for iomap mappings:
+ * Flags for all iomap mappings:
*/
-#define IOMAP_F_MERGED 0x01 /* contains multiple blocks/extents */
-#define IOMAP_F_SHARED 0x02 /* block shared with another file */
-#define IOMAP_F_NEW 0x04 /* blocks have been newly allocated */
+#define IOMAP_F_NEW 0x01 /* blocks have been newly allocated */
+
+/*
+ * Flags that only need to be reported for IOMAP_REPORT requests:
+ */
+#define IOMAP_F_MERGED 0x10 /* contains multiple blocks/extents */
+#define IOMAP_F_SHARED 0x20 /* block shared with another file */
/*
* Magic value for blkno:
@@ -42,8 +46,9 @@ struct iomap {
/*
* Flags for iomap_begin / iomap_end. No flag implies a read.
*/
-#define IOMAP_WRITE (1 << 0)
-#define IOMAP_ZERO (1 << 1)
+#define IOMAP_WRITE (1 << 0) /* writing, must allocate blocks */
+#define IOMAP_ZERO (1 << 1) /* zeroing operation, may skip holes */
+#define IOMAP_REPORT (1 << 2) /* report extent status, e.g. FIEMAP */
struct iomap_ops {
/*
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 7e9a789be5e0..ca1ad9ebbc92 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -123,12 +123,12 @@ struct inet6_skb_parm {
};
#if defined(CONFIG_NET_L3_MASTER_DEV)
-static inline bool skb_l3mdev_slave(__u16 flags)
+static inline bool ipv6_l3mdev_skb(__u16 flags)
{
return flags & IP6SKB_L3SLAVE;
}
#else
-static inline bool skb_l3mdev_slave(__u16 flags)
+static inline bool ipv6_l3mdev_skb(__u16 flags)
{
return false;
}
@@ -139,11 +139,22 @@ static inline bool skb_l3mdev_slave(__u16 flags)
static inline int inet6_iif(const struct sk_buff *skb)
{
- bool l3_slave = skb_l3mdev_slave(IP6CB(skb)->flags);
+ bool l3_slave = ipv6_l3mdev_skb(IP6CB(skb)->flags);
return l3_slave ? skb->skb_iif : IP6CB(skb)->iif;
}
+/* can not be used in TCP layer after tcp_v6_fill_cb */
+static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb)
+{
+#if defined(CONFIG_NET_L3_MASTER_DEV)
+ if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
+ ipv6_l3mdev_skb(IP6CB(skb)->flags))
+ return true;
+#endif
+ return false;
+}
+
struct tcp6_request_sock {
struct tcp_request_sock tcp6rsk_tcp;
};
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index 15ec117ec537..8f2e059e4d45 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -31,7 +31,6 @@
* When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when
* the last step cherry picks the 2nd arg, we get a zero.
*/
-#define config_enabled(cfg) ___is_defined(cfg)
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
@@ -41,13 +40,13 @@
* otherwise. For boolean options, this is equivalent to
* IS_ENABLED(CONFIG_FOO).
*/
-#define IS_BUILTIN(option) config_enabled(option)
+#define IS_BUILTIN(option) __is_defined(option)
/*
* IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0
* otherwise.
*/
-#define IS_MODULE(option) config_enabled(option##_MODULE)
+#define IS_MODULE(option) __is_defined(option##_MODULE)
/*
* IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled
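Callers are unchanged by this cleanup; purely for illustration (CONFIG_NET is chosen arbitrarily here), the macros keep working as compile-time constants usable in plain C:

#include <linux/kconfig.h>
#include <linux/printk.h>

static void demo_kconfig_checks(void)
{
	if (IS_ENABLED(CONFIG_NET))	/* true for =y or =m */
		pr_debug("networking support is enabled\n");

	if (IS_BUILTIN(CONFIG_NET))	/* true only for =y */
		pr_debug("networking support is built in\n");

	if (IS_MODULE(CONFIG_NET))	/* true only for =m */
		pr_debug("networking support is modular\n");
}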
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index f6a164297358..3be7abd6e722 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -1399,7 +1399,8 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
u32 *lkey, u32 *rkey);
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
int mlx4_SYNC_TPT(struct mlx4_dev *dev);
-int mlx4_test_interrupts(struct mlx4_dev *dev);
+int mlx4_test_interrupt(struct mlx4_dev *dev, int vector);
+int mlx4_test_async(struct mlx4_dev *dev);
int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier,
const u32 offset[], u32 value[],
size_t array_len, u8 port);
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 85c4786427e4..ecc451d89ccd 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -418,8 +418,12 @@ struct mlx5_core_health {
u32 prev;
int miss_counter;
bool sick;
+ /* wq spinlock to synchronize draining */
+ spinlock_t wq_lock;
struct workqueue_struct *wq;
+ unsigned long flags;
struct work_struct work;
+ struct delayed_work recover_work;
};
struct mlx5_cq_table {
@@ -626,10 +630,6 @@ struct mlx5_db {
};
enum {
- MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,
-};
-
-enum {
MLX5_COMP_EQ_SIZE = 1024,
};
@@ -638,13 +638,6 @@ enum {
MLX5_PTYS_EN = 1 << 2,
};
-struct mlx5_db_pgdir {
- struct list_head list;
- DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
- __be32 *db_page;
- dma_addr_t db_dma;
-};
-
typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
struct mlx5_cmd_work_ent {
@@ -789,6 +782,7 @@ void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3a191853faaa..a92c8d73aeaf 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1271,10 +1271,6 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
-long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- unsigned int foll_flags, struct page **pages,
- struct vm_area_struct **vmas, int *nonblocking);
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7f2ae99e5daf..0f088f3a2fed 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -440,33 +440,7 @@ struct zone {
seqlock_t span_seqlock;
#endif
- /*
- * wait_table -- the array holding the hash table
- * wait_table_hash_nr_entries -- the size of the hash table array
- * wait_table_bits -- wait_table_size == (1 << wait_table_bits)
- *
- * The purpose of all these is to keep track of the people
- * waiting for a page to become available and make them
- * runnable again when possible. The trouble is that this
- * consumes a lot of space, especially when so few things
- * wait on pages at a given time. So instead of using
- * per-page waitqueues, we use a waitqueue hash table.
- *
- * The bucket discipline is to sleep on the same queue when
- * colliding and wake all in that wait queue when removing.
- * When something wakes, it must check to be sure its page is
- * truly available, a la thundering herd. The cost of a
- * collision is great, but given the expected load of the
- * table, they should be so rare as to be outweighed by the
- * benefits from the saved space.
- *
- * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
- * primary users of these fields, and in mm/page_alloc.c
- * free_area_init_core() performs the initialization of them.
- */
- wait_queue_head_t *wait_table;
- unsigned long wait_table_hash_nr_entries;
- unsigned long wait_table_bits;
+ int initialized;
/* Write-intensive fields used from the page allocator */
ZONE_PADDING(_pad1_)
@@ -546,7 +520,7 @@ static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
static inline bool zone_is_initialized(struct zone *zone)
{
- return !!zone->wait_table;
+ return zone->initialized;
}
static inline bool zone_is_empty(struct zone *zone)
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index c5d3d5024fc8..d8905a229f34 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -1184,7 +1184,7 @@ int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
int page);
/* Reset and initialize a NAND device */
-int nand_reset(struct nand_chip *chip);
+int nand_reset(struct nand_chip *chip, int chipnr);
/* Free resources held by the NAND device */
void nand_cleanup(struct nand_chip *chip);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 136ae6bbe81e..91ee3643ccc8 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2169,7 +2169,10 @@ struct napi_gro_cb {
/* Used to determine if flush_id can be ignored */
u8 is_atomic:1;
- /* 5 bit hole */
+ /* Number of gro_receive callbacks this packet already went through */
+ u8 recursion_counter:4;
+
+ /* 1 bit hole */
/* used to support CHECKSUM_COMPLETE for tunneling protocols */
__wsum csum;
@@ -2180,6 +2183,40 @@ struct napi_gro_cb {
#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
+#define GRO_RECURSION_LIMIT 15
+static inline int gro_recursion_inc_test(struct sk_buff *skb)
+{
+ return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
+}
+
+typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
+static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
+ struct sk_buff **head,
+ struct sk_buff *skb)
+{
+ if (unlikely(gro_recursion_inc_test(skb))) {
+ NAPI_GRO_CB(skb)->flush |= 1;
+ return NULL;
+ }
+
+ return cb(head, skb);
+}
+
+typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
+ struct sk_buff *);
+static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
+ struct sock *sk,
+ struct sk_buff **head,
+ struct sk_buff *skb)
+{
+ if (unlikely(gro_recursion_inc_test(skb))) {
+ NAPI_GRO_CB(skb)->flush |= 1;
+ return NULL;
+ }
+
+ return cb(sk, head, skb);
+}
+
struct packet_type {
__be16 type; /* This is really htons(ether_type). */
struct net_device *dev; /* NULL is wildcarded here */
@@ -3877,7 +3914,7 @@ struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
ldev = netdev_all_lower_get_next(dev, &(iter)))
#define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \
- for (iter = (dev)->all_adj_list.lower.next, \
+ for (iter = &(dev)->all_adj_list.lower, \
ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \
ldev; \
ldev = netdev_all_lower_get_next_rcu(dev, &(iter)))
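To show where the new helpers slot in (the encapsulation checks are elided and the choice of inner callback is illustrative), a tunnel ->gro_receive implementation would route its nested call through call_gro_receive() so the recursion counter is honoured:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static struct sk_buff **demo_tunnel_gro_receive(struct sk_buff **head,
						struct sk_buff *skb)
{
	/* ... pull and validate the tunnel header here ... */

	/* Nested GRO: bumps recursion_counter and flushes the packet
	 * instead of recursing once GRO_RECURSION_LIMIT is reached. */
	return call_gro_receive(eth_gro_receive, head, skb);
}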
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 060d0ede88df..4741ecdb9817 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1257,6 +1257,7 @@ extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_disable_local(struct perf_event *event);
+extern void perf_event_disable_inatomic(struct perf_event *event);
extern void perf_event_task_tick(void);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index f9ae903bbb84..8978a60371f4 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -146,6 +146,7 @@ enum qed_led_mode {
#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
#define QED_COALESCE_MAX 0xFF
+#define QED_DEFAULT_RX_USECS 12
/* forward */
struct qed_dev;
diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_roce.h
index 99fbe6d55acb..f48d64b0e2fb 100644
--- a/include/linux/qed/qede_roce.h
+++ b/include/linux/qed/qede_roce.h
@@ -68,7 +68,7 @@ void qede_roce_unregister_driver(struct qedr_driver *drv);
bool qede_roce_supported(struct qede_dev *dev);
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+#if IS_ENABLED(CONFIG_QED_RDMA)
int qede_roce_dev_add(struct qede_dev *dev);
void qede_roce_dev_event_open(struct qede_dev *dev);
void qede_roce_dev_event_close(struct qede_dev *dev);
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 9adc7b21903d..f6673132431d 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -15,6 +15,7 @@
#include <linux/list.h>
#include <linux/rbtree.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/lockdep.h>
@@ -116,22 +117,22 @@ struct reg_sequence {
#define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \
({ \
ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
- int ret; \
+ int pollret; \
might_sleep_if(sleep_us); \
for (;;) { \
- ret = regmap_read((map), (addr), &(val)); \
- if (ret) \
+ pollret = regmap_read((map), (addr), &(val)); \
+ if (pollret) \
break; \
if (cond) \
break; \
if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
- ret = regmap_read((map), (addr), &(val)); \
+ pollret = regmap_read((map), (addr), &(val)); \
break; \
} \
if (sleep_us) \
usleep_range((sleep_us >> 2) + 1, sleep_us); \
} \
- ret ?: ((cond) ? 0 : -ETIMEDOUT); \
+ pollret ?: ((cond) ? 0 : -ETIMEDOUT); \
})
#ifdef CONFIG_REGMAP
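The rename is invisible to users of the macro. A hypothetical caller (register offset and status bit are made up) still polls as before, and can now name its own local variable "ret" without clashing:

#include <linux/bitops.h>
#include <linux/regmap.h>

#define DEMO_REG_STATUS		0x04
#define DEMO_STATUS_READY	BIT(0)

static int demo_wait_ready(struct regmap *map)
{
	unsigned int val;

	/* Sleep roughly 100us between reads, give up after 10ms. */
	return regmap_read_poll_timeout(map, DEMO_REG_STATUS, val,
					val & DEMO_STATUS_READY,
					100, 10000);
}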
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index 2e313cca08f0..d9706a6f5ae2 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -177,17 +177,14 @@ static inline struct dma_fence *
reservation_object_get_excl_rcu(struct reservation_object *obj)
{
struct dma_fence *fence;
- unsigned seq;
-retry:
- seq = read_seqcount_begin(&obj->seq);
+
+ if (!rcu_access_pointer(obj->fence_excl))
+ return NULL;
+
rcu_read_lock();
- fence = rcu_dereference(obj->fence_excl);
- if (read_seqcount_retry(&obj->seq, seq)) {
- rcu_read_unlock();
- goto retry;
- }
- fence = dma_fence_get(fence);
+ fence = dma_fence_get_rcu_safe(&obj->fence_excl);
rcu_read_unlock();
+
return fence;
}
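Consumers of the simplified helper look unchanged. Sketched below (buffer origin and timeout are illustrative) is the usual pattern of taking the exclusive fence and waiting on it:

#include <linux/dma-fence.h>
#include <linux/jiffies.h>
#include <linux/reservation.h>

static long demo_wait_exclusive(struct reservation_object *resv)
{
	struct dma_fence *excl;
	long ret = 1;

	excl = reservation_object_get_excl_rcu(resv);	/* may be NULL */
	if (excl) {
		/* Interruptible wait, up to one second. */
		ret = dma_fence_wait_timeout(excl, true, HZ);
		dma_fence_put(excl);
	}
	return ret;
}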
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 601258f6e621..32810f279f8e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -936,6 +936,7 @@ struct sk_buff_fclones {
/**
* skb_fclone_busy - check if fclone is busy
+ * @sk: socket
* @skb: buffer
*
* Returns true if skb is a fast clone, and its clone is not freed.
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index f2d072787947..8f998afc1384 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -174,6 +174,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex,
const struct in6_addr *addr);
int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
const struct in6_addr *addr);
+void __ipv6_sock_mc_close(struct sock *sk);
void ipv6_sock_mc_close(struct sock *sk);
bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
const struct in6_addr *src_addr);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index bd19faad0d96..14b51d739c3b 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4047,14 +4047,29 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr);
*/
/**
+ * ieee80211_data_to_8023_exthdr - convert an 802.11 data frame to 802.3
+ * @skb: the 802.11 data frame
+ * @ehdr: pointer to a &struct ethhdr that will get the header, instead
+ * of it being pushed into the SKB
+ * @addr: the device MAC address
+ * @iftype: the virtual interface type
+ * Return: 0 on success. Non-zero on error.
+ */
+int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
+ const u8 *addr, enum nl80211_iftype iftype);
+
+/**
* ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3
* @skb: the 802.11 data frame
* @addr: the device MAC address
* @iftype: the virtual interface type
* Return: 0 on success. Non-zero on error.
*/
-int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
- enum nl80211_iftype iftype);
+static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
+ enum nl80211_iftype iftype)
+{
+ return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype);
+}
/**
* ieee80211_data_from_8023 - convert an 802.3 frame to 802.11
@@ -4072,22 +4087,23 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
/**
* ieee80211_amsdu_to_8023s - decode an IEEE 802.11n A-MSDU frame
*
- * Decode an IEEE 802.11n A-MSDU frame and convert it to a list of
- * 802.3 frames. The @list will be empty if the decode fails. The
- * @skb is consumed after the function returns.
+ * Decode an IEEE 802.11 A-MSDU and convert it to a list of 802.3 frames.
+ * The @list will be empty if the decode fails. The @skb must be fully
+ * header-less before being passed in here; it is freed in this function.
*
- * @skb: The input IEEE 802.11n A-MSDU frame.
+ * @skb: The input A-MSDU frame without any headers.
* @list: The output list of 802.3 frames. It must be allocated and
 * initialized by the caller.
* @addr: The device MAC address.
* @iftype: The device interface type.
* @extra_headroom: The hardware extra headroom for SKBs in the @list.
- * @has_80211_header: Set it true if SKB is with IEEE 802.11 header.
+ * @check_da: DA to check in the inner ethernet header, or NULL
+ * @check_sa: SA to check in the inner ethernet header, or NULL
*/
void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
const u8 *addr, enum nl80211_iftype iftype,
const unsigned int extra_headroom,
- bool has_80211_header);
+ const u8 *check_da, const u8 *check_sa);
/**
* cfg80211_classify8021d - determine the 802.1p/1d tag for a data frame
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 515352c6280a..b0576cb2ab25 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -190,8 +190,8 @@ struct inet6_dev {
__u32 if_flags;
int dead;
+ u32 desync_factor;
u8 rndid[8];
- struct timer_list regen_timer;
struct list_head tempaddr_list;
struct in6_addr token;
diff --git a/include/net/ip.h b/include/net/ip.h
index bc43c0fcae12..5413883ac47f 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -38,7 +38,7 @@ struct sock;
struct inet_skb_parm {
int iif;
struct ip_options opt; /* Compiled IP options */
- unsigned char flags;
+ u16 flags;
#define IPSKB_FORWARDED BIT(0)
#define IPSKB_XFRM_TUNNEL_SIZE BIT(1)
@@ -48,10 +48,16 @@ struct inet_skb_parm {
#define IPSKB_DOREDIRECT BIT(5)
#define IPSKB_FRAG_PMTU BIT(6)
#define IPSKB_FRAG_SEGS BIT(7)
+#define IPSKB_L3SLAVE BIT(8)
u16 frag_max_size;
};
+static inline bool ipv4_l3mdev_skb(u16 flags)
+{
+ return !!(flags & IPSKB_L3SLAVE);
+}
+
static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
{
return ip_hdr(skb)->ihl * 4;
@@ -572,7 +578,7 @@ int ip_options_rcv_srr(struct sk_buff *skb);
*/
void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
-void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int offset);
+void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
struct ipcm_cookie *ipc, bool allow_ipv6);
int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
@@ -594,7 +600,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
- ip_cmsg_recv_offset(msg, skb, 0);
+ ip_cmsg_recv_offset(msg, skb, 0, 0);
}
bool icmp_global_allow(void);
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index fb961a576abe..a74e2aa40ef4 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -230,6 +230,8 @@ struct fib6_table {
rwlock_t tb6_lock;
struct fib6_node tb6_root;
struct inet_peer_base tb6_peers;
+ unsigned int flags;
+#define RT6_TABLE_HAS_DFLT_ROUTER BIT(0)
};
#define RT6_TABLE_UNSPEC RT_TABLE_UNSPEC
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index e0cd318d5103..f83e78d071a3 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -32,6 +32,7 @@ struct route_info {
#define RT6_LOOKUP_F_SRCPREF_TMP 0x00000008
#define RT6_LOOKUP_F_SRCPREF_PUBLIC 0x00000010
#define RT6_LOOKUP_F_SRCPREF_COA 0x00000020
+#define RT6_LOOKUP_F_IGNORE_LINKSTATE 0x00000040
/* We do not (yet ?) support IPv6 jumbograms (RFC 2675)
* Unlike IPv4, hdr->seg_len doesn't include the IPv6 header
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index a810dfcb83c2..e2dba93e374f 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -811,14 +811,18 @@ enum mac80211_rate_control_flags {
* in the control information, and it will be filled by the rate
* control algorithm according to what should be sent. For example,
* if this array contains, in the format { <idx>, <count> } the
- * information
+ * information::
+ *
* { 3, 2 }, { 2, 2 }, { 1, 4 }, { -1, 0 }, { -1, 0 }
+ *
* then this means that the frame should be transmitted
* up to twice at rate 3, up to twice at rate 2, and up to four
* times at rate 1 if it doesn't get acknowledged. Say it gets
* acknowledged by the peer after the fifth attempt, the status
- * information should then contain
+ * information should then contain::
+ *
* { 3, 2 }, { 2, 2 }, { 1, 1 }, { -1, 0 } ...
+ *
* since it was transmitted twice at rate 3, twice at rate 2
* and once at rate 1 after which we received an acknowledgement.
*/
@@ -1168,8 +1172,8 @@ enum mac80211_rx_vht_flags {
* @rate_idx: index of data rate into band's supported rates or MCS index if
* HT or VHT is used (%RX_FLAG_HT/%RX_FLAG_VHT)
* @vht_nss: number of streams (VHT only)
- * @flag: %RX_FLAG_*
- * @vht_flag: %RX_VHT_FLAG_*
+ * @flag: %RX_FLAG_\*
+ * @vht_flag: %RX_VHT_FLAG_\*
* @rx_flags: internal RX flags for mac80211
* @ampdu_reference: A-MPDU reference number, must be a different value for
* each A-MPDU but the same for each subframe within one A-MPDU
@@ -1432,7 +1436,7 @@ enum ieee80211_vif_flags {
* @probe_req_reg: probe requests should be reported to mac80211 for this
* interface.
* @drv_priv: data area for driver use, will always be aligned to
- * sizeof(void *).
+ * sizeof(void \*).
* @txq: the multicast data TX queue (if driver uses the TXQ abstraction)
*/
struct ieee80211_vif {
@@ -1743,7 +1747,7 @@ struct ieee80211_sta_rates {
* @wme: indicates whether the STA supports QoS/WME (if local devices does,
* otherwise always false)
* @drv_priv: data area for driver use, will always be aligned to
- * sizeof(void *), size is determined in hw information.
+ * sizeof(void \*), size is determined in hw information.
* @uapsd_queues: bitmap of queues configured for uapsd. Only valid
* if wme is supported.
* @max_sp: max Service Period. Only valid if wme is supported.
@@ -2146,12 +2150,12 @@ enum ieee80211_hw_flags {
*
 * @radiotap_mcs_details: lists which MCS information the HW can
 * report; by default it is set to _MCS, _GI and _BW but doesn't
- * include _FMT. Use %IEEE80211_RADIOTAP_MCS_HAVE_* values, only
+ * include _FMT. Use %IEEE80211_RADIOTAP_MCS_HAVE_\* values, only
* adding _BW is supported today.
*
* @radiotap_vht_details: lists which VHT MCS information the HW reports,
* the default is _GI | _BANDWIDTH.
- * Use the %IEEE80211_RADIOTAP_VHT_KNOWN_* values.
+ * Use the %IEEE80211_RADIOTAP_VHT_KNOWN_\* values.
*
* @radiotap_timestamp: Information for the radiotap timestamp field; if the
* 'units_pos' member is set to a non-negative value it must be set to
@@ -2486,6 +2490,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* in the software stack cares about, we will, in the future, have mac80211
* tell the driver which information elements are interesting in the sense
* that we want to see changes in them. This will include
+ *
* - a list of information element IDs
* - a list of OUIs for the vendor information element
*
diff --git a/include/net/sock.h b/include/net/sock.h
index ebf75db08e06..73c6b008f1b7 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -252,6 +252,7 @@ struct sock_common {
* @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
* @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
* @sk_sndbuf: size of send buffer in bytes
+ * @sk_padding: unused element for alignment
* @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
* @sk_no_check_rx: allow zero checksum in RX packets
* @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
@@ -302,7 +303,8 @@ struct sock_common {
* @sk_backlog_rcv: callback to process the backlog
* @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
* @sk_reuseport_cb: reuseport group container
- */
+ * @sk_rcu: used during RCU grace period
+ */
struct sock {
/*
* Now struct inet_timewait_sock also uses sock_common, so please just
diff --git a/include/net/tcp.h b/include/net/tcp.h
index f83b7f220a65..5b82d4d94834 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -794,12 +794,23 @@ struct tcp_skb_cb {
*/
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
- bool l3_slave = skb_l3mdev_slave(TCP_SKB_CB(skb)->header.h6.flags);
+ bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
}
#endif
+/* TCP_SKB_CB reference means this can not be used from early demux */
+static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
+ ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
+ return true;
+#endif
+ return false;
+}
+
/* Due to TSO, an SKB can be composed of multiple actual
* packets. To keep these tracked properly, we use this.
*/
diff --git a/include/net/udp.h b/include/net/udp.h
index ea53a87d880f..4948790d393d 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -258,6 +258,7 @@ void udp_flush_pending_frames(struct sock *sk);
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
int udp_rcv(struct sk_buff *skb);
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+int __udp_disconnect(struct sock *sk, int flags);
int udp_disconnect(struct sock *sk, int flags);
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait);
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 0255613a54a4..308adc4154f4 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -225,9 +225,9 @@ struct vxlan_config {
struct vxlan_dev {
struct hlist_node hlist; /* vni hash table */
struct list_head next; /* vxlan's per namespace list */
- struct vxlan_sock *vn4_sock; /* listening socket for IPv4 */
+ struct vxlan_sock __rcu *vn4_sock; /* listening socket for IPv4 */
#if IS_ENABLED(CONFIG_IPV6)
- struct vxlan_sock *vn6_sock; /* listening socket for IPv6 */
+ struct vxlan_sock __rcu *vn6_sock; /* listening socket for IPv6 */
#endif
struct net_device *dev;
struct net *net; /* netns for packet i/o */
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 4684f378f046..2191a9e4f3db 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -50,6 +50,7 @@ extern "C" {
#define DRM_AMDGPU_WAIT_CS 0x09
#define DRM_AMDGPU_GEM_OP 0x10
#define DRM_AMDGPU_GEM_USERPTR 0x11
+#define DRM_AMDGPU_WAIT_FENCES 0x12
#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@@ -63,6 +64,7 @@ extern "C" {
#define DRM_IOCTL_AMDGPU_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs)
#define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op)
#define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
+#define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
@@ -307,6 +309,32 @@ union drm_amdgpu_wait_cs {
struct drm_amdgpu_wait_cs_out out;
};
+struct drm_amdgpu_fence {
+ __u32 ctx_id;
+ __u32 ip_type;
+ __u32 ip_instance;
+ __u32 ring;
+ __u64 seq_no;
+};
+
+struct drm_amdgpu_wait_fences_in {
+ /** This points to uint64_t * which points to fences */
+ __u64 fences;
+ __u32 fence_count;
+ __u32 wait_all;
+ __u64 timeout_ns;
+};
+
+struct drm_amdgpu_wait_fences_out {
+ __u32 status;
+ __u32 first_signaled;
+};
+
+union drm_amdgpu_wait_fences {
+ struct drm_amdgpu_wait_fences_in in;
+ struct drm_amdgpu_wait_fences_out out;
+};
+
#define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0
#define AMDGPU_GEM_OP_SET_PLACEMENT 1
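From the userspace side, the new wait-fences ioctl above would be driven roughly as below. libdrm normally wraps this; the header path, fence values and error handling are placeholders for illustration.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>	/* install path may differ per distribution */

static int demo_wait_fences(int fd, struct drm_amdgpu_fence *fences,
			    uint32_t count)
{
	union drm_amdgpu_wait_fences args;

	memset(&args, 0, sizeof(args));
	args.in.fences = (uintptr_t)fences;	/* user pointer to the array */
	args.in.fence_count = count;
	args.in.wait_all = 1;			/* 0 = return on first signal */
	args.in.timeout_ns = 1000000000ull;	/* 1 second */

	if (ioctl(fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &args) < 0)
		return -1;

	/* args.out.first_signaled is only meaningful for wait_all == 0 */
	return (int)args.out.status;
}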
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 084b50a02dc5..ebf622f5b238 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -47,7 +47,15 @@ extern "C" {
#define DRM_MODE_TYPE_DRIVER (1<<6)
/* Video mode flags */
-/* bit compatible with the xorg definitions. */
+/* bit compatible with the xrandr RR_ definitions (bits 0-13)
+ *
+ * ABI warning: Existing userspace really expects
+ * the mode flags to match the xrandr definitions. Any
+ * changes that don't match the xrandr definitions will
+ * likely need a new client cap or some other mechanism
+ * to avoid breaking existing userspace. This includes
+ * allocating new flags in the previously unused bits!
+ */
#define DRM_MODE_FLAG_PHSYNC (1<<0)
#define DRM_MODE_FLAG_NHSYNC (1<<1)
#define DRM_MODE_FLAG_PVSYNC (1<<2)
@@ -81,8 +89,6 @@ extern "C" {
#define DRM_MODE_PICTURE_ASPECT_NONE 0
#define DRM_MODE_PICTURE_ASPECT_4_3 1
#define DRM_MODE_PICTURE_ASPECT_16_9 2
-#define DRM_MODE_PICTURE_ASPECT_64_27 3
-#define DRM_MODE_PICTURE_ASPECT_256_135 4
/* Aspect ratio flag bitmask (4 bits 22:19) */
#define DRM_MODE_FLAG_PIC_AR_MASK (0x0F<<19)
@@ -92,10 +98,6 @@ extern "C" {
(DRM_MODE_PICTURE_ASPECT_4_3<<19)
#define DRM_MODE_FLAG_PIC_AR_16_9 \
(DRM_MODE_PICTURE_ASPECT_16_9<<19)
-#define DRM_MODE_FLAG_PIC_AR_64_27 \
- (DRM_MODE_PICTURE_ASPECT_64_27<<19)
-#define DRM_MODE_FLAG_PIC_AR_256_135 \
- (DRM_MODE_PICTURE_ASPECT_256_135<<19)
/* DPMS flags */
/* bit compatible with the xorg definitions. */
diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h
index ad7edc3edf7c..f07a09016726 100644
--- a/include/uapi/drm/vc4_drm.h
+++ b/include/uapi/drm/vc4_drm.h
@@ -286,6 +286,8 @@ struct drm_vc4_get_hang_state {
#define DRM_VC4_PARAM_V3D_IDENT1 1
#define DRM_VC4_PARAM_V3D_IDENT2 2
#define DRM_VC4_PARAM_SUPPORTS_BRANCHES 3
+#define DRM_VC4_PARAM_SUPPORTS_ETC1 4
+#define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5
struct drm_vc4_get_param {
__u32 param;
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 099a4200732c..8e547231c1b7 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -119,8 +119,7 @@ struct ethtool_cmd {
static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep,
__u32 speed)
{
-
- ep->speed = (__u16)speed;
+ ep->speed = (__u16)(speed & 0xFFFF);
ep->speed_hi = (__u16)(speed >> 16);
}
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 262f0379d83a..5a78be518101 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -350,7 +350,7 @@ struct rtnexthop {
#define RTNH_F_OFFLOAD 8 /* offloaded route */
#define RTNH_F_LINKDOWN 16 /* carrier-down on nexthop */
-#define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN)
+#define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN | RTNH_F_OFFLOAD)
/* Macros to handle nexthops */
diff --git a/include/video/display_timing.h b/include/video/display_timing.h
index 28d9d0d566ca..3d289e990aca 100644
--- a/include/video/display_timing.h
+++ b/include/video/display_timing.h
@@ -28,6 +28,10 @@ enum display_flags {
DISPLAY_FLAGS_INTERLACED = BIT(8),
DISPLAY_FLAGS_DOUBLESCAN = BIT(9),
DISPLAY_FLAGS_DOUBLECLK = BIT(10),
+ /* drive sync on pos. edge */
+ DISPLAY_FLAGS_SYNC_POSEDGE = BIT(11),
+ /* drive sync on neg. edge */
+ DISPLAY_FLAGS_SYNC_NEGEDGE = BIT(12),
};
/*
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index 173073eb6aaf..53cd07ccaa4c 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -247,8 +247,6 @@ void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
unsigned int uv_stride,
unsigned int u_offset,
unsigned int v_offset);
-void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
- u32 pixel_format, int stride, int height);
int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc);
int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image);
void ipu_cpmem_dump(struct ipuv3_channel *ch);
@@ -320,6 +318,7 @@ int ipu_csi_init_interface(struct ipu_csi *csi,
bool ipu_csi_is_interlaced(struct ipu_csi *csi);
void ipu_csi_get_window(struct ipu_csi *csi, struct v4l2_rect *w);
void ipu_csi_set_window(struct ipu_csi *csi, struct v4l2_rect *w);
+void ipu_csi_set_downsize(struct ipu_csi *csi, bool horiz, bool vert);
void ipu_csi_set_test_generator(struct ipu_csi *csi, bool active,
u32 r_value, u32 g_value, u32 b_value,
u32 pix_clk);
diff --git a/include/video/of_display_timing.h b/include/video/of_display_timing.h
index ea755b5616d8..956455fc9f9a 100644
--- a/include/video/of_display_timing.h
+++ b/include/video/of_display_timing.h
@@ -16,21 +16,22 @@ struct display_timings;
#define OF_USE_NATIVE_MODE -1
#ifdef CONFIG_OF
-int of_get_display_timing(struct device_node *np, const char *name,
+int of_get_display_timing(const struct device_node *np, const char *name,
struct display_timing *dt);
-struct display_timings *of_get_display_timings(struct device_node *np);
-int of_display_timings_exist(struct device_node *np);
+struct display_timings *of_get_display_timings(const struct device_node *np);
+int of_display_timings_exist(const struct device_node *np);
#else
-static inline int of_get_display_timing(struct device_node *np, const char *name,
- struct display_timing *dt)
+static inline int of_get_display_timing(const struct device_node *np,
+ const char *name, struct display_timing *dt)
{
return -ENOSYS;
}
-static inline struct display_timings *of_get_display_timings(struct device_node *np)
+static inline struct display_timings *
+of_get_display_timings(const struct device_node *np)
{
return NULL;
}
-static inline int of_display_timings_exist(struct device_node *np)
+static inline int of_display_timings_exist(const struct device_node *np)
{
return -ENOSYS;
}
diff --git a/ipc/msgutil.c b/ipc/msgutil.c
index a521999de4f1..bf74eaa5c39f 100644
--- a/ipc/msgutil.c
+++ b/ipc/msgutil.c
@@ -53,7 +53,7 @@ static struct msg_msg *alloc_msg(size_t len)
size_t alen;
alen = min(len, DATALEN_MSG);
- msg = kmalloc(sizeof(*msg) + alen, GFP_KERNEL);
+ msg = kmalloc(sizeof(*msg) + alen, GFP_KERNEL_ACCOUNT);
if (msg == NULL)
return NULL;
@@ -65,7 +65,7 @@ static struct msg_msg *alloc_msg(size_t len)
while (len > 0) {
struct msg_msgseg *seg;
alen = min(len, DATALEN_SEG);
- seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL);
+ seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL_ACCOUNT);
if (seg == NULL)
goto out_err;
*pseg = seg;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index c6e47e97b33f..0e292132efac 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1960,6 +1960,12 @@ void perf_event_disable(struct perf_event *event)
}
EXPORT_SYMBOL_GPL(perf_event_disable);
+void perf_event_disable_inatomic(struct perf_event *event)
+{
+ event->pending_disable = 1;
+ irq_work_queue(&event->pending);
+}
+
static void perf_set_shadow_time(struct perf_event *event,
struct perf_event_context *ctx,
u64 tstamp)
@@ -7075,8 +7081,8 @@ static int __perf_event_overflow(struct perf_event *event,
if (events && atomic_dec_and_test(&event->event_limit)) {
ret = 1;
event->pending_kill = POLL_HUP;
- event->pending_disable = 1;
- irq_work_queue(&event->pending);
+
+ perf_event_disable_inatomic(event);
}
READ_ONCE(event->overflow_handler)(event, data, regs);
@@ -8855,7 +8861,10 @@ EXPORT_SYMBOL_GPL(perf_pmu_register);
void perf_pmu_unregister(struct pmu *pmu)
{
+ int remove_device;
+
mutex_lock(&pmus_lock);
+ remove_device = pmu_bus_running;
list_del_rcu(&pmu->entry);
mutex_unlock(&pmus_lock);
@@ -8869,10 +8878,12 @@ void perf_pmu_unregister(struct pmu *pmu)
free_percpu(pmu->pmu_disable_count);
if (pmu->type >= PERF_TYPE_MAX)
idr_remove(&pmu_idr, pmu->type);
- if (pmu->nr_addr_filters)
- device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
- device_del(pmu->dev);
- put_device(pmu->dev);
+ if (remove_device) {
+ if (pmu->nr_addr_filters)
+ device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
+ device_del(pmu->dev);
+ put_device(pmu->dev);
+ }
free_pmu_context(pmu);
}
EXPORT_SYMBOL_GPL(perf_pmu_unregister);
diff --git a/kernel/fork.c b/kernel/fork.c
index 623259fc794d..997ac1d584f7 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -315,6 +315,9 @@ static void account_kernel_stack(struct task_struct *tsk, int account)
static void release_task_stack(struct task_struct *tsk)
{
+ if (WARN_ON(tsk->state != TASK_DEAD))
+ return; /* Better to leak the stack than to free prematurely */
+
account_kernel_stack(tsk, -1);
arch_release_thread_stack(tsk->stack);
free_thread_stack(tsk);
@@ -1862,6 +1865,7 @@ bad_fork_cleanup_count:
atomic_dec(&p->cred->user->processes);
exit_creds(p);
bad_fork_free:
+ p->state = TASK_DEAD;
put_task_stack(p);
free_task(p);
fork_out:
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 8d44b3fea9d0..30e6d05aa5a9 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -53,8 +53,15 @@ void notrace __sanitizer_cov_trace_pc(void)
/*
 * We are interested in code coverage as a function of syscall inputs,
* so we ignore code executed in interrupts.
+ * The checks for whether we are in an interrupt are open-coded, because
+ * 1. We can't use in_interrupt() here, since it also returns true
+ * when we are inside local_bh_disable() section.
+ * 2. We don't want to use (in_irq() | in_serving_softirq() | in_nmi()),
+ * since that leads to slower generated code (three separate tests,
+ * one for each of the flags).
*/
- if (!t || in_interrupt())
+ if (!t || (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET
+ | NMI_MASK)))
return;
mode = READ_ONCE(t->kcov_mode);
if (mode == KCOV_MODE_TRACE) {
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 1e7f5da648d9..6ccb08f57fcb 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -498,9 +498,9 @@ static int enter_state(suspend_state_t state)
#ifndef CONFIG_SUSPEND_SKIP_SYNC
trace_suspend_resume(TPS("sync_filesystems"), 0, true);
- printk(KERN_INFO "PM: Syncing filesystems ... ");
+ pr_info("PM: Syncing filesystems ... ");
sys_sync();
- printk("done.\n");
+ pr_cont("done.\n");
trace_suspend_resume(TPS("sync_filesystems"), 0, false);
#endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 94732d1ab00a..154fd689fe02 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5192,21 +5192,14 @@ void sched_show_task(struct task_struct *p)
int ppid;
unsigned long state = p->state;
+ if (!try_get_task_stack(p))
+ return;
if (state)
state = __ffs(state) + 1;
printk(KERN_INFO "%-15.15s %c", p->comm,
state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
-#if BITS_PER_LONG == 32
- if (state == TASK_RUNNING)
- printk(KERN_CONT " running ");
- else
- printk(KERN_CONT " %08lx ", thread_saved_pc(p));
-#else
if (state == TASK_RUNNING)
printk(KERN_CONT " running task ");
- else
- printk(KERN_CONT " %016lx ", thread_saved_pc(p));
-#endif
#ifdef CONFIG_DEBUG_STACK_USAGE
free = stack_not_used(p);
#endif
@@ -5221,6 +5214,7 @@ void sched_show_task(struct task_struct *p)
print_worker_info(KERN_INFO, p);
show_stack(p, NULL);
+ put_task_stack(p);
}
void show_state_filter(unsigned long state_filter)
@@ -7515,11 +7509,27 @@ static struct kmem_cache *task_group_cache __read_mostly;
DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
DECLARE_PER_CPU(cpumask_var_t, select_idle_mask);
+#define WAIT_TABLE_BITS 8
+#define WAIT_TABLE_SIZE (1 << WAIT_TABLE_BITS)
+static wait_queue_head_t bit_wait_table[WAIT_TABLE_SIZE] __cacheline_aligned;
+
+wait_queue_head_t *bit_waitqueue(void *word, int bit)
+{
+ const int shift = BITS_PER_LONG == 32 ? 5 : 6;
+ unsigned long val = (unsigned long)word << shift | bit;
+
+ return bit_wait_table + hash_long(val, WAIT_TABLE_BITS);
+}
+EXPORT_SYMBOL(bit_waitqueue);
+
void __init sched_init(void)
{
int i, j;
unsigned long alloc_size = 0, ptr;
+ for (i = 0; i < WAIT_TABLE_SIZE; i++)
+ init_waitqueue_head(bit_wait_table + i);
+
#ifdef CONFIG_FAIR_GROUP_SCHED
alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
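Users of the bit-wait API are unaffected by the relocation; the usual pattern (flag word and bit number invented for the example) still funnels through bit_waitqueue() to pick a hashed queue:

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/wait.h>

static unsigned long demo_flags;
#define DEMO_BUSY_BIT	0

static int demo_wait_until_idle(void)
{
	/* Sleeps on bit_wait_table[hash(&demo_flags, DEMO_BUSY_BIT)]. */
	return wait_on_bit(&demo_flags, DEMO_BUSY_BIT, TASK_UNINTERRUPTIBLE);
}

static void demo_mark_idle(void)
{
	clear_bit(DEMO_BUSY_BIT, &demo_flags);
	smp_mb__after_atomic();		/* order the clear vs. the waitqueue check */
	wake_up_bit(&demo_flags, DEMO_BUSY_BIT);
}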
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d941c97dfbc3..c242944f5cbd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8839,7 +8839,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
struct sched_entity *se;
struct cfs_rq *cfs_rq;
- struct rq *rq;
int i;
tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
@@ -8854,8 +8853,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
init_cfs_bandwidth(tg_cfs_bandwidth(tg));
for_each_possible_cpu(i) {
- rq = cpu_rq(i);
-
cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
GFP_KERNEL, cpu_to_node(i));
if (!cfs_rq)
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 4f7053579fe3..9453efe9b25a 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -480,16 +480,6 @@ void wake_up_bit(void *word, int bit)
}
EXPORT_SYMBOL(wake_up_bit);
-wait_queue_head_t *bit_waitqueue(void *word, int bit)
-{
- const int shift = BITS_PER_LONG == 32 ? 5 : 6;
- const struct zone *zone = page_zone(virt_to_page(word));
- unsigned long val = (unsigned long)word << shift | bit;
-
- return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
-}
-EXPORT_SYMBOL(bit_waitqueue);
-
/*
* Manipulate the atomic_t address to produce a better bit waitqueue table hash
* index (we're keying off bit -1, but that would produce a horrible hash
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 1bf81ef91375..744fa611cae0 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -58,7 +58,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
const char * const softirq_to_name[NR_SOFTIRQS] = {
- "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
+ "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
"TASKLET", "SCHED", "HRTIMER", "RCU"
};
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 2d47980a1bc4..c611c47de884 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -878,7 +878,7 @@ static inline struct timer_base *get_timer_base(u32 tflags)
#ifdef CONFIG_NO_HZ_COMMON
static inline struct timer_base *
-__get_target_base(struct timer_base *base, unsigned tflags)
+get_target_base(struct timer_base *base, unsigned tflags)
{
#ifdef CONFIG_SMP
if ((tflags & TIMER_PINNED) || !base->migration_enabled)
@@ -891,25 +891,27 @@ __get_target_base(struct timer_base *base, unsigned tflags)
static inline void forward_timer_base(struct timer_base *base)
{
+ unsigned long jnow = READ_ONCE(jiffies);
+
/*
* We only forward the base when it's idle and we have a delta between
* base clock and jiffies.
*/
- if (!base->is_idle || (long) (jiffies - base->clk) < 2)
+ if (!base->is_idle || (long) (jnow - base->clk) < 2)
return;
/*
* If the next expiry value is > jiffies, then we fast forward to
* jiffies otherwise we forward to the next expiry value.
*/
- if (time_after(base->next_expiry, jiffies))
- base->clk = jiffies;
+ if (time_after(base->next_expiry, jnow))
+ base->clk = jnow;
else
base->clk = base->next_expiry;
}
#else
static inline struct timer_base *
-__get_target_base(struct timer_base *base, unsigned tflags)
+get_target_base(struct timer_base *base, unsigned tflags)
{
return get_timer_this_cpu_base(tflags);
}
@@ -917,14 +919,6 @@ __get_target_base(struct timer_base *base, unsigned tflags)
static inline void forward_timer_base(struct timer_base *base) { }
#endif
-static inline struct timer_base *
-get_target_base(struct timer_base *base, unsigned tflags)
-{
- struct timer_base *target = __get_target_base(base, tflags);
-
- forward_timer_base(target);
- return target;
-}
/*
* We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
@@ -943,7 +937,14 @@ static struct timer_base *lock_timer_base(struct timer_list *timer,
{
for (;;) {
struct timer_base *base;
- u32 tf = timer->flags;
+ u32 tf;
+
+ /*
+ * We need to use READ_ONCE() here, otherwise the compiler
+ * might re-read @tf between the check for TIMER_MIGRATING
+ * and spin_lock().
+ */
+ tf = READ_ONCE(timer->flags);
if (!(tf & TIMER_MIGRATING)) {
base = get_timer_base(tf);
@@ -964,6 +965,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
unsigned long clk = 0, flags;
int ret = 0;
+ BUG_ON(!timer->function);
+
/*
* This is a common optimization triggered by the networking code - if
* the timer is re-modified to have the same timeout or ends up in the
@@ -972,13 +975,16 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
if (timer_pending(timer)) {
if (timer->expires == expires)
return 1;
+
/*
- * Take the current timer_jiffies of base, but without holding
- * the lock!
+ * We lock timer base and calculate the bucket index right
+ * here. If the timer ends up in the same bucket, then we
+ * just update the expiry time and avoid the whole
+ * dequeue/enqueue dance.
*/
- base = get_timer_base(timer->flags);
- clk = base->clk;
+ base = lock_timer_base(timer, &flags);
+ clk = base->clk;
idx = calc_wheel_index(expires, clk);
/*
@@ -988,14 +994,14 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
*/
if (idx == timer_get_idx(timer)) {
timer->expires = expires;
- return 1;
+ ret = 1;
+ goto out_unlock;
}
+ } else {
+ base = lock_timer_base(timer, &flags);
}
timer_stats_timer_set_start_info(timer);
- BUG_ON(!timer->function);
-
- base = lock_timer_base(timer, &flags);
ret = detach_if_pending(timer, base, false);
if (!ret && pending_only)
@@ -1025,12 +1031,16 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
}
}
+ /* Try to forward a stale timer base clock */
+ forward_timer_base(base);
+
timer->expires = expires;
/*
* If 'idx' was calculated above and the base time did not advance
- * between calculating 'idx' and taking the lock, only enqueue_timer()
- * and trigger_dyntick_cpu() is required. Otherwise we need to
- * (re)calculate the wheel index via internal_add_timer().
+ * between calculating 'idx' and possibly switching the base, only
+ * enqueue_timer() and trigger_dyntick_cpu() is required. Otherwise
+ * we need to (re)calculate the wheel index via
+ * internal_add_timer().
*/
if (idx != UINT_MAX && clk == base->clk) {
enqueue_timer(base, timer, idx);
@@ -1510,12 +1520,16 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
base->next_expiry = nextevt;
/*
- * We have a fresh next event. Check whether we can forward the base:
+ * We have a fresh next event. Check whether we can forward the
+ * base. We can only do that when @basej is past base->clk
+ * otherwise we might rewind base->clk.
*/
- if (time_after(nextevt, jiffies))
- base->clk = jiffies;
- else if (time_after(nextevt, base->clk))
- base->clk = nextevt;
+ if (time_after(basej, base->clk)) {
+ if (time_after(nextevt, basej))
+ base->clk = basej;
+ else if (time_after(nextevt, base->clk))
+ base->clk = nextevt;
+ }
if (time_before_eq(nextevt, basej)) {
expires = basem;
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 33bc56cf60d7..b01e547d4d04 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -198,6 +198,7 @@ config FRAME_WARN
int "Warn for stack frames larger than (needs gcc 4.4)"
range 0 8192
default 0 if KASAN
+ default 2048 if GCC_PLUGIN_LATENT_ENTROPY
default 1024 if !64BIT
default 2048 if 64BIT
help
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 0a1139644d32..144fe6b1a03e 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -292,7 +292,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
struct gen_pool_chunk *chunk;
unsigned long addr = 0;
int order = pool->min_alloc_order;
- int nbits, start_bit = 0, end_bit, remain;
+ int nbits, start_bit, end_bit, remain;
#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
BUG_ON(in_nmi());
@@ -307,6 +307,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
if (size > atomic_read(&chunk->avail))
continue;
+ start_bit = 0;
end_bit = chunk_size(chunk) >> order;
retry:
start_bit = algo(chunk->bits, end_bit, start_bit,
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 60f77f1d470a..4d830e299989 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -50,7 +50,7 @@
STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
-#define STACK_ALLOC_SLABS_CAP 1024
+#define STACK_ALLOC_SLABS_CAP 8192
#define STACK_ALLOC_MAX_SLABS \
(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
(1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 94346b4d8984..0362da0b66c3 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -4831,7 +4831,7 @@ static struct bpf_test tests[] = {
{ },
INTERNAL,
{ 0x34 },
- { { 1, 0xbef } },
+ { { ETH_HLEN, 0xbef } },
.fill_helper = bpf_fill_ld_abs_vlan_push_pop,
},
/*
diff --git a/mm/Kconfig b/mm/Kconfig
index be0ee11fa0d9..86e3e0e74d20 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -187,7 +187,7 @@ config MEMORY_HOTPLUG
bool "Allow for memory hot-add"
depends on SPARSEMEM || X86_64_ACPI_NUMA
depends on ARCH_ENABLE_MEMORY_HOTPLUG
- depends on !KASAN
+ depends on COMPILE_TEST || !KASAN
config MEMORY_HOTPLUG_SPARSE
def_bool y
diff --git a/mm/filemap.c b/mm/filemap.c
index 849f459ad078..c7fe2f16503f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -790,9 +790,7 @@ EXPORT_SYMBOL(__page_cache_alloc);
*/
wait_queue_head_t *page_waitqueue(struct page *page)
{
- const struct zone *zone = page_zone(page);
-
- return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
+ return bit_waitqueue(page, 0);
}
EXPORT_SYMBOL(page_waitqueue);
diff --git a/mm/gup.c b/mm/gup.c
index 7aa113c2d373..ec4f82704b6f 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -526,7 +526,7 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
* instead of __get_user_pages. __get_user_pages should be used only if
* you need some special @gup_flags.
*/
-long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas, int *nonblocking)
@@ -631,7 +631,6 @@ next_page:
} while (nr_pages);
return i;
}
-EXPORT_SYMBOL(__get_user_pages);
bool vma_permits_fault(struct vm_area_struct *vma, unsigned int fault_flags)
{
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index a5e453cf05c4..e5355a5b423f 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1453,8 +1453,11 @@ static void kmemleak_scan(void)
read_lock(&tasklist_lock);
do_each_thread(g, p) {
- scan_block(task_stack_page(p), task_stack_page(p) +
- THREAD_SIZE, NULL);
+ void *stack = try_get_task_stack(p);
+ if (stack) {
+ scan_block(stack, stack + THREAD_SIZE, NULL);
+ put_task_stack(p);
+ }
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
}
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 1d05cb9d363d..234676e31edd 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -554,6 +554,8 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
err = memcg_init_list_lru(lru, memcg_aware);
if (err) {
kfree(lru->node);
+ /* Do this so a list_lru_destroy() doesn't crash: */
+ lru->node = NULL;
goto out;
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ae052b5e3315..0f870ba43942 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1917,6 +1917,15 @@ retry:
current->flags & PF_EXITING))
goto force;
+ /*
+ * Prevent unbounded recursion when reclaim operations need to
+ * allocate memory. This might exceed the limits temporarily,
+ * but we prefer facilitating memory reclaim and getting back
+ * under the limit over triggering OOM kills in these cases.
+ */
+ if (unlikely(current->flags & PF_MEMALLOC))
+ goto force;
+
if (unlikely(task_in_memcg_oom(current)))
goto nomem;
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 962927309b6e..cad4b9125695 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -268,7 +268,6 @@ void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
unsigned long i, pfn, end_pfn, nr_pages;
int node = pgdat->node_id;
struct page *page;
- struct zone *zone;
nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
page = virt_to_page(pgdat);
@@ -276,19 +275,6 @@ void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
for (i = 0; i < nr_pages; i++, page++)
get_page_bootmem(node, page, NODE_INFO);
- zone = &pgdat->node_zones[0];
- for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
- if (zone_is_initialized(zone)) {
- nr_pages = zone->wait_table_hash_nr_entries
- * sizeof(wait_queue_head_t);
- nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
- page = virt_to_page(zone->wait_table);
-
- for (i = 0; i < nr_pages; i++, page++)
- get_page_bootmem(node, page, NODE_INFO);
- }
- }
-
pfn = pgdat->node_start_pfn;
end_pfn = pgdat_end_pfn(pgdat);
@@ -2131,7 +2117,6 @@ void try_offline_node(int nid)
unsigned long start_pfn = pgdat->node_start_pfn;
unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
unsigned long pfn;
- int i;
for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
unsigned long section_nr = pfn_to_section_nr(pfn);
@@ -2158,20 +2143,6 @@ void try_offline_node(int nid)
*/
node_set_offline(nid);
unregister_one_node(nid);
-
- /* free waittable in each zone */
- for (i = 0; i < MAX_NR_ZONES; i++) {
- struct zone *zone = pgdat->node_zones + i;
-
- /*
- * wait_table may be allocated from boot memory,
- * here only free if it's allocated by vmalloc.
- */
- if (is_vmalloc_addr(zone->wait_table)) {
- vfree(zone->wait_table);
- zone->wait_table = NULL;
- }
- }
}
EXPORT_SYMBOL(try_offline_node);
diff --git a/mm/nommu.c b/mm/nommu.c
index db5fd1795298..8b8faaf2a9e9 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -109,7 +109,7 @@ unsigned int kobjsize(const void *objp)
return PAGE_SIZE << compound_order(page);
}
-long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int foll_flags, struct page **pages,
struct vm_area_struct **vmas, int *nonblocking)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2b3bf6767d54..072d791dce2d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -92,7 +92,7 @@ int _node_numa_mem_[MAX_NUMNODES];
#endif
#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
-volatile u64 latent_entropy __latent_entropy;
+volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif
@@ -4224,7 +4224,7 @@ static void show_migration_types(unsigned char type)
}
*p = '\0';
- printk("(%s) ", tmp);
+ printk(KERN_CONT "(%s) ", tmp);
}
/*
@@ -4335,7 +4335,8 @@ void show_free_areas(unsigned int filter)
free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
show_node(zone);
- printk("%s"
+ printk(KERN_CONT
+ "%s"
" free:%lukB"
" min:%lukB"
" low:%lukB"
@@ -4382,8 +4383,8 @@ void show_free_areas(unsigned int filter)
K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
printk("lowmem_reserve[]:");
for (i = 0; i < MAX_NR_ZONES; i++)
- printk(" %ld", zone->lowmem_reserve[i]);
- printk("\n");
+ printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
+ printk(KERN_CONT "\n");
}
for_each_populated_zone(zone) {
@@ -4394,7 +4395,7 @@ void show_free_areas(unsigned int filter)
if (skip_free_areas_node(filter, zone_to_nid(zone)))
continue;
show_node(zone);
- printk("%s: ", zone->name);
+ printk(KERN_CONT "%s: ", zone->name);
spin_lock_irqsave(&zone->lock, flags);
for (order = 0; order < MAX_ORDER; order++) {
@@ -4412,11 +4413,12 @@ void show_free_areas(unsigned int filter)
}
spin_unlock_irqrestore(&zone->lock, flags);
for (order = 0; order < MAX_ORDER; order++) {
- printk("%lu*%lukB ", nr[order], K(1UL) << order);
+ printk(KERN_CONT "%lu*%lukB ",
+ nr[order], K(1UL) << order);
if (nr[order])
show_migration_types(types[order]);
}
- printk("= %lukB\n", K(total));
+ printk(KERN_CONT "= %lukB\n", K(total));
}
hugetlb_show_meminfo();
@@ -4977,72 +4979,6 @@ void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
}
/*
- * Helper functions to size the waitqueue hash table.
- * Essentially these want to choose hash table sizes sufficiently
- * large so that collisions trying to wait on pages are rare.
- * But in fact, the number of active page waitqueues on typical
- * systems is ridiculously low, less than 200. So this is even
- * conservative, even though it seems large.
- *
- * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
- * waitqueues, i.e. the size of the waitq table given the number of pages.
- */
-#define PAGES_PER_WAITQUEUE 256
-
-#ifndef CONFIG_MEMORY_HOTPLUG
-static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
-{
- unsigned long size = 1;
-
- pages /= PAGES_PER_WAITQUEUE;
-
- while (size < pages)
- size <<= 1;
-
- /*
- * Once we have dozens or even hundreds of threads sleeping
- * on IO we've got bigger problems than wait queue collision.
- * Limit the size of the wait table to a reasonable size.
- */
- size = min(size, 4096UL);
-
- return max(size, 4UL);
-}
-#else
-/*
- * A zone's size might be changed by hot-add, so it is not possible to determine
- * a suitable size for its wait_table. So we use the maximum size now.
- *
- * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
- *
- * i386 (preemption config) : 4096 x 16 = 64Kbyte.
- * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
- * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
- *
- * The maximum entries are prepared when a zone's memory is (512K + 256) pages
- * or more by the traditional way. (See above). It equals:
- *
- * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
- * ia64(16K page size) : = ( 8G + 4M)byte.
- * powerpc (64K page size) : = (32G +16M)byte.
- */
-static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
-{
- return 4096UL;
-}
-#endif
-
-/*
- * This is an integer logarithm so that shifts can be used later
- * to extract the more random high bits from the multiplicative
- * hash function before the remainder is taken.
- */
-static inline unsigned long wait_table_bits(unsigned long size)
-{
- return ffz(~size);
-}
-
-/*
* Initially all pages are reserved - free ones are freed
* up by free_all_bootmem() once the early boot process is
* done. Non-atomic initialization, single-pass.
@@ -5304,49 +5240,6 @@ void __init setup_per_cpu_pageset(void)
alloc_percpu(struct per_cpu_nodestat);
}
-static noinline __ref
-int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
-{
- int i;
- size_t alloc_size;
-
- /*
- * The per-page waitqueue mechanism uses hashed waitqueues
- * per zone.
- */
- zone->wait_table_hash_nr_entries =
- wait_table_hash_nr_entries(zone_size_pages);
- zone->wait_table_bits =
- wait_table_bits(zone->wait_table_hash_nr_entries);
- alloc_size = zone->wait_table_hash_nr_entries
- * sizeof(wait_queue_head_t);
-
- if (!slab_is_available()) {
- zone->wait_table = (wait_queue_head_t *)
- memblock_virt_alloc_node_nopanic(
- alloc_size, zone->zone_pgdat->node_id);
- } else {
- /*
- * This case means that a zone whose size was 0 gets new memory
- * via memory hot-add.
- * But it may be the case that a new node was hot-added. In
- * this case vmalloc() will not be able to use this new node's
- * memory - this wait_table must be initialized to use this new
- * node itself as well.
- * To use this new node's memory, further consideration will be
- * necessary.
- */
- zone->wait_table = vmalloc(alloc_size);
- }
- if (!zone->wait_table)
- return -ENOMEM;
-
- for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
- init_waitqueue_head(zone->wait_table + i);
-
- return 0;
-}
-
static __meminit void zone_pcp_init(struct zone *zone)
{
/*
@@ -5367,10 +5260,7 @@ int __meminit init_currently_empty_zone(struct zone *zone,
unsigned long size)
{
struct pglist_data *pgdat = zone->zone_pgdat;
- int ret;
- ret = zone_wait_table_init(zone, size);
- if (ret)
- return ret;
+
pgdat->nr_zones = zone_idx(zone) + 1;
zone->zone_start_pfn = zone_start_pfn;
@@ -5382,6 +5272,7 @@ int __meminit init_currently_empty_zone(struct zone *zone,
zone_start_pfn, (zone_start_pfn + size));
zone_init_free_lists(zone);
+ zone->initialized = 1;
return 0;
}
diff --git a/mm/slab.c b/mm/slab.c
index 090fb26b3a39..0b0550ca85b4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -233,6 +233,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
spin_lock_init(&parent->list_lock);
parent->free_objects = 0;
parent->free_touched = 0;
+ parent->num_slabs = 0;
}
#define MAKE_LIST(cachep, listp, slab, nodeid) \
@@ -966,7 +967,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
* guaranteed to be valid until irq is re-enabled, because it will be
* freed after synchronize_sched().
*/
- if (force_change)
+ if (old_shared && force_change)
synchronize_sched();
fail:
@@ -1382,24 +1383,27 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
for_each_kmem_cache_node(cachep, node, n) {
unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
unsigned long active_slabs = 0, num_slabs = 0;
+ unsigned long num_slabs_partial = 0, num_slabs_free = 0;
+ unsigned long num_slabs_full;
spin_lock_irqsave(&n->list_lock, flags);
- list_for_each_entry(page, &n->slabs_full, lru) {
- active_objs += cachep->num;
- active_slabs++;
- }
+ num_slabs = n->num_slabs;
list_for_each_entry(page, &n->slabs_partial, lru) {
active_objs += page->active;
- active_slabs++;
+ num_slabs_partial++;
}
list_for_each_entry(page, &n->slabs_free, lru)
- num_slabs++;
+ num_slabs_free++;
free_objects += n->free_objects;
spin_unlock_irqrestore(&n->list_lock, flags);
- num_slabs += active_slabs;
num_objs = num_slabs * cachep->num;
+ active_slabs = num_slabs - num_slabs_free;
+ num_slabs_full = num_slabs -
+ (num_slabs_partial + num_slabs_free);
+ active_objs += (num_slabs_full * cachep->num);
+
pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
node, active_slabs, num_slabs, active_objs, num_objs,
free_objects);
@@ -2314,6 +2318,7 @@ static int drain_freelist(struct kmem_cache *cache,
page = list_entry(p, struct page, lru);
list_del(&page->lru);
+ n->num_slabs--;
/*
* Safe to drop the lock. The slab is no longer linked
* to the cache.
@@ -2752,6 +2757,8 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
list_add_tail(&page->lru, &(n->slabs_free));
else
fixup_slab_list(cachep, n, page, &list);
+
+ n->num_slabs++;
STATS_INC_GROWN(cachep);
n->free_objects += cachep->num - page->active;
spin_unlock(&n->list_lock);
@@ -3443,6 +3450,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp,
page = list_last_entry(&n->slabs_free, struct page, lru);
list_move(&page->lru, list);
+ n->num_slabs--;
}
}
@@ -4099,6 +4107,8 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
unsigned long num_objs;
unsigned long active_slabs = 0;
unsigned long num_slabs, free_objects = 0, shared_avail = 0;
+ unsigned long num_slabs_partial = 0, num_slabs_free = 0;
+ unsigned long num_slabs_full = 0;
const char *name;
char *error = NULL;
int node;
@@ -4111,33 +4121,34 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
check_irq_on();
spin_lock_irq(&n->list_lock);
- list_for_each_entry(page, &n->slabs_full, lru) {
- if (page->active != cachep->num && !error)
- error = "slabs_full accounting error";
- active_objs += cachep->num;
- active_slabs++;
- }
+ num_slabs += n->num_slabs;
+
list_for_each_entry(page, &n->slabs_partial, lru) {
if (page->active == cachep->num && !error)
error = "slabs_partial accounting error";
if (!page->active && !error)
error = "slabs_partial accounting error";
active_objs += page->active;
- active_slabs++;
+ num_slabs_partial++;
}
+
list_for_each_entry(page, &n->slabs_free, lru) {
if (page->active && !error)
error = "slabs_free accounting error";
- num_slabs++;
+ num_slabs_free++;
}
+
free_objects += n->free_objects;
if (n->shared)
shared_avail += n->shared->avail;
spin_unlock_irq(&n->list_lock);
}
- num_slabs += active_slabs;
num_objs = num_slabs * cachep->num;
+ active_slabs = num_slabs - num_slabs_free;
+ num_slabs_full = num_slabs - (num_slabs_partial + num_slabs_free);
+ active_objs += (num_slabs_full * cachep->num);
+
if (num_objs - active_objs != free_objects && !error)
error = "free_objects accounting error";
diff --git a/mm/slab.h b/mm/slab.h
index 9653f2e2591a..bc05fdc3edce 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -432,6 +432,7 @@ struct kmem_cache_node {
struct list_head slabs_partial; /* partial list first, better asm code */
struct list_head slabs_full;
struct list_head slabs_free;
+ unsigned long num_slabs;
unsigned long free_objects;
unsigned int free_limit;
unsigned int colour_next; /* Per-node cache coloring */
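The slab hunks in this series replace walking slabs_full with a per-node num_slabs counter, bumped in cache_grow_end() and dropped in drain_freelist()/free_block(); the reporting paths then derive the full and active counts arithmetically. A short sketch of that arithmetic, with simplified stand-in fields and illustrative numbers:

#include <stdio.h>

struct node_stats {
	unsigned long num_slabs;         /* maintained on grow/drain/free */
	unsigned long num_slabs_partial; /* counted by walking the short lists */
	unsigned long num_slabs_free;
};

static void report(const struct node_stats *n, unsigned long objs_per_slab,
		   unsigned long partial_active_objs)
{
	unsigned long active_slabs = n->num_slabs - n->num_slabs_free;
	unsigned long num_slabs_full =
		n->num_slabs - (n->num_slabs_partial + n->num_slabs_free);
	unsigned long active_objs =
		partial_active_objs + num_slabs_full * objs_per_slab;

	printf("slabs: %lu/%lu, objs: %lu/%lu\n",
	       active_slabs, n->num_slabs,
	       active_objs, n->num_slabs * objs_per_slab);
}

int main(void)
{
	struct node_stats n = { .num_slabs = 10, .num_slabs_partial = 2,
				.num_slabs_free = 3 };

	report(&n, 32, 20);   /* 2 partial slabs holding 20 live objects */
	return 0;
}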
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 744f926af442..76fda2268148 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3043,7 +3043,9 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
sc.gfp_mask,
sc.reclaim_idx);
+ current->flags |= PF_MEMALLOC;
nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+ current->flags &= ~PF_MEMALLOC;
trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
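This hunk is the other half of the memcontrol.c change above: reclaim on behalf of a memcg now runs with PF_MEMALLOC set, and try_charge() bails out early when it sees the flag, so allocations made during reclaim cannot recurse back into reclaim. A compact sketch of that guard, with a thread-local boolean standing in for the task flag:

#include <stdbool.h>
#include <stdio.h>

static _Thread_local bool in_reclaim;    /* stand-in for PF_MEMALLOC */

static bool try_charge(void)
{
	if (in_reclaim)
		return true;  /* force the charge instead of recursing into reclaim */
	/* normal limit enforcement would go here and may call reclaim() */
	return true;
}

static void reclaim(void)
{
	in_reclaim = true;    /* mirrors setting PF_MEMALLOC around the reclaim pass */
	(void)try_charge();   /* allocations made during reclaim take the early out */
	in_reclaim = false;
}

int main(void)
{
	reclaim();
	puts("reclaim finished without re-entering itself");
	return 0;
}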
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 8de138d3306b..f2531ad66b68 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -664,7 +664,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
skb_gro_pull(skb, sizeof(*vhdr));
skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
- pp = ptype->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 08ce36147c4c..e034afbd1bb0 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -652,7 +652,6 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
batadv_softif_destroy_sysfs(hard_iface->soft_iface);
}
- hard_iface->soft_iface = NULL;
batadv_hardif_put(hard_iface);
out:
diff --git a/net/batman-adv/log.h b/net/batman-adv/log.h
index e0e1a88c3e58..d2905a855d1b 100644
--- a/net/batman-adv/log.h
+++ b/net/batman-adv/log.h
@@ -63,7 +63,7 @@ enum batadv_dbg_level {
BATADV_DBG_NC = BIT(5),
BATADV_DBG_MCAST = BIT(6),
BATADV_DBG_TP_METER = BIT(7),
- BATADV_DBG_ALL = 127,
+ BATADV_DBG_ALL = 255,
};
#ifdef CONFIG_BATMAN_ADV_DEBUG
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 5f3bfc41aeb1..7c8d16086f0f 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -544,7 +544,7 @@ batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
if (bat_priv->algo_ops->neigh.hardif_init)
bat_priv->algo_ops->neigh.hardif_init(hardif_neigh);
- hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list);
+ hlist_add_head_rcu(&hardif_neigh->list, &hard_iface->neigh_list);
out:
spin_unlock_bh(&hard_iface->neigh_list_lock);
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index e2288421fe6b..1015d9c8d97d 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -969,41 +969,38 @@ void __hci_req_enable_advertising(struct hci_request *req)
hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
-static u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
+u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
- size_t complete_len;
size_t short_len;
- int max_len;
-
- max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
- complete_len = strlen(hdev->dev_name);
- short_len = strlen(hdev->short_name);
-
- /* no space left for name */
- if (max_len < 1)
- return ad_len;
+ size_t complete_len;
- /* no name set */
- if (!complete_len)
+ /* no space left for name (+ NULL + type + len) */
+ if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
return ad_len;
- /* complete name fits and is eq to max short name len or smaller */
- if (complete_len <= max_len &&
- complete_len <= HCI_MAX_SHORT_NAME_LENGTH) {
+ /* use complete name if present and fits */
+ complete_len = strlen(hdev->dev_name);
+ if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
- hdev->dev_name, complete_len);
- }
+ hdev->dev_name, complete_len + 1);
- /* short name set and fits */
- if (short_len && short_len <= max_len) {
+ /* use short name if present */
+ short_len = strlen(hdev->short_name);
+ if (short_len)
return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
- hdev->short_name, short_len);
- }
+ hdev->short_name, short_len + 1);
- /* no short name set so shorten complete name */
- if (!short_len) {
- return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
- hdev->dev_name, max_len);
+ /* use shortened full name if present, we already know that name
+ * is longer than HCI_MAX_SHORT_NAME_LENGTH
+ */
+ if (complete_len) {
+ u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
+
+ memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
+ name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
+
+ return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
+ sizeof(name));
}
return ad_len;
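The rewritten helper picks the advertised name in a fixed order: the complete name if it fits the short-name budget, otherwise the configured short name, otherwise the complete name truncated to HCI_MAX_SHORT_NAME_LENGTH. A userspace sketch of just that selection order (the EIR encoding and buffer accounting are omitted, and the length limit here is illustrative):

#include <stdio.h>
#include <string.h>

#define MAX_SHORT_NAME_LENGTH 10   /* stand-in for HCI_MAX_SHORT_NAME_LENGTH */

static void pick_name(const char *dev_name, const char *short_name,
		      char *out, size_t outlen)
{
	size_t complete_len = strlen(dev_name);
	size_t short_len = strlen(short_name);

	if (complete_len && complete_len <= MAX_SHORT_NAME_LENGTH)
		snprintf(out, outlen, "%s", dev_name);          /* complete name */
	else if (short_len)
		snprintf(out, outlen, "%s", short_name);        /* short name */
	else if (complete_len)
		snprintf(out, outlen, "%.*s",
			 MAX_SHORT_NAME_LENGTH, dev_name);      /* truncated */
	else
		out[0] = '\0';                                  /* no name set */
}

int main(void)
{
	char buf[32];

	pick_name("VeryLongDeviceName", "", buf, sizeof(buf));
	printf("advertised name: %s\n", buf);   /* prints "VeryLongDe" */
	return 0;
}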
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
index 6b06629245a8..dde77bd59f91 100644
--- a/net/bluetooth/hci_request.h
+++ b/net/bluetooth/hci_request.h
@@ -106,6 +106,8 @@ static inline void hci_update_background_scan(struct hci_dev *hdev)
void hci_request_setup(struct hci_dev *hdev);
void hci_request_cancel_all(struct hci_dev *hdev);
+u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len);
+
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type,
u8 *data, u8 data_len)
{
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 736038085feb..1fba2a03f8ae 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -6017,7 +6017,15 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
return err;
}
-static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data)
+static u8 calculate_name_len(struct hci_dev *hdev)
+{
+ u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
+
+ return append_local_name(hdev, buf, 0);
+}
+
+static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
+ bool is_adv_data)
{
u8 max_len = HCI_MAX_AD_LENGTH;
@@ -6030,9 +6038,8 @@ static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data)
if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
max_len -= 3;
} else {
- /* at least 1 byte of name should fit in */
if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
- max_len -= 3;
+ max_len -= calculate_name_len(hdev);
if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
max_len -= 4;
@@ -6063,12 +6070,13 @@ static bool appearance_managed(u32 adv_flags)
return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
}
-static bool tlv_data_is_valid(u32 adv_flags, u8 *data, u8 len, bool is_adv_data)
+static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
+ u8 len, bool is_adv_data)
{
int i, cur_len;
u8 max_len;
- max_len = tlv_data_max_len(adv_flags, is_adv_data);
+ max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
if (len > max_len)
return false;
@@ -6215,8 +6223,8 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
goto unlock;
}
- if (!tlv_data_is_valid(flags, cp->data, cp->adv_data_len, true) ||
- !tlv_data_is_valid(flags, cp->data + cp->adv_data_len,
+ if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
+ !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
cp->scan_rsp_len, false)) {
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
MGMT_STATUS_INVALID_PARAMS);
@@ -6429,8 +6437,8 @@ static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
rp.instance = cp->instance;
rp.flags = cp->flags;
- rp.max_adv_data_len = tlv_data_max_len(flags, true);
- rp.max_scan_rsp_len = tlv_data_max_len(flags, false);
+ rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
+ rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index c5fea9393946..2136e45f5277 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -972,13 +972,12 @@ static void br_multicast_enable(struct bridge_mcast_own_query *query)
mod_timer(&query->timer, jiffies);
}
-void br_multicast_enable_port(struct net_bridge_port *port)
+static void __br_multicast_enable_port(struct net_bridge_port *port)
{
struct net_bridge *br = port->br;
- spin_lock(&br->multicast_lock);
if (br->multicast_disabled || !netif_running(br->dev))
- goto out;
+ return;
br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
@@ -987,8 +986,14 @@ void br_multicast_enable_port(struct net_bridge_port *port)
if (port->multicast_router == MDB_RTR_TYPE_PERM &&
hlist_unhashed(&port->rlist))
br_multicast_add_router(br, port);
+}
-out:
+void br_multicast_enable_port(struct net_bridge_port *port)
+{
+ struct net_bridge *br = port->br;
+
+ spin_lock(&br->multicast_lock);
+ __br_multicast_enable_port(port);
spin_unlock(&br->multicast_lock);
}
@@ -1994,8 +1999,9 @@ static void br_multicast_start_querier(struct net_bridge *br,
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
- int err = 0;
struct net_bridge_mdb_htable *mdb;
+ struct net_bridge_port *port;
+ int err = 0;
spin_lock_bh(&br->multicast_lock);
if (br->multicast_disabled == !val)
@@ -2023,10 +2029,9 @@ rollback:
goto rollback;
}
- br_multicast_start_querier(br, &br->ip4_own_query);
-#if IS_ENABLED(CONFIG_IPV6)
- br_multicast_start_querier(br, &br->ip6_own_query);
-#endif
+ br_multicast_open(br);
+ list_for_each_entry(port, &br->port_list, list)
+ __br_multicast_enable_port(port);
unlock:
spin_unlock_bh(&br->multicast_lock);
diff --git a/net/core/dev.c b/net/core/dev.c
index 4bc19a164ba5..820bac239738 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3035,6 +3035,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
}
return head;
}
+EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
static void qdisc_pkt_len_init(struct sk_buff *skb)
{
@@ -4511,6 +4512,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
NAPI_GRO_CB(skb)->flush = 0;
NAPI_GRO_CB(skb)->free = 0;
NAPI_GRO_CB(skb)->encap_mark = 0;
+ NAPI_GRO_CB(skb)->recursion_counter = 0;
NAPI_GRO_CB(skb)->is_fou = 0;
NAPI_GRO_CB(skb)->is_atomic = 1;
NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
@@ -5511,10 +5513,14 @@ struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
{
struct netdev_adjacent *lower;
- lower = list_first_or_null_rcu(&dev->all_adj_list.lower,
- struct netdev_adjacent, list);
+ lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
+
+ if (&lower->list == &dev->all_adj_list.lower)
+ return NULL;
+
+ *iter = &lower->list;
- return lower ? lower->dev : NULL;
+ return lower->dev;
}
EXPORT_SYMBOL(netdev_all_lower_get_next_rcu);
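dev_gro_receive() now zeroes a per-packet recursion counter, and the nested gro_receive calls converted throughout this series go through call_gro_receive(), which bumps that counter and refuses to recurse past a fixed depth. A sketch of that depth-limited dispatch; the limit value and names are illustrative rather than the kernel's:

#include <stdio.h>

#define GRO_RECURSION_LIMIT 15

struct pkt {
	unsigned int recursion_counter;  /* reset when the packet enters GRO */
	int encap_layers;                /* drives the demo below */
};

typedef int (*gro_receive_t)(struct pkt *);

static int call_gro_receive(gro_receive_t cb, struct pkt *p)
{
	if (++p->recursion_counter >= GRO_RECURSION_LIMIT)
		return -1;               /* give up: flush instead of recursing */
	return cb(p);
}

static int inner_gro_receive(struct pkt *p)
{
	if (p->encap_layers-- > 0)       /* peel one encapsulation layer */
		return call_gro_receive(inner_gro_receive, p);
	return 0;
}

int main(void)
{
	struct pkt sane = { 0, 3 };
	struct pkt evil = { 0, 1000 };

	printf("shallow packet: %d\n", call_gro_receive(inner_gro_receive, &sane));
	printf("deeply nested packet: %d\n", call_gro_receive(inner_gro_receive, &evil));
	return 0;
}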
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 1a7b80f73376..ab193e5def07 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -246,15 +246,13 @@ ipv6:
case htons(ETH_P_8021AD):
case htons(ETH_P_8021Q): {
const struct vlan_hdr *vlan;
+ struct vlan_hdr _vlan;
+ bool vlan_tag_present = skb && skb_vlan_tag_present(skb);
- if (skb_vlan_tag_present(skb))
+ if (vlan_tag_present)
proto = skb->protocol;
- if (!skb_vlan_tag_present(skb) ||
- proto == cpu_to_be16(ETH_P_8021Q) ||
- proto == cpu_to_be16(ETH_P_8021AD)) {
- struct vlan_hdr _vlan;
-
+ if (!vlan_tag_present || eth_type_vlan(skb->protocol)) {
vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
data, hlen, &_vlan);
if (!vlan)
@@ -272,7 +270,7 @@ ipv6:
FLOW_DISSECTOR_KEY_VLAN,
target_container);
- if (skb_vlan_tag_present(skb)) {
+ if (vlan_tag_present) {
key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
key_vlan->vlan_priority =
(skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT);
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 989434f36f96..f61c0e02a413 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -215,13 +215,14 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id);
*/
int peernet2id_alloc(struct net *net, struct net *peer)
{
+ unsigned long flags;
bool alloc;
int id;
- spin_lock_bh(&net->nsid_lock);
+ spin_lock_irqsave(&net->nsid_lock, flags);
alloc = atomic_read(&peer->count) == 0 ? false : true;
id = __peernet2id_alloc(net, peer, &alloc);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
if (alloc && id >= 0)
rtnl_net_notifyid(net, RTM_NEWNSID, id);
return id;
@@ -230,11 +231,12 @@ int peernet2id_alloc(struct net *net, struct net *peer)
/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
+ unsigned long flags;
int id;
- spin_lock_bh(&net->nsid_lock);
+ spin_lock_irqsave(&net->nsid_lock, flags);
id = __peernet2id(net, peer);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
return id;
}
EXPORT_SYMBOL(peernet2id);
@@ -249,17 +251,18 @@ bool peernet_has_id(struct net *net, struct net *peer)
struct net *get_net_ns_by_id(struct net *net, int id)
{
+ unsigned long flags;
struct net *peer;
if (id < 0)
return NULL;
rcu_read_lock();
- spin_lock_bh(&net->nsid_lock);
+ spin_lock_irqsave(&net->nsid_lock, flags);
peer = idr_find(&net->netns_ids, id);
if (peer)
get_net(peer);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
rcu_read_unlock();
return peer;
@@ -422,17 +425,17 @@ static void cleanup_net(struct work_struct *work)
for_each_net(tmp) {
int id;
- spin_lock_bh(&tmp->nsid_lock);
+ spin_lock_irq(&tmp->nsid_lock);
id = __peernet2id(tmp, net);
if (id >= 0)
idr_remove(&tmp->netns_ids, id);
- spin_unlock_bh(&tmp->nsid_lock);
+ spin_unlock_irq(&tmp->nsid_lock);
if (id >= 0)
rtnl_net_notifyid(tmp, RTM_DELNSID, id);
}
- spin_lock_bh(&net->nsid_lock);
+ spin_lock_irq(&net->nsid_lock);
idr_destroy(&net->netns_ids);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock_irq(&net->nsid_lock);
}
rtnl_unlock();
@@ -561,6 +564,7 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct nlattr *tb[NETNSA_MAX + 1];
+ unsigned long flags;
struct net *peer;
int nsid, err;
@@ -581,15 +585,15 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
if (IS_ERR(peer))
return PTR_ERR(peer);
- spin_lock_bh(&net->nsid_lock);
+ spin_lock_irqsave(&net->nsid_lock, flags);
if (__peernet2id(net, peer) >= 0) {
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
err = -EEXIST;
goto out;
}
err = alloc_netid(net, peer, nsid);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
if (err >= 0) {
rtnl_net_notifyid(net, RTM_NEWNSID, err);
err = 0;
@@ -711,10 +715,11 @@ static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
.idx = 0,
.s_idx = cb->args[0],
};
+ unsigned long flags;
- spin_lock_bh(&net->nsid_lock);
+ spin_lock_irqsave(&net->nsid_lock, flags);
idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
cb->args[0] = net_cb.idx;
return skb->len;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 5219a9e2127a..306b8f0e03c1 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -216,8 +216,8 @@
#define M_QUEUE_XMIT 2 /* Inject packet into qdisc */
/* If lock -- protects updating of if_list */
-#define if_lock(t) spin_lock(&(t->if_lock));
-#define if_unlock(t) spin_unlock(&(t->if_lock));
+#define if_lock(t) mutex_lock(&(t->if_lock));
+#define if_unlock(t) mutex_unlock(&(t->if_lock));
/* Used to help with determining the pkts on receive */
#define PKTGEN_MAGIC 0xbe9be955
@@ -423,7 +423,7 @@ struct pktgen_net {
};
struct pktgen_thread {
- spinlock_t if_lock; /* for list of devices */
+ struct mutex if_lock; /* for list of devices */
struct list_head if_list; /* All device here */
struct list_head th_list;
struct task_struct *tsk;
@@ -2010,11 +2010,13 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
{
struct pktgen_thread *t;
+ mutex_lock(&pktgen_thread_lock);
+
list_for_each_entry(t, &pn->pktgen_threads, th_list) {
struct pktgen_dev *pkt_dev;
- rcu_read_lock();
- list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
+ if_lock(t);
+ list_for_each_entry(pkt_dev, &t->if_list, list) {
if (pkt_dev->odev != dev)
continue;
@@ -2029,8 +2031,9 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
dev->name);
break;
}
- rcu_read_unlock();
+ if_unlock(t);
}
+ mutex_unlock(&pktgen_thread_lock);
}
static int pktgen_device_event(struct notifier_block *unused,
@@ -3762,7 +3765,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
return -ENOMEM;
}
- spin_lock_init(&t->if_lock);
+ mutex_init(&t->if_lock);
t->cpu = cpu;
INIT_LIST_HEAD(&t->if_list);
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index e92b759d906c..9a1a352fd1eb 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -129,7 +129,6 @@ int reuseport_add_sock(struct sock *sk, struct sock *sk2)
return 0;
}
-EXPORT_SYMBOL(reuseport_add_sock);
static void reuseport_free_rcu(struct rcu_head *head)
{
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 66dff5e3d772..02acfff36028 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -439,7 +439,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head,
skb_gro_pull(skb, sizeof(*eh));
skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
- pp = ptype->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index 5ee1d43f1310..4ebe2aa3e7d3 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -300,10 +300,6 @@ static void hsr_forward_do(struct hsr_frame_info *frame)
static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
struct hsr_frame_info *frame)
{
- struct net_device *master_dev;
-
- master_dev = hsr_port_get_hsr(hsr, HSR_PT_MASTER)->dev;
-
if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
frame->is_local_exclusive = true;
skb->pkt_type = PACKET_HOST;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 1effc986739e..9648c97e541f 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1391,7 +1391,7 @@ struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb)
skb_gro_pull(skb, sizeof(*iph));
skb_set_transport_header(skb, skb_gro_offset(skb));
- pp = ops->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index cf50f7e2b012..030d1531e897 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -249,7 +249,7 @@ static struct sk_buff **fou_gro_receive(struct sock *sk,
if (!ops || !ops->callbacks.gro_receive)
goto out_unlock;
- pp = ops->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
@@ -441,7 +441,7 @@ next_proto:
if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
goto out_unlock;
- pp = ops->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
flush = 0;
out_unlock:
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 96e0efecefa6..d5cac99170b1 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -229,7 +229,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
/* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
skb_gro_postpull_rcsum(skb, greh, grehlen);
- pp = ptype->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
flush = 0;
out_unlock:
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 77c20a489218..ca97835bfec4 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -25,6 +25,7 @@
#include <net/inet_hashtables.h>
#include <net/secure_seq.h>
#include <net/ip.h>
+#include <net/tcp.h>
#include <net/sock_reuseport.h>
static u32 inet_ehashfn(const struct net *net, const __be32 laddr,
@@ -172,7 +173,7 @@ EXPORT_SYMBOL_GPL(__inet_inherit_port);
static inline int compute_score(struct sock *sk, struct net *net,
const unsigned short hnum, const __be32 daddr,
- const int dif)
+ const int dif, bool exact_dif)
{
int score = -1;
struct inet_sock *inet = inet_sk(sk);
@@ -186,7 +187,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
return -1;
score += 4;
}
- if (sk->sk_bound_dev_if) {
+ if (sk->sk_bound_dev_if || exact_dif) {
if (sk->sk_bound_dev_if != dif)
return -1;
score += 4;
@@ -215,11 +216,12 @@ struct sock *__inet_lookup_listener(struct net *net,
unsigned int hash = inet_lhashfn(net, hnum);
struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
int score, hiscore = 0, matches = 0, reuseport = 0;
+ bool exact_dif = inet_exact_dif_match(net, skb);
struct sock *sk, *result = NULL;
u32 phash = 0;
sk_for_each_rcu(sk, &ilb->head) {
- score = compute_score(sk, net, hnum, daddr, dif);
+ score = compute_score(sk, net, hnum, daddr, dif, exact_dif);
if (score > hiscore) {
reuseport = sk->sk_reuseport;
if (reuseport) {
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 05d105832bdb..03e7f7310423 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -538,7 +538,6 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
{
struct iphdr *iph;
int ptr;
- struct net_device *dev;
struct sk_buff *skb2;
unsigned int mtu, hlen, left, len, ll_rs;
int offset;
@@ -546,8 +545,6 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
struct rtable *rt = skb_rtable(skb);
int err = 0;
- dev = rt->dst.dev;
-
/* for offloaded checksums cleanup checksum before fragmentation */
if (skb->ip_summed == CHECKSUM_PARTIAL &&
(err = skb_checksum_help(skb)))
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index af4919792b6a..b8a2d63d1fb8 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -98,7 +98,7 @@ static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
}
static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
- int offset)
+ int tlen, int offset)
{
__wsum csum = skb->csum;
@@ -106,8 +106,9 @@ static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
return;
if (offset != 0)
- csum = csum_sub(csum, csum_partial(skb_transport_header(skb),
- offset, 0));
+ csum = csum_sub(csum,
+ csum_partial(skb_transport_header(skb) + tlen,
+ offset, 0));
put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
}
@@ -153,7 +154,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
}
void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
- int offset)
+ int tlen, int offset)
{
struct inet_sock *inet = inet_sk(skb->sk);
unsigned int flags = inet->cmsg_flags;
@@ -216,7 +217,7 @@ void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
}
if (flags & IP_CMSG_CHECKSUM)
- ip_cmsg_recv_checksum(msg, skb, offset);
+ ip_cmsg_recv_checksum(msg, skb, tlen, offset);
}
EXPORT_SYMBOL(ip_cmsg_recv_offset);
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 7cf7d6e380c2..205e2000d395 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -994,7 +994,7 @@ struct proto ping_prot = {
.init = ping_init_sock,
.close = ping_close,
.connect = ip4_datagram_connect,
- .disconnect = udp_disconnect,
+ .disconnect = __udp_disconnect,
.setsockopt = ip_setsockopt,
.getsockopt = ip_getsockopt,
.sendmsg = ping_v4_sendmsg,
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 90a85c955872..ecbe5a7c2d6d 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -918,7 +918,7 @@ struct proto raw_prot = {
.close = raw_close,
.destroy = raw_destroy,
.connect = ip4_datagram_connect,
- .disconnect = udp_disconnect,
+ .disconnect = __udp_disconnect,
.ioctl = raw_ioctl,
.init = raw_init,
.setsockopt = raw_setsockopt,
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 1cb67de106fe..80bc36b25de2 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -96,11 +96,11 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low
container_of(table->data, struct net, ipv4.ping_group_range.range);
unsigned int seq;
do {
- seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
+ seq = read_seqbegin(&net->ipv4.ping_group_range.lock);
*low = data[0];
*high = data[1];
- } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
+ } while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
}
/* Update system visible IP port range */
@@ -109,10 +109,10 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig
kgid_t *data = table->data;
struct net *net =
container_of(table->data, struct net, ipv4.ping_group_range.range);
- write_seqlock(&net->ipv4.ip_local_ports.lock);
+ write_seqlock(&net->ipv4.ping_group_range.lock);
data[0] = low;
data[1] = high;
- write_sequnlock(&net->ipv4.ip_local_ports.lock);
+ write_sequnlock(&net->ipv4.ping_group_range.lock);
}
/* Validate changes from /proc interface. */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index bd5e8d10893f..61b7be303eec 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -86,7 +86,6 @@
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
-EXPORT_SYMBOL(sysctl_tcp_low_latency);
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
@@ -1887,7 +1886,6 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
struct tcp_iter_state *st = seq->private;
struct net *net = seq_file_net(seq);
struct inet_listen_hashbucket *ilb;
- struct inet_connection_sock *icsk;
struct sock *sk = cur;
if (!sk) {
@@ -1909,7 +1907,6 @@ get_sk:
continue;
if (sk->sk_family == st->family)
return sk;
- icsk = inet_csk(sk);
}
spin_unlock_bh(&ilb->lock);
st->offset = 0;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 7d96dc2d3d08..d123d68f4d1d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1322,7 +1322,7 @@ try_again:
*addr_len = sizeof(*sin);
}
if (inet->cmsg_flags)
- ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr) + off);
+ ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr), off);
err = copied;
if (flags & MSG_TRUNC)
@@ -1345,7 +1345,7 @@ csum_copy_err:
goto try_again;
}
-int udp_disconnect(struct sock *sk, int flags)
+int __udp_disconnect(struct sock *sk, int flags)
{
struct inet_sock *inet = inet_sk(sk);
/*
@@ -1367,6 +1367,15 @@ int udp_disconnect(struct sock *sk, int flags)
sk_dst_reset(sk);
return 0;
}
+EXPORT_SYMBOL(__udp_disconnect);
+
+int udp_disconnect(struct sock *sk, int flags)
+{
+ lock_sock(sk);
+ __udp_disconnect(sk, flags);
+ release_sock(sk);
+ return 0;
+}
EXPORT_SYMBOL(udp_disconnect);
void udp_lib_unhash(struct sock *sk)
@@ -2193,7 +2202,7 @@ int udp_abort(struct sock *sk, int err)
sk->sk_err = err;
sk->sk_error_report(sk);
- udp_disconnect(sk, 0);
+ __udp_disconnect(sk, 0);
release_sock(sk);
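udp_disconnect() is split into an unlocked __udp_disconnect() plus a thin wrapper that takes the socket lock, and callers that already hold the lock (udp_abort() here, ping, raw and l2tp elsewhere in the series) switch to the double-underscore variant. A sketch of that locked-wrapper/unlocked-helper convention, with pthread mutexes standing in for lock_sock()/release_sock():

#include <pthread.h>
#include <stdio.h>

struct sock {
	pthread_mutex_t lock;
	int connected;
};

/* caller must hold sk->lock */
static int __udp_disconnect(struct sock *sk, int flags)
{
	(void)flags;
	sk->connected = 0;               /* tear down the association */
	return 0;
}

static int udp_disconnect(struct sock *sk, int flags)
{
	pthread_mutex_lock(&sk->lock);
	__udp_disconnect(sk, flags);
	pthread_mutex_unlock(&sk->lock);
	return 0;
}

static void udp_abort(struct sock *sk)
{
	pthread_mutex_lock(&sk->lock);
	/* the lock is already held, so use the unlocked variant rather than
	 * deadlocking by calling udp_disconnect() again */
	__udp_disconnect(sk, 0);
	pthread_mutex_unlock(&sk->lock);
}

int main(void)
{
	struct sock sk = { PTHREAD_MUTEX_INITIALIZER, 1 };

	udp_disconnect(&sk, 0);
	udp_abort(&sk);
	printf("connected = %d\n", sk.connected);
	return 0;
}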
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index f9333c963607..b2be1d9757ef 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -295,7 +295,7 @@ unflush:
skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
- pp = udp_sk(sk)->gro_receive(sk, head, skb);
+ pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
out_unlock:
rcu_read_unlock();
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d8983e15f859..060dd9922018 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -147,9 +147,8 @@ static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
}
#endif
-static void __ipv6_regen_rndid(struct inet6_dev *idev);
-static void __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
-static void ipv6_regen_rndid(unsigned long data);
+static void ipv6_regen_rndid(struct inet6_dev *idev);
+static void ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
static int ipv6_count_addresses(struct inet6_dev *idev);
@@ -409,9 +408,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
goto err_release;
}
- /* One reference from device. We must do this before
- * we invoke __ipv6_regen_rndid().
- */
+ /* One reference from device. */
in6_dev_hold(ndev);
if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
@@ -425,17 +422,15 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
#endif
INIT_LIST_HEAD(&ndev->tempaddr_list);
- setup_timer(&ndev->regen_timer, ipv6_regen_rndid, (unsigned long)ndev);
+ ndev->desync_factor = U32_MAX;
if ((dev->flags&IFF_LOOPBACK) ||
dev->type == ARPHRD_TUNNEL ||
dev->type == ARPHRD_TUNNEL6 ||
dev->type == ARPHRD_SIT ||
dev->type == ARPHRD_NONE) {
ndev->cnf.use_tempaddr = -1;
- } else {
- in6_dev_hold(ndev);
- ipv6_regen_rndid((unsigned long) ndev);
- }
+ } else
+ ipv6_regen_rndid(ndev);
ndev->token = in6addr_any;
@@ -447,7 +442,6 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
err = addrconf_sysctl_register(ndev);
if (err) {
ipv6_mc_destroy_dev(ndev);
- del_timer(&ndev->regen_timer);
snmp6_unregister_dev(ndev);
goto err_release;
}
@@ -1190,6 +1184,8 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i
int ret = 0;
u32 addr_flags;
unsigned long now = jiffies;
+ long max_desync_factor;
+ s32 cnf_temp_preferred_lft;
write_lock_bh(&idev->lock);
if (ift) {
@@ -1222,23 +1218,42 @@ retry:
}
in6_ifa_hold(ifp);
memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
- __ipv6_try_regen_rndid(idev, tmpaddr);
+ ipv6_try_regen_rndid(idev, tmpaddr);
memcpy(&addr.s6_addr[8], idev->rndid, 8);
age = (now - ifp->tstamp) / HZ;
+
+ regen_advance = idev->cnf.regen_max_retry *
+ idev->cnf.dad_transmits *
+ NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
+
+ /* recalculate max_desync_factor each time and update
+ * idev->desync_factor if it's larger
+ */
+ cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
+ max_desync_factor = min_t(__u32,
+ idev->cnf.max_desync_factor,
+ cnf_temp_preferred_lft - regen_advance);
+
+ if (unlikely(idev->desync_factor > max_desync_factor)) {
+ if (max_desync_factor > 0) {
+ get_random_bytes(&idev->desync_factor,
+ sizeof(idev->desync_factor));
+ idev->desync_factor %= max_desync_factor;
+ } else {
+ idev->desync_factor = 0;
+ }
+ }
+
tmp_valid_lft = min_t(__u32,
ifp->valid_lft,
idev->cnf.temp_valid_lft + age);
- tmp_prefered_lft = min_t(__u32,
- ifp->prefered_lft,
- idev->cnf.temp_prefered_lft + age -
- idev->cnf.max_desync_factor);
+ tmp_prefered_lft = cnf_temp_preferred_lft + age -
+ idev->desync_factor;
+ tmp_prefered_lft = min_t(__u32, ifp->prefered_lft, tmp_prefered_lft);
tmp_plen = ifp->prefix_len;
tmp_tstamp = ifp->tstamp;
spin_unlock_bh(&ifp->lock);
- regen_advance = idev->cnf.regen_max_retry *
- idev->cnf.dad_transmits *
- NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
write_unlock_bh(&idev->lock);
/* A temporary address is created only if this calculated Preferred
@@ -2150,7 +2165,7 @@ static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
}
/* (re)generation of randomized interface identifier (RFC 3041 3.2, 3.5) */
-static void __ipv6_regen_rndid(struct inet6_dev *idev)
+static void ipv6_regen_rndid(struct inet6_dev *idev)
{
regen:
get_random_bytes(idev->rndid, sizeof(idev->rndid));
@@ -2179,43 +2194,10 @@ regen:
}
}
-static void ipv6_regen_rndid(unsigned long data)
-{
- struct inet6_dev *idev = (struct inet6_dev *) data;
- unsigned long expires;
-
- rcu_read_lock_bh();
- write_lock_bh(&idev->lock);
-
- if (idev->dead)
- goto out;
-
- __ipv6_regen_rndid(idev);
-
- expires = jiffies +
- idev->cnf.temp_prefered_lft * HZ -
- idev->cnf.regen_max_retry * idev->cnf.dad_transmits *
- NEIGH_VAR(idev->nd_parms, RETRANS_TIME) -
- idev->cnf.max_desync_factor * HZ;
- if (time_before(expires, jiffies)) {
- pr_warn("%s: too short regeneration interval; timer disabled for %s\n",
- __func__, idev->dev->name);
- goto out;
- }
-
- if (!mod_timer(&idev->regen_timer, expires))
- in6_dev_hold(idev);
-
-out:
- write_unlock_bh(&idev->lock);
- rcu_read_unlock_bh();
- in6_dev_put(idev);
-}
-
-static void __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
+static void ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
{
if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0)
- __ipv6_regen_rndid(idev);
+ ipv6_regen_rndid(idev);
}
/*
@@ -2356,7 +2338,7 @@ static void manage_tempaddrs(struct inet6_dev *idev,
max_valid = 0;
max_prefered = idev->cnf.temp_prefered_lft -
- idev->cnf.max_desync_factor - age;
+ idev->desync_factor - age;
if (max_prefered < 0)
max_prefered = 0;
@@ -3018,7 +3000,7 @@ static void init_loopback(struct net_device *dev)
* lo device down, release this obsolete dst and
* reallocate a new router for ifa.
*/
- if (sp_ifa->rt->dst.obsolete > 0) {
+ if (!atomic_read(&sp_ifa->rt->rt6i_ref)) {
ip6_rt_put(sp_ifa->rt);
sp_ifa->rt = NULL;
} else {
@@ -3594,9 +3576,6 @@ restart:
if (!how)
idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
- if (how && del_timer(&idev->regen_timer))
- in6_dev_put(idev);
-
/* Step 3: clear tempaddr list */
while (!list_empty(&idev->tempaddr_list)) {
ifa = list_first_entry(&idev->tempaddr_list,
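In the addrconf changes above, desync_factor now starts at U32_MAX and is re-randomized whenever it exceeds the bound recomputed from the current sysctls, min(max_desync_factor, temp_preferred_lft - regen_advance). A sketch of that bounded re-randomization; rand() stands in for get_random_bytes() and the values in main() are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t desync_factor = UINT32_MAX;      /* "not chosen yet" sentinel */

static void maybe_regen_desync(long max_desync_factor,
			       long temp_preferred_lft, long regen_advance)
{
	long bound = max_desync_factor;

	if (temp_preferred_lft - regen_advance < bound)
		bound = temp_preferred_lft - regen_advance;

	if ((int64_t)desync_factor > bound) {
		if (bound > 0)
			desync_factor = (uint32_t)(rand() % bound);
		else
			desync_factor = 0;
	}
}

int main(void)
{
	maybe_regen_desync(600, 86400, 15);          /* illustrative sysctl values */
	printf("desync_factor = %u\n", desync_factor);
	return 0;
}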
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 00cf28ad4565..02761c9fe43e 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -96,7 +96,7 @@ EXPORT_SYMBOL(__inet6_lookup_established);
static inline int compute_score(struct sock *sk, struct net *net,
const unsigned short hnum,
const struct in6_addr *daddr,
- const int dif)
+ const int dif, bool exact_dif)
{
int score = -1;
@@ -109,7 +109,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
return -1;
score++;
}
- if (sk->sk_bound_dev_if) {
+ if (sk->sk_bound_dev_if || exact_dif) {
if (sk->sk_bound_dev_if != dif)
return -1;
score++;
@@ -131,11 +131,12 @@ struct sock *inet6_lookup_listener(struct net *net,
unsigned int hash = inet_lhashfn(net, hnum);
struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
int score, hiscore = 0, matches = 0, reuseport = 0;
+ bool exact_dif = inet6_exact_dif_match(net, skb);
struct sock *sk, *result = NULL;
u32 phash = 0;
sk_for_each(sk, &ilb->head) {
- score = compute_score(sk, net, hnum, daddr, dif);
+ score = compute_score(sk, net, hnum, daddr, dif, exact_dif);
if (score > hiscore) {
reuseport = sk->sk_reuseport;
if (reuseport) {
@@ -263,13 +264,15 @@ EXPORT_SYMBOL_GPL(inet6_hash_connect);
int inet6_hash(struct sock *sk)
{
+ int err = 0;
+
if (sk->sk_state != TCP_CLOSE) {
local_bh_disable();
- __inet_hash(sk, NULL, ipv6_rcv_saddr_equal);
+ err = __inet_hash(sk, NULL, ipv6_rcv_saddr_equal);
local_bh_enable();
}
- return 0;
+ return err;
}
EXPORT_SYMBOL_GPL(inet6_hash);
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index e7bfd55899a3..1fcf61f1cbc3 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -246,7 +246,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
skb_gro_postpull_rcsum(skb, iph, nlen);
- pp = ops->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 6a66adba0c22..87784560dc46 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -157,6 +157,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
hash = HASH(&any, local);
for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
if (ipv6_addr_equal(local, &t->parms.laddr) &&
+ ipv6_addr_any(&t->parms.raddr) &&
(t->dev->flags & IFF_UP))
return t;
}
@@ -164,6 +165,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
hash = HASH(remote, &any);
for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
if (ipv6_addr_equal(remote, &t->parms.raddr) &&
+ ipv6_addr_any(&t->parms.laddr) &&
(t->dev->flags & IFF_UP))
return t;
}
@@ -1170,6 +1172,7 @@ route_lookup:
if (err)
return err;
+ skb->protocol = htons(ETH_P_IPV6);
skb_push(skb, sizeof(struct ipv6hdr));
skb_reset_network_header(skb);
ipv6h = ipv6_hdr(skb);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 5330262ab673..636ec56f5f50 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -120,6 +120,7 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
static bool setsockopt_needs_rtnl(int optname)
{
switch (optname) {
+ case IPV6_ADDRFORM:
case IPV6_ADD_MEMBERSHIP:
case IPV6_DROP_MEMBERSHIP:
case IPV6_JOIN_ANYCAST:
@@ -198,7 +199,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
}
fl6_free_socklist(sk);
- ipv6_sock_mc_close(sk);
+ __ipv6_sock_mc_close(sk);
/*
* Sock is moving from IPv6 to IPv4 (sk_prot), so
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 75c1fc54f188..14a3903f1c82 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -276,16 +276,14 @@ static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
return idev;
}
-void ipv6_sock_mc_close(struct sock *sk)
+void __ipv6_sock_mc_close(struct sock *sk)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct ipv6_mc_socklist *mc_lst;
struct net *net = sock_net(sk);
- if (!rcu_access_pointer(np->ipv6_mc_list))
- return;
+ ASSERT_RTNL();
- rtnl_lock();
while ((mc_lst = rtnl_dereference(np->ipv6_mc_list)) != NULL) {
struct net_device *dev;
@@ -303,8 +301,17 @@ void ipv6_sock_mc_close(struct sock *sk)
atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
kfree_rcu(mc_lst, rcu);
-
}
+}
+
+void ipv6_sock_mc_close(struct sock *sk)
+{
+ struct ipv6_pinfo *np = inet6_sk(sk);
+
+ if (!rcu_access_pointer(np->ipv6_mc_list))
+ return;
+ rtnl_lock();
+ __ipv6_sock_mc_close(sk);
rtnl_unlock();
}
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 0e983b694ee8..66e2d9dfc43a 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -180,7 +180,7 @@ struct proto pingv6_prot = {
.init = ping_init_sock,
.close = ping_close,
.connect = ip6_datagram_connect_v6_only,
- .disconnect = udp_disconnect,
+ .disconnect = __udp_disconnect,
.setsockopt = ipv6_setsockopt,
.getsockopt = ipv6_getsockopt,
.sendmsg = ping_v6_sendmsg,
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 54404f08efcc..054a1d84fc5e 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1241,7 +1241,7 @@ struct proto rawv6_prot = {
.close = rawv6_close,
.destroy = raw6_destroy,
.connect = ip6_datagram_connect_v6_only,
- .disconnect = udp_disconnect,
+ .disconnect = __udp_disconnect,
.ioctl = rawv6_ioctl,
.init = rawv6_init_sk,
.setsockopt = rawv6_setsockopt,
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 2160d5d009cb..3815e8505ed2 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -456,7 +456,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
memmove(head->head + sizeof(struct frag_hdr), head->head,
(head->data - head->head) - sizeof(struct frag_hdr));
- head->mac_header += sizeof(struct frag_hdr);
+ if (skb_mac_header_was_set(head))
+ head->mac_header += sizeof(struct frag_hdr);
head->network_header += sizeof(struct frag_hdr);
skb_reset_transport_header(head);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index bdbc38e8bf29..947ed1ded026 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -102,11 +102,13 @@ static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
#ifdef CONFIG_IPV6_ROUTE_INFO
static struct rt6_info *rt6_add_route_info(struct net *net,
const struct in6_addr *prefix, int prefixlen,
- const struct in6_addr *gwaddr, int ifindex,
+ const struct in6_addr *gwaddr,
+ struct net_device *dev,
unsigned int pref);
static struct rt6_info *rt6_get_route_info(struct net *net,
const struct in6_addr *prefix, int prefixlen,
- const struct in6_addr *gwaddr, int ifindex);
+ const struct in6_addr *gwaddr,
+ struct net_device *dev);
#endif
struct uncached_list {
@@ -656,7 +658,8 @@ static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
struct net_device *dev = rt->dst.dev;
if (dev && !netif_carrier_ok(dev) &&
- idev->cnf.ignore_routes_with_linkdown)
+ idev->cnf.ignore_routes_with_linkdown &&
+ !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
goto out;
if (rt6_check_expired(rt))
@@ -803,7 +806,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
rt = rt6_get_dflt_router(gwaddr, dev);
else
rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
- gwaddr, dev->ifindex);
+ gwaddr, dev);
if (rt && !lifetime) {
ip6_del_rt(rt);
@@ -811,8 +814,8 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
}
if (!rt && lifetime)
- rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
- pref);
+ rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
+ dev, pref);
else if (rt)
rt->rt6i_flags = RTF_ROUTEINFO |
(rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
@@ -1050,6 +1053,7 @@ struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
int strict = 0;
strict |= flags & RT6_LOOKUP_F_IFACE;
+ strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
if (net->ipv6.devconf_all->forwarding == 0)
strict |= RT6_LOOKUP_F_REACHABLE;
@@ -1789,7 +1793,7 @@ static struct rt6_info *ip6_nh_lookup_table(struct net *net,
};
struct fib6_table *table;
struct rt6_info *rt;
- int flags = RT6_LOOKUP_F_IFACE;
+ int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_IGNORE_LINKSTATE;
table = fib6_get_table(net, cfg->fc_table);
if (!table)
@@ -2325,13 +2329,16 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
#ifdef CONFIG_IPV6_ROUTE_INFO
static struct rt6_info *rt6_get_route_info(struct net *net,
const struct in6_addr *prefix, int prefixlen,
- const struct in6_addr *gwaddr, int ifindex)
+ const struct in6_addr *gwaddr,
+ struct net_device *dev)
{
+ u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
+ int ifindex = dev->ifindex;
struct fib6_node *fn;
struct rt6_info *rt = NULL;
struct fib6_table *table;
- table = fib6_get_table(net, RT6_TABLE_INFO);
+ table = fib6_get_table(net, tb_id);
if (!table)
return NULL;
@@ -2357,12 +2364,13 @@ out:
static struct rt6_info *rt6_add_route_info(struct net *net,
const struct in6_addr *prefix, int prefixlen,
- const struct in6_addr *gwaddr, int ifindex,
+ const struct in6_addr *gwaddr,
+ struct net_device *dev,
unsigned int pref)
{
struct fib6_config cfg = {
.fc_metric = IP6_RT_PRIO_USER,
- .fc_ifindex = ifindex,
+ .fc_ifindex = dev->ifindex,
.fc_dst_len = prefixlen,
.fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
RTF_UP | RTF_PREF(pref),
@@ -2371,7 +2379,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
.fc_nlinfo.nl_net = net,
};
- cfg.fc_table = l3mdev_fib_table_by_index(net, ifindex) ? : RT6_TABLE_INFO;
+ cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO,
cfg.fc_dst = *prefix;
cfg.fc_gateway = *gwaddr;
@@ -2381,16 +2389,17 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
ip6_route_add(&cfg);
- return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
+ return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
}
#endif
struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
{
+ u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
struct rt6_info *rt;
struct fib6_table *table;
- table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
+ table = fib6_get_table(dev_net(dev), tb_id);
if (!table)
return NULL;
@@ -2424,20 +2433,20 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
cfg.fc_gateway = *gwaddr;
- ip6_route_add(&cfg);
+ if (!ip6_route_add(&cfg)) {
+ struct fib6_table *table;
+
+ table = fib6_get_table(dev_net(dev), cfg.fc_table);
+ if (table)
+ table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
+ }
return rt6_get_dflt_router(gwaddr, dev);
}
-void rt6_purge_dflt_routers(struct net *net)
+static void __rt6_purge_dflt_routers(struct fib6_table *table)
{
struct rt6_info *rt;
- struct fib6_table *table;
-
- /* NOTE: Keep consistent with rt6_get_dflt_router */
- table = fib6_get_table(net, RT6_TABLE_DFLT);
- if (!table)
- return;
restart:
read_lock_bh(&table->tb6_lock);
@@ -2451,6 +2460,27 @@ restart:
}
}
read_unlock_bh(&table->tb6_lock);
+
+ table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
+}
+
+void rt6_purge_dflt_routers(struct net *net)
+{
+ struct fib6_table *table;
+ struct hlist_head *head;
+ unsigned int h;
+
+ rcu_read_lock();
+
+ for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
+ head = &net->ipv6.fib_table_hash[h];
+ hlist_for_each_entry_rcu(table, head, tb6_hlist) {
+ if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
+ __rt6_purge_dflt_routers(table);
+ }
+ }
+
+ rcu_read_unlock();
}
static void rtmsg_to_fib6_config(struct net *net,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 9aa7c1c7a9ce..b2ef061e6836 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -427,7 +427,8 @@ try_again:
if (is_udp4) {
if (inet->cmsg_flags)
- ip_cmsg_recv(msg, skb);
+ ip_cmsg_recv_offset(msg, skb,
+ sizeof(struct udphdr), off);
} else {
if (np->rxopt.all)
ip6_datagram_recv_specific_ctl(sk, msg, skb);
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 42de4ccd159f..fce25afb652a 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -338,7 +338,7 @@ static int l2tp_ip_disconnect(struct sock *sk, int flags)
if (sock_flag(sk, SOCK_ZAPPED))
return 0;
- return udp_disconnect(sk, flags);
+ return __udp_disconnect(sk, flags);
}
static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index ea2ae6664cc8..ad3468c32b53 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -410,7 +410,7 @@ static int l2tp_ip6_disconnect(struct sock *sk, int flags)
if (sock_flag(sk, SOCK_ZAPPED))
return 0;
- return udp_disconnect(sk, flags);
+ return __udp_disconnect(sk, flags);
}
static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
index 7663c28ba353..a4e0d59a40dd 100644
--- a/net/mac80211/aes_ccm.c
+++ b/net/mac80211/aes_ccm.c
@@ -18,21 +18,24 @@
#include "key.h"
#include "aes_ccm.h"
-void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
- u8 *data, size_t data_len, u8 *mic,
- size_t mic_len)
+int ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
+ u8 *data, size_t data_len, u8 *mic,
+ size_t mic_len)
{
struct scatterlist sg[3];
+ struct aead_request *aead_req;
+ int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
+ u8 *__aad;
- char aead_req_data[sizeof(struct aead_request) +
- crypto_aead_reqsize(tfm)]
- __aligned(__alignof__(struct aead_request));
- struct aead_request *aead_req = (void *) aead_req_data;
+ aead_req = kzalloc(reqsize + CCM_AAD_LEN, GFP_ATOMIC);
+ if (!aead_req)
+ return -ENOMEM;
- memset(aead_req, 0, sizeof(aead_req_data));
+ __aad = (u8 *)aead_req + reqsize;
+ memcpy(__aad, aad, CCM_AAD_LEN);
sg_init_table(sg, 3);
- sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad));
+ sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad));
sg_set_buf(&sg[1], data, data_len);
sg_set_buf(&sg[2], mic, mic_len);
@@ -41,6 +44,9 @@ void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
aead_request_set_ad(aead_req, sg[0].length);
crypto_aead_encrypt(aead_req);
+ kzfree(aead_req);
+
+ return 0;
}
int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
@@ -48,18 +54,23 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
size_t mic_len)
{
struct scatterlist sg[3];
- char aead_req_data[sizeof(struct aead_request) +
- crypto_aead_reqsize(tfm)]
- __aligned(__alignof__(struct aead_request));
- struct aead_request *aead_req = (void *) aead_req_data;
+ struct aead_request *aead_req;
+ int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
+ u8 *__aad;
+ int err;
if (data_len == 0)
return -EINVAL;
- memset(aead_req, 0, sizeof(aead_req_data));
+ aead_req = kzalloc(reqsize + CCM_AAD_LEN, GFP_ATOMIC);
+ if (!aead_req)
+ return -ENOMEM;
+
+ __aad = (u8 *)aead_req + reqsize;
+ memcpy(__aad, aad, CCM_AAD_LEN);
sg_init_table(sg, 3);
- sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad));
+ sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad));
sg_set_buf(&sg[1], data, data_len);
sg_set_buf(&sg[2], mic, mic_len);
@@ -67,7 +78,10 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
aead_request_set_crypt(aead_req, sg, sg, data_len + mic_len, b_0);
aead_request_set_ad(aead_req, sg[0].length);
- return crypto_aead_decrypt(aead_req);
+ err = crypto_aead_decrypt(aead_req);
+ kzfree(aead_req);
+
+ return err;
}
struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[],
diff --git a/net/mac80211/aes_ccm.h b/net/mac80211/aes_ccm.h
index 6a73d1e4d186..fcd3254c5cf0 100644
--- a/net/mac80211/aes_ccm.h
+++ b/net/mac80211/aes_ccm.h
@@ -12,12 +12,14 @@
#include <linux/crypto.h>
+#define CCM_AAD_LEN 32
+
struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[],
size_t key_len,
size_t mic_len);
-void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
- u8 *data, size_t data_len, u8 *mic,
- size_t mic_len);
+int ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
+ u8 *data, size_t data_len, u8 *mic,
+ size_t mic_len);
int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
u8 *data, size_t data_len, u8 *mic,
size_t mic_len);
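
The CCM change above trades the variable-length on-stack request buffer for a single kzalloc() that also carries a private copy of the AAD, scrubbed with kzfree() when done. A minimal userspace sketch of that allocation pattern follows; the helper name, AAD_LEN value and layout are illustrative assumptions, not mac80211's API:

    #include <stdlib.h>
    #include <string.h>

    #define AAD_LEN 32    /* stand-in for CCM_AAD_LEN */

    /* One heap block holds the request and a private copy of the AAD. */
    int encrypt_with_heap_req(size_t reqsize, const unsigned char *aad)
    {
            unsigned char *req = calloc(1, reqsize + AAD_LEN);
            unsigned char *aad_copy;

            if (!req)
                    return -1;    /* the kernel code returns -ENOMEM here */

            aad_copy = req + reqsize;
            memcpy(aad_copy, aad, AAD_LEN);

            /* ... point the scatterlist at aad_copy and run the cipher ... */

            memset(req, 0, reqsize + AAD_LEN);    /* analogue of kzfree() */
            free(req);
            return 0;
    }
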
diff --git a/net/mac80211/aes_gcm.c b/net/mac80211/aes_gcm.c
index 3afe361fd27c..8a4397cc1b08 100644
--- a/net/mac80211/aes_gcm.c
+++ b/net/mac80211/aes_gcm.c
@@ -15,20 +15,23 @@
#include "key.h"
#include "aes_gcm.h"
-void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
- u8 *data, size_t data_len, u8 *mic)
+int ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
+ u8 *data, size_t data_len, u8 *mic)
{
struct scatterlist sg[3];
+ struct aead_request *aead_req;
+ int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
+ u8 *__aad;
- char aead_req_data[sizeof(struct aead_request) +
- crypto_aead_reqsize(tfm)]
- __aligned(__alignof__(struct aead_request));
- struct aead_request *aead_req = (void *)aead_req_data;
+ aead_req = kzalloc(reqsize + GCM_AAD_LEN, GFP_ATOMIC);
+ if (!aead_req)
+ return -ENOMEM;
- memset(aead_req, 0, sizeof(aead_req_data));
+ __aad = (u8 *)aead_req + reqsize;
+ memcpy(__aad, aad, GCM_AAD_LEN);
sg_init_table(sg, 3);
- sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad));
+ sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad));
sg_set_buf(&sg[1], data, data_len);
sg_set_buf(&sg[2], mic, IEEE80211_GCMP_MIC_LEN);
@@ -37,24 +40,31 @@ void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
aead_request_set_ad(aead_req, sg[0].length);
crypto_aead_encrypt(aead_req);
+ kzfree(aead_req);
+ return 0;
}
int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
u8 *data, size_t data_len, u8 *mic)
{
struct scatterlist sg[3];
- char aead_req_data[sizeof(struct aead_request) +
- crypto_aead_reqsize(tfm)]
- __aligned(__alignof__(struct aead_request));
- struct aead_request *aead_req = (void *)aead_req_data;
+ struct aead_request *aead_req;
+ int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
+ u8 *__aad;
+ int err;
if (data_len == 0)
return -EINVAL;
- memset(aead_req, 0, sizeof(aead_req_data));
+ aead_req = kzalloc(reqsize + GCM_AAD_LEN, GFP_ATOMIC);
+ if (!aead_req)
+ return -ENOMEM;
+
+ __aad = (u8 *)aead_req + reqsize;
+ memcpy(__aad, aad, GCM_AAD_LEN);
sg_init_table(sg, 3);
- sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad));
+ sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad));
sg_set_buf(&sg[1], data, data_len);
sg_set_buf(&sg[2], mic, IEEE80211_GCMP_MIC_LEN);
@@ -63,7 +73,10 @@ int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
data_len + IEEE80211_GCMP_MIC_LEN, j_0);
aead_request_set_ad(aead_req, sg[0].length);
- return crypto_aead_decrypt(aead_req);
+ err = crypto_aead_decrypt(aead_req);
+ kzfree(aead_req);
+
+ return err;
}
struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[],
diff --git a/net/mac80211/aes_gcm.h b/net/mac80211/aes_gcm.h
index 1347fda6b76a..55aed5352494 100644
--- a/net/mac80211/aes_gcm.h
+++ b/net/mac80211/aes_gcm.h
@@ -11,8 +11,10 @@
#include <linux/crypto.h>
-void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
- u8 *data, size_t data_len, u8 *mic);
+#define GCM_AAD_LEN 32
+
+int ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
+ u8 *data, size_t data_len, u8 *mic);
int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
u8 *data, size_t data_len, u8 *mic);
struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[],
diff --git a/net/mac80211/aes_gmac.c b/net/mac80211/aes_gmac.c
index 3ddd927aaf30..bd72a862ddb7 100644
--- a/net/mac80211/aes_gmac.c
+++ b/net/mac80211/aes_gmac.c
@@ -17,28 +17,27 @@
#include "key.h"
#include "aes_gmac.h"
-#define GMAC_MIC_LEN 16
-#define GMAC_NONCE_LEN 12
-#define AAD_LEN 20
-
int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
const u8 *data, size_t data_len, u8 *mic)
{
struct scatterlist sg[4];
- char aead_req_data[sizeof(struct aead_request) +
- crypto_aead_reqsize(tfm)]
- __aligned(__alignof__(struct aead_request));
- struct aead_request *aead_req = (void *)aead_req_data;
- u8 zero[GMAC_MIC_LEN], iv[AES_BLOCK_SIZE];
+ u8 *zero, *__aad, iv[AES_BLOCK_SIZE];
+ struct aead_request *aead_req;
+ int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
if (data_len < GMAC_MIC_LEN)
return -EINVAL;
- memset(aead_req, 0, sizeof(aead_req_data));
+ aead_req = kzalloc(reqsize + GMAC_MIC_LEN + GMAC_AAD_LEN, GFP_ATOMIC);
+ if (!aead_req)
+ return -ENOMEM;
+
+ zero = (u8 *)aead_req + reqsize;
+ __aad = zero + GMAC_MIC_LEN;
+ memcpy(__aad, aad, GMAC_AAD_LEN);
- memset(zero, 0, GMAC_MIC_LEN);
sg_init_table(sg, 4);
- sg_set_buf(&sg[0], aad, AAD_LEN);
+ sg_set_buf(&sg[0], __aad, GMAC_AAD_LEN);
sg_set_buf(&sg[1], data, data_len - GMAC_MIC_LEN);
sg_set_buf(&sg[2], zero, GMAC_MIC_LEN);
sg_set_buf(&sg[3], mic, GMAC_MIC_LEN);
@@ -49,9 +48,10 @@ int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
aead_request_set_tfm(aead_req, tfm);
aead_request_set_crypt(aead_req, sg, sg, 0, iv);
- aead_request_set_ad(aead_req, AAD_LEN + data_len);
+ aead_request_set_ad(aead_req, GMAC_AAD_LEN + data_len);
crypto_aead_encrypt(aead_req);
+ kzfree(aead_req);
return 0;
}
diff --git a/net/mac80211/aes_gmac.h b/net/mac80211/aes_gmac.h
index d328204d73a8..32e6442c95be 100644
--- a/net/mac80211/aes_gmac.h
+++ b/net/mac80211/aes_gmac.h
@@ -11,6 +11,10 @@
#include <linux/crypto.h>
+#define GMAC_AAD_LEN 20
+#define GMAC_MIC_LEN 16
+#define GMAC_NONCE_LEN 12
+
struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[],
size_t key_len);
int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index c3f610bba3fe..eede5c6db8d5 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -820,7 +820,7 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT)
break;
rcu_read_lock();
- sta = sta_info_get(sdata, mgmt->da);
+ sta = sta_info_get_bss(sdata, mgmt->da);
rcu_read_unlock();
if (!sta)
return -ENOLINK;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 6175db385ba7..a47bbc973f2d 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2298,6 +2298,8 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
__le16 fc = hdr->frame_control;
struct sk_buff_head frame_list;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
+ struct ethhdr ethhdr;
+ const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;
if (unlikely(!ieee80211_is_data(fc)))
return RX_CONTINUE;
@@ -2308,24 +2310,53 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
if (!(status->rx_flags & IEEE80211_RX_AMSDU))
return RX_CONTINUE;
- if (ieee80211_has_a4(hdr->frame_control) &&
- rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
- !rx->sdata->u.vlan.sta)
- return RX_DROP_UNUSABLE;
+ if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
+ switch (rx->sdata->vif.type) {
+ case NL80211_IFTYPE_AP_VLAN:
+ if (!rx->sdata->u.vlan.sta)
+ return RX_DROP_UNUSABLE;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (!rx->sdata->u.mgd.use_4addr)
+ return RX_DROP_UNUSABLE;
+ break;
+ default:
+ return RX_DROP_UNUSABLE;
+ }
+ check_da = NULL;
+ check_sa = NULL;
+ } else switch (rx->sdata->vif.type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ check_da = NULL;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (!rx->sta ||
+ !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
+ check_sa = NULL;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ check_sa = NULL;
+ break;
+ default:
+ break;
+ }
- if (is_multicast_ether_addr(hdr->addr1) &&
- ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
- rx->sdata->u.vlan.sta) ||
- (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
- rx->sdata->u.mgd.use_4addr)))
+ if (is_multicast_ether_addr(hdr->addr1))
return RX_DROP_UNUSABLE;
skb->dev = dev;
__skb_queue_head_init(&frame_list);
+ if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
+ rx->sdata->vif.addr,
+ rx->sdata->vif.type))
+ return RX_DROP_UNUSABLE;
+
ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
rx->sdata->vif.type,
- rx->local->hw.extra_tx_headroom, true);
+ rx->local->hw.extra_tx_headroom,
+ check_da, check_sa);
while (!skb_queue_empty(&frame_list)) {
rx->skb = __skb_dequeue(&frame_list);
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index b48c1e13e281..42ce9bd4426f 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -405,7 +405,7 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb,
u8 *pos;
u8 pn[6];
u64 pn64;
- u8 aad[2 * AES_BLOCK_SIZE];
+ u8 aad[CCM_AAD_LEN];
u8 b_0[AES_BLOCK_SIZE];
if (info->control.hw_key &&
@@ -461,10 +461,8 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb,
pos += IEEE80211_CCMP_HDR_LEN;
ccmp_special_blocks(skb, pn, b_0, aad);
- ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len,
- skb_put(skb, mic_len), mic_len);
-
- return 0;
+ return ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len,
+ skb_put(skb, mic_len), mic_len);
}
@@ -639,7 +637,7 @@ static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
u8 *pos;
u8 pn[6];
u64 pn64;
- u8 aad[2 * AES_BLOCK_SIZE];
+ u8 aad[GCM_AAD_LEN];
u8 j_0[AES_BLOCK_SIZE];
if (info->control.hw_key &&
@@ -696,10 +694,8 @@ static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
pos += IEEE80211_GCMP_HDR_LEN;
gcmp_special_blocks(skb, pn, j_0, aad);
- ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len,
- skb_put(skb, IEEE80211_GCMP_MIC_LEN));
-
- return 0;
+ return ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len,
+ skb_put(skb, IEEE80211_GCMP_MIC_LEN));
}
ieee80211_tx_result
@@ -1123,9 +1119,9 @@ ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx)
struct ieee80211_key *key = tx->key;
struct ieee80211_mmie_16 *mmie;
struct ieee80211_hdr *hdr;
- u8 aad[20];
+ u8 aad[GMAC_AAD_LEN];
u64 pn64;
- u8 nonce[12];
+ u8 nonce[GMAC_NONCE_LEN];
if (WARN_ON(skb_queue_len(&tx->skbs) != 1))
return TX_DROP;
@@ -1171,7 +1167,7 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_key *key = rx->key;
struct ieee80211_mmie_16 *mmie;
- u8 aad[20], mic[16], ipn[6], nonce[12];
+ u8 aad[GMAC_AAD_LEN], mic[GMAC_MIC_LEN], ipn[6], nonce[GMAC_NONCE_LEN];
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
if (!ieee80211_is_mgmt(hdr->frame_control))
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
index 13290a70fa71..1308a56f2591 100644
--- a/net/ncsi/internal.h
+++ b/net/ncsi/internal.h
@@ -246,6 +246,7 @@ enum {
ncsi_dev_state_config_gls,
ncsi_dev_state_config_done,
ncsi_dev_state_suspend_select = 0x0401,
+ ncsi_dev_state_suspend_gls,
ncsi_dev_state_suspend_dcnt,
ncsi_dev_state_suspend_dc,
ncsi_dev_state_suspend_deselect,
@@ -264,6 +265,7 @@ struct ncsi_dev_priv {
#endif
unsigned int package_num; /* Number of packages */
struct list_head packages; /* List of packages */
+ struct ncsi_channel *hot_channel; /* Channel was ever active */
struct ncsi_request requests[256]; /* Request table */
unsigned int request_id; /* Last used request ID */
#define NCSI_REQ_START_IDX 1
diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
index b41a6617d498..6898e7229285 100644
--- a/net/ncsi/ncsi-aen.c
+++ b/net/ncsi/ncsi-aen.c
@@ -141,23 +141,35 @@ static int ncsi_aen_handler_hncdsc(struct ncsi_dev_priv *ndp,
return -ENODEV;
/* If the channel is active one, we need reconfigure it */
+ spin_lock_irqsave(&nc->lock, flags);
ncm = &nc->modes[NCSI_MODE_LINK];
hncdsc = (struct ncsi_aen_hncdsc_pkt *)h;
ncm->data[3] = ntohl(hncdsc->status);
if (!list_empty(&nc->link) ||
- nc->state != NCSI_CHANNEL_ACTIVE ||
- (ncm->data[3] & 0x1))
+ nc->state != NCSI_CHANNEL_ACTIVE) {
+ spin_unlock_irqrestore(&nc->lock, flags);
return 0;
+ }
- if (ndp->flags & NCSI_DEV_HWA)
+ spin_unlock_irqrestore(&nc->lock, flags);
+ if (!(ndp->flags & NCSI_DEV_HWA) && !(ncm->data[3] & 0x1))
ndp->flags |= NCSI_DEV_RESHUFFLE;
/* If this channel is the active one and the link doesn't
* work, we have to choose another channel to be active one.
* The logic here is exactly similar to what we do when link
* is down on the active channel.
+ *
+ * On the other hand, we need to configure it when the host
+ * driver state on the active channel becomes ready.
*/
ncsi_stop_channel_monitor(nc);
+
+ spin_lock_irqsave(&nc->lock, flags);
+ nc->state = (ncm->data[3] & 0x1) ? NCSI_CHANNEL_INACTIVE :
+ NCSI_CHANNEL_ACTIVE;
+ spin_unlock_irqrestore(&nc->lock, flags);
+
spin_lock_irqsave(&ndp->lock, flags);
list_add_tail_rcu(&nc->link, &ndp->channel_queue);
spin_unlock_irqrestore(&ndp->lock, flags);
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index 5e509e547c2d..a3bd5fa8ad09 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -540,42 +540,86 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
nd->state = ncsi_dev_state_suspend_select;
/* Fall through */
case ncsi_dev_state_suspend_select:
- case ncsi_dev_state_suspend_dcnt:
- case ncsi_dev_state_suspend_dc:
- case ncsi_dev_state_suspend_deselect:
ndp->pending_req_num = 1;
- np = ndp->active_package;
- nc = ndp->active_channel;
+ nca.type = NCSI_PKT_CMD_SP;
nca.package = np->id;
- if (nd->state == ncsi_dev_state_suspend_select) {
- nca.type = NCSI_PKT_CMD_SP;
- nca.channel = NCSI_RESERVED_CHANNEL;
- if (ndp->flags & NCSI_DEV_HWA)
- nca.bytes[0] = 0;
- else
- nca.bytes[0] = 1;
+ nca.channel = NCSI_RESERVED_CHANNEL;
+ if (ndp->flags & NCSI_DEV_HWA)
+ nca.bytes[0] = 0;
+ else
+ nca.bytes[0] = 1;
+
+ /* Retrieve the last link states of the channels in the current
+ * package when the current active channel needs to fail over
+ * to another one, since we will possibly select another
+ * channel as the next active one. The channels' link states
+ * are the most important factor in that selection, so they
+ * must be accurate. Unfortunately, the link states of inactive
+ * channels can't be updated with LSC AENs in time.
+ */
+ if (ndp->flags & NCSI_DEV_RESHUFFLE)
+ nd->state = ncsi_dev_state_suspend_gls;
+ else
nd->state = ncsi_dev_state_suspend_dcnt;
- } else if (nd->state == ncsi_dev_state_suspend_dcnt) {
- nca.type = NCSI_PKT_CMD_DCNT;
- nca.channel = nc->id;
- nd->state = ncsi_dev_state_suspend_dc;
- } else if (nd->state == ncsi_dev_state_suspend_dc) {
- nca.type = NCSI_PKT_CMD_DC;
+ ret = ncsi_xmit_cmd(&nca);
+ if (ret)
+ goto error;
+
+ break;
+ case ncsi_dev_state_suspend_gls:
+ ndp->pending_req_num = np->channel_num;
+
+ nca.type = NCSI_PKT_CMD_GLS;
+ nca.package = np->id;
+
+ nd->state = ncsi_dev_state_suspend_dcnt;
+ NCSI_FOR_EACH_CHANNEL(np, nc) {
nca.channel = nc->id;
- nca.bytes[0] = 1;
- nd->state = ncsi_dev_state_suspend_deselect;
- } else if (nd->state == ncsi_dev_state_suspend_deselect) {
- nca.type = NCSI_PKT_CMD_DP;
- nca.channel = NCSI_RESERVED_CHANNEL;
- nd->state = ncsi_dev_state_suspend_done;
+ ret = ncsi_xmit_cmd(&nca);
+ if (ret)
+ goto error;
}
+ break;
+ case ncsi_dev_state_suspend_dcnt:
+ ndp->pending_req_num = 1;
+
+ nca.type = NCSI_PKT_CMD_DCNT;
+ nca.package = np->id;
+ nca.channel = nc->id;
+
+ nd->state = ncsi_dev_state_suspend_dc;
ret = ncsi_xmit_cmd(&nca);
- if (ret) {
- nd->state = ncsi_dev_state_functional;
- return;
- }
+ if (ret)
+ goto error;
+
+ break;
+ case ncsi_dev_state_suspend_dc:
+ ndp->pending_req_num = 1;
+
+ nca.type = NCSI_PKT_CMD_DC;
+ nca.package = np->id;
+ nca.channel = nc->id;
+ nca.bytes[0] = 1;
+
+ nd->state = ncsi_dev_state_suspend_deselect;
+ ret = ncsi_xmit_cmd(&nca);
+ if (ret)
+ goto error;
+
+ break;
+ case ncsi_dev_state_suspend_deselect:
+ ndp->pending_req_num = 1;
+
+ nca.type = NCSI_PKT_CMD_DP;
+ nca.package = np->id;
+ nca.channel = NCSI_RESERVED_CHANNEL;
+
+ nd->state = ncsi_dev_state_suspend_done;
+ ret = ncsi_xmit_cmd(&nca);
+ if (ret)
+ goto error;
break;
case ncsi_dev_state_suspend_done:
@@ -589,6 +633,10 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
nd->state);
}
+
+ return;
+error:
+ nd->state = ncsi_dev_state_functional;
}
static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
@@ -597,6 +645,7 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
struct net_device *dev = nd->dev;
struct ncsi_package *np = ndp->active_package;
struct ncsi_channel *nc = ndp->active_channel;
+ struct ncsi_channel *hot_nc = NULL;
struct ncsi_cmd_arg nca;
unsigned char index;
unsigned long flags;
@@ -702,12 +751,20 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
break;
case ncsi_dev_state_config_done:
spin_lock_irqsave(&nc->lock, flags);
- if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1)
+ if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
+ hot_nc = nc;
nc->state = NCSI_CHANNEL_ACTIVE;
- else
+ } else {
+ hot_nc = NULL;
nc->state = NCSI_CHANNEL_INACTIVE;
+ }
spin_unlock_irqrestore(&nc->lock, flags);
+ /* Update the hot channel */
+ spin_lock_irqsave(&ndp->lock, flags);
+ ndp->hot_channel = hot_nc;
+ spin_unlock_irqrestore(&ndp->lock, flags);
+
ncsi_start_channel_monitor(nc);
ncsi_process_next_channel(ndp);
break;
@@ -725,10 +782,14 @@ error:
static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
{
struct ncsi_package *np;
- struct ncsi_channel *nc, *found;
+ struct ncsi_channel *nc, *found, *hot_nc;
struct ncsi_channel_mode *ncm;
unsigned long flags;
+ spin_lock_irqsave(&ndp->lock, flags);
+ hot_nc = ndp->hot_channel;
+ spin_unlock_irqrestore(&ndp->lock, flags);
+
/* The search is done once an inactive channel with up
* link is found.
*/
@@ -746,6 +807,9 @@ static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
if (!found)
found = nc;
+ if (nc == hot_nc)
+ found = nc;
+
ncm = &nc->modes[NCSI_MODE_LINK];
if (ncm->data[2] & 0x1) {
spin_unlock_irqrestore(&nc->lock, flags);
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index fcb5d1df11e9..004af030ef1a 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -361,16 +361,9 @@ next_hook:
if (ret == 0)
ret = -EPERM;
} else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
- int err;
-
- RCU_INIT_POINTER(state->hook_entries, entry);
- err = nf_queue(skb, state, verdict >> NF_VERDICT_QBITS);
- if (err < 0) {
- if (err == -ESRCH &&
- (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
- goto next_hook;
- kfree_skb(skb);
- }
+ ret = nf_queue(skb, state, &entry, verdict);
+ if (ret == 1 && entry)
+ goto next_hook;
}
return ret;
}
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index ba6a1d421222..df2f5a3901df 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -983,7 +983,7 @@ static void gc_worker(struct work_struct *work)
return;
ratio = scanned ? expired_count * 100 / scanned : 0;
- if (ratio >= 90)
+ if (ratio >= 90 || expired_count == GC_MAX_EVICTS)
next_run = 0;
gc_work->last_bucket = i;
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
index e0adb5959342..9fdb655f85bc 100644
--- a/net/netfilter/nf_internals.h
+++ b/net/netfilter/nf_internals.h
@@ -18,7 +18,7 @@ unsigned int nf_iterate(struct sk_buff *skb, struct nf_hook_state *state,
/* nf_queue.c */
int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
- unsigned int queuenum);
+ struct nf_hook_entry **entryp, unsigned int verdict);
void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry);
int __init netfilter_queue_init(void);
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 96964a0070e1..8f08d759844a 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -107,13 +107,8 @@ void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry)
rcu_read_unlock();
}
-/*
- * Any packet that leaves via this function must come back
- * through nf_reinject().
- */
-int nf_queue(struct sk_buff *skb,
- struct nf_hook_state *state,
- unsigned int queuenum)
+static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
+ unsigned int queuenum)
{
int status = -ENOENT;
struct nf_queue_entry *entry = NULL;
@@ -161,6 +156,27 @@ err:
return status;
}
+/* Packets leaving via this function must come back through nf_reinject(). */
+int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
+ struct nf_hook_entry **entryp, unsigned int verdict)
+{
+ struct nf_hook_entry *entry = *entryp;
+ int ret;
+
+ RCU_INIT_POINTER(state->hook_entries, entry);
+ ret = __nf_queue(skb, state, verdict >> NF_VERDICT_QBITS);
+ if (ret < 0) {
+ if (ret == -ESRCH &&
+ (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS)) {
+ *entryp = rcu_dereference(entry->next);
+ return 1;
+ }
+ kfree_skb(skb);
+ }
+
+ return 0;
+}
+
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
struct nf_hook_entry *hook_entry;
@@ -187,26 +203,26 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
entry->state.thresh = INT_MIN;
if (verdict == NF_ACCEPT) {
- next_hook:
- verdict = nf_iterate(skb, &entry->state, &hook_entry);
+ hook_entry = rcu_dereference(hook_entry->next);
+ if (hook_entry)
+next_hook:
+ verdict = nf_iterate(skb, &entry->state, &hook_entry);
}
switch (verdict & NF_VERDICT_MASK) {
case NF_ACCEPT:
case NF_STOP:
+okfn:
local_bh_disable();
entry->state.okfn(entry->state.net, entry->state.sk, skb);
local_bh_enable();
break;
case NF_QUEUE:
- RCU_INIT_POINTER(entry->state.hook_entries, hook_entry);
- err = nf_queue(skb, &entry->state,
- verdict >> NF_VERDICT_QBITS);
- if (err < 0) {
- if (err == -ESRCH &&
- (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
+ err = nf_queue(skb, &entry->state, &hook_entry, verdict);
+ if (err == 1) {
+ if (hook_entry)
goto next_hook;
- kfree_skb(skb);
+ goto okfn;
}
break;
case NF_STOLEN:
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index b70d3ea1430e..24db22257586 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -4423,7 +4423,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
*/
unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest)
{
- int val;
+ u32 val;
val = ntohl(nla_get_be32(attr));
if (val > max)
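
The one-character type change above matters because ntohl() returns an unsigned value: stored in a signed int, anything at or above 0x80000000 turns negative and slips past the 'val > max' bound check. A small self-contained sketch of the pitfall, with hypothetical helper names:

    #include <stdint.h>
    #include <stdio.h>

    static int check_signed(uint32_t val, int max)
    {
            int v = val;            /* buggy: large values wrap negative */
            return v > max ? -1 : 0;
    }

    static int check_unsigned(uint32_t val, int max)
    {
            uint32_t v = val;       /* fixed: compare as unsigned */
            return v > (uint32_t)max ? -1 : 0;
    }

    int main(void)
    {
            uint32_t attr = 0x80000001u;    /* far larger than any sane max */

            printf("signed check:   %d\n", check_signed(attr, 255));   /* 0: accepted */
            printf("unsigned check: %d\n", check_unsigned(attr, 255)); /* -1: rejected */
            return 0;
    }
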
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index e3b83c31da2e..517f08767a3c 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -158,7 +158,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
if (tb[NFTA_DYNSET_TIMEOUT] != NULL) {
if (!(set->flags & NFT_SET_TIMEOUT))
return -EINVAL;
- timeout = be64_to_cpu(nla_get_be64(tb[NFTA_DYNSET_TIMEOUT]));
+ timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(
+ tb[NFTA_DYNSET_TIMEOUT])));
}
priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]);
@@ -246,7 +247,8 @@ static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
goto nla_put_failure;
if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name))
goto nla_put_failure;
- if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT, cpu_to_be64(priv->timeout),
+ if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT,
+ cpu_to_be64(jiffies_to_msecs(priv->timeout)),
NFTA_DYNSET_PAD))
goto nla_put_failure;
if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr))
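
The dynset change above fixes a unit mismatch: the netlink attribute carries milliseconds, so it is converted to jiffies on init and back to milliseconds on dump instead of being stored raw. A rough userspace analogue of that symmetric conversion, using a made-up tick rate in place of HZ:

    #include <stdint.h>
    #include <stdio.h>

    #define TICK_HZ 250    /* illustrative stand-in for the kernel's HZ */

    static uint64_t msecs_to_ticks(uint64_t ms) { return ms * TICK_HZ / 1000; }
    static uint64_t ticks_to_msecs(uint64_t t)  { return t * 1000 / TICK_HZ; }

    int main(void)
    {
            uint64_t user_timeout_ms = 30000;                   /* value from userspace */
            uint64_t stored = msecs_to_ticks(user_timeout_ms);  /* convert once, on input */

            /* the dump path converts back, so userspace always sees milliseconds */
            printf("stored ticks: %llu, reported ms: %llu\n",
                   (unsigned long long)stored,
                   (unsigned long long)ticks_to_msecs(stored));
            return 0;
    }
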
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index a84cf3d66056..47beb3abcc9d 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -59,7 +59,8 @@ static int nft_exthdr_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
struct nft_exthdr *priv = nft_expr_priv(expr);
- u32 offset, len, err;
+ u32 offset, len;
+ int err;
if (tb[NFTA_EXTHDR_DREG] == NULL ||
tb[NFTA_EXTHDR_TYPE] == NULL ||
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 09473b415b95..baf694de3935 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -44,6 +44,7 @@ static const struct nla_policy nft_hash_policy[NFTA_HASH_MAX + 1] = {
[NFTA_HASH_LEN] = { .type = NLA_U32 },
[NFTA_HASH_MODULUS] = { .type = NLA_U32 },
[NFTA_HASH_SEED] = { .type = NLA_U32 },
+ [NFTA_HASH_OFFSET] = { .type = NLA_U32 },
};
static int nft_hash_init(const struct nft_ctx *ctx,
diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c
index c6d5358482d1..fbc88009ca2e 100644
--- a/net/netfilter/nft_range.c
+++ b/net/netfilter/nft_range.c
@@ -28,22 +28,20 @@ static void nft_range_eval(const struct nft_expr *expr,
const struct nft_pktinfo *pkt)
{
const struct nft_range_expr *priv = nft_expr_priv(expr);
- bool mismatch;
int d1, d2;
d1 = memcmp(&regs->data[priv->sreg], &priv->data_from, priv->len);
d2 = memcmp(&regs->data[priv->sreg], &priv->data_to, priv->len);
switch (priv->op) {
case NFT_RANGE_EQ:
- mismatch = (d1 < 0 || d2 > 0);
+ if (d1 < 0 || d2 > 0)
+ regs->verdict.code = NFT_BREAK;
break;
case NFT_RANGE_NEQ:
- mismatch = (d1 >= 0 && d2 <= 0);
+ if (d1 >= 0 && d2 <= 0)
+ regs->verdict.code = NFT_BREAK;
break;
}
-
- if (mismatch)
- regs->verdict.code = NFT_BREAK;
}
static const struct nla_policy nft_range_policy[NFTA_RANGE_MAX + 1] = {
@@ -59,6 +57,7 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
struct nft_range_expr *priv = nft_expr_priv(expr);
struct nft_data_desc desc_from, desc_to;
int err;
+ u32 op;
err = nft_data_init(NULL, &priv->data_from, sizeof(priv->data_from),
&desc_from, tb[NFTA_RANGE_FROM_DATA]);
@@ -80,7 +79,20 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
if (err < 0)
goto err2;
- priv->op = ntohl(nla_get_be32(tb[NFTA_RANGE_OP]));
+ err = nft_parse_u32_check(tb[NFTA_RANGE_OP], U8_MAX, &op);
+ if (err < 0)
+ goto err2;
+
+ switch (op) {
+ case NFT_RANGE_EQ:
+ case NFT_RANGE_NEQ:
+ break;
+ default:
+ err = -EINVAL;
+ goto err2;
+ }
+
+ priv->op = op;
priv->len = desc_from.len;
return 0;
err2:
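
The range init change above stops trusting the raw NFTA_RANGE_OP value: it is bounds-checked and then whitelisted against the two operations the evaluator implements. A hedged sketch of the same validation shape, with invented names:

    #include <stdint.h>
    #include <errno.h>

    enum range_op { RANGE_EQ = 0, RANGE_NEQ = 1 };

    /* Bound-check the raw value first, then accept only known operations,
     * so an unexpected op can never reach the evaluator's switch. */
    int parse_range_op(uint32_t raw, enum range_op *out)
    {
            if (raw > UINT8_MAX)            /* analogue of nft_parse_u32_check() */
                    return -ERANGE;

            switch (raw) {
            case RANGE_EQ:
            case RANGE_NEQ:
                    *out = (enum range_op)raw;
                    return 0;
            default:
                    return -EINVAL;         /* reject anything else up front */
            }
    }
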
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index e0aa7c1d0224..fc4977456c30 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1513,7 +1513,7 @@ xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
if (!num_hooks)
return ERR_PTR(-EINVAL);
- ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
+ ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL);
if (ops == NULL)
return ERR_PTR(-ENOMEM);
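
Switching from kmalloc(sizeof(*ops) * num_hooks) to kcalloc() gets overflow checking on the multiplication for free. A userspace illustration of the difference, with calloc() playing the role of kcalloc():

    #include <stdlib.h>

    struct hook_ops { void *fn; int priority; };

    /* calloc() checks count * size for overflow and returns NULL, whereas a
     * hand-written malloc(n * size) silently wraps and hands back a buffer
     * that is far too small. */
    struct hook_ops *alloc_ops(size_t num_hooks)
    {
            return calloc(num_hooks, sizeof(struct hook_ops));
    }
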
diff --git a/net/netfilter/xt_NFLOG.c b/net/netfilter/xt_NFLOG.c
index 018eed7e1ff1..8668a5c18dc3 100644
--- a/net/netfilter/xt_NFLOG.c
+++ b/net/netfilter/xt_NFLOG.c
@@ -32,6 +32,7 @@ nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
li.u.ulog.copy_len = info->len;
li.u.ulog.group = info->group;
li.u.ulog.qthreshold = info->threshold;
+ li.u.ulog.flags = 0;
if (info->flags & XT_NFLOG_F_COPY_LEN)
li.u.ulog.flags |= NF_LOG_F_COPY_LEN;
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 2fab0c65aa94..b89b688e9d01 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -431,7 +431,7 @@ static void htable_put(struct xt_hashlimit_htable *hinfo)
CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32 ie.
*/
#define MAX_CPJ_v1 (0xFFFFFFFF / (HZ*60*60*24))
-#define MAX_CPJ (0xFFFFFFFFFFFFFFFF / (HZ*60*60*24))
+#define MAX_CPJ (0xFFFFFFFFFFFFFFFFULL / (HZ*60*60*24))
/* Repeated shift and or gives us all 1s, final shift and add 1 gives
* us the power of 2 below the theoretical max, so GCC simply does a
@@ -473,7 +473,7 @@ static u64 user2credits(u64 user, int revision)
return div64_u64(user * HZ * CREDITS_PER_JIFFY_v1,
XT_HASHLIMIT_SCALE);
} else {
- if (user > 0xFFFFFFFFFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
+ if (user > 0xFFFFFFFFFFFFFFFFULL / (HZ*CREDITS_PER_JIFFY))
return div64_u64(user, XT_HASHLIMIT_SCALE_v2)
* HZ * CREDITS_PER_JIFFY;
diff --git a/net/netfilter/xt_ipcomp.c b/net/netfilter/xt_ipcomp.c
index 89d53104c6b3..000e70377f85 100644
--- a/net/netfilter/xt_ipcomp.c
+++ b/net/netfilter/xt_ipcomp.c
@@ -26,6 +26,8 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Fan Du <fan.du@windriver.com>");
MODULE_DESCRIPTION("Xtables: IPv4/6 IPsec-IPComp SPI match");
+MODULE_ALIAS("ipt_ipcomp");
+MODULE_ALIAS("ip6t_ipcomp");
/* Returns 1 if the spi is matched by the range, 0 otherwise */
static inline bool
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 11db0d619c00..d2238b204691 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -250,7 +250,7 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po);
static int packet_direct_xmit(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
- netdev_features_t features;
+ struct sk_buff *orig_skb = skb;
struct netdev_queue *txq;
int ret = NETDEV_TX_BUSY;
@@ -258,9 +258,8 @@ static int packet_direct_xmit(struct sk_buff *skb)
!netif_carrier_ok(dev)))
goto drop;
- features = netif_skb_features(skb);
- if (skb_needs_linearize(skb, features) &&
- __skb_linearize(skb))
+ skb = validate_xmit_skb_list(skb, dev);
+ if (skb != orig_skb)
goto drop;
txq = skb_get_tx_queue(dev, skb);
@@ -280,7 +279,7 @@ static int packet_direct_xmit(struct sk_buff *skb)
return ret;
drop:
atomic_long_inc(&dev->tx_dropped);
- kfree_skb(skb);
+ kfree_skb_list(skb);
return NET_XMIT_DROP;
}
diff --git a/net/rds/Makefile b/net/rds/Makefile
index 0e72bec1529f..56c7d27eefee 100644
--- a/net/rds/Makefile
+++ b/net/rds/Makefile
@@ -13,5 +13,5 @@ obj-$(CONFIG_RDS_TCP) += rds_tcp.o
rds_tcp-y := tcp.o tcp_connect.o tcp_listen.o tcp_recv.o \
tcp_send.o tcp_stats.o
-ccflags-$(CONFIG_RDS_DEBUG) := -DDEBUG
+ccflags-$(CONFIG_RDS_DEBUG) := -DRDS_DEBUG
diff --git a/net/rds/rds.h b/net/rds/rds.h
index fd0bccb2f9f9..67ba67c058b1 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -33,7 +33,7 @@
#define KERNEL_HAS_ATOMIC64
#endif
-#ifdef DEBUG
+#ifdef RDS_DEBUG
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
#else
/* sigh, pr_debug() causes unused variable warnings */
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 4353a29f3b57..1ed18d8c9c9f 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -276,7 +276,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
goto error;
trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
- here, ERR_PTR(ret));
+ here, NULL);
spin_lock_bh(&call->conn->params.peer->lock);
hlist_add_head(&call->error_link,
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 941b724d523b..862eea6b266c 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -193,8 +193,8 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
fl6->fl6_dport = htons(7001);
fl6->fl6_sport = htons(7000);
dst = ip6_route_output(&init_net, NULL, fl6);
- if (IS_ERR(dst)) {
- _leave(" [route err %ld]", PTR_ERR(dst));
+ if (dst->error) {
+ _leave(" [route err %d]", dst->error);
return;
}
break;
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index a512b18c0088..f893d180da1c 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -1028,8 +1028,7 @@ static struct nlattr *find_dump_kind(const struct nlmsghdr *n)
if (tb[1] == NULL)
return NULL;
- if (nla_parse(tb2, TCA_ACT_MAX, nla_data(tb[1]),
- nla_len(tb[1]), NULL) < 0)
+ if (nla_parse_nested(tb2, TCA_ACT_MAX, tb[1], NULL) < 0)
return NULL;
kind = tb2[TCA_ACT_KIND];
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 667dc382df82..6b07fba5770b 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -207,8 +207,11 @@ out:
static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
u64 lastuse)
{
- tcf_lastuse_update(&a->tcfa_tm);
+ struct tcf_mirred *m = to_mirred(a);
+ struct tcf_t *tm = &m->tcf_tm;
+
_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
+ tm->lastuse = lastuse;
}
static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 2ee29a3375f6..2b2a7974e4bb 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -345,7 +345,8 @@ replay:
if (err == 0) {
struct tcf_proto *next = rtnl_dereference(tp->next);
- tfilter_notify(net, skb, n, tp, fh,
+ tfilter_notify(net, skb, n, tp,
+ t->tcm_handle,
RTM_DELTFILTER, false);
if (tcf_destroy(tp, false))
RCU_INIT_POINTER(*back, next);
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 2a5c1896d18f..6cb0df859195 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -418,6 +418,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
__u8 has_data = 0;
int gso = 0;
int pktcount = 0;
+ int auth_len = 0;
struct dst_entry *dst;
unsigned char *auth = NULL; /* pointer to auth in skb data */
@@ -510,7 +511,12 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
list_for_each_entry(chunk, &packet->chunk_list, list) {
int padded = SCTP_PAD4(chunk->skb->len);
- if (pkt_size + padded > tp->pathmtu)
+ if (chunk == packet->auth)
+ auth_len = padded;
+ else if (auth_len + padded + packet->overhead >
+ tp->pathmtu)
+ goto nomem;
+ else if (pkt_size + padded > tp->pathmtu)
break;
pkt_size += padded;
}
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 026e3bca4a94..8ec20a64a3f8 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3422,6 +3422,12 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
+ /* Report violation if chunk len overflows */
+ ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
+ if (ch_end > skb_tail_pointer(skb))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
/* Now that we know we at least have a chunk header,
* do things that are type appropriate.
*/
@@ -3453,12 +3459,6 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
}
}
- /* Report violation if chunk len overflows */
- ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
- if (ch_end > skb_tail_pointer(skb))
- return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
- commands);
-
ch = (sctp_chunkhdr_t *) ch_end;
} while (ch_end < skb_tail_pointer(skb));
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index fb02c7033307..9fbb6feb8c27 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4687,7 +4687,7 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
int __user *optlen)
{
- if (len <= 0)
+ if (len == 0)
return -EINVAL;
if (len > sizeof(struct sctp_event_subscribe))
len = sizeof(struct sctp_event_subscribe);
@@ -6430,6 +6430,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;
+ if (len < 0)
+ return -EINVAL;
+
lock_sock(sk);
switch (optname) {
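
The sctp change above moves the sign check into the common getsockopt path so no handler ever sees a negative length, while the per-option handler still rejects a zero length. A simplified sketch of that split, with made-up structure and function names:

    #include <errno.h>
    #include <string.h>

    struct event_subscribe { unsigned char flags[16]; };

    int getsockopt_events(int len, void *optval, const struct event_subscribe *ev)
    {
            if (len == 0)
                    return -EINVAL;               /* handler-specific check */
            if ((size_t)len > sizeof(*ev))
                    len = sizeof(*ev);
            memcpy(optval, ev, len);
            return len;
    }

    int do_getsockopt(int len, void *optval, const struct event_subscribe *ev)
    {
            if (len < 0)
                    return -EINVAL;   /* common check: a negative length would
                                       * wrap into a huge size_t at memcpy() */
            return getsockopt_events(len, optval, ev);
    }
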
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index d8bd97a5a7c9..3dfd769dc5b5 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1616,7 +1616,7 @@ gss_validate(struct rpc_task *task, __be32 *p)
{
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
- __be32 seq;
+ __be32 *seq = NULL;
struct kvec iov;
struct xdr_buf verf_buf;
struct xdr_netobj mic;
@@ -1631,9 +1631,12 @@ gss_validate(struct rpc_task *task, __be32 *p)
goto out_bad;
if (flav != RPC_AUTH_GSS)
goto out_bad;
- seq = htonl(task->tk_rqstp->rq_seqno);
- iov.iov_base = &seq;
- iov.iov_len = sizeof(seq);
+ seq = kmalloc(4, GFP_NOFS);
+ if (!seq)
+ goto out_bad;
+ *seq = htonl(task->tk_rqstp->rq_seqno);
+ iov.iov_base = seq;
+ iov.iov_len = 4;
xdr_buf_from_iov(&iov, &verf_buf);
mic.data = (u8 *)p;
mic.len = len;
@@ -1653,11 +1656,13 @@ gss_validate(struct rpc_task *task, __be32 *p)
gss_put_ctx(ctx);
dprintk("RPC: %5u %s: gss_verify_mic succeeded.\n",
task->tk_pid, __func__);
+ kfree(seq);
return p + XDR_QUADLEN(len);
out_bad:
gss_put_ctx(ctx);
dprintk("RPC: %5u %s failed ret %ld.\n", task->tk_pid, __func__,
PTR_ERR(ret));
+ kfree(seq);
return ret;
}
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 244245bcbbd2..90115ceefd49 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -166,8 +166,8 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
unsigned int usage, struct xdr_netobj *cksumout)
{
struct scatterlist sg[1];
- int err;
- u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+ int err = -1;
+ u8 *checksumdata;
u8 rc4salt[4];
struct crypto_ahash *md5;
struct crypto_ahash *hmac_md5;
@@ -187,23 +187,22 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
return GSS_S_FAILURE;
}
+ checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
+ if (!checksumdata)
+ return GSS_S_FAILURE;
+
md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(md5))
- return GSS_S_FAILURE;
+ goto out_free_cksum;
hmac_md5 = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0,
CRYPTO_ALG_ASYNC);
- if (IS_ERR(hmac_md5)) {
- crypto_free_ahash(md5);
- return GSS_S_FAILURE;
- }
+ if (IS_ERR(hmac_md5))
+ goto out_free_md5;
req = ahash_request_alloc(md5, GFP_KERNEL);
- if (!req) {
- crypto_free_ahash(hmac_md5);
- crypto_free_ahash(md5);
- return GSS_S_FAILURE;
- }
+ if (!req)
+ goto out_free_hmac_md5;
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
@@ -232,11 +231,8 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
ahash_request_free(req);
req = ahash_request_alloc(hmac_md5, GFP_KERNEL);
- if (!req) {
- crypto_free_ahash(hmac_md5);
- crypto_free_ahash(md5);
- return GSS_S_FAILURE;
- }
+ if (!req)
+ goto out_free_hmac_md5;
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
@@ -258,8 +254,12 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
cksumout->len = kctx->gk5e->cksumlength;
out:
ahash_request_free(req);
- crypto_free_ahash(md5);
+out_free_hmac_md5:
crypto_free_ahash(hmac_md5);
+out_free_md5:
+ crypto_free_ahash(md5);
+out_free_cksum:
+ kfree(checksumdata);
return err ? GSS_S_FAILURE : 0;
}
@@ -276,8 +276,8 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
struct crypto_ahash *tfm;
struct ahash_request *req;
struct scatterlist sg[1];
- int err;
- u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+ int err = -1;
+ u8 *checksumdata;
unsigned int checksumlen;
if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR)
@@ -291,15 +291,17 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
return GSS_S_FAILURE;
}
+ checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
+ if (checksumdata == NULL)
+ return GSS_S_FAILURE;
+
tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
- return GSS_S_FAILURE;
+ goto out_free_cksum;
req = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!req) {
- crypto_free_ahash(tfm);
- return GSS_S_FAILURE;
- }
+ if (!req)
+ goto out_free_ahash;
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
@@ -349,7 +351,10 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
cksumout->len = kctx->gk5e->cksumlength;
out:
ahash_request_free(req);
+out_free_ahash:
crypto_free_ahash(tfm);
+out_free_cksum:
+ kfree(checksumdata);
return err ? GSS_S_FAILURE : 0;
}
@@ -368,8 +373,8 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
struct crypto_ahash *tfm;
struct ahash_request *req;
struct scatterlist sg[1];
- int err;
- u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+ int err = -1;
+ u8 *checksumdata;
unsigned int checksumlen;
if (kctx->gk5e->keyed_cksum == 0) {
@@ -383,16 +388,18 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
return GSS_S_FAILURE;
}
+ checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
+ if (!checksumdata)
+ return GSS_S_FAILURE;
+
tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
- return GSS_S_FAILURE;
+ goto out_free_cksum;
checksumlen = crypto_ahash_digestsize(tfm);
req = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!req) {
- crypto_free_ahash(tfm);
- return GSS_S_FAILURE;
- }
+ if (!req)
+ goto out_free_ahash;
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
@@ -433,7 +440,10 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
}
out:
ahash_request_free(req);
+out_free_ahash:
crypto_free_ahash(tfm);
+out_free_cksum:
+ kfree(checksumdata);
return err ? GSS_S_FAILURE : 0;
}
@@ -666,14 +676,17 @@ gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
u32 ret;
struct scatterlist sg[1];
SKCIPHER_REQUEST_ON_STACK(req, cipher);
- u8 data[GSS_KRB5_MAX_BLOCKSIZE * 2];
+ u8 *data;
struct page **save_pages;
u32 len = buf->len - offset;
- if (len > ARRAY_SIZE(data)) {
+ if (len > GSS_KRB5_MAX_BLOCKSIZE * 2) {
WARN_ON(0);
return -ENOMEM;
}
+ data = kmalloc(GSS_KRB5_MAX_BLOCKSIZE * 2, GFP_NOFS);
+ if (!data)
+ return -ENOMEM;
/*
* For encryption, we want to read from the cleartext
@@ -708,6 +721,7 @@ gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
ret = write_bytes_to_xdr_buf(buf, offset, data, len);
out:
+ kfree(data);
return ret;
}
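
The krb5 checksum helpers above replace large on-stack buffers with kmalloc() and unwind failures through goto labels in reverse acquisition order. A compact userspace sketch of that cleanup style, where plain allocations stand in for the crypto API calls:

    #include <stdlib.h>

    int compute_checksum(size_t cksum_len)
    {
            int err = -1;
            unsigned char *cksum;
            void *hash_ctx = NULL, *req = NULL;

            cksum = malloc(cksum_len);       /* was a large on-stack array */
            if (!cksum)
                    return -1;

            hash_ctx = malloc(64);           /* stand-in for crypto_alloc_ahash() */
            if (!hash_ctx)
                    goto out_free_cksum;

            req = malloc(128);               /* stand-in for ahash_request_alloc() */
            if (!req)
                    goto out_free_ctx;

            /* ... feed data, read back the digest into cksum ... */
            err = 0;

            free(req);
    out_free_ctx:
            free(hash_ctx);
    out_free_cksum:
            free(cksum);
            return err;
    }
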
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index d67f7e1bc82d..45662d7f0943 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -718,30 +718,37 @@ gss_write_null_verf(struct svc_rqst *rqstp)
static int
gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq)
{
- __be32 xdr_seq;
+ __be32 *xdr_seq;
u32 maj_stat;
struct xdr_buf verf_data;
struct xdr_netobj mic;
__be32 *p;
struct kvec iov;
+ int err = -1;
svc_putnl(rqstp->rq_res.head, RPC_AUTH_GSS);
- xdr_seq = htonl(seq);
+ xdr_seq = kmalloc(4, GFP_KERNEL);
+ if (!xdr_seq)
+ return -1;
+ *xdr_seq = htonl(seq);
- iov.iov_base = &xdr_seq;
- iov.iov_len = sizeof(xdr_seq);
+ iov.iov_base = xdr_seq;
+ iov.iov_len = 4;
xdr_buf_from_iov(&iov, &verf_data);
p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len;
mic.data = (u8 *)(p + 1);
maj_stat = gss_get_mic(ctx_id, &verf_data, &mic);
if (maj_stat != GSS_S_COMPLETE)
- return -1;
+ goto out;
*p++ = htonl(mic.len);
memset((u8 *)p + mic.len, 0, round_up_to_quad(mic.len) - mic.len);
p += XDR_QUADLEN(mic.len);
if (!xdr_ressize_check(rqstp, p))
- return -1;
- return 0;
+ goto out;
+ err = 0;
+out:
+ kfree(xdr_seq);
+ return err;
}
struct gss_domain {
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
index 2d8545c34095..20027f8de129 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -177,18 +177,26 @@ xprt_rdma_bc_allocate(struct rpc_task *task)
return -EINVAL;
}
+ /* svc_rdma_sendto releases this page */
page = alloc_page(RPCRDMA_DEF_GFP);
if (!page)
return -ENOMEM;
-
rqst->rq_buffer = page_address(page);
+
+ rqst->rq_rbuffer = kmalloc(rqst->rq_rcvsize, RPCRDMA_DEF_GFP);
+ if (!rqst->rq_rbuffer) {
+ put_page(page);
+ return -ENOMEM;
+ }
return 0;
}
static void
xprt_rdma_bc_free(struct rpc_task *task)
{
- /* No-op: ctxt and page have already been freed. */
+ struct rpc_rqst *rqst = task->tk_rqstp;
+
+ kfree(rqst->rq_rbuffer);
}
static int
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 0137af1c0916..e01c825bc683 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2563,6 +2563,7 @@ static int bc_malloc(struct rpc_task *task)
buf->len = PAGE_SIZE;
rqst->rq_buffer = buf->data;
+ rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
return 0;
}
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 02beb35f577f..3b95fe980fa2 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -771,6 +771,9 @@ int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
u32 mask = BR_LEARNING | BR_LEARNING_SYNC | BR_FLOOD;
int err;
+ if (!netif_is_bridge_port(dev))
+ return -EOPNOTSUPP;
+
err = switchdev_port_attr_get(dev, &attr);
if (err && err != -EOPNOTSUPP)
return err;
@@ -926,6 +929,9 @@ int switchdev_port_bridge_setlink(struct net_device *dev,
struct nlattr *afspec;
int err = 0;
+ if (!netif_is_bridge_port(dev))
+ return -EOPNOTSUPP;
+
protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
IFLA_PROTINFO);
if (protinfo) {
@@ -959,6 +965,9 @@ int switchdev_port_bridge_dellink(struct net_device *dev,
{
struct nlattr *afspec;
+ if (!netif_is_bridge_port(dev))
+ return -EOPNOTSUPP;
+
afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
IFLA_AF_SPEC);
if (afspec)
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 753f774cb46f..aa1babbea385 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -247,11 +247,17 @@ int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb)
*
* RCU is locked, no other locks set
*/
-void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, u32 acked)
+void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l,
+ struct tipc_msg *hdr)
{
struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
+ u16 acked = msg_bcast_ack(hdr);
struct sk_buff_head xmitq;
+ /* Ignore bc acks sent by peer before bcast synch point was received */
+ if (msg_bc_ack_invalid(hdr))
+ return;
+
__skb_queue_head_init(&xmitq);
tipc_bcast_lock(net);
@@ -279,11 +285,11 @@ int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
__skb_queue_head_init(&xmitq);
tipc_bcast_lock(net);
- if (msg_type(hdr) == STATE_MSG) {
+ if (msg_type(hdr) != STATE_MSG) {
+ tipc_link_bc_init_rcv(l, hdr);
+ } else if (!msg_bc_ack_invalid(hdr)) {
tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr), &xmitq);
rc = tipc_link_bc_sync_rcv(l, hdr, &xmitq);
- } else {
- tipc_link_bc_init_rcv(l, hdr);
}
tipc_bcast_unlock(net);
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 5ffe34472ccd..855d53c64ab3 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -55,7 +55,8 @@ void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id);
int tipc_bcast_get_mtu(struct net *net);
int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list);
int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb);
-void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, u32 acked);
+void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l,
+ struct tipc_msg *hdr);
int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
struct tipc_msg *hdr);
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index b36e16cdc945..1055164c6232 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1312,6 +1312,7 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
msg_set_next_sent(hdr, l->snd_nxt);
msg_set_ack(hdr, l->rcv_nxt - 1);
msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
+ msg_set_bc_ack_invalid(hdr, !node_up);
msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
msg_set_link_tolerance(hdr, tolerance);
msg_set_linkprio(hdr, priority);
@@ -1574,6 +1575,7 @@ static void tipc_link_build_bc_init_msg(struct tipc_link *l,
__skb_queue_head_init(&list);
if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
return;
+ msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
tipc_link_xmit(l, &list, xmitq);
}
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index c3832cdf2278..50a739860d37 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -714,6 +714,23 @@ static inline void msg_set_peer_stopping(struct tipc_msg *m, u32 s)
msg_set_bits(m, 5, 13, 0x1, s);
}
+static inline bool msg_bc_ack_invalid(struct tipc_msg *m)
+{
+ switch (msg_user(m)) {
+ case BCAST_PROTOCOL:
+ case NAME_DISTRIBUTOR:
+ case LINK_PROTOCOL:
+ return msg_bits(m, 5, 14, 0x1);
+ default:
+ return false;
+ }
+}
+
+static inline void msg_set_bc_ack_invalid(struct tipc_msg *m, bool invalid)
+{
+ msg_set_bits(m, 5, 14, 0x1, invalid);
+}
+
static inline char *msg_media_addr(struct tipc_msg *m)
{
return (char *)&m->hdr[TIPC_MEDIA_INFO_OFFSET];
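
The new msg_bc_ack_invalid()/msg_set_bc_ack_invalid() helpers above follow TIPC's usual word/position/mask accessor pattern for header bits. A generic sketch of such accessors, ignoring the byte-order handling the real msg_set_bits() performs:

    #include <stdint.h>
    #include <stdio.h>

    static void set_bits(uint32_t *hdr, int word, int pos, uint32_t mask, uint32_t val)
    {
            hdr[word] &= ~(mask << pos);          /* clear the field */
            hdr[word] |= (val & mask) << pos;     /* then store the new value */
    }

    static uint32_t get_bits(const uint32_t *hdr, int word, int pos, uint32_t mask)
    {
            return (hdr[word] >> pos) & mask;
    }

    int main(void)
    {
            uint32_t hdr[6] = { 0 };

            set_bits(hdr, 5, 14, 0x1, 1);         /* e.g. a "bc ack invalid" flag */
            printf("flag = %u\n", get_bits(hdr, 5, 14, 0x1));
            return 0;
    }
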
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index a04fe9be1c60..c1cfd92de17a 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -156,6 +156,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
pr_warn("Bulk publication failure\n");
return;
}
+ msg_set_bc_ack_invalid(buf_msg(skb), true);
item = (struct distr_item *)msg_data(buf_msg(skb));
}
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 7ef14e2d2356..9d2f4c2b08ab 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1535,7 +1535,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
if (unlikely(usr == LINK_PROTOCOL))
tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack))
- tipc_bcast_ack_rcv(net, n->bc_entry.link, bc_ack);
+ tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);
/* Receive packet directly if conditions permit */
tipc_node_read_lock(n);
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 0082f4b01795..14b3f007826d 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -104,13 +104,16 @@ static int wiphy_suspend(struct device *dev)
rtnl_lock();
if (rdev->wiphy.registered) {
- if (!rdev->wiphy.wowlan_config)
+ if (!rdev->wiphy.wowlan_config) {
cfg80211_leave_all(rdev);
+ cfg80211_process_rdev_events(rdev);
+ }
if (rdev->ops->suspend)
ret = rdev_suspend(rdev, rdev->wiphy.wowlan_config);
if (ret == 1) {
/* Driver refuse to configure wowlan */
cfg80211_leave_all(rdev);
+ cfg80211_process_rdev_events(rdev);
ret = rdev_suspend(rdev, NULL);
}
}
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 8edce22d1b93..5ea12afc7706 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -420,8 +420,8 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
}
EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen);
-static int __ieee80211_data_to_8023(struct sk_buff *skb, struct ethhdr *ehdr,
- const u8 *addr, enum nl80211_iftype iftype)
+int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
+ const u8 *addr, enum nl80211_iftype iftype)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct {
@@ -525,13 +525,7 @@ static int __ieee80211_data_to_8023(struct sk_buff *skb, struct ethhdr *ehdr,
return 0;
}
-
-int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
- enum nl80211_iftype iftype)
-{
- return __ieee80211_data_to_8023(skb, NULL, addr, iftype);
-}
-EXPORT_SYMBOL(ieee80211_data_to_8023);
+EXPORT_SYMBOL(ieee80211_data_to_8023_exthdr);
int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
enum nl80211_iftype iftype,
@@ -746,24 +740,18 @@ __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen,
void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
const u8 *addr, enum nl80211_iftype iftype,
const unsigned int extra_headroom,
- bool has_80211_header)
+ const u8 *check_da, const u8 *check_sa)
{
unsigned int hlen = ALIGN(extra_headroom, 4);
struct sk_buff *frame = NULL;
u16 ethertype;
u8 *payload;
- int offset = 0, remaining, err;
+ int offset = 0, remaining;
struct ethhdr eth;
bool reuse_frag = skb->head_frag && !skb_has_frag_list(skb);
bool reuse_skb = false;
bool last = false;
- if (has_80211_header) {
- err = __ieee80211_data_to_8023(skb, &eth, addr, iftype);
- if (err)
- goto out;
- }
-
while (!last) {
unsigned int subframe_len;
int len;
@@ -780,8 +768,17 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
goto purge;
offset += sizeof(struct ethhdr);
- /* reuse skb for the last subframe */
last = remaining <= subframe_len + padding;
+
+ /* FIXME: should we really accept multicast DA? */
+ if ((check_da && !is_multicast_ether_addr(eth.h_dest) &&
+ !ether_addr_equal(check_da, eth.h_dest)) ||
+ (check_sa && !ether_addr_equal(check_sa, eth.h_source))) {
+ offset += len + padding;
+ continue;
+ }
+
+ /* reuse skb for the last subframe */
if (!skb_is_nonlinear(skb) && !reuse_frag && last) {
skb_pull(skb, offset);
frame = skb;
@@ -819,7 +816,6 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
purge:
__skb_queue_purge(list);
- out:
dev_kfree_skb(skb);
}
EXPORT_SYMBOL(ieee80211_amsdu_to_8023s);
diff --git a/samples/bpf/parse_ldabs.c b/samples/bpf/parse_ldabs.c
index d17550198d06..6db6b21fdc6d 100644
--- a/samples/bpf/parse_ldabs.c
+++ b/samples/bpf/parse_ldabs.c
@@ -4,6 +4,7 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
+#define KBUILD_MODNAME "foo"
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
diff --git a/samples/bpf/parse_simple.c b/samples/bpf/parse_simple.c
index cf2511c33905..10af53d33cc2 100644
--- a/samples/bpf/parse_simple.c
+++ b/samples/bpf/parse_simple.c
@@ -4,6 +4,7 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
+#define KBUILD_MODNAME "foo"
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
diff --git a/samples/bpf/parse_varlen.c b/samples/bpf/parse_varlen.c
index edab34dce79b..95c16324760c 100644
--- a/samples/bpf/parse_varlen.c
+++ b/samples/bpf/parse_varlen.c
@@ -4,6 +4,7 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
+#define KBUILD_MODNAME "foo"
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
diff --git a/samples/bpf/tcbpf1_kern.c b/samples/bpf/tcbpf1_kern.c
index fa051b3d53ee..274c884c87fe 100644
--- a/samples/bpf/tcbpf1_kern.c
+++ b/samples/bpf/tcbpf1_kern.c
@@ -1,3 +1,4 @@
+#define KBUILD_MODNAME "foo"
#include <uapi/linux/bpf.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_packet.h>
diff --git a/samples/bpf/tcbpf2_kern.c b/samples/bpf/tcbpf2_kern.c
index 3303bb85593b..9c823a609e75 100644
--- a/samples/bpf/tcbpf2_kern.c
+++ b/samples/bpf/tcbpf2_kern.c
@@ -5,6 +5,7 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
+#define KBUILD_MODNAME "foo"
#include <uapi/linux/bpf.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_packet.h>
diff --git a/samples/bpf/test_cgrp2_tc_kern.c b/samples/bpf/test_cgrp2_tc_kern.c
index 10ff73404e3a..1547b36a7b7b 100644
--- a/samples/bpf/test_cgrp2_tc_kern.c
+++ b/samples/bpf/test_cgrp2_tc_kern.c
@@ -4,6 +4,7 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
+#define KBUILD_MODNAME "foo"
#include <uapi/linux/if_ether.h>
#include <uapi/linux/in6.h>
#include <uapi/linux/ipv6.h>
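
Note on the samples/bpf hunks: each sample gains a `#define KBUILD_MODNAME "foo"` ahead of the kernel headers because some of the included networking headers expand macros that reference KBUILD_MODNAME. For in-tree objects Kbuild injects that define on the compiler command line; the standalone clang invocation used for these BPF programs does not, so the source file has to provide it itself (the value is arbitrary). A hedged equivalent, assuming one preferred to patch the build rule instead, would be a command-line define along these lines:

	/* assumption: either form satisfies the headers' use of KBUILD_MODNAME */
	/*   clang ... -DKBUILD_MODNAME='"foo"' -c parse_ldabs.c ...           */
	#define KBUILD_MODNAME "foo"	/* in-source variant chosen by the patch */
	#include <uapi/linux/bpf.h>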
diff --git a/scripts/gcc-plugins/cyc_complexity_plugin.c b/scripts/gcc-plugins/cyc_complexity_plugin.c
index 34df974c6ba3..8af7db06122d 100644
--- a/scripts/gcc-plugins/cyc_complexity_plugin.c
+++ b/scripts/gcc-plugins/cyc_complexity_plugin.c
@@ -20,7 +20,7 @@
#include "gcc-common.h"
-int plugin_is_GPL_compatible;
+__visible int plugin_is_GPL_compatible;
static struct plugin_info cyc_complexity_plugin_info = {
.version = "20160225",
@@ -49,7 +49,7 @@ static unsigned int cyc_complexity_execute(void)
#include "gcc-generate-gimple-pass.h"
-int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
+__visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
{
const char * const plugin_name = plugin_info->base_name;
struct register_pass_info cyc_complexity_pass_info;
diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
index 172850bcd0d9..950fd2e64bb7 100644
--- a/scripts/gcc-plugins/gcc-common.h
+++ b/scripts/gcc-plugins/gcc-common.h
@@ -130,6 +130,7 @@ extern void dump_gimple_stmt(pretty_printer *, gimple, int, int);
#endif
#define __unused __attribute__((__unused__))
+#define __visible __attribute__((visibility("default")))
#define DECL_NAME_POINTER(node) IDENTIFIER_POINTER(DECL_NAME(node))
#define DECL_NAME_LENGTH(node) IDENTIFIER_LENGTH(DECL_NAME(node))
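
Note on the gcc-plugins hunks: plugin_is_GPL_compatible and plugin_init() are the two symbols GCC resolves with dlsym() after dlopen()ing a plugin, so they must keep default ELF visibility even when the plugin objects are built with -fvisibility=hidden; the new __visible macro in gcc-common.h centralizes that attribute. A toy plugin skeleton showing the pattern (a sketch, assuming gcc-common.h pulls in the usual GCC plugin headers; not one of the kernel's plugins):

	#include "gcc-common.h"

	__visible int plugin_is_GPL_compatible;

	__visible int plugin_init(struct plugin_name_args *plugin_info,
				  struct plugin_gcc_version *version)
	{
		/* refuse to load against a gcc we were not built for */
		if (!plugin_default_version_check(version, &gcc_version))
			return 1;
		return 0;
	}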
diff --git a/scripts/gcc-plugins/latent_entropy_plugin.c b/scripts/gcc-plugins/latent_entropy_plugin.c
index ff1939b804ae..8160f1c1b56e 100644
--- a/scripts/gcc-plugins/latent_entropy_plugin.c
+++ b/scripts/gcc-plugins/latent_entropy_plugin.c
@@ -77,7 +77,7 @@
#include "gcc-common.h"
-int plugin_is_GPL_compatible;
+__visible int plugin_is_GPL_compatible;
static GTY(()) tree latent_entropy_decl;
@@ -340,7 +340,7 @@ static enum tree_code get_op(tree *rhs)
break;
}
if (rhs)
- *rhs = build_int_cstu(unsigned_intDI_type_node, random_const);
+ *rhs = build_int_cstu(long_unsigned_type_node, random_const);
return op;
}
@@ -372,7 +372,7 @@ static void __perturb_latent_entropy(gimple_stmt_iterator *gsi,
enum tree_code op;
/* 1. create temporary copy of latent_entropy */
- temp = create_var(unsigned_intDI_type_node, "tmp_latent_entropy");
+ temp = create_var(long_unsigned_type_node, "temp_latent_entropy");
/* 2. read... */
add_referenced_var(latent_entropy_decl);
@@ -459,13 +459,13 @@ static void init_local_entropy(basic_block bb, tree local_entropy)
gsi_insert_before(&gsi, call, GSI_NEW_STMT);
update_stmt(call);
- udi_frame_addr = fold_convert(unsigned_intDI_type_node, frame_addr);
+ udi_frame_addr = fold_convert(long_unsigned_type_node, frame_addr);
assign = gimple_build_assign(local_entropy, udi_frame_addr);
gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
update_stmt(assign);
/* 3. create temporary copy of latent_entropy */
- tmp = create_var(unsigned_intDI_type_node, "tmp_latent_entropy");
+ tmp = create_var(long_unsigned_type_node, "temp_latent_entropy");
/* 4. read the global entropy variable into local entropy */
add_referenced_var(latent_entropy_decl);
@@ -480,7 +480,7 @@ static void init_local_entropy(basic_block bb, tree local_entropy)
update_stmt(assign);
rand_cst = get_random_const();
- rand_const = build_int_cstu(unsigned_intDI_type_node, rand_cst);
+ rand_const = build_int_cstu(long_unsigned_type_node, rand_cst);
op = get_op(NULL);
assign = create_assign(op, local_entropy, local_entropy, rand_const);
gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
@@ -529,7 +529,7 @@ static unsigned int latent_entropy_execute(void)
}
/* 1. create the local entropy variable */
- local_entropy = create_var(unsigned_intDI_type_node, "local_entropy");
+ local_entropy = create_var(long_unsigned_type_node, "local_entropy");
/* 2. initialize the local entropy variable */
init_local_entropy(bb, local_entropy);
@@ -561,10 +561,9 @@ static void latent_entropy_start_unit(void *gcc_data __unused,
if (in_lto_p)
return;
- /* extern volatile u64 latent_entropy */
- gcc_assert(TYPE_PRECISION(long_long_unsigned_type_node) == 64);
- quals = TYPE_QUALS(long_long_unsigned_type_node) | TYPE_QUAL_VOLATILE;
- type = build_qualified_type(long_long_unsigned_type_node, quals);
+ /* extern volatile unsigned long latent_entropy */
+ quals = TYPE_QUALS(long_unsigned_type_node) | TYPE_QUAL_VOLATILE;
+ type = build_qualified_type(long_unsigned_type_node, quals);
id = get_identifier("latent_entropy");
latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, id, type);
@@ -584,8 +583,8 @@ static void latent_entropy_start_unit(void *gcc_data __unused,
| TODO_update_ssa
#include "gcc-generate-gimple-pass.h"
-int plugin_init(struct plugin_name_args *plugin_info,
- struct plugin_gcc_version *version)
+__visible int plugin_init(struct plugin_name_args *plugin_info,
+ struct plugin_gcc_version *version)
{
bool enabled = true;
const char * const plugin_name = plugin_info->base_name;
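
Note on the latent_entropy hunks: the plugin switches from unsigned_intDI_type_node (always 64-bit) to long_unsigned_type_node so the GIMPLE it emits matches the C-side declaration, which per the updated comment is now an unsigned long; on 32-bit targets the old DImode type disagreed with that width. A hedged restatement (the matching C declaration is an assumption, it is not part of this diff):

	/* C side, per the comment in latent_entropy_start_unit(): */
	extern volatile unsigned long latent_entropy;

	/* build_int_cstu(long_unsigned_type_node, x) now follows the target's
	 * unsigned long width (4 bytes on 32-bit, 8 on 64-bit), whereas
	 * unsigned_intDI_type_node was unconditionally 64-bit.
	 */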
diff --git a/scripts/gcc-plugins/sancov_plugin.c b/scripts/gcc-plugins/sancov_plugin.c
index aedd6113cb73..7ea0b3f50739 100644
--- a/scripts/gcc-plugins/sancov_plugin.c
+++ b/scripts/gcc-plugins/sancov_plugin.c
@@ -21,7 +21,7 @@
#include "gcc-common.h"
-int plugin_is_GPL_compatible;
+__visible int plugin_is_GPL_compatible;
tree sancov_fndecl;
@@ -86,7 +86,7 @@ static void sancov_start_unit(void __unused *gcc_data, void __unused *user_data)
#endif
}
-int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
+__visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
{
int i;
struct register_pass_info sancov_plugin_pass_info;
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
index f826e8739023..d942c7c2bc0a 100644
--- a/security/keys/Kconfig
+++ b/security/keys/Kconfig
@@ -41,7 +41,7 @@ config BIG_KEYS
bool "Large payload keys"
depends on KEYS
depends on TMPFS
- select CRYPTO
+ depends on (CRYPTO_ANSI_CPRNG = y || CRYPTO_DRBG = y)
select CRYPTO_AES
select CRYPTO_ECB
select CRYPTO_RNG
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
index c0b3030b5634..835c1ab30d01 100644
--- a/security/keys/big_key.c
+++ b/security/keys/big_key.c
@@ -9,6 +9,7 @@
* 2 of the Licence, or (at your option) any later version.
*/
+#define pr_fmt(fmt) "big_key: "fmt
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/file.h>
@@ -341,44 +342,48 @@ error:
*/
static int __init big_key_init(void)
{
- return register_key_type(&key_type_big_key);
-}
-
-/*
- * Initialize big_key crypto and RNG algorithms
- */
-static int __init big_key_crypto_init(void)
-{
- int ret = -EINVAL;
+ struct crypto_skcipher *cipher;
+ struct crypto_rng *rng;
+ int ret;
- /* init RNG */
- big_key_rng = crypto_alloc_rng(big_key_rng_name, 0, 0);
- if (IS_ERR(big_key_rng)) {
- big_key_rng = NULL;
- return -EFAULT;
+ rng = crypto_alloc_rng(big_key_rng_name, 0, 0);
+ if (IS_ERR(rng)) {
+ pr_err("Can't alloc rng: %ld\n", PTR_ERR(rng));
+ return PTR_ERR(rng);
}
+ big_key_rng = rng;
+
/* seed RNG */
- ret = crypto_rng_reset(big_key_rng, NULL, crypto_rng_seedsize(big_key_rng));
- if (ret)
- goto error;
+ ret = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng));
+ if (ret) {
+ pr_err("Can't reset rng: %d\n", ret);
+ goto error_rng;
+ }
/* init block cipher */
- big_key_skcipher = crypto_alloc_skcipher(big_key_alg_name,
- 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(big_key_skcipher)) {
- big_key_skcipher = NULL;
- ret = -EFAULT;
- goto error;
+ cipher = crypto_alloc_skcipher(big_key_alg_name, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(cipher)) {
+ ret = PTR_ERR(cipher);
+ pr_err("Can't alloc crypto: %d\n", ret);
+ goto error_rng;
+ }
+
+ big_key_skcipher = cipher;
+
+ ret = register_key_type(&key_type_big_key);
+ if (ret < 0) {
+ pr_err("Can't register type: %d\n", ret);
+ goto error_cipher;
}
return 0;
-error:
+error_cipher:
+ crypto_free_skcipher(big_key_skcipher);
+error_rng:
crypto_free_rng(big_key_rng);
- big_key_rng = NULL;
return ret;
}
-device_initcall(big_key_init);
-late_initcall(big_key_crypto_init);
+late_initcall(big_key_init);
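
Note on the big_key hunk: crypto setup and key-type registration are folded into a single late_initcall() with a goto-based unwind, so the key type only becomes visible once its RNG and skcipher exist, and a failure in a later step releases what earlier steps allocated. The general shape, as a hedged sketch with placeholder names (acquire_a/acquire_b/register_frontend are not kernel APIs):

	static int __init example_init(void)
	{
		int ret;

		ret = acquire_a();
		if (ret)
			return ret;

		ret = acquire_b();
		if (ret)
			goto err_a;

		ret = register_frontend();	/* expose it only when ready */
		if (ret)
			goto err_b;

		return 0;

	err_b:
		release_b();
	err_a:
		release_a();
		return ret;
	}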
diff --git a/security/keys/proc.c b/security/keys/proc.c
index f0611a6368cd..b9f531c9e4fa 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -181,7 +181,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
struct timespec now;
unsigned long timo;
key_ref_t key_ref, skey_ref;
- char xbuf[12];
+ char xbuf[16];
int rc;
struct keyring_search_context ctx = {
diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
index dcc102813aef..37d9cfbc29f9 100644
--- a/sound/core/seq/seq_timer.c
+++ b/sound/core/seq/seq_timer.c
@@ -448,8 +448,8 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
ktime_get_ts64(&tm);
tm = timespec64_sub(tm, tmr->last_update);
- cur_time.tv_nsec = tm.tv_nsec;
- cur_time.tv_sec = tm.tv_sec;
+ cur_time.tv_nsec += tm.tv_nsec;
+ cur_time.tv_sec += tm.tv_sec;
snd_seq_sanity_real_time(&cur_time);
}
spin_unlock_irqrestore(&tmr->lock, flags);
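
Note on the seq_timer hunk: snd_seq_timer_get_cur_time() was overwriting cur_time with only the delta since last_update; the fix accumulates the delta onto the stored value and relies on snd_seq_sanity_real_time() to re-normalize tv_nsec. A hedged illustration of that accumulate-then-normalize step (my assumption about what the sanity helper does; it is not this function's code):

	static void real_time_accumulate(snd_seq_real_time_t *t,
					 const struct timespec64 *delta)
	{
		t->tv_sec  += delta->tv_sec;
		t->tv_nsec += delta->tv_nsec;
		while (t->tv_nsec >= 1000000000) {	/* carry whole seconds */
			t->tv_nsec -= 1000000000;
			t->tv_sec++;
		}
	}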
diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
index d17937b92331..7e3aa50b21f9 100644
--- a/sound/pci/asihpi/hpioctl.c
+++ b/sound/pci/asihpi/hpioctl.c
@@ -111,7 +111,7 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return -EINVAL;
hm = kmalloc(sizeof(*hm), GFP_KERNEL);
- hr = kmalloc(sizeof(*hr), GFP_KERNEL);
+ hr = kzalloc(sizeof(*hr), GFP_KERNEL);
if (!hm || !hr) {
err = -ENOMEM;
goto out;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index c3469f756ec2..c64d986009a9 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -341,8 +341,7 @@ enum {
/* quirks for Nvidia */
#define AZX_DCAPS_PRESET_NVIDIA \
- (AZX_DCAPS_NO_MSI | /*AZX_DCAPS_ALIGN_BUFSIZE |*/ \
- AZX_DCAPS_NO_64BIT | AZX_DCAPS_CORBRP_SELF_CLEAR |\
+ (AZX_DCAPS_NO_MSI | AZX_DCAPS_CORBRP_SELF_CLEAR |\
AZX_DCAPS_SNOOP_TYPE(NVIDIA))
#define AZX_DCAPS_PRESET_CTHDA \
@@ -1716,6 +1715,10 @@ static int azx_first_init(struct azx *chip)
}
}
+ /* NVidia hardware normally only supports up to 40 bits of DMA */
+ if (chip->pci->vendor == PCI_VENDOR_ID_NVIDIA)
+ dma_bits = 40;
+
/* disable 64bit DMA address on some devices */
if (chip->driver_caps & AZX_DCAPS_NO_64BIT) {
dev_dbg(card->dev, "Disabling 64bit DMA\n");
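
Note on the hda_intel hunk: rather than forcing Nvidia controllers down to 32-bit DMA via AZX_DCAPS_NO_64BIT, the probe path now caps the address width at 40 bits for NVIDIA vendor IDs, since that is what the hardware decodes. A hedged sketch of how such a width typically turns into a mask (not the driver's exact code path, which computes dma_bits and applies the mask later):

	int dma_bits = 64;

	if (pci->vendor == PCI_VENDOR_ID_NVIDIA)
		dma_bits = 40;	/* NVidia HDA only decodes 40 address bits */

	if (dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(dma_bits)))
		dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(32));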
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index b58e8c76346a..2f909dd8b7b8 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5811,8 +5811,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
#define ALC295_STANDARD_PINS \
{0x12, 0xb7a60130}, \
{0x14, 0x90170110}, \
- {0x17, 0x21014020}, \
- {0x18, 0x21a19030}, \
{0x21, 0x04211020}
#define ALC298_STANDARD_PINS \
@@ -5859,11 +5857,19 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x1b, 0x02011020},
{0x21, 0x0221101f}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x14, 0x90170110},
+ {0x1b, 0x01011020},
+ {0x21, 0x0221101f}),
+ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x14, 0x90170130},
{0x1b, 0x01014020},
{0x21, 0x0221103f}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x14, 0x90170130},
+ {0x1b, 0x01011020},
+ {0x21, 0x0221103f}),
+ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x14, 0x90170130},
{0x1b, 0x02011020},
{0x21, 0x0221103f}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
@@ -6039,7 +6045,13 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
ALC292_STANDARD_PINS,
{0x13, 0x90a60140}),
SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
- ALC295_STANDARD_PINS),
+ ALC295_STANDARD_PINS,
+ {0x17, 0x21014020},
+ {0x18, 0x21a19030}),
+ SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC295_STANDARD_PINS,
+ {0x17, 0x21014040},
+ {0x18, 0x21a19050}),
SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC298_STANDARD_PINS,
{0x17, 0x90170110}),
@@ -6613,6 +6625,7 @@ enum {
ALC891_FIXUP_HEADSET_MODE,
ALC891_FIXUP_DELL_MIC_NO_PRESENCE,
ALC662_FIXUP_ACER_VERITON,
+ ALC892_FIXUP_ASROCK_MOBO,
};
static const struct hda_fixup alc662_fixups[] = {
@@ -6889,6 +6902,16 @@ static const struct hda_fixup alc662_fixups[] = {
{ }
}
},
+ [ALC892_FIXUP_ASROCK_MOBO] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x15, 0x40f000f0 }, /* disabled */
+ { 0x16, 0x40f000f0 }, /* disabled */
+ { 0x18, 0x01014011 }, /* LO */
+ { 0x1a, 0x01014012 }, /* LO */
+ { }
+ }
+ },
};
static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -6926,6 +6949,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
+ SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO),
SND_PCI_QUIRK(0x19da, 0xa130, "Zotac Z68", ALC662_FIXUP_ZOTAC_Z68),
SND_PCI_QUIRK(0x1b0a, 0x01b8, "ACER Veriton", ALC662_FIXUP_ACER_VERITON),
SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index c60a776e815d..8a59d4782a0f 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -2907,6 +2907,23 @@ AU0828_DEVICE(0x2040, 0x7260, "Hauppauge", "HVR-950Q"),
AU0828_DEVICE(0x2040, 0x7213, "Hauppauge", "HVR-950Q"),
AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
+/* Syntek STK1160 */
+{
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+ USB_DEVICE_ID_MATCH_INT_CLASS |
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+ .idVendor = 0x05e1,
+ .idProduct = 0x0408,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "Syntek",
+ .product_name = "STK1160",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_AUDIO_ALIGN_TRANSFER
+ }
+},
+
/* Digidesign Mbox */
{
/* Thanks to Clemens Ladisch <clemens@ladisch.de> */
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index 4490601a9235..e8a1f699058a 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -754,7 +754,7 @@ static struct rela *find_switch_table(struct objtool_file *file,
if (insn->type == INSN_JUMP_UNCONDITIONAL &&
insn->jump_dest &&
(insn->jump_dest->offset <= insn->offset ||
- insn->jump_dest->offset >= orig_insn->offset))
+ insn->jump_dest->offset > orig_insn->offset))
break;
text_rela = find_rela_by_dest_range(insn->sec, insn->offset,
diff --git a/tools/virtio/ringtest/Makefile b/tools/virtio/ringtest/Makefile
index 877a8a4721b6..c012edbdb13b 100644
--- a/tools/virtio/ringtest/Makefile
+++ b/tools/virtio/ringtest/Makefile
@@ -3,8 +3,8 @@ all:
all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder ptr_ring noring
CFLAGS += -Wall
-CFLAGS += -pthread -O2 -ggdb
-LDFLAGS += -pthread -O2 -ggdb
+CFLAGS += -pthread -O2 -ggdb -flto -fwhole-program
+LDFLAGS += -pthread -O2 -ggdb -flto -fwhole-program
main.o: main.c main.h
ring.o: ring.c main.h
diff --git a/tools/virtio/ringtest/main.c b/tools/virtio/ringtest/main.c
index 147abb452a6c..f31353fac541 100644
--- a/tools/virtio/ringtest/main.c
+++ b/tools/virtio/ringtest/main.c
@@ -96,7 +96,13 @@ void set_affinity(const char *arg)
assert(!ret);
}
-static void run_guest(void)
+void poll_used(void)
+{
+ while (used_empty())
+ busy_wait();
+}
+
+static void __attribute__((__flatten__)) run_guest(void)
{
int completed_before;
int completed = 0;
@@ -141,7 +147,7 @@ static void run_guest(void)
assert(completed <= bufs);
assert(started <= bufs);
if (do_sleep) {
- if (enable_call())
+ if (used_empty() && enable_call())
wait_for_call();
} else {
poll_used();
@@ -149,7 +155,13 @@ static void run_guest(void)
}
}
-static void run_host(void)
+void poll_avail(void)
+{
+ while (avail_empty())
+ busy_wait();
+}
+
+static void __attribute__((__flatten__)) run_host(void)
{
int completed_before;
int completed = 0;
@@ -160,7 +172,7 @@ static void run_host(void)
for (;;) {
if (do_sleep) {
- if (enable_kick())
+ if (avail_empty() && enable_kick())
wait_for_kick();
} else {
poll_avail();
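
Note on the ringtest main.c hunk: poll_used()/poll_avail() become generic busy-wait loops layered over two new per-implementation predicates, used_empty() and avail_empty(), and enable_call()/enable_kick() now report that same emptiness after arming the event index, so the sleeping path re-checks before blocking. A hedged, simplified restatement of the guest-side sleep protocol (the real loop also tracks completions):

	for (;;) {
		if (!used_empty())
			break;			/* work already pending */
		if (enable_call())		/* arm event index, re-check */
			wait_for_call();	/* sleep only if still empty */
		disable_call();
	}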
diff --git a/tools/virtio/ringtest/main.h b/tools/virtio/ringtest/main.h
index 16917acb0ade..34e63cc4c572 100644
--- a/tools/virtio/ringtest/main.h
+++ b/tools/virtio/ringtest/main.h
@@ -56,15 +56,15 @@ void alloc_ring(void);
int add_inbuf(unsigned, void *, void *);
void *get_buf(unsigned *, void **);
void disable_call();
+bool used_empty();
bool enable_call();
void kick_available();
-void poll_used();
/* host side */
void disable_kick();
+bool avail_empty();
bool enable_kick();
bool use_buf(unsigned *, void **);
void call_used();
-void poll_avail();
/* implemented by main */
extern bool do_sleep;
diff --git a/tools/virtio/ringtest/noring.c b/tools/virtio/ringtest/noring.c
index eda2f4824130..b8d1c1daac7c 100644
--- a/tools/virtio/ringtest/noring.c
+++ b/tools/virtio/ringtest/noring.c
@@ -24,8 +24,9 @@ void *get_buf(unsigned *lenp, void **bufp)
return "Buffer";
}
-void poll_used(void)
+bool used_empty()
{
+ return false;
}
void disable_call()
@@ -54,8 +55,9 @@ bool enable_kick()
assert(0);
}
-void poll_avail(void)
+bool avail_empty()
{
+ return false;
}
bool use_buf(unsigned *lenp, void **bufp)
diff --git a/tools/virtio/ringtest/ptr_ring.c b/tools/virtio/ringtest/ptr_ring.c
index bd2ad1d3b7a9..635b07b4fdd3 100644
--- a/tools/virtio/ringtest/ptr_ring.c
+++ b/tools/virtio/ringtest/ptr_ring.c
@@ -133,18 +133,9 @@ void *get_buf(unsigned *lenp, void **bufp)
return datap;
}
-void poll_used(void)
+bool used_empty()
{
- void *b;
-
- do {
- if (tailcnt == headcnt || __ptr_ring_full(&array)) {
- b = NULL;
- barrier();
- } else {
- b = "Buffer\n";
- }
- } while (!b);
+ return (tailcnt == headcnt || __ptr_ring_full(&array));
}
void disable_call()
@@ -173,14 +164,9 @@ bool enable_kick()
assert(0);
}
-void poll_avail(void)
+bool avail_empty()
{
- void *b;
-
- do {
- barrier();
- b = __ptr_ring_peek(&array);
- } while (!b);
+ return !__ptr_ring_peek(&array);
}
bool use_buf(unsigned *lenp, void **bufp)
diff --git a/tools/virtio/ringtest/ring.c b/tools/virtio/ringtest/ring.c
index c25c8d248b6b..747c5dd47be8 100644
--- a/tools/virtio/ringtest/ring.c
+++ b/tools/virtio/ringtest/ring.c
@@ -163,12 +163,11 @@ void *get_buf(unsigned *lenp, void **bufp)
return datap;
}
-void poll_used(void)
+bool used_empty()
{
unsigned head = (ring_size - 1) & guest.last_used_idx;
- while (ring[head].flags & DESC_HW)
- busy_wait();
+ return (ring[head].flags & DESC_HW);
}
void disable_call()
@@ -180,13 +179,11 @@ void disable_call()
bool enable_call()
{
- unsigned head = (ring_size - 1) & guest.last_used_idx;
-
event->call_index = guest.last_used_idx;
/* Flush call index write */
/* Barrier D (for pairing) */
smp_mb();
- return ring[head].flags & DESC_HW;
+ return used_empty();
}
void kick_available(void)
@@ -213,20 +210,17 @@ void disable_kick()
bool enable_kick()
{
- unsigned head = (ring_size - 1) & host.used_idx;
-
event->kick_index = host.used_idx;
/* Barrier C (for pairing) */
smp_mb();
- return !(ring[head].flags & DESC_HW);
+ return avail_empty();
}
-void poll_avail(void)
+bool avail_empty()
{
unsigned head = (ring_size - 1) & host.used_idx;
- while (!(ring[head].flags & DESC_HW))
- busy_wait();
+ return !(ring[head].flags & DESC_HW);
}
bool use_buf(unsigned *lenp, void **bufp)
diff --git a/tools/virtio/ringtest/virtio_ring_0_9.c b/tools/virtio/ringtest/virtio_ring_0_9.c
index 761866212aac..bbc3043b2fb1 100644
--- a/tools/virtio/ringtest/virtio_ring_0_9.c
+++ b/tools/virtio/ringtest/virtio_ring_0_9.c
@@ -194,24 +194,16 @@ void *get_buf(unsigned *lenp, void **bufp)
return datap;
}
-void poll_used(void)
+bool used_empty()
{
+ unsigned short last_used_idx = guest.last_used_idx;
#ifdef RING_POLL
- unsigned head = (ring_size - 1) & guest.last_used_idx;
+ unsigned short head = last_used_idx & (ring_size - 1);
+ unsigned index = ring.used->ring[head].id;
- for (;;) {
- unsigned index = ring.used->ring[head].id;
-
- if ((index ^ guest.last_used_idx ^ 0x8000) & ~(ring_size - 1))
- busy_wait();
- else
- break;
- }
+ return (index ^ last_used_idx ^ 0x8000) & ~(ring_size - 1);
#else
- unsigned head = guest.last_used_idx;
-
- while (ring.used->idx == head)
- busy_wait();
+ return ring.used->idx == last_used_idx;
#endif
}
@@ -224,22 +216,11 @@ void disable_call()
bool enable_call()
{
- unsigned short last_used_idx;
-
- vring_used_event(&ring) = (last_used_idx = guest.last_used_idx);
+ vring_used_event(&ring) = guest.last_used_idx;
/* Flush call index write */
/* Barrier D (for pairing) */
smp_mb();
-#ifdef RING_POLL
- {
- unsigned short head = last_used_idx & (ring_size - 1);
- unsigned index = ring.used->ring[head].id;
-
- return (index ^ last_used_idx ^ 0x8000) & ~(ring_size - 1);
- }
-#else
- return ring.used->idx == last_used_idx;
-#endif
+ return used_empty();
}
void kick_available(void)
@@ -266,36 +247,21 @@ void disable_kick()
bool enable_kick()
{
- unsigned head = host.used_idx;
-
- vring_avail_event(&ring) = head;
+ vring_avail_event(&ring) = host.used_idx;
/* Barrier C (for pairing) */
smp_mb();
-#ifdef RING_POLL
- {
- unsigned index = ring.avail->ring[head & (ring_size - 1)];
-
- return (index ^ head ^ 0x8000) & ~(ring_size - 1);
- }
-#else
- return head == ring.avail->idx;
-#endif
+ return avail_empty();
}
-void poll_avail(void)
+bool avail_empty()
{
unsigned head = host.used_idx;
#ifdef RING_POLL
- for (;;) {
- unsigned index = ring.avail->ring[head & (ring_size - 1)];
- if ((index ^ head ^ 0x8000) & ~(ring_size - 1))
- busy_wait();
- else
- break;
- }
+ unsigned index = ring.avail->ring[head & (ring_size - 1)];
+
+ return ((index ^ head ^ 0x8000) & ~(ring_size - 1));
#else
- while (ring.avail->idx == head)
- busy_wait();
+ return head == ring.avail->idx;
#endif
}
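
Note on the virtio_ring_0_9 RING_POLL hunks: my reading of the emptiness test is that the published id carries the slot index in its low bits plus a lap/phase component (with 0x8000 acting as a toggle), so XORing against the consumer's expected index and masking off the in-ring slot bits is zero only for an entry from the current lap; used_empty()/avail_empty() now return that predicate directly instead of open-coding busy-wait loops around it. As a hedged helper (ring_size is assumed to be a power of two):

	static inline bool entry_stale(unsigned index, unsigned short expected,
				       unsigned ring_size)
	{
		/* non-zero: the slot has not been (re)written for this lap */
		return (index ^ expected ^ 0x8000) & ~(ring_size - 1);
	}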
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d92c3d5b0fbe..5c360347a1e9 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1346,21 +1346,19 @@ unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *w
static int get_user_page_nowait(unsigned long start, int write,
struct page **page)
{
- int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;
+ int flags = FOLL_NOWAIT | FOLL_HWPOISON;
if (write)
flags |= FOLL_WRITE;
- return __get_user_pages(current, current->mm, start, 1, flags, page,
- NULL, NULL);
+ return get_user_pages(start, 1, flags, page, NULL);
}
static inline int check_user_page_hwpoison(unsigned long addr)
{
- int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;
+ int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
- rc = __get_user_pages(current, current->mm, addr, 1,
- flags, NULL, NULL, NULL);
+ rc = get_user_pages(addr, 1, flags, NULL, NULL);
return rc == -EHWPOISON;
}
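
Note on the kvm_main.c hunk: the open-coded __get_user_pages(current, current->mm, ...) calls are replaced with the plain get_user_pages() API, which takes (start, nr_pages, gup_flags, pages, vmas), implicitly operates on current->mm, and applies FOLL_TOUCH (and FOLL_GET when pages are requested) internally, so those flags no longer need to be spelled out. A hedged sketch of the new call shape (the wrapper name is made up for illustration):

	static long pin_one_page_for_write(unsigned long addr, struct page **page)
	{
		/* pins one page of current->mm; caller put_page()s it later */
		return get_user_pages(addr, 1, FOLL_WRITE | FOLL_HWPOISON,
				      page, NULL);
	}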