-rw-r--r--.mailmap10
-rw-r--r--Documentation/admin-guide/blockdev/drbd/figures.rst4
-rw-r--r--Documentation/admin-guide/blockdev/drbd/peer-states-8.dot (renamed from Documentation/admin-guide/blockdev/drbd/node-states-8.dot)5
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt10
-rw-r--r--Documentation/arm64/pointer-authentication.rst9
-rw-r--r--Documentation/conf.py15
-rw-r--r--Documentation/cpu-freq/core.rst6
-rw-r--r--Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml6
-rw-r--r--Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml43
-rw-r--r--Documentation/devicetree/bindings/display/bridge/renesas,dsi-csi2-tx.yaml118
-rw-r--r--Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/msm/dp-controller.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/msm/edp.txt56
-rw-r--r--Documentation/devicetree/bindings/display/panel/panel-simple.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/sprd/sprd,display-subsystem.yaml64
-rw-r--r--Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dpu.yaml77
-rw-r--r--Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dsi-host.yaml88
-rw-r--r--Documentation/devicetree/bindings/i2c/apple,i2c.yaml8
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml5
-rw-r--r--Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml2
-rw-r--r--Documentation/devicetree/bindings/input/gpio-keys.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml14
-rw-r--r--Documentation/devicetree/bindings/net/ethernet-phy.yaml8
-rw-r--r--Documentation/devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml2
-rw-r--r--Documentation/devicetree/bindings/power/supply/bq25980.yaml2
-rw-r--r--Documentation/devicetree/bindings/regulator/samsung,s5m8767.yaml25
-rw-r--r--Documentation/devicetree/bindings/sound/wlf,wm8962.yaml3
-rw-r--r--Documentation/devicetree/bindings/spi/spi-rockchip.yaml1
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.yaml2
-rw-r--r--Documentation/filesystems/cifs/ksmbd.rst10
-rw-r--r--Documentation/filesystems/netfs_library.rst95
-rw-r--r--Documentation/gpu/amdgpu-dc.rst74
-rw-r--r--Documentation/gpu/amdgpu.rst324
-rw-r--r--Documentation/gpu/amdgpu/amdgpu-glossary.rst87
-rw-r--r--Documentation/gpu/amdgpu/display/config_example.svg414
-rw-r--r--Documentation/gpu/amdgpu/display/dc-debug.rst77
-rw-r--r--Documentation/gpu/amdgpu/display/dc-glossary.rst237
-rw-r--r--Documentation/gpu/amdgpu/display/dc_pipeline_overview.svg1125
-rw-r--r--Documentation/gpu/amdgpu/display/dcn-overview.rst171
-rw-r--r--Documentation/gpu/amdgpu/display/display-manager.rst42
-rw-r--r--Documentation/gpu/amdgpu/display/global_sync_vblank.svg485
-rw-r--r--Documentation/gpu/amdgpu/display/index.rst31
-rw-r--r--Documentation/gpu/amdgpu/driver-core.rst182
-rw-r--r--Documentation/gpu/amdgpu/driver-misc.rst112
-rw-r--r--Documentation/gpu/amdgpu/index.rst17
-rw-r--r--Documentation/gpu/amdgpu/module-parameters.rst7
-rw-r--r--Documentation/gpu/amdgpu/ras.rst62
-rw-r--r--Documentation/gpu/amdgpu/thermal.rst65
-rw-r--r--Documentation/gpu/amdgpu/xgmi.rst5
-rw-r--r--Documentation/gpu/drivers.rst3
-rw-r--r--Documentation/gpu/todo.rst11
-rw-r--r--Documentation/i2c/smbus-protocol.rst14
-rw-r--r--Documentation/i2c/summary.rst8
-rw-r--r--Documentation/locking/locktypes.rst9
-rw-r--r--Documentation/networking/bonding.rst11
-rw-r--r--Documentation/networking/device_drivers/ethernet/freescale/dpaa2/overview.rst1
-rw-r--r--Documentation/networking/device_drivers/ethernet/intel/ixgbe.rst16
-rw-r--r--Documentation/networking/ip-sysctl.rst6
-rw-r--r--Documentation/networking/ipvs-sysctl.rst3
-rw-r--r--Documentation/networking/timestamping.rst8
-rw-r--r--Documentation/process/changes.rst11
-rw-r--r--Documentation/process/submitting-patches.rst3
-rw-r--r--Documentation/sound/hd-audio/models.rst2
-rw-r--r--MAINTAINERS70
-rw-r--r--Makefile16
-rw-r--r--arch/Kconfig10
-rw-r--r--arch/alpha/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/arc/include/asm/cacheflush.h1
-rw-r--r--arch/arm/boot/dts/bcm2711.dtsi8
-rw-r--r--arch/arm/boot/dts/bcm5301x.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-wandboard.dtsi1
-rw-r--r--arch/arm/boot/dts/imx6qp-prtwd3.dts2
-rw-r--r--arch/arm/boot/dts/imx6ull-pinfunc.h2
-rw-r--r--arch/arm/boot/dts/ls1021a-tsn.dts2
-rw-r--r--arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts2
-rw-r--r--arch/arm/boot/dts/socfpga_arria5_socdk.dts2
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_socdk.dts2
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_sockit.dts2
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_socrates.dts2
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_sodia.dts2
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts4
-rw-r--r--arch/arm/include/asm/cacheflush.h1
-rw-r--r--arch/arm/include/asm/efi.h1
-rw-r--r--arch/arm/kernel/entry-armv.S8
-rw-r--r--arch/arm/kernel/head-nommu.S1
-rw-r--r--arch/arm/mach-rockchip/platsmp.c2
-rw-r--r--arch/arm/mach-socfpga/core.h2
-rw-r--r--arch/arm/mach-socfpga/platsmp.c8
-rw-r--r--arch/arm64/Kconfig.platforms1
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-axg-jethome-jethub-j100.dts30
-rw-r--r--arch/arm64/boot/dts/apple/t8103-j274.dts2
-rw-r--r--arch/arm64/boot/dts/apple/t8103.dtsi11
-rw-r--r--arch/arm64/boot/dts/exynos/exynosautov9.dtsi3
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-lx2160a-bluebox3.dts4
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi4
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi2
-rw-r--r--arch/arm64/include/asm/efi.h1
-rw-r--r--arch/arm64/include/asm/ftrace.h11
-rw-r--r--arch/arm64/include/asm/kvm_arm.h4
-rw-r--r--arch/arm64/include/asm/pgalloc.h2
-rw-r--r--arch/arm64/include/asm/stacktrace.h6
-rw-r--r--arch/arm64/include/asm/uaccess.h48
-rw-r--r--arch/arm64/kernel/entry-ftrace.S6
-rw-r--r--arch/arm64/kernel/ftrace.c6
-rw-r--r--arch/arm64/kernel/machine_kexec.c2
-rw-r--r--arch/arm64/kernel/machine_kexec_file.c1
-rw-r--r--arch/arm64/kernel/stacktrace.c18
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/switch.h14
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h7
-rw-r--r--arch/arm64/kvm/hyp/nvhe/switch.c8
-rw-r--r--arch/arm64/kvm/hyp/vhe/switch.c4
-rw-r--r--arch/csky/kernel/traps.c4
-rw-r--r--arch/ia64/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/m68k/include/asm/cacheflush_mm.h1
-rw-r--r--arch/m68k/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/microblaze/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/mips/Kconfig2
-rw-r--r--arch/mips/boot/compressed/Makefile2
-rw-r--r--arch/mips/include/asm/cacheflush.h2
-rw-r--r--arch/mips/include/asm/mach-ralink/spaces.h2
-rw-r--r--arch/mips/include/asm/pci.h4
-rw-r--r--arch/mips/kernel/cpu-probe.c4
-rw-r--r--arch/mips/kernel/proc.c2
-rw-r--r--arch/mips/net/bpf_jit_comp.h2
-rw-r--r--arch/mips/pci/pci-generic.c2
-rw-r--r--arch/nds32/include/asm/cacheflush.h1
-rw-r--r--arch/nios2/include/asm/cacheflush.h1
-rw-r--r--arch/parisc/Kconfig5
-rw-r--r--arch/parisc/Makefile5
-rw-r--r--arch/parisc/configs/generic-64bit_defconfig14
-rw-r--r--arch/parisc/include/asm/assembly.h11
-rw-r--r--arch/parisc/include/asm/cacheflush.h1
-rw-r--r--arch/parisc/include/asm/futex.h4
-rw-r--r--arch/parisc/install.sh1
-rw-r--r--arch/parisc/kernel/entry.S14
-rw-r--r--arch/parisc/kernel/syscall.S6
-rw-r--r--arch/parisc/kernel/time.c30
-rw-r--r--arch/parisc/kernel/traps.c2
-rw-r--r--arch/parisc/kernel/vmlinux.lds.S3
-rw-r--r--arch/powerpc/kernel/head_32.h6
-rw-r--r--arch/powerpc/kernel/module_64.c42
-rw-r--r--arch/powerpc/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c5
-rw-r--r--arch/powerpc/mm/ptdump/ptdump.c2
-rw-r--r--arch/powerpc/platforms/85xx/smp.c4
-rw-r--r--arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts1
-rw-r--r--arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts113
-rw-r--r--arch/riscv/include/asm/efi.h1
-rw-r--r--arch/riscv/include/asm/kvm_host.h8
-rw-r--r--arch/riscv/kvm/mmu.c6
-rw-r--r--arch/s390/configs/debug_defconfig12
-rw-r--r--arch/s390/configs/defconfig9
-rw-r--r--arch/s390/configs/zfcpdump_defconfig2
-rw-r--r--arch/s390/include/asm/pci_io.h7
-rw-r--r--arch/s390/kernel/ftrace.c2
-rw-r--r--arch/s390/kernel/irq.c9
-rw-r--r--arch/s390/kernel/machine_kexec_file.c38
-rw-r--r--arch/s390/lib/test_unwind.c5
-rw-r--r--arch/sh/include/asm/cacheflush.h1
-rw-r--r--arch/sh/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/sparc/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/entry/entry_64.S35
-rw-r--r--arch/x86/include/asm/efi.h2
-rw-r--r--arch/x86/include/asm/fpu/api.h6
-rw-r--r--arch/x86/include/asm/intel-family.h2
-rw-r--r--arch/x86/include/asm/kvm-x86-ops.h1
-rw-r--r--arch/x86/include/asm/kvm_host.h4
-rw-r--r--arch/x86/include/asm/pkru.h4
-rw-r--r--arch/x86/include/asm/sev-common.h11
-rw-r--r--arch/x86/include/asm/xen/hypercall.h4
-rw-r--r--arch/x86/include/asm/xen/hypervisor.h1
-rw-r--r--arch/x86/kernel/fpu/signal.c2
-rw-r--r--arch/x86/kernel/setup.c72
-rw-r--r--arch/x86/kernel/sev.c57
-rw-r--r--arch/x86/kernel/smpboot.c14
-rw-r--r--arch/x86/kernel/tsc.c28
-rw-r--r--arch/x86/kernel/tsc_sync.c41
-rw-r--r--arch/x86/kvm/hyperv.c7
-rw-r--r--arch/x86/kvm/ioapic.h1
-rw-r--r--arch/x86/kvm/irq.h1
-rw-r--r--arch/x86/kvm/lapic.c2
-rw-r--r--arch/x86/kvm/mmu/mmu.c134
-rw-r--r--arch/x86/kvm/mmu/paging_tmpl.h3
-rw-r--r--arch/x86/kvm/mmu/tdp_iter.c6
-rw-r--r--arch/x86/kvm/mmu/tdp_iter.h6
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.c67
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.h5
-rw-r--r--arch/x86/kvm/svm/avic.c17
-rw-r--r--arch/x86/kvm/svm/pmu.c2
-rw-r--r--arch/x86/kvm/svm/sev.c263
-rw-r--r--arch/x86/kvm/svm/svm.c22
-rw-r--r--arch/x86/kvm/svm/svm.h1
-rw-r--r--arch/x86/kvm/vmx/nested.c53
-rw-r--r--arch/x86/kvm/vmx/posted_intr.c20
-rw-r--r--arch/x86/kvm/vmx/vmx.c134
-rw-r--r--arch/x86/kvm/x86.c100
-rw-r--r--arch/x86/kvm/x86.h7
-rw-r--r--arch/x86/net/bpf_jit_comp.c51
-rw-r--r--arch/x86/platform/efi/quirks.c3
-rw-r--r--arch/x86/realmode/init.c12
-rw-r--r--arch/x86/tools/relocs.c2
-rw-r--r--arch/x86/xen/xen-asm.S20
-rw-r--r--arch/xtensa/include/asm/cacheflush.h3
-rw-r--r--arch/xtensa/kernel/syscalls/syscall.tbl1
-rw-r--r--block/bdev.c12
-rw-r--r--block/blk-core.c1
-rw-r--r--block/blk-iocost.c9
-rw-r--r--block/blk-mq.c3
-rw-r--r--block/fops.c4
-rw-r--r--block/ioprio.c3
-rw-r--r--drivers/Makefile3
-rw-r--r--drivers/acpi/cppc_acpi.c9
-rw-r--r--drivers/acpi/property.c14
-rw-r--r--drivers/android/binder.c23
-rw-r--r--drivers/android/binder_alloc.c2
-rw-r--r--drivers/ata/ahci_ceva.c3
-rw-r--r--drivers/ata/libata-core.c2
-rw-r--r--drivers/ata/libata-sata.c2
-rw-r--r--drivers/ata/libata-scsi.c15
-rw-r--r--drivers/ata/pata_falcon.c16
-rw-r--r--drivers/ata/sata_fsl.c20
-rw-r--r--drivers/auxdisplay/charlcd.c5
-rw-r--r--drivers/base/power/main.c2
-rw-r--r--drivers/block/loop.c2
-rw-r--r--drivers/block/virtio_blk.c3
-rw-r--r--drivers/block/xen-blkfront.c15
-rw-r--r--drivers/block/zram/zram_drv.c2
-rw-r--r--drivers/bus/mhi/core/pm.c21
-rw-r--r--drivers/bus/mhi/pci_generic.c2
-rw-r--r--drivers/bus/sunxi-rsb.c8
-rw-r--r--drivers/char/agp/parisc-agp.c6
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c50
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c7
-rw-r--r--drivers/clk/clk.c15
-rw-r--r--drivers/clk/imx/clk-imx8qxp-lpcg.c2
-rw-r--r--drivers/clk/imx/clk-imx8qxp.c2
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c9
-rw-r--r--drivers/clk/qcom/clk-regmap-mux.c2
-rw-r--r--drivers/clk/qcom/common.c12
-rw-r--r--drivers/clk/qcom/common.h2
-rw-r--r--drivers/clk/qcom/gcc-sm6125.c4
-rw-r--r--drivers/clk/versatile/clk-icst.c2
-rw-r--r--drivers/clocksource/arm_arch_timer.c9
-rw-r--r--drivers/clocksource/dw_apb_timer_of.c2
-rw-r--r--drivers/cpufreq/cpufreq.c14
-rw-r--r--drivers/cpufreq/intel_pstate.c17
-rw-r--r--drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c7
-rw-r--r--drivers/dma-buf/dma-fence-array.c6
-rw-r--r--drivers/dma-buf/dma-resv.c3
-rw-r--r--drivers/dma-buf/heaps/system_heap.c2
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c4
-rw-r--r--drivers/dma/dw-edma/dw-edma-pcie.c10
-rw-r--r--drivers/dma/idxd/irq.c2
-rw-r--r--drivers/dma/idxd/submit.c18
-rw-r--r--drivers/dma/st_fdma.c2
-rw-r--r--drivers/dma/ti/k3-udma.c157
-rw-r--r--drivers/firmware/arm_scmi/base.c15
-rw-r--r--drivers/firmware/arm_scmi/scmi_pm_domain.c4
-rw-r--r--drivers/firmware/arm_scmi/sensors.c2
-rw-r--r--drivers/firmware/arm_scmi/virtio.c10
-rw-r--r--drivers/firmware/arm_scmi/voltage.c2
-rw-r--r--drivers/firmware/scpi_pm_domain.c10
-rw-r--r--drivers/firmware/smccc/soc_id.c2
-rw-r--r--drivers/firmware/tegra/bpmp-debugfs.c5
-rw-r--r--drivers/gpio/gpio-dln2.c19
-rw-r--r--drivers/gpio/gpio-virtio.c6
-rw-r--r--drivers/gpu/drm/Kconfig9
-rw-r--r--drivers/gpu/drm/Makefile9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c138
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h97
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c126
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c143
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c128
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c124
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c155
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c232
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c215
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c201
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c388
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c126
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c58
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c73
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c169
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c64
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c174
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c94
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c78
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c85
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15_common.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_7.c161
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_ih.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c54
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c148
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c998
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c144
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c31
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c46
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_iommu.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c12
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c14
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c12
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c35
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h22
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c66
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c11
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c449
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.h3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c98
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h46
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c350
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h7
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c8
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c213
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c52
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c20
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c101
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c43
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c44
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c186
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_debug.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c359
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c1076
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c68
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c53
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c63
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c210
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_sink.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c66
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c111
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c127
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c71
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c35
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c50
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c68
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c99
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c45
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c51
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c489
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c1889
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c284
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_status.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h17
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_hwss.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h17
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c7
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h42
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h61
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c89
-rw-r--r--drivers/gpu/drm/amd/display/include/ddc_service_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h4
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h7
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h12
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_sh_mask.h12
-rw-r--r--drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h10
-rw-r--r--drivers/gpu/drm/amd/include/discovery.h49
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h57
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h4
-rw-r--r--drivers/gpu/drm/amd/include/yellow_carp_offset.h4
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c21
-rw-r--r--drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h4
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h8
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h31
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h18
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu_types.h3
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu_v11_0.h2
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu_v13_0.h5
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c9
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c29
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c38
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c6
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c40
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c10
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c68
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c152
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c22
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c10
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c87
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c10
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c10
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c6
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c134
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c40
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c34
-rw-r--r--drivers/gpu/drm/arm/Kconfig2
-rw-r--r--drivers/gpu/drm/arm/display/Kconfig1
-rw-r--r--drivers/gpu/drm/aspeed/Kconfig2
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_drv.c2
-rw-r--r--drivers/gpu/drm/ast/Makefile2
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h3
-rw-r--r--drivers/gpu/drm/ast/ast_i2c.c152
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c156
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/Kconfig1
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c41
-rw-r--r--drivers/gpu/drm/bridge/chipone-icn6211.c39
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611.c4
-rw-r--r--drivers/gpu/drm/bridge/lvds-codec.c15
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.c1
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8640.c30
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c2
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c6
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi83.c53
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c25
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c12
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c8
-rw-r--r--drivers/gpu/drm/drm_format_helper.c64
-rw-r--r--drivers/gpu/drm/drm_fourcc.c3
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c90
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c18
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c4
-rw-r--r--drivers/gpu/drm/drm_hashtab.c10
-rw-r--r--drivers/gpu/drm/drm_legacy.h40
-rw-r--r--drivers/gpu/drm/drm_mipi_dbi.c34
-rw-r--r--drivers/gpu/drm/drm_plane.c9
-rw-r--r--drivers/gpu/drm/drm_syncobj.c11
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c41
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c13
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c135
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c20
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c43
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h5
-rw-r--r--drivers/gpu/drm/fsl-dcu/Kconfig2
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/Kconfig1
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_drv.c19
-rw-r--r--drivers/gpu/drm/i915/Makefile4
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c22
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c2
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c2
-rw-r--r--drivers/gpu/drm/i915/dma_resv_utils.c17
-rw-r--r--drivers/gpu/drm/i915/dma_resv_utils.h13
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_clflush.c18
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c26
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_create.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c15
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c22
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c198
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_internal.c44
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c10
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c92
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h48
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h46
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c21
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_region.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c195
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c137
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c19
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_throttle.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.c765
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.h41
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c627
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm_move.h41
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_wait.c12
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gemfs.c5
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c134
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c71
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c4
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c26
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c26
-rw-r--r--drivers/gpu/drm/i915/gt/gen6_ppgtt.c151
-rw-r--r--drivers/gpu/drm/i915/gt/gen6_ppgtt.h2
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_engine_cs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.c34
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c16
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c68
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c40
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_stats.h33
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h84
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_user.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c37
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c414
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c11
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_debugfs.h21
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c9
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c31
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_migrate.c32
-rw-r--r--drivers/gpu/drm/i915/gt/intel_migrate.h9
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ppgtt.c12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_region_lmem.c17
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c58
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c143
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.h5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c502
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.c50
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_context.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_cs.c4
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c4
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_pm.c35
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_execlists.c6
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_gt_pm.c8
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c22
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_migrate.c7
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_mocs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_reset.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_ring_submission.c4
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_slpc.c6
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_timeline.c6
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_workarounds.c4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h42
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c21
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c18
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c33
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h13
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.h5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c62
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c157
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h13
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c373
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.c75
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c9
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c188
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h18
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc.c175
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c12
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c2
-rw-r--r--drivers/gpu/drm/i915/i915_active.c28
-rw-r--r--drivers/gpu/drm/i915/i915_active.h17
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c42
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs_params.c4
-rw-r--r--drivers/gpu/drm/i915/i915_deps.c237
-rw-r--r--drivers/gpu/drm/i915/i915_deps.h45
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c49
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h111
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c51
-rw-r--r--drivers/gpu/drm/i915/i915_getparam.c10
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c234
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h4
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c74
-rw-r--r--drivers/gpu/drm/i915/i915_params.c3
-rw-r--r--drivers/gpu/drm/i915/i915_params.h1
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c20
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c2
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c14
-rw-r--r--drivers/gpu/drm/i915/i915_query.c2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h160
-rw-r--r--drivers/gpu/drm/i915/i915_request.c93
-rw-r--r--drivers/gpu/drm/i915/i915_request.h25
-rw-r--r--drivers/gpu/drm/i915/i915_scatterlist.c62
-rw-r--r--drivers/gpu/drm/i915/i915_scatterlist.h76
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c28
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.h23
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence_work.c2
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c41
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c523
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h14
-rw-r--r--drivers/gpu/drm/i915/i915_vma_snapshot.c134
-rw-r--r--drivers/gpu/drm/i915/i915_vma_snapshot.h112
-rw-r--r--drivers/gpu/drm/i915/i915_vma_types.h2
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c33
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h13
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c2
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.c154
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.h9
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c39
-rw-r--r--drivers/gpu/drm/i915/intel_region_ttm.c50
-rw-r--r--drivers/gpu/drm/i915/intel_region_ttm.h7
-rw-r--r--drivers/gpu/drm/i915/intel_step.c77
-rw-r--r--drivers/gpu/drm/i915/intel_step.h3
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c26
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h5
-rw-r--r--drivers/gpu/drm/i915/intel_wopcm.c2
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_pm.c37
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_pm.h19
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_tee.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_active.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c25
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c18
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_perf.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c24
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_selftest.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_sw_fence.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_vma.c17
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_flush_test.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_live_test.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_reset.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_memory_region.c12
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_uncore.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/lib_sw_fence.c8
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c34
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gtt.c12
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_region.c19
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_uncore.c2
-rw-r--r--drivers/gpu/drm/imx/Kconfig2
-rw-r--r--drivers/gpu/drm/imx/dcss/Kconfig2
-rw-r--r--drivers/gpu/drm/ingenic/Kconfig1
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm-drv.c62
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm.h38
-rw-r--r--drivers/gpu/drm/kmb/Kconfig1
-rw-r--r--drivers/gpu/drm/lima/lima_device.c1
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c2
-rw-r--r--drivers/gpu/drm/mcde/Kconfig1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ccorr.c6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c20
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_rdma.c6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c175
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c217
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h23
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c142
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c12
-rw-r--r--drivers/gpu/drm/meson/Kconfig1
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c25
-rw-r--r--drivers/gpu/drm/meson/meson_osd_afbcd.c41
-rw-r--r--drivers/gpu/drm/meson/meson_osd_afbcd.h1
-rw-r--r--drivers/gpu/drm/msm/Kconfig3
-rw-r--r--drivers/gpu/drm/msm/Makefile14
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.c3
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c3
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c3
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_debugfs.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c55
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c25
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c35
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h10
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c108
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.c10
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.h11
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c38
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c41
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c38
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c17
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c98
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h44
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c150
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h40
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c25
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c564
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h4
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c22
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c56
-rw-r--r--drivers/gpu/drm/msm/disp/msm_disp_snapshot.c27
-rw-r--r--drivers/gpu/drm/msm/disp/msm_disp_snapshot.h14
-rw-r--r--drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c9
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.c17
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c23
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c59
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.h1
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.c75
-rw-r--r--drivers/gpu/drm/msm/dp/dp_hpd.c2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_hpd.h2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.c19
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c16
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h5
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c57
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c31
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c13
-rw-r--r--drivers/gpu/drm/msm/edp/edp.c198
-rw-r--r--drivers/gpu/drm/msm/edp/edp.h77
-rw-r--r--drivers/gpu/drm/msm/edp/edp.xml.h388
-rw-r--r--drivers/gpu/drm/msm/edp/edp_aux.c265
-rw-r--r--drivers/gpu/drm/msm/edp/edp_bridge.c111
-rw-r--r--drivers/gpu/drm/msm/edp/edp_connector.c132
-rw-r--r--drivers/gpu/drm/msm/edp/edp_ctrl.c1373
-rw-r--r--drivers/gpu/drm/msm/edp/edp_phy.c98
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c19
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h19
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c83
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_hpd.c (renamed from drivers/gpu/drm/msm/hdmi/hdmi_connector.c)158
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c128
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c164
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h35
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c13
-rw-r--r--drivers/gpu/drm/msm/msm_fence.h12
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c5
-rw-r--r--drivers/gpu/drm/msm/msm_gem_shrinker.c1
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c4
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c22
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h69
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_devfreq.c100
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h11
-rw-r--r--drivers/gpu/drm/msm/msm_perf.c9
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c16
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c4
-rw-r--r--drivers/gpu/drm/mxsfb/Kconfig2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c317
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c6
-rw-r--r--drivers/gpu/drm/omapdrm/Makefile1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c35
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.h5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c196
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h24
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c33
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.h4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c79
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.h2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c34
-rw-r--r--drivers/gpu/drm/omapdrm/omap_overlay.c212
-rw-r--r--drivers/gpu/drm/omapdrm/omap_overlay.h35
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c349
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.h1
-rw-r--r--drivers/gpu/drm/panel/Kconfig2
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c110
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c30
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c2
-rw-r--r--drivers/gpu/drm/pl111/Kconfig1
-rw-r--r--drivers/gpu/drm/qxl/qxl_debugfs.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c38
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c2
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig31
-rw-r--r--drivers/gpu/drm/rcar-du/Makefile1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c31
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c10
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c819
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h172
-rw-r--r--drivers/gpu/drm/rockchip/Makefile1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c10
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c164
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h24
-rw-r--r--drivers/gpu/drm/selftests/test-drm_plane_helper.c4
-rw-r--r--drivers/gpu/drm/shmobile/Kconfig1
-rw-r--r--drivers/gpu/drm/sprd/Kconfig13
-rw-r--r--drivers/gpu/drm/sprd/Makefile8
-rw-r--r--drivers/gpu/drm/sprd/megacores_pll.c305
-rw-r--r--drivers/gpu/drm/sprd/sprd_dpu.c880
-rw-r--r--drivers/gpu/drm/sprd/sprd_dpu.h109
-rw-r--r--drivers/gpu/drm/sprd/sprd_drm.c205
-rw-r--r--drivers/gpu/drm/sprd/sprd_drm.h19
-rw-r--r--drivers/gpu/drm/sprd/sprd_dsi.c1073
-rw-r--r--drivers/gpu/drm/sprd/sprd_dsi.h126
-rw-r--r--drivers/gpu/drm/sti/Kconfig1
-rw-r--r--drivers/gpu/drm/stm/Kconfig1
-rw-r--r--drivers/gpu/drm/stm/drv.c5
-rw-r--r--drivers/gpu/drm/stm/dw_mipi_dsi-stm.c114
-rw-r--r--drivers/gpu/drm/stm/ltdc.c172
-rw-r--r--drivers/gpu/drm/stm/ltdc.h3
-rw-r--r--drivers/gpu/drm/sun4i/Kconfig1
-rw-r--r--drivers/gpu/drm/tegra/Kconfig3
-rw-r--r--drivers/gpu/drm/tegra/Makefile3
-rw-r--r--drivers/gpu/drm/tegra/dc.c194
-rw-r--r--drivers/gpu/drm/tegra/dc.h3
-rw-r--r--drivers/gpu/drm/tegra/drm.c30
-rw-r--r--drivers/gpu/drm/tegra/drm.h1
-rw-r--r--drivers/gpu/drm/tegra/gem.c171
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c151
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c353
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c183
-rw-r--r--drivers/gpu/drm/tegra/hub.h1
-rw-r--r--drivers/gpu/drm/tegra/nvdec.c466
-rw-r--r--drivers/gpu/drm/tegra/plane.c65
-rw-r--r--drivers/gpu/drm/tegra/plane.h2
-rw-r--r--drivers/gpu/drm/tegra/rgb.c53
-rw-r--r--drivers/gpu/drm/tegra/submit.c77
-rw-r--r--drivers/gpu/drm/tegra/uapi.c68
-rw-r--r--drivers/gpu/drm/tegra/uapi.h5
-rw-r--r--drivers/gpu/drm/tegra/vic.c61
-rw-r--r--drivers/gpu/drm/tidss/Kconfig1
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.c2
-rw-r--r--drivers/gpu/drm/tilcdc/Kconfig1
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c4
-rw-r--r--drivers/gpu/drm/tiny/Kconfig20
-rw-r--r--drivers/gpu/drm/tiny/simpledrm.c6
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c1
-rw-r--r--drivers/gpu/drm/tve200/Kconfig1
-rw-r--r--drivers/gpu/drm/v3d/v3d_bo.c4
-rw-r--r--drivers/gpu/drm/vc4/Kconfig1
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c8
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c55
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c198
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h19
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c42
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/Kconfig1
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile6
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h6
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h10
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h12
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h8
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h6
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h7
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_escape.h6
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h6
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_reg.h14
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_memory.c683
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_memory.h96
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_object.c180
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_object.h59
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.c45
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c584
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c43
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c30
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c26
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c89
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h154
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c84
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c43
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gem.c294
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c16
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c199
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.h (renamed from include/drm/drm_hashtab.h)54
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c45
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c25
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c20
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_prime.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c17
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c91
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c29
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.c21
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.h6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c21
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c158
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c90
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_thp.c184
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c135
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c74
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_va.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.c35
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.h53
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c1
-rw-r--r--drivers/gpu/drm/xlnx/Kconfig1
-rw-r--r--drivers/gpu/host1x/Kconfig1
-rw-r--r--drivers/gpu/host1x/bus.c80
-rw-r--r--drivers/gpu/host1x/channel.c8
-rw-r--r--drivers/gpu/host1x/debug.c15
-rw-r--r--drivers/gpu/host1x/dev.c185
-rw-r--r--drivers/gpu/host1x/dev.h5
-rw-r--r--drivers/gpu/host1x/hw/channel_hw.c44
-rw-r--r--drivers/gpu/host1x/intr.c3
-rw-r--r--drivers/gpu/host1x/job.c160
-rw-r--r--drivers/gpu/host1x/job.h6
-rw-r--r--drivers/gpu/host1x/syncpt.c5
-rw-r--r--drivers/hid/Kconfig10
-rw-r--r--drivers/hid/hid-asus.c8
-rw-r--r--drivers/hid/hid-bigbenff.c2
-rw-r--r--drivers/hid/hid-chicony.c3
-rw-r--r--drivers/hid/hid-corsair.c7
-rw-r--r--drivers/hid/hid-elan.c2
-rw-r--r--drivers/hid/hid-elo.c3
-rw-r--r--drivers/hid/hid-ft260.c14
-rw-r--r--drivers/hid/hid-google-hammer.c2
-rw-r--r--drivers/hid/hid-holtek-kbd.c9
-rw-r--r--drivers/hid/hid-holtek-mouse.c24
-rw-r--r--drivers/hid/hid-ids.h4
-rw-r--r--drivers/hid/hid-input.c10
-rw-r--r--drivers/hid/hid-lg.c10
-rw-r--r--drivers/hid/hid-logitech-dj.c2
-rw-r--r--drivers/hid/hid-magicmouse.c7
-rw-r--r--drivers/hid/hid-multitouch.c5
-rw-r--r--drivers/hid/hid-nintendo.c13
-rw-r--r--drivers/hid/hid-prodikeys.c10
-rw-r--r--drivers/hid/hid-quirks.c1
-rw-r--r--drivers/hid/hid-roccat-arvo.c3
-rw-r--r--drivers/hid/hid-roccat-isku.c3
-rw-r--r--drivers/hid/hid-roccat-kone.c3
-rw-r--r--drivers/hid/hid-roccat-koneplus.c3
-rw-r--r--drivers/hid/hid-roccat-konepure.c3
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c3
-rw-r--r--drivers/hid/hid-roccat-lua.c3
-rw-r--r--drivers/hid/hid-roccat-pyra.c3
-rw-r--r--drivers/hid/hid-roccat-ryos.c3
-rw-r--r--drivers/hid/hid-roccat-savu.c3
-rw-r--r--drivers/hid/hid-samsung.c3
-rw-r--r--drivers/hid/hid-sony.c24
-rw-r--r--drivers/hid/hid-thrustmaster.c9
-rw-r--r--drivers/hid/hid-u2fzero.c2
-rw-r--r--drivers/hid/hid-uclogic-core.c3
-rw-r--r--drivers/hid/hid-uclogic-params.c3
-rw-r--r--drivers/hid/hid-vivaldi.c3
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c6
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-fw-loader.c14
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid-client.c14
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c6
-rw-r--r--drivers/hid/wacom_sys.c19
-rw-r--r--drivers/hid/wacom_wac.c8
-rw-r--r--drivers/hid/wacom_wac.h1
-rw-r--r--drivers/hv/Kconfig1
-rw-r--r--drivers/hwmon/corsair-psu.c2
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c7
-rw-r--r--drivers/hwmon/lm90.c106
-rw-r--r--drivers/hwmon/nct6775.c2
-rw-r--r--drivers/hwmon/pwm-fan.c2
-rw-r--r--drivers/hwmon/sht4x.c4
-rw-r--r--drivers/i2c/busses/i2c-cbus-gpio.c5
-rw-r--r--drivers/i2c/busses/i2c-i801.c32
-rw-r--r--drivers/i2c/busses/i2c-mpc.c2
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c4
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c45
-rw-r--r--drivers/i2c/busses/i2c-virtio.c44
-rw-r--r--drivers/i2c/i2c-dev.c3
-rw-r--r--drivers/iio/accel/kxcjk-1013.c5
-rw-r--r--drivers/iio/accel/kxsd9.c6
-rw-r--r--drivers/iio/accel/mma8452.c2
-rw-r--r--drivers/iio/adc/Kconfig2
-rw-r--r--drivers/iio/adc/ad7768-1.c2
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c3
-rw-r--r--drivers/iio/adc/axp20x_adc.c18
-rw-r--r--drivers/iio/adc/dln2-adc.c21
-rw-r--r--drivers/iio/adc/stm32-adc.c3
-rw-r--r--drivers/iio/gyro/adxrs290.c5
-rw-r--r--drivers/iio/gyro/itg3200_buffer.c2
-rw-r--r--drivers/iio/industrialio-trigger.c1
-rw-r--r--drivers/iio/light/ltr501.c2
-rw-r--r--drivers/iio/light/stk3310.c6
-rw-r--r--drivers/iio/trigger/stm32-timer-trigger.c2
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c2
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c2
-rw-r--r--drivers/infiniband/hw/hfi1/init.c40
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c78
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c2
-rw-r--r--drivers/infiniband/hw/irdma/hw.c7
-rw-r--r--drivers/infiniband/hw/irdma/main.h1
-rw-r--r--drivers/infiniband/hw/irdma/pble.c8
-rw-r--r--drivers/infiniband/hw/irdma/pble.h1
-rw-r--r--drivers/infiniband/hw/irdma/utils.c24
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c23
-rw-r--r--drivers/infiniband/hw/irdma/verbs.h2
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h6
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c26
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c1
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c9
-rw-r--r--drivers/input/joystick/spaceball.c11
-rw-r--r--drivers/input/misc/iqs626a.c21
-rw-r--r--drivers/input/misc/xen-kbdfront.c1
-rw-r--r--drivers/input/mouse/appletouch.c4
-rw-r--r--drivers/input/mouse/elantech.c8
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h21
-rw-r--r--drivers/input/serio/i8042.c54
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c2
-rw-r--r--drivers/input/touchscreen/elants_i2c.c46
-rw-r--r--drivers/input/touchscreen/goodix.c31
-rw-r--r--drivers/input/touchscreen/goodix.h1
-rw-r--r--drivers/input/touchscreen/goodix_fwupload.c2
-rw-r--r--drivers/iommu/amd/iommu_v2.c6
-rw-r--r--drivers/iommu/intel/cap_audit.c5
-rw-r--r--drivers/iommu/intel/iommu.c6
-rw-r--r--drivers/iommu/rockchip-iommu.c4
-rw-r--r--drivers/irqchip/irq-apple-aic.c2
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c16
-rw-r--r--drivers/irqchip/irq-aspeed-scu-ic.c4
-rw-r--r--drivers/irqchip/irq-bcm7120-l2.c1
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c2
-rw-r--r--drivers/irqchip/irq-mips-gic.c4
-rw-r--r--drivers/irqchip/irq-nvic.c2
-rw-r--r--drivers/isdn/mISDN/core.c6
-rw-r--r--drivers/isdn/mISDN/core.h4
-rw-r--r--drivers/isdn/mISDN/layer1.c4
-rw-r--r--drivers/md/bcache/super.c3
-rw-r--r--drivers/md/dm-integrity.c2
-rw-r--r--drivers/md/md.c4
-rw-r--r--drivers/md/persistent-data/dm-btree-remove.c2
-rw-r--r--drivers/media/cec/core/cec-adap.c1
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-sg.c2
-rw-r--r--drivers/media/i2c/hi846.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c41
-rw-r--r--drivers/memory/mtk-smi.c2
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c4
-rw-r--r--drivers/misc/eeprom/at25.c38
-rw-r--r--drivers/misc/fastrpc.c10
-rw-r--r--drivers/mmc/core/core.c7
-rw-r--r--drivers/mmc/core/core.h1
-rw-r--r--drivers/mmc/core/host.c9
-rw-r--r--drivers/mmc/host/meson-mx-sdhc-mmc.c16
-rw-r--r--drivers/mmc/host/mmc_spi.c7
-rw-r--r--drivers/mmc/host/mmci_stm32_sdmmc.c2
-rw-r--r--drivers/mmc/host/mtk-sd.c4
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c2
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c2
-rw-r--r--drivers/mmc/host/sdhci-tegra.c43
-rw-r--r--drivers/mmc/host/sdhci.c21
-rw-r--r--drivers/mmc/host/sdhci.h4
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c8
-rw-r--r--drivers/mtd/nand/raw/Kconfig2
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c36
-rw-r--r--drivers/mtd/nand/raw/nand_base.c6
-rw-r--r--drivers/net/Kconfig4
-rw-r--r--drivers/net/bonding/bond_alb.c14
-rw-r--r--drivers/net/bonding/bond_options.c2
-rw-r--r--drivers/net/can/kvaser_pciefd.c8
-rw-r--r--drivers/net/can/m_can/m_can.c42
-rw-r--r--drivers/net/can/m_can/m_can.h3
-rw-r--r--drivers/net/can/m_can/m_can_pci.c62
-rw-r--r--drivers/net/can/pch_can.c2
-rw-r--r--drivers/net/can/sja1000/ems_pcmcia.c7
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c101
-rw-r--r--drivers/net/dsa/b53/b53_spi.c14
-rw-r--r--drivers/net/dsa/microchip/ksz8795.c56
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c66
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c50
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h4
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c89
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c4
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c260
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.h4
-rw-r--r--drivers/net/dsa/ocelot/felix.c5
-rw-r--r--drivers/net/dsa/qca8k.c18
-rw-r--r--drivers/net/dsa/rtl8365mb.c9
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c9
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_common.h27
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c34
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c7
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c11
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c15
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c22
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h38
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c110
-rw-r--r--drivers/net/ethernet/asix/ax88796c_spi.c2
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c23
-rw-r--r--drivers/net/ethernet/broadcom/bcm4908_enet.c4
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c5
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h1
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c1
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c2
-rw-r--r--drivers/net/ethernet/freescale/fec.h3
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.c12
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c8
-rw-r--r--drivers/net/ethernet/google/gve/gve_utils.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c28
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c3
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_sriov.c1
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c28
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c75
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h3
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c76
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c64
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c47
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.c18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c50
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c19
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c30
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c19
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c67
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c49
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_i225.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c6
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c15
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c3
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c36
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c2
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c35
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_switchdev.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c3
-rw-r--r--drivers/net/ethernet/micrel/ks8851_par.c2
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c12
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c10
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c254
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.c16
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c4
-rw-r--r--drivers/net/ethernet/ni/nixge.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c6
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c7
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c19
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c5
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c3
-rw-r--r--drivers/net/ethernet/sfc/falcon/rx.c5
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c5
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c163
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c86
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c29
-rw-r--r--drivers/net/fjes/fjes_main.c5
-rw-r--r--drivers/net/hamradio/mkiss.c6
-rw-r--r--drivers/net/ipa/ipa_cmd.c16
-rw-r--r--drivers/net/ipa/ipa_cmd.h6
-rw-r--r--drivers/net/ipa/ipa_endpoint.c2
-rw-r--r--drivers/net/ipa/ipa_main.c6
-rw-r--r--drivers/net/ipa/ipa_modem.c6
-rw-r--r--drivers/net/ipa/ipa_smp2p.c21
-rw-r--r--drivers/net/ipa/ipa_smp2p.h7
-rw-r--r--drivers/net/mdio/mdio-aspeed.c7
-rw-r--r--drivers/net/netdevsim/bpf.c1
-rw-r--r--drivers/net/netdevsim/ethtool.c5
-rw-r--r--drivers/net/phy/fixed_phy.c4
-rw-r--r--drivers/net/phy/mdio_bus.c3
-rw-r--r--drivers/net/phy/phylink.c27
-rw-r--r--drivers/net/slip/slip.h2
-rw-r--r--drivers/net/tun.c115
-rw-r--r--drivers/net/usb/asix_common.c8
-rw-r--r--drivers/net/usb/cdc_ncm.c2
-rw-r--r--drivers/net/usb/lan78xx.c8
-rw-r--r--drivers/net/usb/pegasus.c4
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/r8152.c43
-rw-r--r--drivers/net/usb/smsc95xx.c55
-rw-r--r--drivers/net/veth.c8
-rw-r--r--drivers/net/virtio_net.c10
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c13
-rw-r--r--drivers/net/vrf.c10
-rw-r--r--drivers/net/wireguard/allowedips.c2
-rw-r--r--drivers/net/wireguard/device.c39
-rw-r--r--drivers/net/wireguard/device.h9
-rw-r--r--drivers/net/wireguard/main.c8
-rw-r--r--drivers/net/wireguard/queueing.c6
-rw-r--r--drivers/net/wireguard/queueing.h2
-rw-r--r--drivers/net/wireguard/ratelimiter.c4
-rw-r--r--drivers/net/wireguard/receive.c39
-rw-r--r--drivers/net/wireguard/socket.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/Kconfig14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.h2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/Kconfig4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00usb.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h6
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.c26
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.h4
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem_ops.c7
-rw-r--r--drivers/net/xen-netback/common.h1
-rw-r--r--drivers/net/xen-netback/rx.c77
-rw-r--r--drivers/net/xen-netfront.c125
-rw-r--r--drivers/nfc/st21nfca/i2c.c29
-rw-r--r--drivers/nfc/virtual_ncidev.c2
-rw-r--r--drivers/nvme/host/core.c52
-rw-r--r--drivers/nvme/host/fabrics.c3
-rw-r--r--drivers/nvme/host/multipath.c3
-rw-r--r--drivers/nvme/host/nvme.h2
-rw-r--r--drivers/nvme/host/tcp.c61
-rw-r--r--drivers/nvme/host/zns.c5
-rw-r--r--drivers/nvme/target/io-cmd-file.c4
-rw-r--r--drivers/nvme/target/tcp.c53
-rw-r--r--drivers/of/irq.c27
-rw-r--r--drivers/of/platform.c4
-rw-r--r--drivers/pci/controller/Kconfig4
-rw-r--r--drivers/pci/controller/dwc/pci-exynos.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-ep.c1
-rw-r--r--drivers/pci/controller/pci-aardvark.c9
-rw-r--r--drivers/pci/controller/pcie-apple.c14
-rw-r--r--drivers/pci/msi.c15
-rw-r--r--drivers/phy/hisilicon/phy-hi3670-pcie.c4
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-utmi.c4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c26
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c3
-rw-r--r--drivers/phy/qualcomm/phy-qcom-usb-hsic.c2
-rw-r--r--drivers/phy/st/phy-stm32-usbphyc.c2
-rw-r--r--drivers/phy/ti/phy-am654-serdes.c2
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c2
-rw-r--r--drivers/phy/ti/phy-omap-usb2.c6
-rw-r--r--drivers/phy/ti/phy-tusb1210.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c29
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c8
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c8
-rw-r--r--drivers/platform/chrome/cros_ec_ishtp.c14
-rw-r--r--drivers/platform/mellanox/mlxbf-pmc.c4
-rw-r--r--drivers/platform/x86/Makefile2
-rw-r--r--drivers/platform/x86/amd-pmc.c5
-rw-r--r--drivers/platform/x86/apple-gmux.c2
-rw-r--r--drivers/platform/x86/intel/Kconfig15
-rw-r--r--drivers/platform/x86/intel/hid.c7
-rw-r--r--drivers/platform/x86/intel/ishtp_eclite.c14
-rw-r--r--drivers/platform/x86/intel/pmc/pltdrv.c2
-rw-r--r--drivers/platform/x86/lg-laptop.c12
-rw-r--r--drivers/platform/x86/system76_acpi.c58
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c6
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c18
-rw-r--r--drivers/powercap/dtpm.c5
-rw-r--r--drivers/reset/tegra/reset-bpmp.c9
-rw-r--r--drivers/scsi/libiscsi.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c9
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c59
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c6
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c38
-rw-r--r--drivers/scsi/qedi/qedi_fw.c37
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c2
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_edif.c2
-rw-r--r--drivers/scsi/scsi_debug.c45
-rw-r--r--drivers/scsi/scsi_sysfs.c2
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.c1
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c18
-rw-r--r--drivers/scsi/ufs/ufshpb.c2
-rw-r--r--drivers/scsi/virtio_scsi.c1
-rw-r--r--drivers/scsi/vmw_pvscsi.c7
-rw-r--r--drivers/soc/imx/imx8m-blk-ctrl.c19
-rw-r--r--drivers/soc/imx/soc-imx.c4
-rw-r--r--drivers/soc/tegra/common.c25
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c2
-rw-r--r--drivers/soc/tegra/fuse/fuse.h2
-rw-r--r--drivers/spi/spi-armada-3700.c2
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/fbtft/fb_ssd1351.c4
-rw-r--r--drivers/staging/fbtft/fbtft-core.c9
-rw-r--r--drivers/staging/greybus/audio_helper.c8
-rw-r--r--drivers/staging/netlogic/Kconfig9
-rw-r--r--drivers/staging/netlogic/Makefile2
-rw-r--r--drivers/staging/netlogic/TODO11
-rw-r--r--drivers/staging/netlogic/platform_net.c219
-rw-r--r--drivers/staging/netlogic/platform_net.h21
-rw-r--r--drivers/staging/netlogic/xlr_net.c1080
-rw-r--r--drivers/staging/netlogic/xlr_net.h1079
-rw-r--r--drivers/staging/r8188eu/core/rtw_mlme_ext.c6
-rw-r--r--drivers/staging/r8188eu/os_dep/ioctl_linux.c8
-rw-r--r--drivers/staging/r8188eu/os_dep/mlme_linux.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c3
-rw-r--r--drivers/target/target_core_fabric_configfs.c16
-rw-r--r--drivers/target/target_core_spc.c14
-rw-r--r--drivers/tee/amdtee/core.c5
-rw-r--r--drivers/tee/optee/core.c6
-rw-r--r--drivers/tee/optee/ffa_abi.c7
-rw-r--r--drivers/tee/optee/smc_abi.c2
-rw-r--r--drivers/tee/tee_shm.c174
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c2
-rw-r--r--drivers/tty/hvc/hvc_xen.c31
-rw-r--r--drivers/tty/n_hdlc.c23
-rw-r--r--drivers/tty/serial/8250/8250_bcm7271.c13
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c20
-rw-r--r--drivers/tty/serial/8250/8250_pci.c39
-rw-r--r--drivers/tty/serial/8250/8250_port.c7
-rw-r--r--drivers/tty/serial/Kconfig2
-rw-r--r--drivers/tty/serial/amba-pl011.c1
-rw-r--r--drivers/tty/serial/fsl_lpuart.c1
-rw-r--r--drivers/tty/serial/liteuart.c20
-rw-r--r--drivers/tty/serial/msm_serial.c3
-rw-r--r--drivers/tty/serial/serial-tegra.c4
-rw-r--r--drivers/tty/serial/serial_core.c18
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c20
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.c12
-rw-r--r--drivers/usb/cdns3/cdnsp-mem.c3
-rw-r--r--drivers/usb/cdns3/cdnsp-ring.c11
-rw-r--r--drivers/usb/cdns3/cdnsp-trace.h4
-rw-r--r--drivers/usb/cdns3/host.c1
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c18
-rw-r--r--drivers/usb/core/config.c6
-rw-r--r--drivers/usb/core/hub.c24
-rw-r--r--drivers/usb/core/quirks.c6
-rw-r--r--drivers/usb/dwc2/gadget.c17
-rw-r--r--drivers/usb/dwc2/hcd_queue.c2
-rw-r--r--drivers/usb/dwc2/platform.c3
-rw-r--r--drivers/usb/dwc3/core.c8
-rw-r--r--drivers/usb/dwc3/core.h2
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c15
-rw-r--r--drivers/usb/dwc3/gadget.c39
-rw-r--r--drivers/usb/early/xhci-dbc.c15
-rw-r--r--drivers/usb/gadget/composite.c14
-rw-r--r--drivers/usb/gadget/function/f_fs.c9
-rw-r--r--drivers/usb/gadget/function/u_ether.c16
-rw-r--r--drivers/usb/gadget/legacy/dbgp.c15
-rw-r--r--drivers/usb/gadget/legacy/inode.c16
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c5
-rw-r--r--drivers/usb/host/xhci-hub.c1
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c2
-rw-r--r--drivers/usb/host/xhci-pci.c11
-rw-r--r--drivers/usb/host/xhci-ring.c22
-rw-r--r--drivers/usb/host/xhci-tegra.c41
-rw-r--r--drivers/usb/host/xhci.c26
-rw-r--r--drivers/usb/mtu3/mtu3_gadget.c12
-rw-r--r--drivers/usb/mtu3/mtu3_qmu.c7
-rw-r--r--drivers/usb/serial/cp210x.c6
-rw-r--r--drivers/usb/serial/option.c13
-rw-r--r--drivers/usb/serial/pl2303.c1
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c6
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c22
-rw-r--r--drivers/usb/typec/tipd/core.c35
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c4
-rw-r--r--drivers/vdpa/vdpa.c3
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c7
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c6
-rw-r--r--drivers/vfio/pci/vfio_pci_igd.c5
-rw-r--r--drivers/vfio/vfio.c28
-rw-r--r--drivers/vhost/vdpa.c4
-rw-r--r--drivers/vhost/vsock.c8
-rw-r--r--drivers/video/console/vgacon.c14
-rw-r--r--drivers/video/fbdev/simplefb.c21
-rw-r--r--drivers/video/fbdev/xen-fbfront.c1
-rw-r--r--drivers/virt/nitro_enclaves/ne_misc_dev.c5
-rw-r--r--drivers/virtio/virtio_ring.c62
-rw-r--r--drivers/xen/Kconfig8
-rw-r--r--drivers/xen/events/events_base.c6
-rw-r--r--drivers/xen/pvcalls-front.c1
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c27
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c14
-rw-r--r--fs/afs/file.c5
-rw-r--r--fs/afs/super.c1
-rw-r--r--fs/aio.c186
-rw-r--r--fs/btrfs/ctree.c17
-rw-r--r--fs/btrfs/ctree.h7
-rw-r--r--fs/btrfs/delalloc-space.c12
-rw-r--r--fs/btrfs/disk-io.c8
-rw-r--r--fs/btrfs/extent-tree.c16
-rw-r--r--fs/btrfs/extent_io.c22
-rw-r--r--fs/btrfs/free-space-tree.c4
-rw-r--r--fs/btrfs/ioctl.c16
-rw-r--r--fs/btrfs/lzo.c2
-rw-r--r--fs/btrfs/qgroup.c3
-rw-r--r--fs/btrfs/root-tree.c3
-rw-r--r--fs/btrfs/tree-log.c7
-rw-r--r--fs/btrfs/volumes.c6
-rw-r--r--fs/btrfs/zoned.c2
-rw-r--r--fs/ceph/caps.c16
-rw-r--r--fs/ceph/file.c20
-rw-r--r--fs/ceph/mds_client.c3
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/connect.c25
-rw-r--r--fs/cifs/fs_context.c38
-rw-r--r--fs/cifs/fscache.c46
-rw-r--r--fs/cifs/inode.c6
-rw-r--r--fs/cifs/sess.c55
-rw-r--r--fs/cifs/smb2pdu.c2
-rw-r--r--fs/erofs/utils.c8
-rw-r--r--fs/file.c68
-rw-r--r--fs/fuse/dev.c10
-rw-r--r--fs/gfs2/glock.c10
-rw-r--r--fs/gfs2/inode.c109
-rw-r--r--fs/inode.c2
-rw-r--r--fs/io-wq.c38
-rw-r--r--fs/io_uring.c91
-rw-r--r--fs/iomap/buffered-io.c26
-rw-r--r--fs/ksmbd/ndr.c2
-rw-r--r--fs/ksmbd/smb2ops.c3
-rw-r--r--fs/ksmbd/smb2pdu.c59
-rw-r--r--fs/namespace.c9
-rw-r--r--fs/netfs/read_helper.c25
-rw-r--r--fs/nfs/inode.c1
-rw-r--r--fs/nfs/nfs42proc.c4
-rw-r--r--fs/nfs/nfs42xdr.c3
-rw-r--r--fs/nfs/nfs4state.c4
-rw-r--r--fs/nfs/nfstrace.h1
-rw-r--r--fs/nfsd/nfs3proc.c11
-rw-r--r--fs/nfsd/nfs4recover.c1
-rw-r--r--fs/nfsd/nfs4state.c9
-rw-r--r--fs/nfsd/nfsctl.c14
-rw-r--r--fs/nfsd/nfsproc.c8
-rw-r--r--fs/ntfs/Kconfig1
-rw-r--r--fs/signalfd.c12
-rw-r--r--fs/smbfs_common/cifs_arc4.c13
-rw-r--r--fs/tracefs/inode.c76
-rw-r--r--fs/xfs/libxfs/xfs_attr.c17
-rw-r--r--fs/xfs/xfs_icache.c21
-rw-r--r--fs/xfs/xfs_inode.c1
-rw-r--r--fs/xfs/xfs_inode.h4
-rw-r--r--fs/xfs/xfs_super.c14
-rw-r--r--fs/zonefs/super.c1
-rw-r--r--include/asm-generic/cacheflush.h6
-rw-r--r--include/drm/bridge/dw_mipi_dsi.h4
-rw-r--r--include/drm/drm_device.h5
-rw-r--r--include/drm/drm_dp_helper.h7
-rw-r--r--include/drm/drm_drv.h5
-rw-r--r--include/drm/drm_format_helper.h3
-rw-r--r--include/drm/drm_gem_cma_helper.h189
-rw-r--r--include/drm/drm_gem_ttm_helper.h2
-rw-r--r--include/drm/drm_gem_vram_helper.h2
-rw-r--r--include/drm/drm_legacy.h15
-rw-r--r--include/drm/drm_mipi_dbi.h2
-rw-r--r--include/drm/drm_mm.h4
-rw-r--r--include/drm/drm_plane.h2
-rw-r--r--include/drm/ttm/ttm_bo_api.h1
-rw-r--r--include/drm/ttm/ttm_placement.h11
-rw-r--r--include/linux/acpi.h7
-rw-r--r--include/linux/bpf.h17
-rw-r--r--include/linux/btf.h14
-rw-r--r--include/linux/cacheflush.h18
-rw-r--r--include/linux/cacheinfo.h1
-rw-r--r--include/linux/compiler.h4
-rw-r--r--include/linux/delay.h14
-rw-r--r--include/linux/device/driver.h1
-rw-r--r--include/linux/efi.h6
-rw-r--r--include/linux/filter.h5
-rw-r--r--include/linux/fs.h1
-rw-r--r--include/linux/gfp.h2
-rw-r--r--include/linux/hid.h5
-rw-r--r--include/linux/highmem.h47
-rw-r--r--include/linux/host1x.h76
-rw-r--r--include/linux/instrumentation.h4
-rw-r--r--include/linux/intel-ish-client-if.h4
-rw-r--r--include/linux/kprobes.h2
-rw-r--r--include/linux/memblock.h4
-rw-r--r--include/linux/mhi.h13
-rw-r--r--include/linux/mlx5/mlx5_ifc.h5
-rw-r--r--include/linux/mmzone.h1
-rw-r--r--include/linux/mod_devicetable.h14
-rw-r--r--include/linux/netdevice.h21
-rw-r--r--include/linux/page-flags.h14
-rw-r--r--include/linux/pagemap.h27
-rw-r--r--include/linux/percpu-refcount.h2
-rw-r--r--include/linux/phy.h11
-rw-r--r--include/linux/pm_runtime.h2
-rw-r--r--include/linux/ptp_classify.h1
-rw-r--r--include/linux/regulator/driver.h14
-rw-r--r--include/linux/sched/cputime.h5
-rw-r--r--include/linux/siphash.h14
-rw-r--r--include/linux/skbuff.h3
-rw-r--r--include/linux/tee_drv.h4
-rw-r--r--include/linux/virtio.h2
-rw-r--r--include/linux/virtio_net.h25
-rw-r--r--include/linux/wait.h26
-rw-r--r--include/net/bond_alb.h2
-rw-r--r--include/net/busy_poll.h13
-rw-r--r--include/net/dst_cache.h11
-rw-r--r--include/net/fib_rules.h4
-rw-r--r--include/net/ip6_fib.h1
-rw-r--r--include/net/ip_fib.h2
-rw-r--r--include/net/ipv6_stubs.h1
-rw-r--r--include/net/netfilter/nf_conntrack.h6
-rw-r--r--include/net/netns/ipv4.h2
-rw-r--r--include/net/nl802154.h7
-rw-r--r--include/net/pkt_sched.h16
-rw-r--r--include/net/sch_generic.h2
-rw-r--r--include/net/sctp/sctp.h6
-rw-r--r--include/net/sctp/structs.h3
-rw-r--r--include/net/sock.h32
-rw-r--r--include/soc/mscc/ocelot_vcap.h2
-rw-r--r--include/soc/tegra/common.h15
-rw-r--r--include/sound/soc-acpi.h2
-rw-r--r--include/trace/events/rpcgss.h2
-rw-r--r--include/trace/events/vmscan.h4
-rw-r--r--include/uapi/asm-generic/poll.h2
-rw-r--r--include/uapi/drm/amdgpu_drm.h2
-rw-r--r--include/uapi/drm/drm.h18
-rw-r--r--include/uapi/drm/drm_fourcc.h11
-rw-r--r--include/uapi/drm/virtgpu_drm.h7
-rw-r--r--include/uapi/drm/vmwgfx_drm.h1
-rw-r--r--include/uapi/linux/byteorder/big_endian.h1
-rw-r--r--include/uapi/linux/byteorder/little_endian.h1
-rw-r--r--include/uapi/linux/if_ether.h2
-rw-r--r--include/uapi/linux/kfd_sysfs.h108
-rw-r--r--include/uapi/linux/mptcp.h18
-rw-r--r--include/uapi/linux/nfc.h6
-rw-r--r--include/uapi/linux/resource.h13
-rw-r--r--include/xen/events.h1
-rw-r--r--include/xen/xenbus.h1
-rw-r--r--kernel/audit.c21
-rw-r--r--kernel/bpf/btf.c11
-rw-r--r--kernel/bpf/verifier.c55
-rw-r--r--kernel/cpu.c7
-rw-r--r--kernel/crash_core.c11
-rw-r--r--kernel/events/core.c3
-rw-r--r--kernel/kprobes.c3
-rw-r--r--kernel/locking/rtmutex.c2
-rw-r--r--kernel/locking/rwsem.c182
-rw-r--r--kernel/power/hibernate.c6
-rw-r--r--kernel/power/user.c2
-rw-r--r--kernel/sched/core.c10
-rw-r--r--kernel/sched/cputime.c12
-rw-r--r--kernel/sched/wait.c7
-rw-r--r--kernel/signal.c9
-rw-r--r--kernel/softirq.c3
-rw-r--r--kernel/time/tick-sched.c7
-rw-r--r--kernel/time/timekeeping.c3
-rw-r--r--kernel/time/timer.c16
-rw-r--r--kernel/trace/ftrace.c8
-rw-r--r--kernel/trace/trace.h24
-rw-r--r--kernel/trace/trace_events.c12
-rw-r--r--kernel/trace/trace_events_hist.c2
-rw-r--r--kernel/trace/trace_events_synth.c11
-rw-r--r--kernel/trace/trace_uprobe.c1
-rw-r--r--kernel/trace/tracing_map.c3
-rw-r--r--kernel/ucount.c15
-rw-r--r--lib/Kconfig.debug6
-rw-r--r--lib/siphash.c12
-rw-r--r--mm/Kconfig2
-rw-r--r--mm/backing-dev.c7
-rw-r--r--mm/damon/core.c20
-rw-r--r--mm/damon/dbgfs.c15
-rw-r--r--mm/damon/vaddr-test.h79
-rw-r--r--mm/damon/vaddr.c2
-rw-r--r--mm/filemap.c2
-rw-r--r--mm/highmem.c2
-rw-r--r--mm/hugetlb.c27
-rw-r--r--mm/kfence/core.c1
-rw-r--r--mm/memcontrol.c108
-rw-r--r--mm/memory-failure.c14
-rw-r--r--mm/memory_hotplug.c1
-rw-r--r--mm/mempolicy.c3
-rw-r--r--mm/shmem.c3
-rw-r--r--mm/slub.c15
-rw-r--r--mm/swap_slots.c1
-rw-r--r--mm/util.c2
-rw-r--r--mm/vmscan.c65
-rw-r--r--net/8021q/vlan.c3
-rw-r--r--net/8021q/vlan_dev.c3
-rw-r--r--net/ax25/af_ax25.c4
-rw-r--r--net/bridge/br_ioctl.c2
-rw-r--r--net/bridge/br_multicast.c32
-rw-r--r--net/bridge/br_netlink.c4
-rw-r--r--net/bridge/br_private.h12
-rw-r--r--net/bridge/br_sysfs_br.c4
-rw-r--r--net/bridge/br_vlan_options.c4
-rw-r--r--net/core/dev.c13
-rw-r--r--net/core/devlink.c16
-rw-r--r--net/core/dst_cache.c19
-rw-r--r--net/core/fib_rules.c2
-rw-r--r--net/core/flow_dissector.c3
-rw-r--r--net/core/neighbour.c4
-rw-r--r--net/core/skbuff.c2
-rw-r--r--net/core/skmsg.c5
-rw-r--r--net/core/sock_map.c15
-rw-r--r--net/dsa/tag_ocelot.c6
-rw-r--r--net/ethtool/ioctl.c2
-rw-r--r--net/ethtool/netlink.c3
-rw-r--r--net/ipv4/af_inet.c12
-rw-r--r--net/ipv4/fib_frontend.c2
-rw-r--r--net/ipv4/fib_rules.c5
-rw-r--r--net/ipv4/fib_semantics.c4
-rw-r--r--net/ipv4/inet_connection_sock.c2
-rw-r--r--net/ipv4/inet_diag.c4
-rw-r--r--net/ipv4/nexthop.c35
-rw-r--r--net/ipv4/tcp.c3
-rw-r--r--net/ipv4/tcp_cubic.c5
-rw-r--r--net/ipv4/tcp_input.c2
-rw-r--r--net/ipv4/tcp_ipv4.c11
-rw-r--r--net/ipv4/tcp_minisocks.c4
-rw-r--r--net/ipv4/udp.c10
-rw-r--r--net/ipv6/af_inet6.c1
-rw-r--r--net/ipv6/fib6_rules.c4
-rw-r--r--net/ipv6/ip6_offload.c6
-rw-r--r--net/ipv6/ip6_output.c2
-rw-r--r--net/ipv6/ip6_vti.c2
-rw-r--r--net/ipv6/raw.c3
-rw-r--r--net/ipv6/route.c19
-rw-r--r--net/ipv6/seg6_iptunnel.c8
-rw-r--r--net/ipv6/sit.c1
-rw-r--r--net/ipv6/tcp_ipv6.c11
-rw-r--r--net/ipv6/udp.c6
-rw-r--r--net/mac80211/agg-rx.c5
-rw-r--r--net/mac80211/agg-tx.c16
-rw-r--r--net/mac80211/cfg.c3
-rw-r--r--net/mac80211/driver-ops.h5
-rw-r--r--net/mac80211/mlme.c13
-rw-r--r--net/mac80211/rx.c1
-rw-r--r--net/mac80211/sta_info.c21
-rw-r--r--net/mac80211/sta_info.h2
-rw-r--r--net/mac80211/tx.c10
-rw-r--r--net/mac80211/util.c23
-rw-r--r--net/mctp/route.c9
-rw-r--r--net/mctp/test/utils.c2
-rw-r--r--net/mpls/af_mpls.c97
-rw-r--r--net/mpls/internal.h2
-rw-r--r--net/mptcp/options.c32
-rw-r--r--net/mptcp/pm_netlink.c3
-rw-r--r--net/mptcp/protocol.c57
-rw-r--r--net/mptcp/protocol.h17
-rw-r--r--net/mptcp/sockopt.c1
-rw-r--r--net/ncsi/ncsi-cmd.c24
-rw-r--r--net/ncsi/ncsi-netlink.c6
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c8
-rw-r--r--net/netfilter/nf_conntrack_core.c6
-rw-r--r--net/netfilter/nf_conntrack_netlink.c13
-rw-r--r--net/netfilter/nf_flow_table_core.c4
-rw-r--r--net/netfilter/nf_flow_table_offload.c4
-rw-r--r--net/netfilter/nf_tables_api.c4
-rw-r--r--net/netfilter/nfnetlink_log.c3
-rw-r--r--net/netfilter/nfnetlink_queue.c5
-rw-r--r--net/netfilter/nft_exthdr.c11
-rw-r--r--net/netfilter/nft_payload.c1
-rw-r--r--net/netfilter/nft_set_pipapo_avx2.c2
-rw-r--r--net/netfilter/xt_IDLETIMER.c4
-rw-r--r--net/netlink/af_netlink.c5
-rw-r--r--net/nfc/netlink.c12
-rw-r--r--net/openvswitch/flow.c8
-rw-r--r--net/packet/af_packet.c5
-rw-r--r--net/phonet/pep.c3
-rw-r--r--net/rds/connection.c1
-rw-r--r--net/rds/tcp.c2
-rw-r--r--net/rxrpc/conn_client.c14
-rw-r--r--net/rxrpc/peer_object.c14
-rw-r--r--net/sched/act_ct.c15
-rw-r--r--net/sched/cls_api.c8
-rw-r--r--net/sched/cls_flower.c6
-rw-r--r--net/sched/sch_cake.c6
-rw-r--r--net/sched/sch_ets.c8
-rw-r--r--net/sched/sch_fq_pie.c1
-rw-r--r--net/sched/sch_frag.c3
-rw-r--r--net/sctp/diag.c12
-rw-r--r--net/sctp/endpointola.c23
-rw-r--r--net/sctp/socket.c23
-rw-r--r--net/smc/af_smc.c18
-rw-r--r--net/smc/smc.h5
-rw-r--r--net/smc/smc_cdc.c52
-rw-r--r--net/smc/smc_cdc.h2
-rw-r--r--net/smc/smc_close.c14
-rw-r--r--net/smc/smc_core.c69
-rw-r--r--net/smc/smc_core.h6
-rw-r--r--net/smc/smc_ib.c4
-rw-r--r--net/smc/smc_ib.h1
-rw-r--r--net/smc/smc_llc.c2
-rw-r--r--net/smc/smc_wr.c51
-rw-r--r--net/smc/smc_wr.h5
-rw-r--r--net/sunrpc/xprtsock.c10
-rw-r--r--net/tipc/crypto.c8
-rw-r--r--net/tls/tls_main.c47
-rw-r--r--net/tls/tls_sw.c44
-rw-r--r--net/unix/af_unix.c3
-rw-r--r--net/vmw_vsock/virtio_transport_common.c3
-rw-r--r--net/wireless/reg.c30
-rw-r--r--net/xdp/xsk.c4
-rw-r--r--net/xdp/xsk_buff_pool.c1
-rw-r--r--samples/ftrace/Makefile1
-rw-r--r--samples/ftrace/ftrace-direct-multi-modify.c152
-rw-r--r--scripts/mod/devicetable-offsets.c3
-rw-r--r--scripts/mod/file2alias.c24
-rwxr-xr-xscripts/recordmcount.pl2
-rw-r--r--security/selinux/hooks.c35
-rw-r--r--security/selinux/ss/hashtab.c17
-rw-r--r--security/tomoyo/util.c31
-rw-r--r--sound/core/control_compat.c3
-rw-r--r--sound/core/jack.c4
-rw-r--r--sound/core/oss/pcm_oss.c37
-rw-r--r--sound/core/rawmidi.c1
-rw-r--r--sound/drivers/opl3/opl3_midi.c2
-rw-r--r--sound/hda/intel-dsp-config.c19
-rw-r--r--sound/hda/intel-sdw-acpi.c13
-rw-r--r--sound/pci/cmipci.c4
-rw-r--r--sound/pci/ctxfi/ctamixer.c14
-rw-r--r--sound/pci/ctxfi/ctdaio.c16
-rw-r--r--sound/pci/ctxfi/ctresource.c7
-rw-r--r--sound/pci/ctxfi/ctresource.h4
-rw-r--r--sound/pci/ctxfi/ctsrc.c7
-rw-r--r--sound/pci/hda/hda_intel.c12
-rw-r--r--sound/pci/hda/hda_local.h9
-rw-r--r--sound/pci/hda/patch_cs8409.c5
-rw-r--r--sound/pci/hda/patch_hdmi.c24
-rw-r--r--sound/pci/hda/patch_realtek.c137
-rw-r--r--sound/soc/amd/yc/pci-acp6x.c3
-rw-r--r--sound/soc/codecs/cs35l41-spi.c32
-rw-r--r--sound/soc/codecs/cs35l41.c21
-rw-r--r--sound/soc/codecs/cs35l41.h4
-rw-r--r--sound/soc/codecs/lpass-rx-macro.c2
-rw-r--r--sound/soc/codecs/rk817_codec.c1
-rw-r--r--sound/soc/codecs/rt1011.c55
-rw-r--r--sound/soc/codecs/rt1011.h7
-rw-r--r--sound/soc/codecs/rt5682-i2c.c1
-rw-r--r--sound/soc/codecs/rt5682.c52
-rw-r--r--sound/soc/codecs/rt5682.h1
-rw-r--r--sound/soc/codecs/rt5682s.c10
-rw-r--r--sound/soc/codecs/rt9120.c58
-rw-r--r--sound/soc/codecs/tas2770.c4
-rw-r--r--sound/soc/codecs/wcd934x.c129
-rw-r--r--sound/soc/codecs/wcd938x.c3
-rw-r--r--sound/soc/codecs/wm_adsp.c5
-rw-r--r--sound/soc/codecs/wsa881x.c16
-rw-r--r--sound/soc/intel/boards/sof_sdw.c69
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-adl-match.c105
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cml-match.c6
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-afe-pcm.c51
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650.c8
-rw-r--r--sound/soc/meson/aiu-encoder-i2s.c33
-rw-r--r--sound/soc/meson/aiu-fifo-i2s.c19
-rw-r--r--sound/soc/meson/aiu-fifo.c6
-rw-r--r--sound/soc/qcom/qdsp6/audioreach.h4
-rw-r--r--sound/soc/qcom/qdsp6/q6adm.c4
-rw-r--r--sound/soc/qcom/qdsp6/q6asm-dai.c19
-rw-r--r--sound/soc/qcom/qdsp6/q6prm.c53
-rw-r--r--sound/soc/qcom/qdsp6/q6routing.c12
-rw-r--r--sound/soc/rockchip/rockchip_i2s_tdm.c52
-rw-r--r--sound/soc/sh/rcar/dma.c2
-rw-r--r--sound/soc/soc-acpi.c4
-rw-r--r--sound/soc/soc-dapm.c29
-rw-r--r--sound/soc/soc-topology.c3
-rw-r--r--sound/soc/sof/Kconfig2
-rw-r--r--sound/soc/sof/control.c8
-rw-r--r--sound/soc/sof/intel/hda-bus.c17
-rw-r--r--sound/soc/sof/intel/hda-codec.c14
-rw-r--r--sound/soc/sof/intel/hda-dsp.c3
-rw-r--r--sound/soc/sof/intel/hda.c23
-rw-r--r--sound/soc/sof/intel/pci-tgl.c4
-rw-r--r--sound/soc/stm/stm32_i2s.c2
-rw-r--r--sound/soc/tegra/tegra186_dspk.c181
-rw-r--r--sound/soc/tegra/tegra210_admaif.c140
-rw-r--r--sound/soc/tegra/tegra210_adx.c7
-rw-r--r--sound/soc/tegra/tegra210_ahub.c11
-rw-r--r--sound/soc/tegra/tegra210_amx.c7
-rw-r--r--sound/soc/tegra/tegra210_dmic.c184
-rw-r--r--sound/soc/tegra/tegra210_i2s.c296
-rw-r--r--sound/soc/tegra/tegra210_mixer.c30
-rw-r--r--sound/soc/tegra/tegra210_mvc.c38
-rw-r--r--sound/soc/tegra/tegra210_sfc.c127
-rw-r--r--sound/soc/tegra/tegra_asoc_machine.c11
-rw-r--r--sound/soc/tegra/tegra_asoc_machine.h1
-rw-r--r--sound/usb/mixer_quirks.c10
-rw-r--r--sound/usb/pcm.c14
-rw-r--r--sound/xen/xen_snd_front.c1
-rw-r--r--tools/bpf/resolve_btfids/main.c8
-rw-r--r--tools/build/Makefile.feature1
-rw-r--r--tools/build/feature/Makefile4
-rw-r--r--tools/build/feature/test-all.c5
-rw-r--r--tools/build/feature/test-libpython-version.c11
-rw-r--r--tools/include/linux/debug_locks.h14
-rw-r--r--tools/include/linux/hardirq.h12
-rw-r--r--tools/include/linux/irqflags.h39
-rw-r--r--tools/include/linux/kernel.h22
-rw-r--r--tools/include/linux/lockdep.h72
-rw-r--r--tools/include/linux/math.h25
-rw-r--r--tools/include/linux/proc_fs.h4
-rw-r--r--tools/include/linux/spinlock.h2
-rw-r--r--tools/include/linux/stacktrace.h33
-rw-r--r--tools/include/uapi/linux/if_link.h293
-rw-r--r--tools/objtool/elf.c1
-rw-r--r--tools/objtool/objtool.c4
-rw-r--r--tools/perf/Makefile.config2
-rw-r--r--tools/perf/arch/powerpc/entry/syscalls/syscall.tbl1
-rw-r--r--tools/perf/arch/s390/entry/syscalls/syscall.tbl1
-rw-r--r--tools/perf/bench/sched-messaging.c4
-rw-r--r--tools/perf/builtin-inject.c15
-rw-r--r--tools/perf/builtin-script.c2
-rw-r--r--tools/perf/scripts/python/intel-pt-events.py23
-rw-r--r--tools/perf/tests/expr.c4
-rw-r--r--tools/perf/tests/parse-metric.c1
-rw-r--r--tools/perf/ui/tui/setup.c8
-rw-r--r--tools/perf/util/bpf_skel/bperf.h14
-rw-r--r--tools/perf/util/bpf_skel/bperf_follower.bpf.c19
-rw-r--r--tools/perf/util/bpf_skel/bperf_leader.bpf.c19
-rw-r--r--tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c2
-rw-r--r--tools/perf/util/event.h5
-rw-r--r--tools/perf/util/expr.c12
-rw-r--r--tools/perf/util/header.c15
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c85
-rw-r--r--tools/perf/util/intel-pt.c2
-rw-r--r--tools/perf/util/perf_regs.c3
-rw-r--r--tools/perf/util/pmu.c23
-rw-r--r--tools/perf/util/python.c2
-rw-r--r--tools/perf/util/smt.c2
-rw-r--r--tools/power/acpi/Makefile.config1
-rw-r--r--tools/power/acpi/Makefile.rules1
-rw-r--r--tools/testing/radix-tree/linux/lockdep.h3
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c20
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c16
-rw-r--r--tools/testing/selftests/bpf/progs/test_module_attach.c12
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c2
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c86
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_fetch.c94
-rw-r--r--tools/testing/selftests/bpf/verifier/search_pruning.c71
-rw-r--r--tools/testing/selftests/bpf/verifier/spill_fill.c32
-rw-r--r--tools/testing/selftests/bpf/verifier/value_ptr_arith.c23
-rw-r--r--tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c632
-rw-r--r--tools/testing/selftests/damon/.gitignore2
-rw-r--r--tools/testing/selftests/damon/Makefile7
-rw-r--r--tools/testing/selftests/damon/_debugfs_common.sh52
-rw-r--r--tools/testing/selftests/damon/debugfs_attrs.sh73
-rw-r--r--tools/testing/selftests/damon/debugfs_empty_targets.sh13
-rw-r--r--tools/testing/selftests/damon/debugfs_huge_count_read_write.sh22
-rw-r--r--tools/testing/selftests/damon/debugfs_schemes.sh19
-rw-r--r--tools/testing/selftests/damon/debugfs_target_ids.sh19
-rw-r--r--tools/testing/selftests/damon/huge_count_read_write.c39
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles_occ.sh30
-rw-r--r--tools/testing/selftests/kvm/.gitignore2
-rw-r--r--tools/testing/selftests/kvm/Makefile2
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h1
-rw-r--r--tools/testing/selftests/kvm/kvm_create_max_vcpus.c30
-rw-r--r--tools/testing/selftests/kvm/kvm_page_table_test.c2
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c7
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/processor.c68
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_features.c140
-rw-r--r--tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c165
-rw-r--r--tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/userspace_io_test.c114
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c105
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c17
-rw-r--r--tools/testing/selftests/net/Makefile1
-rwxr-xr-xtools/testing/selftests/net/fcnal-test.sh57
-rwxr-xr-xtools/testing/selftests/net/fib_nexthops.sh63
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh59
-rw-r--r--tools/testing/selftests/net/forwarding/forwarding.config.sample2
-rwxr-xr-xtools/testing/selftests/net/icmp_redirect.sh2
-rw-r--r--tools/testing/selftests/net/mptcp/config1
-rw-r--r--tools/testing/selftests/net/tls.c557
-rw-r--r--tools/testing/selftests/net/toeplitz.c2
-rwxr-xr-xtools/testing/selftests/net/udpgro_fwd.sh6
-rw-r--r--tools/testing/selftests/net/udpgso.c12
-rw-r--r--tools/testing/selftests/net/udpgso_bench_tx.c8
-rw-r--r--tools/testing/selftests/netfilter/Makefile3
-rwxr-xr-xtools/testing/selftests/netfilter/conntrack_vrf.sh241
-rwxr-xr-xtools/testing/selftests/netfilter/nft_concat_range.sh24
-rwxr-xr-xtools/testing/selftests/netfilter/nft_nat.sh33
-rwxr-xr-xtools/testing/selftests/netfilter/nft_queue.sh54
-rwxr-xr-xtools/testing/selftests/netfilter/nft_zones_many.sh19
-rw-r--r--tools/testing/selftests/tc-testing/config2
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json2
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/mq.json12
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.py8
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.sh1
-rw-r--r--tools/testing/selftests/vm/userfaultfd.c16
-rwxr-xr-xtools/testing/selftests/wireguard/netns.sh30
-rw-r--r--tools/testing/selftests/wireguard/qemu/debug.config2
-rw-r--r--tools/testing/selftests/wireguard/qemu/kernel.config1
-rw-r--r--virt/kvm/kvm_main.c56
2123 files changed, 45855 insertions, 25138 deletions
diff --git a/.mailmap b/.mailmap
index 6277bb27b4bf..7e3fe690b86e 100644
--- a/.mailmap
+++ b/.mailmap
@@ -10,10 +10,12 @@
# Please keep this list dictionary sorted.
#
Aaron Durbin <adurbin@google.com>
+Abhinav Kumar <quic_abhinavk@quicinc.com> <abhinavk@codeaurora.org>
Adam Oldham <oldhamca@gmail.com>
Adam Radford <aradford@gmail.com>
Adriana Reus <adi.reus@gmail.com> <adriana.reus@intel.com>
Adrian Bunk <bunk@stusta.de>
+Akhil P Oommen <quic_akhilpo@quicinc.com> <akhilpo@codeaurora.org>
Alan Cox <alan@lxorguk.ukuu.org.uk>
Alan Cox <root@hraefn.swansea.linux.org.uk>
Aleksandar Markovic <aleksandar.markovic@mips.com> <aleksandar.markovic@imgtec.com>
@@ -126,6 +128,8 @@ Greg Kroah-Hartman <gregkh@suse.de>
Greg Kroah-Hartman <greg@kroah.com>
Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com>
Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com>
+Guo Ren <guoren@kernel.org> <guoren@linux.alibaba.com>
+Guo Ren <guoren@kernel.org> <ren_guo@c-sky.com>
Gustavo Padovan <gustavo@las.ic.unicamp.br>
Gustavo Padovan <padovan@profusion.mobi>
Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org>
@@ -170,6 +174,7 @@ Jeff Layton <jlayton@kernel.org> <jlayton@redhat.com>
Jens Axboe <axboe@suse.de>
Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
Jernej Skrabec <jernej.skrabec@gmail.com> <jernej.skrabec@siol.net>
+Jessica Zhang <quic_jesszhan@quicinc.com> <jesszhan@codeaurora.org>
Jiri Slaby <jirislaby@kernel.org> <jirislaby@gmail.com>
Jiri Slaby <jirislaby@kernel.org> <jslaby@novell.com>
Jiri Slaby <jirislaby@kernel.org> <jslaby@suse.com>
@@ -189,6 +194,7 @@ Juha Yrjola <at solidboot.com>
Juha Yrjola <juha.yrjola@nokia.com>
Juha Yrjola <juha.yrjola@solidboot.com>
Julien Thierry <julien.thierry.kdev@gmail.com> <julien.thierry@arm.com>
+Kalyan Thota <quic_kalyant@quicinc.com> <kalyan_t@codeaurora.org>
Kay Sievers <kay.sievers@vrfy.org>
Kees Cook <keescook@chromium.org> <kees.cook@canonical.com>
Kees Cook <keescook@chromium.org> <keescook@google.com>
@@ -200,9 +206,11 @@ Kenneth W Chen <kenneth.w.chen@intel.com>
Konstantin Khlebnikov <koct9i@gmail.com> <khlebnikov@yandex-team.ru>
Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
Koushik <raghavendra.koushik@neterion.com>
+Krishna Manikandan <quic_mkrishn@quicinc.com> <mkrishn@codeaurora.org>
Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com>
Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski@samsung.com>
Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+Kuogee Hsieh <quic_khsieh@quicinc.com> <khsieh@codeaurora.org>
Leonardo Bras <leobras.c@gmail.com> <leonardo@linux.ibm.com>
Leonid I Ananiev <leonid.i.ananiev@intel.com>
Leon Romanovsky <leon@kernel.org> <leon@leon.nu>
@@ -309,6 +317,7 @@ Qais Yousef <qsyousef@gmail.com> <qais.yousef@imgtec.com>
Quentin Monnet <quentin@isovalent.com> <quentin.monnet@netronome.com>
Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com>
Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl>
+Rajeev Nandan <quic_rajeevny@quicinc.com> <rajeevny@codeaurora.org>
Rajesh Shah <rajesh.shah@intel.com>
Ralf Baechle <ralf@linux-mips.org>
Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
@@ -323,6 +332,7 @@ Rui Saraiva <rmps@joel.ist.utl.pt>
Sachin P Sant <ssant@in.ibm.com>
Sakari Ailus <sakari.ailus@linux.intel.com> <sakari.ailus@iki.fi>
Sam Ravnborg <sam@mars.ravnborg.org>
+Sankeerth Billakanti <quic_sbillaka@quicinc.com> <sbillaka@codeaurora.org>
Santosh Shilimkar <santosh.shilimkar@oracle.org>
Santosh Shilimkar <ssantosh@kernel.org>
Sarangdhar Joshi <spjoshi@codeaurora.org>
diff --git a/Documentation/admin-guide/blockdev/drbd/figures.rst b/Documentation/admin-guide/blockdev/drbd/figures.rst
index bd9a4901fe46..9f73253ea353 100644
--- a/Documentation/admin-guide/blockdev/drbd/figures.rst
+++ b/Documentation/admin-guide/blockdev/drbd/figures.rst
@@ -25,6 +25,6 @@ Sub graphs of DRBD's state transitions
:alt: disk-states-8.dot
:align: center
-.. kernel-figure:: node-states-8.dot
- :alt: node-states-8.dot
+.. kernel-figure:: peer-states-8.dot
+ :alt: peer-states-8.dot
:align: center
diff --git a/Documentation/admin-guide/blockdev/drbd/node-states-8.dot b/Documentation/admin-guide/blockdev/drbd/peer-states-8.dot
index bfa54e1f8016..6dc3954954d6 100644
--- a/Documentation/admin-guide/blockdev/drbd/node-states-8.dot
+++ b/Documentation/admin-guide/blockdev/drbd/peer-states-8.dot
@@ -1,8 +1,3 @@
-digraph node_states {
- Secondary -> Primary [ label = "ioctl_set_state()" ]
- Primary -> Secondary [ label = "ioctl_set_state()" ]
-}
-
digraph peer_states {
Secondary -> Primary [ label = "recv state packet" ]
Primary -> Secondary [ label = "recv state packet" ]
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index eb258526d70d..b1fab3eb7b09 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1689,6 +1689,8 @@
architectures force reset to be always executed
i8042.unlock [HW] Unlock (ignore) the keylock
i8042.kbdreset [HW] Reset device connected to KBD port
+ i8042.probe_defer
+ [HW] Allow deferred probing upon i8042 probe errors
i810= [HW,DRM]
@@ -2413,8 +2415,12 @@
Default is 1 (enabled)
kvm-intel.emulate_invalid_guest_state=
- [KVM,Intel] Enable emulation of invalid guest states
- Default is 0 (disabled)
+ [KVM,Intel] Disable emulation of invalid guest state.
+ Ignored if kvm-intel.enable_unrestricted_guest=1, as
+ guest state is never invalid for unrestricted guests.
+ This param doesn't apply to nested guests (L2), as KVM
+ never emulates invalid L2 guest state.
+ Default is 1 (enabled)
kvm-intel.flexpriority=
[KVM,Intel] Disable FlexPriority feature (TPR shadow).
diff --git a/Documentation/arm64/pointer-authentication.rst b/Documentation/arm64/pointer-authentication.rst
index f127666ea3a8..e5dad2e40aa8 100644
--- a/Documentation/arm64/pointer-authentication.rst
+++ b/Documentation/arm64/pointer-authentication.rst
@@ -53,11 +53,10 @@ The number of bits that the PAC occupies in a pointer is 55 minus the
virtual address size configured by the kernel. For example, with a
virtual address size of 48, the PAC is 7 bits wide.
-Recent versions of GCC can compile code with APIAKey-based return
-address protection when passed the -msign-return-address option. This
-uses instructions in the HINT space (unless -march=armv8.3-a or higher
-is also passed), and such code can run on systems without the pointer
-authentication extension.
+When ARM64_PTR_AUTH_KERNEL is selected, the kernel will be compiled
+with HINT space pointer authentication instructions protecting
+function returns. Kernels built with this option will work on hardware
+with or without pointer authentication support.
In addition to exec(), keys can also be reinitialized to random values
using the PR_PAC_RESET_KEYS prctl. A bitmask of PR_PAC_APIAKEY,
diff --git a/Documentation/conf.py b/Documentation/conf.py
index 17f7cee56987..76e5eb5cb62b 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -249,11 +249,16 @@ except ImportError:
html_static_path = ['sphinx-static']
-html_context = {
- 'css_files': [
- '_static/theme_overrides.css',
- ],
-}
+html_css_files = [
+ 'theme_overrides.css',
+]
+
+if major <= 1 and minor < 8:
+ html_context = {
+ 'css_files': [
+ '_static/theme_overrides.css',
+ ],
+ }
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
diff --git a/Documentation/cpu-freq/core.rst b/Documentation/cpu-freq/core.rst
index 33cb90bd1d8f..4ceef8e7217c 100644
--- a/Documentation/cpu-freq/core.rst
+++ b/Documentation/cpu-freq/core.rst
@@ -73,12 +73,12 @@ CPUFREQ_POSTCHANGE.
The third argument is a struct cpufreq_freqs with the following
values:
-===== ===========================
-cpu number of the affected CPU
+====== ======================================
+policy a pointer to the struct cpufreq_policy
old old frequency
new new frequency
flags flags of the cpufreq driver
-===== ===========================
+====== ======================================
3. CPUFreq Table Generation with Operating Performance Point (OPP)
==================================================================
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
index cf5a208f2f10..343598c9f473 100644
--- a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
@@ -10,6 +10,9 @@ title: Amlogic specific extensions to the Synopsys Designware HDMI Controller
maintainers:
- Neil Armstrong <narmstrong@baylibre.com>
+allOf:
+ - $ref: /schemas/sound/name-prefix.yaml#
+
description: |
The Amlogic Meson Synopsys Designware Integration is composed of
- A Synopsys DesignWare HDMI Controller IP
@@ -99,6 +102,8 @@ properties:
"#sound-dai-cells":
const: 0
+ sound-name-prefix: true
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
index 851cb0781217..047fd69e0377 100644
--- a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
@@ -78,6 +78,10 @@ properties:
interrupts:
maxItems: 1
+ amlogic,canvas:
+ description: should point to a canvas provider node
+ $ref: /schemas/types.yaml#/definitions/phandle
+
power-domains:
maxItems: 1
description: phandle to the associated power domain
@@ -106,6 +110,7 @@ required:
- port@1
- "#address-cells"
- "#size-cells"
+ - amlogic,canvas
additionalProperties: false
@@ -118,6 +123,7 @@ examples:
interrupts = <3>;
#address-cells = <1>;
#size-cells = <0>;
+ amlogic,canvas = <&canvas>;
/* CVBS VDAC output port */
port@0 {
diff --git a/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml b/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
index 1faae3e323a4..5079c1cc337b 100644
--- a/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
@@ -79,6 +79,14 @@ properties:
- port@0
- port@1
+ pclk-sample:
+ description:
+ Data sampling on rising or falling edge.
+ enum:
+ - 0 # Falling edge
+ - 1 # Rising edge
+ default: 0
+
powerdown-gpios:
description:
The GPIO used to control the power down line of this device.
@@ -86,21 +94,32 @@ properties:
power-supply: true
-if:
- not:
- properties:
- compatible:
- contains:
- const: lvds-decoder
-then:
- properties:
- ports:
+allOf:
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ const: lvds-decoder
+ then:
properties:
- port@0:
+ ports:
properties:
- endpoint:
+ port@0:
properties:
- data-mapping: false
+ endpoint:
+ properties:
+ data-mapping: false
+
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ const: lvds-encoder
+ then:
+ properties:
+ pclk-sample: false
required:
- compatible
diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,dsi-csi2-tx.yaml b/Documentation/devicetree/bindings/display/bridge/renesas,dsi-csi2-tx.yaml
new file mode 100644
index 000000000000..afeeb967393d
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/renesas,dsi-csi2-tx.yaml
@@ -0,0 +1,118 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/renesas,dsi-csi2-tx.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas R-Car MIPI DSI/CSI-2 Encoder
+
+maintainers:
+ - Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+
+description: |
+ This binding describes the MIPI DSI/CSI-2 encoder embedded in the Renesas
+ R-Car V3U SoC. The encoder can operate in either DSI or CSI-2 mode, with up
+ to four data lanes.
+
+properties:
+ compatible:
+ enum:
+ - renesas,r8a779a0-dsi-csi2-tx # for V3U
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Functional clock
+ - description: DSI (and CSI-2) functional clock
+ - description: PLL reference clock
+
+ clock-names:
+ items:
+ - const: fck
+ - const: dsi
+ - const: pll
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Parallel input port
+
+ port@1:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description: DSI/CSI-2 output port
+
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ data-lanes:
+ minItems: 1
+ maxItems: 4
+
+ required:
+ - data-lanes
+
+ required:
+ - port@0
+ - port@1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - power-domains
+ - resets
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a779a0-cpg-mssr.h>
+ #include <dt-bindings/power/r8a779a0-sysc.h>
+
+ dsi0: dsi-encoder@fed80000 {
+ compatible = "renesas,r8a779a0-dsi-csi2-tx";
+ reg = <0xfed80000 0x10000>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ clocks = <&cpg CPG_MOD 415>,
+ <&cpg CPG_CORE R8A779A0_CLK_DSI>,
+ <&cpg CPG_CORE R8A779A0_CLK_CP>;
+ clock-names = "fck", "dsi", "pll";
+ resets = <&cpg 415>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dsi0_in: endpoint {
+ remote-endpoint = <&du_out_dsi0>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dsi0_out: endpoint {
+ data-lanes = <1 2>;
+ remote-endpoint = <&sn65dsi86_in>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml
index b446d0f0f1b4..48a97bb3e2e0 100644
--- a/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml
@@ -32,6 +32,9 @@ properties:
maxItems: 1
description: GPIO specifier for bridge_en pin (active high).
+ vcc-supply:
+ description: A 1.8V power supply (see regulator/regulator.yaml).
+
ports:
$ref: /schemas/graph.yaml#/properties/ports
@@ -91,7 +94,6 @@ properties:
required:
- compatible
- reg
- - enable-gpios
- ports
allOf:
@@ -133,6 +135,7 @@ examples:
reg = <0x2d>;
enable-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
+ vcc-supply = <&reg_sn65dsi83_1v8>;
ports {
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/display/msm/dp-controller.yaml b/Documentation/devicetree/bindings/display/msm/dp-controller.yaml
index 63e585f48789..5457612ab136 100644
--- a/Documentation/devicetree/bindings/display/msm/dp-controller.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dp-controller.yaml
@@ -17,6 +17,8 @@ properties:
compatible:
enum:
- qcom,sc7180-dp
+ - qcom,sc7280-dp
+ - qcom,sc7280-edp
- qcom,sc8180x-dp
- qcom,sc8180x-edp
diff --git a/Documentation/devicetree/bindings/display/msm/edp.txt b/Documentation/devicetree/bindings/display/msm/edp.txt
deleted file mode 100644
index eff9daff418c..000000000000
--- a/Documentation/devicetree/bindings/display/msm/edp.txt
+++ /dev/null
@@ -1,56 +0,0 @@
-Qualcomm Technologies Inc. adreno/snapdragon eDP output
-
-Required properties:
-- compatible:
- * "qcom,mdss-edp"
-- reg: Physical base address and length of the registers of controller and PLL
-- reg-names: The names of register regions. The following regions are required:
- * "edp"
- * "pll_base"
-- interrupts: The interrupt signal from the eDP block.
-- power-domains: Should be <&mmcc MDSS_GDSC>.
-- clocks: device clocks
- See Documentation/devicetree/bindings/clock/clock-bindings.txt for details.
-- clock-names: the following clocks are required:
- * "core"
- * "iface"
- * "mdp_core"
- * "pixel"
- * "link"
-- #clock-cells: The value should be 1.
-- vdda-supply: phandle to vdda regulator device node
-- lvl-vdd-supply: phandle to regulator device node which is used to supply power
- to HPD receiving chip
-- panel-en-gpios: GPIO pin to supply power to panel.
-- panel-hpd-gpios: GPIO pin used for eDP hpd.
-
-
-Example:
- mdss_edp: qcom,mdss_edp@fd923400 {
- compatible = "qcom,mdss-edp";
- reg-names =
- "edp",
- "pll_base";
- reg = <0xfd923400 0x700>,
- <0xfd923a00 0xd4>;
- interrupt-parent = <&mdss_mdp>;
- interrupts = <12 0>;
- power-domains = <&mmcc MDSS_GDSC>;
- clock-names =
- "core",
- "pixel",
- "iface",
- "link",
- "mdp_core";
- clocks =
- <&mmcc MDSS_EDPAUX_CLK>,
- <&mmcc MDSS_EDPPIXEL_CLK>,
- <&mmcc MDSS_AHB_CLK>,
- <&mmcc MDSS_EDPLINK_CLK>,
- <&mmcc MDSS_MDP_CLK>;
- #clock-cells = <1>;
- vdda-supply = <&pma8084_l12>;
- lvl-vdd-supply = <&lvl_vreg>;
- panel-en-gpios = <&tlmm 137 0>;
- panel-hpd-gpios = <&tlmm 103 0>;
- };
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
index f3c9395d23b6..62f5f050c1bc 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
@@ -290,6 +290,8 @@ properties:
- starry,kr070pe2t
# Starry 12.2" (1920x1200 pixels) TFT LCD panel
- starry,kr122ea0sra
+ # Team Source Display Technology TST043015CMHX 4.3" WQVGA TFT LCD panel
+ - team-source-display,tst043015cmhx
# Tianma Micro-electronics TM070JDHG30 7.0" WXGA TFT LCD panel
- tianma,tm070jdhg30
# Tianma Micro-electronics TM070JVHG33 7.0" WXGA TFT LCD panel
diff --git a/Documentation/devicetree/bindings/display/sprd/sprd,display-subsystem.yaml b/Documentation/devicetree/bindings/display/sprd/sprd,display-subsystem.yaml
new file mode 100644
index 000000000000..3d107e9434be
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/sprd/sprd,display-subsystem.yaml
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/sprd/sprd,display-subsystem.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Unisoc DRM master device
+
+maintainers:
+ - Kevin Tang <kevin.tang@unisoc.com>
+
+description: |
+ The Unisoc DRM master device is a virtual device needed to list all
+ DPU devices or other display interface nodes that comprise the
+ graphics subsystem.
+
+ Unisoc's display pipeline has several components, as described below:
+ multiple display controllers and their corresponding physical interfaces.
+ For different display scenarios, dpu0 and dpu1 may be bound to different
+ encoders.
+
+ E.g:
+ dpu0 and dpu1 both bound to DSI for a dual mipi-dsi display;
+ dpu0 bound to DSI for the primary display, and dpu1 bound to DP for an external display.
+
+ +-----------------------------------------+
+ | |
+ | +---------+ |
+ +----+ | +----+ +---------+ |DPHY/CPHY| | +------+
+ | +----->+dpu0+--->+MIPI|DSI +--->+Combo +----->+Panel0|
+ |AXI | | +----+ +---------+ +---------+ | +------+
+ | | | ^ |
+ | | | | |
+ | | | +-----------+ |
+ | | | | |
+ |APB | | +--+-+ +-----------+ +---+ | +------+
+ | +----->+dpu1+--->+DisplayPort+--->+PHY+--------->+Panel1|
+ | | | +----+ +-----------+ +---+ | +------+
+ +----+ | |
+ +-----------------------------------------+
+
+properties:
+ compatible:
+ const: sprd,display-subsystem
+
+ ports:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description:
+ Should contain a list of phandles pointing to the display interface
+ ports of the DPU devices.
+
+required:
+ - compatible
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ display-subsystem {
+ compatible = "sprd,display-subsystem";
+ ports = <&dpu_out>;
+ };
+
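For the dual-DPU scenarios described above, the ports phandle-array simply
lists the output port of every DPU; a sketch with hypothetical &dpu0_out and
&dpu1_out labels::

    display-subsystem {
        compatible = "sprd,display-subsystem";
        /* dpu0 drives the primary panel, dpu1 an external display. */
        ports = <&dpu0_out>, <&dpu1_out>;
    };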
diff --git a/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dpu.yaml b/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dpu.yaml
new file mode 100644
index 000000000000..4ebea60b8c5b
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dpu.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/sprd/sprd,sharkl3-dpu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Unisoc Sharkl3 Display Processor Unit (DPU)
+
+maintainers:
+ - Kevin Tang <kevin.tang@unisoc.com>
+
+description: |
+ DPU (Display Processor Unit) is the Display Controller for the Unisoc SoCs
+ which transfers the image data from a video memory buffer to an internal
+ LCD interface.
+
+properties:
+ compatible:
+ const: sprd,sharkl3-dpu
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ minItems: 2
+
+ clock-names:
+ items:
+ - const: clk_src_128m
+ - const: clk_src_384m
+
+ power-domains:
+ maxItems: 1
+
+ iommus:
+ maxItems: 1
+
+ port:
+ type: object
+ description:
+ A port node with endpoint definitions as defined in
+ Documentation/devicetree/bindings/media/video-interfaces.txt.
+ That port should be the output endpoint, usually output to
+ the associated DSI.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/sprd,sc9860-clk.h>
+ dpu: dpu@63000000 {
+ compatible = "sprd,sharkl3-dpu";
+ reg = <0x63000000 0x1000>;
+ interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "clk_src_128m", "clk_src_384m";
+
+ clocks = <&pll CLK_TWPLL_128M>,
+ <&pll CLK_TWPLL_384M>;
+
+ dpu_port: port {
+ dpu_out: endpoint {
+ remote-endpoint = <&dsi_in>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dsi-host.yaml b/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dsi-host.yaml
new file mode 100644
index 000000000000..bc5594d18643
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dsi-host.yaml
@@ -0,0 +1,88 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/sprd/sprd,sharkl3-dsi-host.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Unisoc MIPI DSI Controller
+
+maintainers:
+ - Kevin Tang <kevin.tang@unisoc.com>
+
+properties:
+ compatible:
+ const: sprd,sharkl3-dsi-host
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 2
+
+ clocks:
+ minItems: 1
+
+ clock-names:
+ items:
+ - const: clk_src_96m
+
+ power-domains:
+ maxItems: 1
+
+ ports:
+ type: object
+
+ properties:
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ port@0:
+ type: object
+ description:
+ A port node with endpoint definitions as defined in
+ Documentation/devicetree/bindings/media/video-interfaces.txt.
+ That port should be the input endpoint, usually coming from
+ the associated DPU.
+
+ required:
+ - "#address-cells"
+ - "#size-cells"
+ - port@0
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/sprd,sc9860-clk.h>
+ dsi: dsi@63100000 {
+ compatible = "sprd,sharkl3-dsi-host";
+ reg = <0x63100000 0x1000>;
+ interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "clk_src_96m";
+ clocks = <&pll CLK_TWPLL_96M>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ dsi_in: endpoint {
+ remote-endpoint = <&dpu_out>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/i2c/apple,i2c.yaml b/Documentation/devicetree/bindings/i2c/apple,i2c.yaml
index 22fc8483256f..82b953181a52 100644
--- a/Documentation/devicetree/bindings/i2c/apple,i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/apple,i2c.yaml
@@ -20,9 +20,9 @@ allOf:
properties:
compatible:
- enum:
- - apple,t8103-i2c
- - apple,i2c
+ items:
+ - const: apple,t8103-i2c
+ - const: apple,i2c
reg:
maxItems: 1
@@ -51,7 +51,7 @@ unevaluatedProperties: false
examples:
- |
i2c@35010000 {
- compatible = "apple,t8103-i2c";
+ compatible = "apple,t8103-i2c", "apple,i2c";
reg = <0x35010000 0x4000>;
interrupt-parent = <&aic>;
interrupts = <0 627 4>;
diff --git a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml
index 29b9447f3b84..fe0c89edf7c1 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml
@@ -17,9 +17,10 @@ properties:
oneOf:
- enum:
- fsl,imx7ulp-lpi2c
- - fsl,imx8qm-lpi2c
- items:
- - const: fsl,imx8qxp-lpi2c
+ - enum:
+ - fsl,imx8qxp-lpi2c
+ - fsl,imx8qm-lpi2c
- const: fsl,imx7ulp-lpi2c
reg:
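With the compatible list restructured as above, an i.MX8QM or i.MX8QXP node
keeps the i.MX7ULP string as a fallback; a minimal sketch (register address
is illustrative)::

    i2c@5a800000 {
        compatible = "fsl,imx8qm-lpi2c", "fsl,imx7ulp-lpi2c";
        reg = <0x5a800000 0x4000>;
        #address-cells = <1>;
        #size-cells = <0>;
    };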
diff --git a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml
index c65921e66dc1..81c87295912c 100644
--- a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml
@@ -136,7 +136,7 @@ examples:
samsung,syscon-phandle = <&pmu_system_controller>;
/* NTC thermistor is a hwmon device */
- ncp15wb473 {
+ thermistor {
compatible = "murata,ncp15wb473";
pullup-uv = <1800000>;
pullup-ohm = <47000>;
diff --git a/Documentation/devicetree/bindings/input/gpio-keys.yaml b/Documentation/devicetree/bindings/input/gpio-keys.yaml
index 060a309ff8e7..dbe7ecc19ccb 100644
--- a/Documentation/devicetree/bindings/input/gpio-keys.yaml
+++ b/Documentation/devicetree/bindings/input/gpio-keys.yaml
@@ -142,7 +142,7 @@ examples:
down {
label = "GPIO Key DOWN";
linux,code = <108>;
- interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
};
};
diff --git a/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml b/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml
index 877183cf4278..1ef849dc74d7 100644
--- a/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml
+++ b/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml
@@ -79,6 +79,8 @@ properties:
properties:
data-lanes:
+ description:
+ Note that 'fsl,imx7-mipi-csi2' only supports up to 2 data lanes.
items:
minItems: 1
maxItems: 4
@@ -91,18 +93,6 @@ properties:
required:
- data-lanes
- allOf:
- - if:
- properties:
- compatible:
- contains:
- const: fsl,imx7-mipi-csi2
- then:
- properties:
- data-lanes:
- items:
- maxItems: 2
-
port@1:
$ref: /schemas/graph.yaml#/properties/port
description:
diff --git a/Documentation/devicetree/bindings/net/ethernet-phy.yaml b/Documentation/devicetree/bindings/net/ethernet-phy.yaml
index 2766fe45bb98..ee42328a109d 100644
--- a/Documentation/devicetree/bindings/net/ethernet-phy.yaml
+++ b/Documentation/devicetree/bindings/net/ethernet-phy.yaml
@@ -91,6 +91,14 @@ properties:
compensate for the board being designed with the lanes
swapped.
+ enet-phy-lane-no-swap:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ If set, indicates that the PHY will disable swapping of the
+ TX/RX lanes. This property allows the PHY to work correctly after
+ e.g. a wrong bootstrap configuration caused by issues in the PCB
+ layout design.
+
eee-broken-100tx:
$ref: /schemas/types.yaml#/definitions/flag
description:
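A minimal sketch of the new flag on a PHY node; the MDIO address is
hypothetical::

    mdio {
        #address-cells = <1>;
        #size-cells = <0>;

        ethernet-phy@1 {
            reg = <1>;
            /* Keep the TX/RX lanes unswapped regardless of bootstrap pins. */
            enet-phy-lane-no-swap;
        };
    };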
diff --git a/Documentation/devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml b/Documentation/devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml
index 04d5654efb38..79906519c652 100644
--- a/Documentation/devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml
+++ b/Documentation/devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml
@@ -29,7 +29,7 @@ properties:
- PHY_TYPE_PCIE
- PHY_TYPE_SATA
- PHY_TYPE_SGMII
- - PHY_TYPE_USB
+ - PHY_TYPE_USB3
- description: The PHY instance
minimum: 0
maximum: 1 # for DP, SATA or USB
diff --git a/Documentation/devicetree/bindings/power/supply/bq25980.yaml b/Documentation/devicetree/bindings/power/supply/bq25980.yaml
index 06eca6667f67..8367a1fd4057 100644
--- a/Documentation/devicetree/bindings/power/supply/bq25980.yaml
+++ b/Documentation/devicetree/bindings/power/supply/bq25980.yaml
@@ -105,7 +105,7 @@ examples:
reg = <0x65>;
interrupt-parent = <&gpio1>;
interrupts = <16 IRQ_TYPE_EDGE_FALLING>;
- ti,watchdog-timer = <0>;
+ ti,watchdog-timeout-ms = <0>;
ti,sc-ocp-limit-microamp = <2000000>;
ti,sc-ovp-limit-microvolt = <17800000>;
monitored-battery = <&bat>;
diff --git a/Documentation/devicetree/bindings/regulator/samsung,s5m8767.yaml b/Documentation/devicetree/bindings/regulator/samsung,s5m8767.yaml
index 80a63d47790a..c98929a213e9 100644
--- a/Documentation/devicetree/bindings/regulator/samsung,s5m8767.yaml
+++ b/Documentation/devicetree/bindings/regulator/samsung,s5m8767.yaml
@@ -51,6 +51,19 @@ patternProperties:
description:
Properties for single BUCK regulator.
+ properties:
+ op_mode:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3]
+ default: 1
+ description: |
+ Describes the different operating modes of the regulator with respect
+ to power-mode changes in the SoC. The different possible values are:
+ 0 - always off mode
+ 1 - on in normal mode
+ 2 - low power mode
+ 3 - suspend mode
+
required:
- regulator-name
@@ -63,6 +76,18 @@ patternProperties:
Properties for single BUCK regulator.
properties:
+ op_mode:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3]
+ default: 1
+ description: |
+ Describes the different operating modes of the regulator with respect
+ to power-mode changes in the SoC. The different possible values are:
+ 0 - always off mode
+ 1 - on in normal mode
+ 2 - low power mode
+ 3 - suspend mode
+
s5m8767,pmic-ext-control-gpios:
maxItems: 1
description: |
diff --git a/Documentation/devicetree/bindings/sound/wlf,wm8962.yaml b/Documentation/devicetree/bindings/sound/wlf,wm8962.yaml
index 0e6249d7c133..5e172e9462b9 100644
--- a/Documentation/devicetree/bindings/sound/wlf,wm8962.yaml
+++ b/Documentation/devicetree/bindings/sound/wlf,wm8962.yaml
@@ -19,6 +19,9 @@ properties:
clocks:
maxItems: 1
+ interrupts:
+ maxItems: 1
+
"#sound-dai-cells":
const: 0
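With interrupts now documented, a codec node can describe its IRQ wiring; a
hypothetical sketch (clock, GPIO controller and line number are illustrative)::

    audio-codec@1a {
        compatible = "wlf,wm8962";
        reg = <0x1a>;
        clocks = <&clk_codec>;
        interrupt-parent = <&gpio7>;
        interrupts = <23 IRQ_TYPE_LEVEL_LOW>;
        #sound-dai-cells = <0>;
    };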
diff --git a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
index 7f987e79337c..52a78a2e362e 100644
--- a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
@@ -33,6 +33,7 @@ properties:
- rockchip,rk3328-spi
- rockchip,rk3368-spi
- rockchip,rk3399-spi
+ - rockchip,rk3568-spi
- rockchip,rv1126-spi
- const: rockchip,rk3066-spi
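A controller using the new rk3568 compatible keeps the rk3066 fallback that
the schema requires; a sketch with an illustrative address::

    spi@fe610000 {
        compatible = "rockchip,rk3568-spi", "rockchip,rk3066-spi";
        reg = <0xfe610000 0x1000>;
        #address-cells = <1>;
        #size-cells = <0>;
    };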
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 7463870d0922..03777900be8b 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -1236,6 +1236,8 @@ patternProperties:
description: Truly Semiconductors Limited
"^visionox,.*":
description: Visionox
+ "^team-source-display,.*":
+ description: Shenzhen Team Source Display Technology Co., Ltd. (TSD)
"^tsd,.*":
description: Theobroma Systems Design und Consulting GmbH
"^tyan,.*":
diff --git a/Documentation/filesystems/cifs/ksmbd.rst b/Documentation/filesystems/cifs/ksmbd.rst
index a1326157d53f..b0d354fd8066 100644
--- a/Documentation/filesystems/cifs/ksmbd.rst
+++ b/Documentation/filesystems/cifs/ksmbd.rst
@@ -50,11 +50,11 @@ ksmbd.mountd (user space daemon)
--------------------------------
ksmbd.mountd is userspace process to, transfer user account and password that
-are registered using ksmbd.adduser(part of utils for user space). Further it
+are registered using ksmbd.adduser (part of utils for user space). Further it
allows sharing information parameters that parsed from smb.conf to ksmbd in
kernel. For the execution part it has a daemon which is continuously running
and connected to the kernel interface using netlink socket, it waits for the
-requests(dcerpc and share/user info). It handles RPC calls (at a minimum few
+requests (dcerpc and share/user info). It handles RPC calls (at a minimum few
dozen) that are most important for file server from NetShareEnum and
NetServerGetInfo. Complete DCE/RPC response is prepared from the user space
and passed over to the associated kernel thread for the client.
@@ -154,11 +154,11 @@ Each layer
1. Enable all component prints
# sudo ksmbd.control -d "all"
-2. Enable one of components(smb, auth, vfs, oplock, ipc, conn, rdma)
+2. Enable one of components (smb, auth, vfs, oplock, ipc, conn, rdma)
# sudo ksmbd.control -d "smb"
-3. Show what prints are enable.
- # cat/sys/class/ksmbd-control/debug
+3. Show what prints are enabled.
+ # cat /sys/class/ksmbd-control/debug
[smb] auth vfs oplock ipc conn [rdma]
4. Disable prints:
diff --git a/Documentation/filesystems/netfs_library.rst b/Documentation/filesystems/netfs_library.rst
index bb68d39f03b7..375baca7edcd 100644
--- a/Documentation/filesystems/netfs_library.rst
+++ b/Documentation/filesystems/netfs_library.rst
@@ -1,7 +1,7 @@
.. SPDX-License-Identifier: GPL-2.0
=================================
-NETWORK FILESYSTEM HELPER LIBRARY
+Network Filesystem Helper Library
=================================
.. Contents:
@@ -37,22 +37,22 @@ into a common call framework.
The following services are provided:
- * Handles transparent huge pages (THPs).
+ * Handle folios that span multiple pages.
- * Insulates the netfs from VM interface changes.
+ * Insulate the netfs from VM interface changes.
- * Allows the netfs to arbitrarily split reads up into pieces, even ones that
- don't match page sizes or page alignments and that may cross pages.
+ * Allow the netfs to arbitrarily split reads up into pieces, even ones that
+ don't match folio sizes or folio alignments and that may cross folios.
- * Allows the netfs to expand a readahead request in both directions to meet
- its needs.
+ * Allow the netfs to expand a readahead request in both directions to meet its
+ needs.
- * Allows the netfs to partially fulfil a read, which will then be resubmitted.
+ * Allow the netfs to partially fulfil a read, which will then be resubmitted.
- * Handles local caching, allowing cached data and server-read data to be
+ * Handle local caching, allowing cached data and server-read data to be
interleaved for a single request.
- * Handles clearing of bufferage that aren't on the server.
+ * Handle clearing of bufferage that isn't on the server.
* Handle retrying of reads that failed, switching reads from the cache to the
server as necessary.
@@ -70,22 +70,22 @@ Read Helper Functions
Three read helpers are provided::
- * void netfs_readahead(struct readahead_control *ractl,
- const struct netfs_read_request_ops *ops,
- void *netfs_priv);``
- * int netfs_readpage(struct file *file,
- struct page *page,
- const struct netfs_read_request_ops *ops,
- void *netfs_priv);
- * int netfs_write_begin(struct file *file,
- struct address_space *mapping,
- loff_t pos,
- unsigned int len,
- unsigned int flags,
- struct page **_page,
- void **_fsdata,
- const struct netfs_read_request_ops *ops,
- void *netfs_priv);
+ void netfs_readahead(struct readahead_control *ractl,
+ const struct netfs_read_request_ops *ops,
+ void *netfs_priv);
+ int netfs_readpage(struct file *file,
+ struct folio *folio,
+ const struct netfs_read_request_ops *ops,
+ void *netfs_priv);
+ int netfs_write_begin(struct file *file,
+ struct address_space *mapping,
+ loff_t pos,
+ unsigned int len,
+ unsigned int flags,
+ struct folio **_folio,
+ void **_fsdata,
+ const struct netfs_read_request_ops *ops,
+ void *netfs_priv);
Each corresponds to a VM operation, with the addition of a couple of parameters
for the use of the read helpers:
@@ -103,8 +103,8 @@ Both of these values will be stored into the read request structure.
For ->readahead() and ->readpage(), the network filesystem should just jump
into the corresponding read helper; whereas for ->write_begin(), it may be a
little more complicated as the network filesystem might want to flush
-conflicting writes or track dirty data and needs to put the acquired page if an
-error occurs after calling the helper.
+conflicting writes or track dirty data and needs to put the acquired folio if
+an error occurs after calling the helper.
The helpers manage the read request, calling back into the network filesystem
through the suppplied table of operations. Waits will be performed as
@@ -253,7 +253,7 @@ through which it can issue requests and negotiate::
void (*issue_op)(struct netfs_read_subrequest *subreq);
bool (*is_still_valid)(struct netfs_read_request *rreq);
int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
- struct page *page, void **_fsdata);
+ struct folio *folio, void **_fsdata);
void (*done)(struct netfs_read_request *rreq);
void (*cleanup)(struct address_space *mapping, void *netfs_priv);
};
@@ -313,13 +313,14 @@ The operations are as follows:
There is no return value; the netfs_subreq_terminated() function should be
called to indicate whether or not the operation succeeded and how much data
- it transferred. The filesystem also should not deal with setting pages
+ it transferred. The filesystem also should not deal with setting folios
uptodate, unlocking them or dropping their refs - the helpers need to deal
with this as they have to coordinate with copying to the local cache.
- Note that the helpers have the pages locked, but not pinned. It is possible
- to use the ITER_XARRAY iov iterator to refer to the range of the inode that
- is being operated upon without the need to allocate large bvec tables.
+ Note that the helpers have the folios locked, but not pinned. It is
+ possible to use the ITER_XARRAY iov iterator to refer to the range of the
+ inode that is being operated upon without the need to allocate large bvec
+ tables.
* ``is_still_valid()``
@@ -330,15 +331,15 @@ The operations are as follows:
* ``check_write_begin()``
[Optional] This is called from the netfs_write_begin() helper once it has
- allocated/grabbed the page to be modified to allow the filesystem to flush
+ allocated/grabbed the folio to be modified to allow the filesystem to flush
conflicting state before allowing it to be modified.
- It should return 0 if everything is now fine, -EAGAIN if the page should be
+ It should return 0 if everything is now fine, -EAGAIN if the folio should be
regrabbed and any other error code to abort the operation.
* ``done``
- [Optional] This is called after the pages in the request have all been
+ [Optional] This is called after the folios in the request have all been
unlocked (and marked uptodate if applicable).
* ``cleanup``
@@ -390,7 +391,7 @@ The read helpers work by the following general procedure:
* If NETFS_SREQ_CLEAR_TAIL was set, a short read will be cleared to the
end of the slice instead of reissuing.
- * Once the data is read, the pages that have been fully read/cleared:
+ * Once the data is read, the folios that have been fully read/cleared:
* Will be marked uptodate.
@@ -398,11 +399,11 @@ The read helpers work by the following general procedure:
* Unlocked
- * Any pages that need writing to the cache will then have DIO writes issued.
+ * Any folios that need writing to the cache will then have DIO writes issued.
* Synchronous operations will wait for reading to be complete.
- * Writes to the cache will proceed asynchronously and the pages will have the
+ * Writes to the cache will proceed asynchronously and the folios will have the
PG_fscache mark removed when that completes.
* The request structures will be cleaned up when everything has completed.
@@ -452,6 +453,9 @@ operation table looks like the following::
netfs_io_terminated_t term_func,
void *term_func_priv);
+ int (*prepare_write)(struct netfs_cache_resources *cres,
+ loff_t *_start, size_t *_len, loff_t i_size);
+
int (*write)(struct netfs_cache_resources *cres,
loff_t start_pos,
struct iov_iter *iter,
@@ -509,6 +513,14 @@ The methods defined in the table are:
indicating whether the termination is definitely happening in the caller's
context.
+ * ``prepare_write()``
+
+ [Required] Called to adjust a write to the cache and check that there is
+ sufficient space in the cache. The start and length values indicate the
+ size of the write that netfslib is proposing, and this can be adjusted by
+ the cache to respect DIO boundaries. The file size is passed for
+ information.
+
* ``write()``
[Required] Called to write to the cache. The start file offset is given
@@ -525,4 +537,9 @@ not the read request structure as they could be used in other situations where
there isn't a read request structure as well, such as writing dirty data to the
cache.
+
+API Function Reference
+======================
+
.. kernel-doc:: include/linux/netfs.h
+.. kernel-doc:: fs/netfs/read_helper.c
diff --git a/Documentation/gpu/amdgpu-dc.rst b/Documentation/gpu/amdgpu-dc.rst
deleted file mode 100644
index f7ff7e1309de..000000000000
--- a/Documentation/gpu/amdgpu-dc.rst
+++ /dev/null
@@ -1,74 +0,0 @@
-===================================
-drm/amd/display - Display Core (DC)
-===================================
-
-*placeholder - general description of supported platforms, what dc is, etc.*
-
-Because it is partially shared with other operating systems, the Display Core
-Driver is divided in two pieces.
-
-1. **Display Core (DC)** contains the OS-agnostic components. Things like
- hardware programming and resource management are handled here.
-2. **Display Manager (DM)** contains the OS-dependent components. Hooks to the
- amdgpu base driver and DRM are implemented here.
-
-It doesn't help that the entire package is frequently referred to as DC. But
-with the context in mind, it should be clear.
-
-When CONFIG_DRM_AMD_DC is enabled, DC will be initialized by default for
-supported ASICs. To force disable, set `amdgpu.dc=0` on kernel command line.
-Likewise, to force enable on unsupported ASICs, set `amdgpu.dc=1`.
-
-To determine if DC is loaded, search dmesg for the following entry:
-
-``Display Core initialized with <version number here>``
-
-AMDgpu Display Manager
-======================
-
-.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
- :doc: overview
-
-.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
- :internal:
-
-Lifecycle
----------
-
-.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
- :doc: DM Lifecycle
-
-.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
- :functions: dm_hw_init dm_hw_fini
-
-Interrupts
-----------
-
-.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
- :doc: overview
-
-.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
- :internal:
-
-.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
- :functions: register_hpd_handlers dm_crtc_high_irq dm_pflip_high_irq
-
-Atomic Implementation
----------------------
-
-.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
- :doc: atomic
-
-.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
- :functions: amdgpu_dm_atomic_check amdgpu_dm_atomic_commit_tail
-
-Display Core
-============
-
-**WIP**
-
-FreeSync Video
---------------
-
-.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
- :doc: FreeSync Video
diff --git a/Documentation/gpu/amdgpu.rst b/Documentation/gpu/amdgpu.rst
deleted file mode 100644
index 8ba72e898099..000000000000
--- a/Documentation/gpu/amdgpu.rst
+++ /dev/null
@@ -1,324 +0,0 @@
-=========================
- drm/amdgpu AMDgpu driver
-=========================
-
-The drm/amdgpu driver supports all AMD Radeon GPUs based on the Graphics Core
-Next (GCN) architecture.
-
-Module Parameters
-=================
-
-The amdgpu driver supports the following module parameters:
-
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
-
-Core Driver Infrastructure
-==========================
-
-This section covers core driver infrastructure.
-
-.. _amdgpu_memory_domains:
-
-Memory Domains
---------------
-
-.. kernel-doc:: include/uapi/drm/amdgpu_drm.h
- :doc: memory domains
-
-Buffer Objects
---------------
-
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
- :doc: amdgpu_object
-
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
- :internal:
-
-PRIME Buffer Sharing
---------------------
-
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
- :doc: PRIME Buffer Sharing
-
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
- :internal:
-
-MMU Notifier
-------------
-
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
- :doc: MMU Notifier
-
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
- :internal:
-
-AMDGPU Virtual Memory
----------------------
-
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
- :doc: GPUVM
-
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
- :internal:
-
-Interrupt Handling
-------------------
-
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
- :doc: Interrupt Handling
-
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
- :internal:
-
-IP Blocks
-------------------
-
-.. kernel-doc:: drivers/gpu/drm/amd/include/amd_shared.h
- :doc: IP Blocks
-
-.. kernel-doc:: drivers/gpu/drm/amd/include/amd_shared.h
- :identifiers: amd_ip_block_type amd_ip_funcs
-
-AMDGPU XGMI Support
-===================
-
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
-
-AMDGPU RAS Support
-==================
-
-The AMDGPU RAS interfaces are exposed via sysfs (for informational queries) and
-debugfs (for error injection).
-
-RAS debugfs/sysfs Control and Error Injection Interfaces
---------------------------------------------------------
-
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
- :doc: AMDGPU RAS debugfs control interface
-
-RAS Reboot Behavior for Unrecoverable Errors
---------------------------------------------------------
-
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
- :doc: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
-
-RAS Error Count sysfs Interface
--------------------------------
-
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
- :doc: AMDGPU RAS sysfs Error Count Interface
-
-RAS EEPROM debugfs Interface
-----------------------------
-
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
- :doc: AMDGPU RAS debugfs EEPROM table reset interface
-
-RAS VRAM Bad Pages sysfs Interface
-----------------------------------
-
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
- :doc: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
-
-Sample Code
------------
-Sample code for testing error injection can be found here:
-https://cgit.freedesktop.org/mesa/drm/tree/tests/amdgpu/ras_tests.c
-
-This is part of the libdrm amdgpu unit tests which cover several areas of the GPU.
-There are four sets of tests:
-
-RAS Basic Test
-
-The test verifies the RAS feature enabled status and makes sure the necessary sysfs and debugfs files
-are present.
-
-RAS Query Test
-
-This test checks the RAS availability and enablement status for each supported IP block as well as
-the error counts.
-
-RAS Inject Test
-
-This test injects errors for each IP.
-
-RAS Disable Test
-
-This test tests disabling of RAS features for each IP block.
-
-
-GPU Power/Thermal Controls and Monitoring
-=========================================
-
-This section covers hwmon and power/thermal controls.
-
-HWMON Interfaces
-----------------
-
-.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
- :doc: hwmon
-
-GPU sysfs Power State Interfaces
---------------------------------
-
-GPU power controls are exposed via sysfs files.
-
-power_dpm_state
-~~~~~~~~~~~~~~~
-
-.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
- :doc: power_dpm_state
-
-power_dpm_force_performance_level
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
- :doc: power_dpm_force_performance_level
-
-pp_table
-~~~~~~~~
-
-.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
- :doc: pp_table
-
-pp_od_clk_voltage
-~~~~~~~~~~~~~~~~~
-
-.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
- :doc: pp_od_clk_voltage
-
-pp_dpm_*
-~~~~~~~~
-
-.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
- :doc: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
-
-pp_power_profile_mode
-~~~~~~~~~~~~~~~~~~~~~
-
-.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
- :doc: pp_power_profile_mode
-
-\*_busy_percent
-~~~~~~~~~~~~~~~
-
-.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
- :doc: gpu_busy_percent
-
-.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
- :doc: mem_busy_percent
-
-gpu_metrics
-~~~~~~~~~~~~~~~~~~~~~
-
-.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
- :doc: gpu_metrics
-
-GPU Product Information
-=======================
-
-Information about the GPU can be obtained on certain cards
-via sysfs
-
-product_name
-------------
-
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
- :doc: product_name
-
-product_number
---------------
-
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
- :doc: product_name
-
-serial_number
--------------
-
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
- :doc: serial_number
-
-unique_id
----------
-
-.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
- :doc: unique_id
-
-GPU Memory Usage Information
-============================
-
-Various memory accounting can be accessed via sysfs
-
-mem_info_vram_total
--------------------
-
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
- :doc: mem_info_vram_total
-
-mem_info_vram_used
-------------------
-
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
- :doc: mem_info_vram_used
-
-mem_info_vis_vram_total
------------------------
-
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
- :doc: mem_info_vis_vram_total
-
-mem_info_vis_vram_used
-----------------------
-
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
- :doc: mem_info_vis_vram_used
-
-mem_info_gtt_total
-------------------
-
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
- :doc: mem_info_gtt_total
-
-mem_info_gtt_used
------------------
-
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
- :doc: mem_info_gtt_used
-
-PCIe Accounting Information
-===========================
-
-pcie_bw
--------
-
-.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
- :doc: pcie_bw
-
-pcie_replay_count
------------------
-
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
- :doc: pcie_replay_count
-
-GPU SmartShift Information
-==========================
-
-GPU SmartShift information via sysfs
-
-smartshift_apu_power
---------------------
-
-.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
- :doc: smartshift_apu_power
-
-smartshift_dgpu_power
----------------------
-
-.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
- :doc: smartshift_dgpu_power
-
-smartshift_bias
----------------
-
-.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
- :doc: smartshift_bias
diff --git a/Documentation/gpu/amdgpu/amdgpu-glossary.rst b/Documentation/gpu/amdgpu/amdgpu-glossary.rst
new file mode 100644
index 000000000000..859dcec6c6f9
--- /dev/null
+++ b/Documentation/gpu/amdgpu/amdgpu-glossary.rst
@@ -0,0 +1,87 @@
+===============
+AMDGPU Glossary
+===============
+
+Here you can find some generic acronyms used in the amdgpu driver. Notice that
+we have a dedicated glossary for Display Core at
+'Documentation/gpu/amdgpu/display/dc-glossary.rst'.
+
+.. glossary::
+
+ CP
+ Command Processor
+
+ CPLIB
+ Content Protection Library
+
+ DFS
+ Digital Frequency Synthesizer
+
+ ECP
+ Enhanced Content Protection
+
+ EOP
+ End Of Pipe/Pipeline
+
+ GC
+ Graphics and Compute
+
+ GMC
+ Graphic Memory Controller
+
+ IH
+ Interrupt Handler
+
+ HQD
+ Hardware Queue Descriptor
+
+ IB
+ Indirect Buffer
+
+ IP
+ Intellectual Property blocks
+
+ KCQ
+ Kernel Compute Queue
+
+ KGQ
+ Kernel Graphics Queue
+
+ KIQ
+ Kernel Interface Queue
+
+ MEC
+ MicroEngine Compute
+
+ MES
+ MicroEngine Scheduler
+
+ MMHUB
+ Multi-Media HUB
+
+ MQD
+ Memory Queue Descriptor
+
+ PPLib
+ PowerPlay Library - PowerPlay is the power management component.
+
+ PSP
+ Platform Security Processor
+
+ RCL
+ RunList Controller
+
+ SDMA
+ System DMA
+
+ SMU
+ System Management Unit
+
+ SS
+ Spread Spectrum
+
+ VCE
+ Video Compression Engine
+
+ VCN
+ Video Codec Next
diff --git a/Documentation/gpu/amdgpu/display/config_example.svg b/Documentation/gpu/amdgpu/display/config_example.svg
new file mode 100644
index 000000000000..cdac9858601c
--- /dev/null
+++ b/Documentation/gpu/amdgpu/display/config_example.svg
@@ -0,0 +1,414 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="144.63406mm"
+ height="66.596054mm"
+ viewBox="0 0 144.15195 66.596054"
+ version="1.1"
+ id="svg8"
+ inkscape:version="0.92.5 (2060ec1f9f, 2020-04-08)"
+ sodipodi:docname="config_example.svg">
+ <defs
+ id="defs2">
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path4547"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-3"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4547-6"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-3-5"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4547-6-3"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-3-5-0"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4547-6-3-6"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-3-5-7"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4547-6-3-3"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="0.98994949"
+ inkscape:cx="518.91791"
+ inkscape:cy="172.50112"
+ inkscape:document-units="mm"
+ inkscape:current-layer="layer1"
+ showgrid="true"
+ viewbox-width="209.3"
+ inkscape:window-width="3840"
+ inkscape:window-height="1136"
+ inkscape:window-x="0"
+ inkscape:window-y="27"
+ inkscape:window-maximized="1"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0">
+ <inkscape:grid
+ type="xygrid"
+ id="grid817"
+ originx="4.390216"
+ originy="-208.88856" />
+ </sodipodi:namedview>
+ <metadata
+ id="metadata5">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title></dc:title>
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="Layer 1"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(4.4048992,-21.515392)">
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.26458335px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ d="m 20.816662,35.062492 h 23.8125 v -5.291667 h 5.291667 v 5.291667 h 10.583334 v -5.291667 h 5.291667 v 5.291667 h 2.645833 v -5.291667 h 5.291667 v 5.291667 h 66.14583"
+ id="path4522"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.26458335px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ d="m 20.816662,48.291659 h 7.9375 v -5.291667 h 5.291667 v 5.291667 h 58.208335 v -5.291667 h 5.291666 v 5.291667 h 42.33333"
+ id="path4524"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.26458335px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ d="m 20.816662,61.520826 h 26.458334 v -5.291667 h 44.979168 v 5.291667 h 47.624996"
+ id="path4526"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.26458335px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ d="M 20.816662,72.104159 H 139.87916"
+ id="path4528"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.26458335px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ d="M 20.816662,77.395826 H 139.87916"
+ id="path4530"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.26458335px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ d="M 20.816662,82.687493 H 139.87916"
+ id="path4532"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.26458335px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ d="M 20.816662,87.97916 H 139.87916"
+ id="path4534"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#ff0000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.52916668, 0.52916668;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#Arrow1Mend)"
+ d="m 47.274996,29.770826 c 3.836215,14.933158 3.472151,27.586643 0.264583,41.010418"
+ id="path4536"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#ff0000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.52916669, 0.52916669;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#Arrow1Mend-3)"
+ d="m 63.149996,29.770826 c 3.836214,14.933158 5.059652,27.586642 1.852084,41.010418"
+ id="path4536-7"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#ff0000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.5291667, 0.5291667;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#Arrow1Mend-3-5)"
+ d="m 71.087496,29.770825 c 3.836214,14.933158 5.059652,27.586643 1.852084,41.010419"
+ id="path4536-7-5"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:10.58333397px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335"
+ x="59.359009"
+ y="24.195677"
+ id="text6572"><tspan
+ sodipodi:role="line"
+ x="59.359009"
+ y="24.195677"
+ style="font-size:3.52777791px;line-height:5.39999962;text-align:center;text-anchor:middle;stroke-width:0.26458335"
+ id="tspan6574">Configurations</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:10.58333397px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335"
+ x="46.825508"
+ y="28.542402"
+ id="text6572-6"><tspan
+ sodipodi:role="line"
+ x="46.825508"
+ y="28.542402"
+ style="font-size:3.52777815px;line-height:5.39999962;text-align:center;text-anchor:middle;fill:#008000;stroke-width:0.26458335"
+ id="tspan6574-2">A</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:10.58333397px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335"
+ x="62.8895"
+ y="28.825886"
+ id="text6572-6-2"><tspan
+ sodipodi:role="line"
+ x="62.8895"
+ y="28.825886"
+ style="font-size:3.52777839px;line-height:5.39999962;text-align:center;text-anchor:middle;fill:#0000ff;stroke-width:0.26458335"
+ id="tspan6574-2-7">B</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:10.58333397px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335"
+ x="70.827003"
+ y="29.109362"
+ id="text6572-6-2-3"><tspan
+ sodipodi:role="line"
+ x="70.827003"
+ y="29.109362"
+ style="font-size:3.52777863px;line-height:5.39999962;text-align:center;text-anchor:middle;fill:#c87137;stroke-width:0.26458335"
+ id="tspan6574-2-7-6">C</tspan></text>
+ <path
+ style="fill:none;stroke:#ff0000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.52916671, 0.52916671;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#Arrow1Mend-3-5-0)"
+ d="m 92.254164,42.999993 c 9.142136,12.745655 4.411987,28.608461 0.529167,38.364584"
+ id="path4536-7-5-2"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 47.274996,72.104159 v 5.291667"
+ id="path8053"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 64.472913,72.10416 v 5.291667"
+ id="path8053-6"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 72.410413,72.10416 v 5.291667"
+ id="path8053-6-1"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 92.254164,82.687494 v 5.291667"
+ id="path8053-6-1-8"
+ inkscape:connector-curvature="0" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:10.58333397px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335"
+ x="55.802444"
+ y="76.167412"
+ id="text6572-6-7"><tspan
+ sodipodi:role="line"
+ x="55.802444"
+ y="76.167412"
+ style="font-size:3.52777839px;line-height:5.39999962;text-align:center;text-anchor:middle;fill:#008000;stroke-width:0.26458335"
+ id="tspan6574-2-9">A</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:10.58333397px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335"
+ x="68.559143"
+ y="75.883926"
+ id="text6572-6-2-2"><tspan
+ sodipodi:role="line"
+ x="68.559143"
+ y="75.883926"
+ style="font-size:3.52777863px;line-height:5.39999962;text-align:center;text-anchor:middle;fill:#0000ff;stroke-width:0.26458335"
+ id="tspan6574-2-7-0">B</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:10.58333397px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335"
+ x="84.812119"
+ y="75.883911"
+ id="text6572-6-2-3-2"><tspan
+ sodipodi:role="line"
+ x="84.812119"
+ y="75.883911"
+ style="font-size:3.52777863px;line-height:5.39999962;text-align:center;text-anchor:middle;fill:#c87137;stroke-width:0.26458335"
+ id="tspan6574-2-7-6-3">C</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:10.58333397px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335"
+ x="98.513756"
+ y="86.845222"
+ id="text6572-6-2-3-2-7"><tspan
+ sodipodi:role="line"
+ x="98.513756"
+ y="86.845222"
+ style="font-size:3.52777863px;line-height:5.39999962;text-align:center;text-anchor:middle;fill:#c87137;stroke-width:0.26458335"
+ id="tspan6574-2-7-6-3-5">C</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:10.58333397px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335"
+ x="35.452015"
+ y="75.694931"
+ id="text6572-9"><tspan
+ sodipodi:role="line"
+ x="35.452015"
+ y="75.694931"
+ style="font-size:3.52777815px;line-height:5.39999962;text-align:center;text-anchor:middle;stroke-width:0.26458335"
+ id="tspan6574-22">Old config</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:10.58333397px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335"
+ x="55.484753"
+ y="86.656235"
+ id="text6572-9-8"><tspan
+ sodipodi:role="line"
+ x="55.484753"
+ y="86.656235"
+ style="font-size:3.52777839px;line-height:5.39999962;text-align:center;text-anchor:middle;stroke-width:0.26458335"
+ id="tspan6574-22-9">Old config</tspan></text>
+ <path
+ style="fill:none;stroke:#ff0000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.52916671, 0.52916671;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#Arrow1Mend-3-5-7)"
+ d="m 92.254164,42.999993 c 4.233333,4.7625 2.645833,13.229167 0.79375,17.197917"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:10.58333397px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335"
+ x="3.7020128"
+ y="33.550579"
+ id="text6572-1"><tspan
+ sodipodi:role="line"
+ x="3.7020128"
+ y="42.914349"
+ style="font-size:3.52777815px;line-height:5.39999962;text-align:center;text-anchor:middle;stroke-width:0.26458335"
+ id="tspan15310" /></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.17500019px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335"
+ x="13.366468"
+ y="46.590767"
+ id="text15316"><tspan
+ sodipodi:role="line"
+ x="13.366468"
+ y="46.590767"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458335"
+ id="tspan15318">VUpdate</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.17500043px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335"
+ x="14.45245"
+ y="29.676321"
+ id="text15316-3"><tspan
+ sodipodi:role="line"
+ id="tspan15314-1"
+ x="14.45245"
+ y="29.676321"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458335">Update</tspan><tspan
+ sodipodi:role="line"
+ x="14.45245"
+ y="33.645073"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458335"
+ id="tspan15318-9">Lock</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.17500043px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335"
+ x="7.5676007"
+ y="56.985115"
+ id="text15316-4"><tspan
+ sodipodi:role="line"
+ x="7.5676007"
+ y="56.985115"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458335"
+ id="tspan15318-7">Register update</tspan><tspan
+ sodipodi:role="line"
+ x="7.5676007"
+ y="60.953865"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458335"
+ id="tspan15361">Pending Status</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.17500043px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335"
+ x="16.074829"
+ y="76.167404"
+ id="text15316-8"><tspan
+ sodipodi:role="line"
+ x="16.074829"
+ y="76.167404"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458335"
+ id="tspan15318-4">Buf 0</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.17500067px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335"
+ x="16.156994"
+ y="86.089279"
+ id="text15316-8-5"><tspan
+ sodipodi:role="line"
+ x="16.156994"
+ y="86.089279"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458335"
+ id="tspan15318-4-0">Buf 1</tspan></text>
+ </g>
+</svg>
diff --git a/Documentation/gpu/amdgpu/display/dc-debug.rst b/Documentation/gpu/amdgpu/display/dc-debug.rst
new file mode 100644
index 000000000000..40c55a618918
--- /dev/null
+++ b/Documentation/gpu/amdgpu/display/dc-debug.rst
@@ -0,0 +1,77 @@
+========================
+Display Core Debug tools
+========================
+
+DC Visual Confirmation
+======================
+
+Display core provides a feature named visual confirmation, which is a set of
+bars added at the scanout time by the driver to convey some specific
+information. In general, you can enable this debug option by using::
+
+ echo <N> > /sys/kernel/debug/dri/0/amdgpu_dm_visual_confirm
+
+Where `N` is an integer that selects the specific scenario the developer wants
+to enable. Some of these debug cases are described in the following
+subsections.
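+
+For example, you can read back the value currently set and turn the feature
+off again (assuming card 0, as in the path above)::
+
+ cat /sys/kernel/debug/dri/0/amdgpu_dm_visual_confirm
+ echo 0 > /sys/kernel/debug/dri/0/amdgpu_dm_visual_confirm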
+
+Multiple Planes Debug
+---------------------
+
+If you want to enable or debug multiple planes in a specific user-space
+application, you can leverage a debug feature named visual confirm. To enable
+it, run::
+
+ echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_visual_confirm
+
+You need to reload your GUI to see the visual confirmation. When the plane
+configuration changes or a full update occurs, there will be a colored bar at
+the bottom of each hardware plane being drawn on the screen.
+
+* The color indicates the format - For example, red is AR24 and green is NV12
+* The height of the bar indicates the index of the plane
+* Pipe split can be observed if there are two bars with a difference in height
+ covering the same plane
+
+Consider the video playback case in which a video is played in a specific
+plane, and the desktop is drawn in another plane. The video plane should
+feature one or two green bars at the bottom of the video depending on pipe
+split configuration.
+
+* There should **not** be any visual corruption
+* There should **not** be any underflow or screen flashes
+* There should **not** be any black screens
+* There should **not** be any cursor corruption
+* Multiple planes **may** be briefly disabled during window transitions or
+ resizing but should come back after the action has finished
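+
+If you also want to confirm from the kernel side which planes are enabled
+during this test, the generic DRM atomic state dump in debugfs can help (the
+path below assumes card 0)::
+
+ cat /sys/kernel/debug/dri/0/state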
+
+Pipe Split Debug
+----------------
+
+Sometimes we need to debug whether DCN is splitting pipes correctly, and visual
+confirmation is also handy for this case. As in the MPO case, you can use the
+command below to enable visual confirmation::
+
+ echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_visual_confirm
+
+In this case, if you have a pipe split, you will see one small red bar at the
+bottom of the display covering the entire display width and another bar
+covering the second pipe. In other words, you will see a slightly taller bar
+for the second pipe.
+
+DTN Debug
+=========
+
+DC (DCN) provides an extensive log that dumps multiple details from our
+hardware configuration. You can capture those status values through the
+Display Test Next (DTN) log in debugfs::
+
+ cat /sys/kernel/debug/dri/0/amdgpu_dm_dtn_log
+
+Since this log is updated according to the DCN status, you can also follow
+changes in real time by using something like::
+
+ sudo watch -d cat /sys/kernel/debug/dri/0/amdgpu_dm_dtn_log
+
+When reporting a bug related to DC, consider attaching this log before and
+after you reproduce the bug.
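+
+For instance, a minimal capture session could look like this (the file names
+are only an example)::
+
+ cat /sys/kernel/debug/dri/0/amdgpu_dm_dtn_log > dtn_before.log
+ # reproduce the issue here
+ cat /sys/kernel/debug/dri/0/amdgpu_dm_dtn_log > dtn_after.log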
diff --git a/Documentation/gpu/amdgpu/display/dc-glossary.rst b/Documentation/gpu/amdgpu/display/dc-glossary.rst
new file mode 100644
index 000000000000..116f5f0942fd
--- /dev/null
+++ b/Documentation/gpu/amdgpu/display/dc-glossary.rst
@@ -0,0 +1,237 @@
+===========
+DC Glossary
+===========
+
+On this page, we try to keep track of acronyms related to the display
+component. If you do not find what you are looking for, look at the
+'Documentation/gpu/amdgpu/amdgpu-glossary.rst'; if you cannot find it anywhere,
+consider asking on the amdgfx mailing list and updating this page.
+
+.. glossary::
+
+ ABM
+ Adaptive Backlight Modulation
+
+ APU
+ Accelerated Processing Unit
+
+ ASIC
+ Application-Specific Integrated Circuit
+
+ ASSR
+ Alternate Scrambler Seed Reset
+
+ AZ
+ Azalia (HD audio DMA engine)
+
+ BPC
+ Bits Per Colour/Component
+
+ BPP
+ Bits Per Pixel
+
+ Clocks
+ * PCLK: Pixel Clock
+ * SYMCLK: Symbol Clock
+ * SOCCLK: GPU Engine Clock
+ * DISPCLK: Display Clock
+ * DPPCLK: DPP Clock
+ * DCFCLK: Display Controller Fabric Clock
+ * REFCLK: Real Time Reference Clock
+ * PPLL: Pixel PLL
+ * FCLK: Fabric Clock
+ * MCLK: Memory Clock
+
+ CRC
+ Cyclic Redundancy Check
+
+ CRTC
+ Cathode Ray Tube Controller - commonly called "Controller" - Generates
+ raw stream of pixels, clocked at pixel clock
+
+ CVT
+ Coordinated Video Timings
+
+ DAL
+ Display Abstraction layer
+
+ DC (Software)
+ Display Core
+
+ DC (Hardware)
+ Display Controller
+
+ DCC
+ Delta Colour Compression
+
+ DCE
+ Display Controller Engine
+
+ DCHUB
+ Display Controller HUB
+
+ ARB
+ Arbiter
+
+ VTG
+ Vertical Timing Generator
+
+ DCN
+ Display Core Next
+
+ DCCG
+ Display Clock Generator block
+
+ DDC
+ Display Data Channel
+
+ DIO
+ Display IO
+
+ DPP
+ Display Pipes and Planes
+
+ DSC
+ Display Stream Compression (reduces the number of bits needed to represent
+ pixels while keeping the same pixel clock)
+
+ dGPU
+ discrete GPU
+
+ DMIF
+ Display Memory Interface
+
+ DML
+ Display Mode Library
+
+ DMCU
+ Display Micro-Controller Unit
+
+ DMCUB
+ Display Micro-Controller Unit, version B
+
+ DPCD
+ DisplayPort Configuration Data
+
+ DPM(S)
+ Display Power Management (Signaling)
+
+ DRR
+ Dynamic Refresh Rate
+
+ DWB
+ Display Writeback
+
+ FB
+ Frame Buffer
+
+ FBC
+ Frame Buffer Compression
+
+ FEC
+ Forward Error Correction
+
+ FRL
+ Fixed Rate Link
+
+ GCO
+ Graphical Controller Object
+
+ GSL
+ Global Swap Lock
+
+ iGPU
+ integrated GPU
+
+ ISR
+ Interrupt Service Request
+
+ ISV
+ Independent Software Vendor
+
+ KMD
+ Kernel Mode Driver
+
+ LB
+ Line Buffer
+
+ LFC
+ Low Framerate Compensation
+
+ LTTPR
+ Link Training Tunable Phy Repeater
+
+ LUT
+ Lookup Table
+
+ MALL
+ Memory Access at Last Level
+
+ MC
+ Memory Controller
+
+ MPC
+ Multiple pipes and plane combine
+
+ MPO
+ Multi Plane Overlay
+
+ MST
+ Multi Stream Transport
+
+ NBP State
+ Northbridge Power State
+
+ NBIO
+ North Bridge Input/Output
+
+ ODM
+ Output Data Mapping
+
+ OPM
+ Output Protection Manager
+
+ OPP
+ Output Plane Processor
+
+ OPTC
+ Output Pipe Timing Combiner
+
+ OTG
+ Output Timing Generator
+
+ PCON
+ Power Controller
+
+ PGFSM
+ Power Gate Finite State Machine
+
+ PSR
+ Panel Self Refresh
+
+ SCL
+ Scaler
+
+ SDP
+ Scalable Data Port
+
+ SLS
+ Single Large Surface
+
+ SST
+ Single Stream Transport
+
+ TMDS
+ Transition-Minimized Differential Signaling
+
+ TMZ
+ Trusted Memory Zone
+
+ TTU
+ Time to Underflow
+
+ VRR
+ Variable Refresh Rate
+
+ UVD
+ Unified Video Decoder
diff --git a/Documentation/gpu/amdgpu/display/dc_pipeline_overview.svg b/Documentation/gpu/amdgpu/display/dc_pipeline_overview.svg
new file mode 100644
index 000000000000..9adecebfe65b
--- /dev/null
+++ b/Documentation/gpu/amdgpu/display/dc_pipeline_overview.svg
@@ -0,0 +1,1125 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="1296.7491"
+ height="741.97845"
+ viewBox="0 0 343.0982 196.31514"
+ version="1.1"
+ id="svg8"
+ inkscape:version="0.92.5 (2060ec1f9f, 2020-04-08)"
+ sodipodi:docname="dc_pipeline_overview.svg">
+ <defs
+ id="defs2">
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker8858"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path8616"
+ style="fill:#aa00d4;fill-opacity:1;fill-rule:evenodd;stroke:#aa00d4;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Send"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path8622"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-0.3,0,0,-0.3,0.69,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path8592"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path8610"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path1200"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-3"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-6"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-3-2"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-6-9"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-3-2-1"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-6-9-9"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-3-2-7"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-6-9-8"
+ style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-3-4"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-6-5"
+ style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-0"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-3"
+ style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-6"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-1"
+ style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-3-2-6"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-6-9-1"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-0-7"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-3-4"
+ style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-6-3"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-1-0"
+ style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-3-2-8"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-6-9-6"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-3"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path1200-6"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker8858-3"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path8616-5"
+ style="fill:#00ffcc;fill-opacity:1;fill-rule:evenodd;stroke:#00ffcc;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-3-3"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-6-56"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-0-2"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-3-9"
+ style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="2.8"
+ inkscape:cx="603.80172"
+ inkscape:cy="404.14319"
+ inkscape:document-units="mm"
+ inkscape:current-layer="layer1"
+ showgrid="false"
+ inkscape:window-width="3840"
+ inkscape:window-height="2096"
+ inkscape:window-x="0"
+ inkscape:window-y="27"
+ inkscape:window-maximized="1"
+ showguides="false"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ units="px"
+ inkscape:snap-global="false" />
+ <metadata
+ id="metadata5">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="Layer 1"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(419.79645,20.103767)">
+ <path
+ style="fill:#008000;stroke:#008000;stroke-width:0.59275198;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend-8-3-2-7)"
+ d="m -340.37552,57.5332 h -14.81024"
+ id="path1171-7-1-3-0"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:#008000;stroke:#008000;stroke-width:0.59715915;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend-8-3-4)"
+ d="m -293.23443,57.5332 h -15.03129"
+ id="path1171-7-1-32"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:#008000;stroke:#008000;stroke-width:0.59275198;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend-8-0)"
+ d="M -246.45946,57.5332 H -261.2697"
+ id="path1171-7-6"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:#008000;stroke:#008000;stroke-width:0.59275198;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend-6)"
+ d="m -151.28623,57.5332 h -14.81024"
+ id="path1171-0"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#ff0000;stroke-width:0.98222464;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend-8-3-2-6)"
+ d="m -310.11621,-10.988713 h -35.41856"
+ id="path1171-7-1-3-5"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#ff0000;stroke-width:1.33745635;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend-8-3-2-1)"
+ d="M -174.42569,48.441117 V -10.963061 L -277.26548,-11.45916"
+ id="path1171-7-1-3-4"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="ccc" />
+ <path
+ style="fill:none;stroke:#ff0000;stroke-width:0.95872593;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend-8-3)"
+ d="m -262.79442,87.935594 h 14.32069"
+ id="path1171-7-1"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#ff0000;stroke-width:0.97006679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend-8)"
+ d="m -309.80088,87.935594 h 14.44587"
+ id="path1171-7"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#ff0000;stroke-width:0.96187615;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend)"
+ d="m -356.45657,87.935594 h 14.20296"
+ id="path1171"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#ff0000;stroke-width:0.96061862;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend-8-3-2)"
+ d="m -167.44556,87.935594 h 14.16584"
+ id="path1171-7-1-3"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#008000;stroke-width:0.87091714;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend-8-0-7)"
+ d="M -193.82812,48.312503 V 14.168502 l -84.03577,-0.467726"
+ id="path1171-7-6-4"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="ccc" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:4.91112339, 0.81852057;stroke-dashoffset:0;stroke-opacity:1"
+ d="m -133.33998,42.989657 v 5.457081"
+ id="path7149-3-7"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:4.91112339, 0.81852057;stroke-dashoffset:0;stroke-opacity:1"
+ d="m -298.69506,162.44998 v 13.31197"
+ id="path7149"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#008080;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.81852057, 0.81852057;stroke-dashoffset:0;stroke-opacity:1"
+ d="m -242.80131,107.00907 v 9.60171"
+ id="path7040-5-4-7-5-6-9"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#008080;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.81852057, 0.81852057;stroke-dashoffset:0;stroke-opacity:1"
+ d="m -300.34873,107.17445 v 9.6017"
+ id="path7040-5-4-7-5-6"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#008080;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.81852057, 0.81852057;stroke-dashoffset:0;stroke-opacity:1"
+ d="m -359.26293,106.99745 v 9.60171"
+ id="path7040-5-4-7-5"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#008080;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.81852057, 0.81852057;stroke-dashoffset:0;stroke-opacity:1"
+ d="M -369.74543,25.114933 V 37.991587"
+ id="path7040-5-4-7-6"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#008080;stroke-width:0.91136348;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.91136346, 0.91136346;stroke-dashoffset:0;stroke-opacity:1"
+ d="M -135.17034,93.582486 V 107.10642 H -403.93077 V 37.882965 h 109.60575 V 25.225991"
+ id="path7038"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cccccc" />
+ <path
+ style="fill:none;stroke:#008080;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.81852057, 0.81852057;stroke-dashoffset:0;stroke-opacity:1"
+ d="M -231.106,94.010086 V 106.96943"
+ id="path7040"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#008080;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.81852057, 0.81852057;stroke-dashoffset:0;stroke-opacity:1"
+ d="M -278.50224,93.844719 V 106.80406"
+ id="path7040-5"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#008080;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.81852057, 0.81852057;stroke-dashoffset:0;stroke-opacity:1"
+ d="M -325.89848,93.701083 V 106.99115"
+ id="path7040-5-4"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#008080;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.81852057, 0.81852057;stroke-dashoffset:0;stroke-opacity:1"
+ d="M -373.29471,93.899037 V 107.27179"
+ id="path7040-5-4-7"
+ inkscape:connector-curvature="0" />
+ <g
+ id="g934"
+ transform="matrix(0.61872421,0,0,0.61872421,-154.16506,-3.5724799)">
+ <rect
+ ry="2.1052283e-06"
+ y="84.280701"
+ x="-376.383"
+ height="72.786827"
+ width="49.352299"
+ id="rect834"
+ style="fill:none;stroke:#000000;stroke-width:1.16258347;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ <text
+ id="text838"
+ y="95.916664"
+ x="-371.17261"
+ style="font-style:normal;font-weight:normal;font-size:10.58333302px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ style="stroke-width:0.26458332"
+ y="95.916664"
+ x="-371.17261"
+ id="tspan836"
+ sodipodi:role="line">DCHUB</tspan></text>
+ <text
+ id="text846"
+ y="121.99702"
+ x="-352.74997"
+ style="font-style:normal;font-weight:normal;font-size:10.58333302px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ y="121.99702"
+ x="-352.74997"
+ id="tspan844"
+ sodipodi:role="line">HUBP</tspan><tspan
+ id="tspan863"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ y="135.22618"
+ x="-352.74997"
+ sodipodi:role="line">(n)</tspan></text>
+ </g>
+ <g
+ id="g942"
+ transform="matrix(0.61872421,0,0,0.61872421,-158.40385,-3.2216813)">
+ <text
+ id="text838-5"
+ y="116.65257"
+ x="-269.45752"
+ style="font-style:normal;font-weight:normal;font-size:10.58333302px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ y="116.65257"
+ x="-269.45752"
+ id="tspan836-3"
+ sodipodi:role="line">DPP</tspan><tspan
+ id="tspan936"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ y="129.88174"
+ x="-269.45752"
+ sodipodi:role="line">(n)</tspan></text>
+ <rect
+ ry="2.1052283e-06"
+ y="83.71373"
+ x="-293.7952"
+ height="72.786827"
+ width="49.352303"
+ id="rect834-5"
+ style="fill:none;stroke:#000000;stroke-width:1.16258347;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ </g>
+ <g
+ id="g1158"
+ transform="matrix(0.61872421,0,0,0.61872421,-154.34048,-6.2618995)">
+ <text
+ id="text838-5-2"
+ y="128.87331"
+ x="-200.18195"
+ style="font-style:normal;font-weight:normal;font-size:10.58333302px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ id="tspan936-1"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ y="128.87331"
+ x="-200.18195"
+ sodipodi:role="line">MPC</tspan></text>
+ <rect
+ ry="2.1052283e-06"
+ y="88.627419"
+ x="-224.62555"
+ height="72.786827"
+ width="49.352303"
+ id="rect834-5-2"
+ style="fill:none;stroke:#000000;stroke-width:1.16258347;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ </g>
+ <g
+ id="g1153"
+ transform="matrix(0.61872421,0,0,0.61872421,-108.51628,-6.4957668)">
+ <text
+ id="text838-5-2-7"
+ y="129.2513"
+ x="-120.96272"
+ style="font-style:normal;font-weight:normal;font-size:10.58333302px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ id="tspan936-1-0"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ y="129.2513"
+ x="-120.96272"
+ sodipodi:role="line">OPTC</tspan></text>
+ <rect
+ ry="2.1052283e-06"
+ y="89.005402"
+ x="-145.62854"
+ height="72.786827"
+ width="49.352306"
+ id="rect834-5-2-9"
+ style="fill:none;stroke:#000000;stroke-width:1.16258347;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ </g>
+ <g
+ id="g1148"
+ transform="matrix(0.61872421,0,0,0.61872421,-105.25474,-7.6650796)">
+ <text
+ id="text838-5-2-7-3"
+ y="131.14117"
+ x="-48.981136"
+ style="font-style:normal;font-weight:normal;font-size:10.58333302px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ id="tspan936-1-0-6"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ y="131.14117"
+ x="-48.981136"
+ sodipodi:role="line">DIO</tspan></text>
+ <rect
+ ry="2.1052283e-06"
+ y="90.895279"
+ x="-73.435081"
+ height="72.786827"
+ width="49.352306"
+ id="rect834-5-2-9-0"
+ style="fill:none;stroke:#000000;stroke-width:1.16258347;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ </g>
+ <g
+ id="g1133"
+ transform="matrix(0.61872421,0,0,0.61872421,-181.52704,-7.6650796)">
+ <text
+ id="text838-5-2-6"
+ y="241.13223"
+ x="-286.96921"
+ style="font-style:normal;font-weight:normal;font-size:10.58333302px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ id="tspan936-1-2"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ y="241.13223"
+ x="-286.96921"
+ sodipodi:role="line">DCCG</tspan></text>
+ <rect
+ ry="2.1052283e-06"
+ y="200.88634"
+ x="-311.56009"
+ height="72.786827"
+ width="49.352306"
+ id="rect834-5-2-6"
+ style="fill:none;stroke:#000000;stroke-width:1.16258347;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ </g>
+ <g
+ id="g1138"
+ transform="matrix(0.61872421,0,0,0.61872421,-181.52704,-7.6650796)">
+ <text
+ id="text838-5-2-6-1"
+ y="241.81844"
+ x="-190.55942"
+ style="font-style:normal;font-weight:normal;font-size:10.58333302px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ id="tspan936-1-2-8"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ y="241.81844"
+ x="-190.55942"
+ sodipodi:role="line">DMU</tspan></text>
+ <rect
+ ry="2.1052283e-06"
+ y="201.6423"
+ x="-215.17615"
+ height="72.786827"
+ width="49.352306"
+ id="rect834-5-2-6-7"
+ style="fill:none;stroke:#000000;stroke-width:1.16258347;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:6.54816437px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.1637041"
+ x="-297.75696"
+ y="109.44505"
+ id="text1063"><tspan
+ sodipodi:role="line"
+ id="tspan1061"
+ x="-297.75696"
+ y="115.23865"
+ style="stroke-width:0.1637041" /></text>
+ <g
+ id="g1143"
+ transform="matrix(0.61872421,0,0,0.61872421,-181.52704,-8.9747125)">
+ <text
+ id="text838-5-2-6-1-9"
+ y="243.02728"
+ x="-99.967323"
+ style="font-style:normal;font-weight:normal;font-size:10.58333302px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ id="tspan936-1-2-8-2"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ y="243.02728"
+ x="-99.967323"
+ sodipodi:role="line">AZ</tspan></text>
+ <rect
+ ry="2.1052283e-06"
+ y="202.77623"
+ x="-124.83984"
+ height="72.786827"
+ width="49.352306"
+ id="rect834-5-2-6-7-0"
+ style="fill:none;stroke:#000000;stroke-width:1.16258347;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ </g>
+ <g
+ id="g1169"
+ transform="matrix(0.61872421,0,0,0.61872421,-154.16506,1.4555785)">
+ <text
+ id="text838-5-2-6-2"
+ y="5.9612885"
+ x="-348.74365"
+ style="font-style:normal;font-weight:normal;font-size:10.58333302px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ id="tspan936-1-2-3"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ y="5.9612885"
+ x="-348.74365"
+ sodipodi:role="line">MMHUBBUB</tspan></text>
+ <rect
+ ry="2.1010696e-06"
+ y="-34.142948"
+ x="-384.64743"
+ height="72.643044"
+ width="72.096924"
+ id="rect834-5-2-6-75"
+ style="fill:none;stroke:#000000;stroke-width:1.40378118;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ </g>
+ <g
+ id="g1164"
+ transform="matrix(0.61872421,0,0,0.61872421,-154.16506,-7.6650796)">
+ <text
+ id="text838-5-2-6-9"
+ y="13.465075"
+ x="-227.30836"
+ style="font-style:normal;font-weight:normal;font-size:10.58333302px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ id="tspan936-1-2-2"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ y="13.465075"
+ x="-227.30836"
+ sodipodi:role="line">DWB</tspan><tspan
+ id="tspan1128"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ y="26.694241"
+ x="-227.30836"
+ sodipodi:role="line">(n)</tspan></text>
+ <rect
+ ry="2.1052283e-06"
+ y="-19.473768"
+ x="-251.83983"
+ height="72.786827"
+ width="49.352306"
+ id="rect834-5-2-6-2"
+ style="fill:none;stroke:#000000;stroke-width:1.16258347;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ </g>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.91371936;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:5.48231601, 0.91371934;stroke-dashoffset:0;stroke-opacity:1"
+ d="m -358.95963,161.63019 v 14.12431 h 250.20395 V 43.149938 H -361.845 V 25.478973"
+ id="path7147"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cccccc" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:4.91112339, 0.81852057;stroke-dashoffset:0;stroke-opacity:1"
+ d="m -242.92533,161.58513 v 14.05612"
+ id="path7149-3"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:4.91112339, 0.81852057;stroke-dashoffset:0;stroke-opacity:1"
+ d="m -184.37695,42.955607 v 5.457082"
+ id="path7149-3-7-4"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:4.91112339, 0.81852057;stroke-dashoffset:0;stroke-opacity:1"
+ d="m -277.36283,43.141644 v 5.457082"
+ id="path7149-3-7-5"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:4.91112339, 0.81852057;stroke-dashoffset:0;stroke-opacity:1"
+ d="M -325.48437,42.976278 V 48.43336"
+ id="path7149-3-7-2"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:4.91112339, 0.81852057;stroke-dashoffset:0;stroke-opacity:1"
+ d="m -361.86492,43.141644 v 5.457083"
+ id="path7149-3-7-54"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:#008000;stroke:#008000;stroke-width:0.46329758;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend-6-3)"
+ d="m -147.58542,-8.2978166 h -9.04766"
+ id="path1171-0-7"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#ff0000;stroke-width:0.98222464;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend-8-3-2-8)"
+ d="m -157.13421,-1.6500501 h 8.66407"
+ id="path1171-7-1-3-8"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.74503672;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:4.47022031, 0.74503672;stroke-dashoffset:0;stroke-opacity:1"
+ d="m -148.50314,4.9845652 h -7.91265"
+ id="path7149-3-7-8"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#008080;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.81852057, 0.81852057;stroke-dashoffset:0;stroke-opacity:1"
+ d="m -157.59442,11.623513 h 10.26991"
+ id="path7040-4"
+ inkscape:connector-curvature="0" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:6.54816437px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.1637041"
+ x="-142.72867"
+ y="-6.9685979"
+ id="text12079"><tspan
+ sodipodi:role="line"
+ id="tspan12077"
+ x="-142.72867"
+ y="-6.9685979"
+ style="font-size:4.80198765px;stroke-width:0.1637041">Global sync</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:6.54816437px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.1637041"
+ x="-142.93031"
+ y="0.13578746"
+ id="text12079-3"><tspan
+ sodipodi:role="line"
+ id="tspan12077-1"
+ x="-142.93031"
+ y="0.13578746"
+ style="font-size:4.80198765px;stroke-width:0.1637041">Pixel data</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:6.54816437px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.1637041"
+ x="-142.77556"
+ y="6.3093324"
+ id="text12079-3-4"><tspan
+ sodipodi:role="line"
+ id="tspan12077-1-9"
+ x="-142.77556"
+ y="6.3093324"
+ style="font-size:4.80198765px;stroke-width:0.1637041">Sideband signal</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:6.54816437px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.1637041"
+ x="-142.72867"
+ y="12.948278"
+ id="text12079-3-4-2"><tspan
+ sodipodi:role="line"
+ id="tspan12077-1-9-0"
+ x="-142.72867"
+ y="12.948278"
+ style="font-size:4.80198765px;stroke-width:0.1637041">Config. Bus</tspan></text>
+ <path
+ style="fill:none;stroke:#aa00d4;stroke-width:1.32291663;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#marker8858)"
+ d="m -406.68795,73.185276 h 14.20296"
+ id="path1171-75"
+ inkscape:connector-curvature="0" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="-420.21503"
+ y="75.065918"
+ id="text8862"><tspan
+ sodipodi:role="line"
+ id="tspan8860"
+ x="-420.21503"
+ y="75.065918"
+ style="font-size:6.3499999px;stroke-width:0.26458332">SDP</tspan></text>
+ <path
+ style="fill:none;stroke:#00ffcc;stroke-width:1.25980031;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#marker8858-3)"
+ d="m -119.19923,72.243805 h 12.88004"
+ id="path1171-75-6"
+ inkscape:connector-curvature="0" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="-104.87327"
+ y="74.54258"
+ id="text8862-2"><tspan
+ sodipodi:role="line"
+ id="tspan8860-9"
+ x="-104.87327"
+ y="74.54258"
+ style="font-size:6.3499999px;stroke-width:0.26458332">Monitor</tspan></text>
+ <g
+ id="g6280"
+ transform="translate(-133.43389,-37.35791)">
+ <text
+ id="text838-5-2-7-6"
+ y="110.67171"
+ x="-97.4758"
+ style="font-style:normal;font-weight:normal;font-size:6.54816437px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.16370411"
+ xml:space="preserve"><tspan
+ id="tspan936-1-0-7"
+ style="text-align:center;text-anchor:middle;stroke-width:0.16370411"
+ y="110.67171"
+ x="-97.4758"
+ sodipodi:role="line">OPP</tspan></text>
+ <rect
+ ry="1.3025557e-06"
+ y="85.770599"
+ x="-112.73714"
+ height="45.034973"
+ width="30.535467"
+ id="rect834-5-2-9-5"
+ style="fill:none;stroke:#000000;stroke-width:0.71931857;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ </g>
+ <path
+ style="fill:#008000;stroke:#008000;stroke-width:0.59275198;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend-8-0-2)"
+ d="m -199.6735,57.600919 h -14.81024"
+ id="path1171-7-6-1"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#ff0000;stroke-width:0.95872593;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend-8-3-3)"
+ d="m -214.95012,88.003315 h 14.32069"
+ id="path1171-7-1-2"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#008080;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.81852058, 0.81852058;stroke-dashoffset:0;stroke-opacity:1"
+ d="M -182.99565,94.057598 V 107.01694"
+ id="path7040-7"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:4.91112344, 0.81852058;stroke-dashoffset:0;stroke-opacity:1"
+ d="m -231.7616,43.563759 v 5.457082"
+ id="path7149-3-7-4-0"
+ inkscape:connector-curvature="0" />
+ <g
+ aria-label="["
+ transform="matrix(0,-1,0.74237844,0,14.567595,39.540924)"
+ style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ id="text6872">
+ <path
+ d="m -65.936923,-548.78511 h 8.816294 v 2.79112 h -6.82247 v 176.34952 h 6.82247 v 2.41314 h -8.816294 z"
+ style="font-size:50.79999924px;stroke-width:0.26458332"
+ id="path6874"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="ccccccccc" />
+ </g>
+ <g
+ aria-label="["
+ transform="rotate(-90,182.49521,-144.01791)"
+ style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ id="text6872-3">
+ <path
+ d="m -63.825546,-623.34091 h 7.228794 v 2.26195 h -5.764137 l 0,127.08032 h 5.764137 v 1.88397 h -7.228794 z"
+ style="font-size:50.79999924px;stroke-width:0.26458332"
+ id="path6874-6"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="ccccccccc" />
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="-359.80389"
+ y="99.104233"
+ id="text6929"><tspan
+ sodipodi:role="line"
+ id="tspan6927"
+ x="-359.80389"
+ y="99.104233"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:5.64444447px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';stroke-width:0.26458332">dc_plane</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="-223.56163"
+ y="99.142021"
+ id="text6933"><tspan
+ sodipodi:role="line"
+ id="tspan6931"
+ x="-223.56163"
+ y="99.142021"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:5.64444447px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';stroke-width:0.26458332">dc_stream</tspan></text>
+ <g
+ aria-label="["
+ transform="matrix(0,1,1,0,153.30551,96.566025)"
+ style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ id="text6872-35">
+ <path
+ d="m -65.936923,-545.95029 h 8.816294 v 2.79112 h -6.898066 v 271.78851 h 6.898066 v 2.41314 h -8.816294 z"
+ style="font-size:50.79999924px;stroke-width:0.26458332"
+ id="path6874-62"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="ccccccccc" />
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="-267.43958"
+ y="28.5028"
+ id="text6933-9"><tspan
+ sodipodi:role="line"
+ id="tspan6931-1"
+ x="-267.43958"
+ y="28.5028"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:5.64444447px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';stroke-width:0.26458332">dc_state</tspan></text>
+ <g
+ aria-label="["
+ transform="matrix(0,0.98158883,-1.0187565,0,0,-7.4835468)"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:6.62759447px;line-height:1.25;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.5522995"
+ id="text6973">
+ <path
+ d="m 23.679381,144.30265 h 3.028123 v 1.29445 h -1.820839 v 7.91629 h 1.820839 v 1.29445 h -3.028123 z"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:11.78239059px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';stroke-width:0.5522995"
+ id="path6975"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="ccccccccc" />
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="-142.71655"
+ y="18.955769"
+ id="text6980"><tspan
+ sodipodi:role="line"
+ id="tspan6978"
+ x="-142.71655"
+ y="18.955769"
+ style="font-size:4.58611107px;stroke-width:0.26458332">Code struct</tspan></text>
+ <g
+ aria-label="["
+ transform="rotate(-90,94.826273,-58.762727)"
+ style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ id="text6872-35-2">
+ <path
+ d="m -66.881863,-308.95922 h 7.115401 l 0,1.69499 h -5.197173 v 42.03568 h 5.197173 v 1.78948 h -7.115401 z"
+ style="font-size:50.79999924px;stroke-width:0.26458332"
+ id="path6874-62-7"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="ccccccccc" />
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="-134.09625"
+ y="99.354439"
+ id="text6933-9-0"><tspan
+ sodipodi:role="line"
+ id="tspan6931-1-9"
+ x="-134.09625"
+ y="99.354439"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:5.64444447px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';stroke-width:0.26458332">dc_link</tspan></text>
+ <g
+ aria-label="}"
+ transform="rotate(90,-145.27371,-140.09832)"
+ style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#3771c8;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ id="text1003">
+ <path
+ d="m 102.58571,58.211269 h 0.86816 c 1.15755,0 3.11267,-0.177767 3.45994,-0.5333 0.35553,-0.355534 1.28925,-1.124479 1.28925,-2.306836 V -61.475482 c 0,-1.289844 0.18603,-2.228288 0.5581,-2.815332 0.37207,-0.587044 0.26105,-0.992187 1.17882,-1.215429 -0.91777,-0.206706 -0.80675,-0.603581 -1.17882,-1.190625 -0.37207,-0.587045 -0.5581,-1.529623 -0.5581,-2.827735 v -3.075781 c 0,-1.174088 -0.93372,-1.938899 -1.28925,-2.294433 -0.34727,-0.363802 -2.30239,-0.545703 -3.45994,-0.545703 h -0.86816 v -1.773536 h 0.78134 c 2.05879,0 4.63403,0.305924 5.32029,0.917774 0.69453,0.60358 1.0418,1.81901 1.0418,3.646289 v 2.976562 c 0,1.231966 0.22324,2.087728 0.66973,2.567285 0.44648,0.471289 5.80035,0.706934 6.97444,0.706934 h 0.76894 v 1.773535 h -0.76894 c -1.17409,0 -6.52796,0.239778 -6.97444,0.719336 -0.44649,0.479557 -0.66973,1.343587 -0.66973,2.59209 V 55.420742 c 0,1.827279 -0.34727,3.046842 -1.0418,3.658691 -0.68626,0.611849 -3.2615,0.917774 -5.32029,0.917774 h -0.78134 z"
+ style="font-size:25.39999962px;fill:#3771c8;stroke-width:0.26458332"
+ id="path1005"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cscsscccsscsccscsscsccscsscscc" />
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="-200.59984"
+ y="129.60852"
+ id="text1010"><tspan
+ sodipodi:role="line"
+ id="tspan1008"
+ x="-200.59984"
+ y="129.60852"
+ style="font-style:italic;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:6.3499999px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold Italic';text-align:center;text-anchor:middle;stroke-width:0.26458332">Floating point</tspan><tspan
+ sodipodi:role="line"
+ x="-200.59984"
+ y="137.54602"
+ style="font-size:6.3499999px;text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ id="tspan1059">calculation</tspan></text>
+ <g
+ aria-label="}"
+ transform="rotate(90,-94.294068,-92.593178)"
+ style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#3771c8;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ id="text1003-5">
+ <path
+ d="m 102.58571,58.211269 h 0.86816 c 1.15755,0 3.11267,-0.177767 3.45994,-0.5333 0.35553,-0.355534 1.10026,-1.124479 1.10026,-2.306836 V -44.637502 c 0,-1.289844 0.18603,-2.228288 0.5581,-2.815332 0.37207,-0.587044 0.45004,-0.992187 1.36781,-1.215429 -0.91777,-0.206706 -0.99574,-0.603581 -1.36781,-1.190625 -0.37207,-0.587045 -0.5581,-1.529623 -0.5581,-2.827735 v -19.913761 c 0,-1.174088 -0.74473,-1.938899 -1.10026,-2.294433 -0.34727,-0.363802 -2.30239,-0.545703 -3.45994,-0.545703 h -0.86816 v -1.773536 h 0.78134 c 2.05879,0 4.63403,0.305924 5.32029,0.917774 0.69453,0.60358 1.0418,1.81901 1.0418,3.646289 v 19.814542 c 0,1.231966 0.22324,2.087728 0.66973,2.567285 0.44648,0.471289 1.25677,0.706934 2.43086,0.706934 h 0.76894 v 1.773535 h -0.76894 c -1.17409,0 -1.98438,0.239778 -2.43086,0.719336 -0.44649,0.479557 -0.66973,1.343587 -0.66973,2.59209 v 99.897013 c 0,1.827279 -0.34727,3.046842 -1.0418,3.658691 -0.68626,0.611849 -3.2615,0.917774 -5.32029,0.917774 h -0.78134 z"
+ style="font-size:25.39999962px;fill:#3771c8;stroke-width:0.26458332"
+ id="path1005-3"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cscsscccsscsccscsscsccscsscscc" />
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="-137.43764"
+ y="122.46283"
+ id="text1010-5"><tspan
+ sodipodi:role="line"
+ id="tspan1008-6"
+ x="-137.43764"
+ y="122.46283"
+ style="font-size:6.3499999px;text-align:center;text-anchor:middle;stroke-width:0.26458332">bit-depth</tspan><tspan
+ sodipodi:role="line"
+ x="-137.43764"
+ y="130.40033"
+ style="font-size:6.3499999px;text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ id="tspan1057">reduction/dither</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#3771c8;fill-opacity:1;stroke:none;stroke-width:0.26458332;"
+ x="21.087883"
+ y="155.64751"
+ id="text1064"
+ transform="rotate(90)"><tspan
+ sodipodi:role="line"
+ id="tspan1062"
+ x="21.087883"
+ y="155.64751"
+ style="font-size:9.87777805px;stroke-width:0.26458332;fill:#3771c8;">}</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="-142.71655"
+ y="25.939869"
+ id="text6980-9"><tspan
+ sodipodi:role="line"
+ id="tspan6978-1"
+ x="-142.71655"
+ y="25.939869"
+ style="font-size:4.58611107px;stroke-width:0.26458332">Notes</tspan></text>
+ </g>
+</svg>
diff --git a/Documentation/gpu/amdgpu/display/dcn-overview.rst b/Documentation/gpu/amdgpu/display/dcn-overview.rst
new file mode 100644
index 000000000000..f98624d7828e
--- /dev/null
+++ b/Documentation/gpu/amdgpu/display/dcn-overview.rst
@@ -0,0 +1,171 @@
+=======================
+Display Core Next (DCN)
+=======================
+
+To equip our readers with the basic knowledge of how AMD Display Core Next
+(DCN) works, we need to start with an overview of the hardware pipeline. Below
+you can see a picture that provides a DCN overview; keep in mind that this is
+a generic diagram and that there are variations per ASIC.
+
+.. kernel-figure:: dc_pipeline_overview.svg
+
+Based on this diagram, we can walk through each block and briefly describe
+it:
+
+* **Display Controller Hub (DCHUB)**: This is the gateway between the Scalable
+ Data Port (SDP) and DCN. This component has multiple features, such as memory
+ arbitration, rotation, and cursor manipulation.
+
+* **Display Pipe and Plane (DPP)**: This block provides pre-blend pixel
+ processing such as color space conversion, linearization of pixel data, tone
+ mapping, and gamut mapping.
+
+* **Multiple Pipe/Plane Combined (MPC)**: This component performs blending of
+ multiple planes, using global or per-pixel alpha.
+
+* **Output Pixel Processing (OPP)**: Processes and formats pixels to be sent
+ to the display.
+
+* **Output Pipe Timing Combiner (OPTC)**: It generates the timing output used
+ to combine streams or divide capabilities. CRC values are generated in this
+ block.
+
+* **Display Output (DIO)**: Encodes the output for the display connected to
+ our GPU.
+
+* **Display Writeback (DWB)**: It provides the ability to write the output of
+ the display pipe back to memory as video frames.
+
+* **Multi-Media HUB (MMHUBBUB)**: Memory controller interface for DMCUB and DWB
+ (Note that DWB is not hooked yet).
+
+* **DCN Management Unit (DMU)**: It provides registers with access control and
+ routes the display controller's interrupts to the SoC host interrupt unit.
+ This block includes the Display Micro-Controller Unit - version B (DMCUB),
+ which is handled via firmware.
+
+* **DCN Clock Generator Block (DCCG)**: It provides the clocks and resets
+ for all of the display controller clock domains.
+
+* **Azalia (AZ)**: Audio engine.
+
+The above diagram is a generalization of the DCN architecture, which means that
+every ASIC has variations around this base model. Notice that the display
+pipeline is connected to the Scalable Data Port (SDP) via DCHUB; you can see
+the SDP as the element from our Data Fabric that feeds the display pipe.
+
+Always approach the DCN architecture as something flexible that can be
+configured and reconfigured in multiple ways; in other words, each block can
+be set up or ignored according to userspace demands. For example, if we want
+to drive an 8k@60Hz display with DSC enabled, our DCN may require 4 DPP and 2
+OPP. It is DC's responsibility to drive the best configuration for each
+specific scenario. Orchestrating all of these components together requires a
+sophisticated communication interface, which is highlighted in the diagram by
+the edges that connect each block; from the chart, each connection between
+these blocks represents:
+
+1. Pixel data interface (red): Represents the pixel data flow;
+2. Global sync signals (green): A set of synchronization signals composed of
+ VStartup, VUpdate, and VReady;
+3. Config interface: Responsible for configuring blocks;
+4. Sideband signals: All other signals that do not fit the previous categories.
+
+These signals are essential and play an important role in DCN. Nevertheless,
+the Global Sync signals deserve an extra level of detail, described in a
+dedicated section below.
+
+All of these components are represented by a data structure named dc_state.
+From DCHUB to MPC, we have a representation called dc_plane; from MPC to OPTC,
+we have dc_stream, and the output (DIO) is handled by dc_link. Keep in mind
+that HUBP accesses a surface using a specific format read from memory, and our
+dc_plane should work to convert all pixels in the plane to something that can
+be sent to the display via dc_stream and dc_link.
+
+Front End and Back End
+----------------------
+
+The display pipeline can be broken down into two components that are usually
+referred to as **Front End (FE)** and **Back End (BE)**, where FE consists of:
+
+* DCHUB (Mainly referring to a subcomponent named HUBP)
+* DPP
+* MPC
+
+On the other hand, BE consists of:
+
+* OPP
+* OPTC
+* DIO (DP/HDMI stream encoder and link encoder)
+
+OPP and OPTC are the two joining blocks between FE and BE. On a side note,
+there is a one-to-one mapping of link encoders to PHYs, but we can configure
+the DCN to choose which link encoder to connect to which PHY. FE's main
+responsibility is to change, blend and compose pixel data, while BE's job is
+to frame a generic pixel stream into a specific display's pixel stream.
+
+Data Flow
+---------
+
+Initially, data is passed in from VRAM through the Data Fabric (DF) in native
+pixel formats. The data stays in that format until it reaches HUBP in DCHUB,
+where HUBP unpacks the different pixel formats and outputs them to DPP as
+uniform streams through 4 channels (1 for alpha + 3 for colors).
+
+The Converter and Cursor (CNVC) in DPP then normalizes the data representation
+and converts it to a DCN-specific floating-point format (i.e., different from
+the IEEE floating-point format). In the process, CNVC also applies a degamma
+function to transform the data from non-linear to linear space, which relaxes
+the floating-point calculations that follow. Data stays in this floating-point
+format from DPP to OPP.
+
+Starting at OPP, because color transformation and blending have been completed
+(i.e., alpha can be dropped), and the end sinks do not require the precision
+and dynamic range that floating point provides (i.e., all displays use an
+integer depth format), bit-depth reduction/dithering kicks in. In OPP, we also
+apply a regamma function to reintroduce the gamma that was removed earlier.
+Eventually, we output data in integer format at DIO.
+
+Global Sync
+-----------
+
+Many DCN registers are double buffered, most importantly the surface address.
+This allows us to update DCN hardware atomically for page flips, as well as
+for most other updates that don't require enabling or disabling of new pipes.
+
+(Note: There are many scenarios when DC will decide to reserve extra pipes
+in order to support outputs that need a very high pixel clock, or for
+power saving purposes.)
+
+These atomic register updates are driven by global sync signals in DCN. In
+order to understand how atomic updates interact with DCN hardware, and how DCN
+signals page flip and vblank events, it is helpful to understand how global sync
+is programmed.
+
+Global sync consists of three signals, VSTARTUP, VUPDATE, and VREADY. These are
+calculated by the Display Mode Library - DML (drivers/gpu/drm/amd/display/dc/dml)
+based on a large number of parameters and ensure our hardware is able to feed
+the DCN pipeline without underflows or hangs in any given system configuration.
+The global sync signals always happen during VBlank, are independent from the
+VSync signal, and do not overlap each other.
+
+VUPDATE is the only signal that is of interest to the rest of the driver stack
+or userspace clients as it signals the point at which hardware latches to
+atomically programmed (i.e. double buffered) registers. Even though it is
+independent of the VSync signal, we use VUPDATE to signal the VSync event as it
+provides the best indication of how atomic commits and hardware interact.
+
+Since DCN hardware is double-buffered the DC driver is able to program the
+hardware at any point during the frame.
+
+The below picture illustrates the global sync signals:
+
+.. kernel-figure:: global_sync_vblank.svg
+
+These signals affect core DCN behavior. Programming them incorrectly will lead
+to a number of negative consequences, most of them quite catastrophic.
+
+The following picture shows how global sync allows for a mailbox style of
+updates, i.e. it allows for multiple re-configurations between VUpdate
+events where only the last configuration programmed before the VUpdate signal
+becomes effective.
+
+.. kernel-figure:: config_example.svg
diff --git a/Documentation/gpu/amdgpu/display/display-manager.rst b/Documentation/gpu/amdgpu/display/display-manager.rst
new file mode 100644
index 000000000000..7ce31f89d9a0
--- /dev/null
+++ b/Documentation/gpu/amdgpu/display/display-manager.rst
@@ -0,0 +1,42 @@
+======================
+AMDgpu Display Manager
+======================
+
+.. contents:: Table of Contents
+ :depth: 3
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+ :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+ :internal:
+
+Lifecycle
+=========
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+ :doc: DM Lifecycle
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+ :functions: dm_hw_init dm_hw_fini
+
+Interrupts
+==========
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+ :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+ :functions: register_hpd_handlers dm_crtc_high_irq dm_pflip_high_irq
+
+Atomic Implementation
+=====================
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+ :doc: atomic
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+ :functions: amdgpu_dm_atomic_check amdgpu_dm_atomic_commit_tail
diff --git a/Documentation/gpu/amdgpu/display/global_sync_vblank.svg b/Documentation/gpu/amdgpu/display/global_sync_vblank.svg
new file mode 100644
index 000000000000..48f5dc4fd5d3
--- /dev/null
+++ b/Documentation/gpu/amdgpu/display/global_sync_vblank.svg
@@ -0,0 +1,485 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="232.24133mm"
+ height="96.174995mm"
+ viewBox="0 0 232.24133 96.174995"
+ version="1.1"
+ id="svg8"
+ inkscape:version="0.92.5 (2060ec1f9f, 2020-04-08)"
+ sodipodi:docname="global_sync_vblank.svg">
+ <defs
+ id="defs2">
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path862"
+ style="fill:#800080;fill-opacity:1;fill-rule:evenodd;stroke:#800080;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Send"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path868"
+ style="fill:#ff00ff;fill-opacity:1;fill-rule:evenodd;stroke:#ff00ff;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-0.3,0,0,-0.3,0.69,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path856"
+ style="fill:#ff00ff;fill-opacity:1;fill-rule:evenodd;stroke:#ff00ff;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker1719"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path1717"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#ff00ff;fill-opacity:1;fill-rule:evenodd;stroke:#ff00ff;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker1661"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path1659"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#ff00ff;fill-opacity:1;fill-rule:evenodd;stroke:#ff00ff;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker1311"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow1Lend"
+ inkscape:collect="always">
+ <path
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.00000003pt;stroke-opacity:1"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ id="path1309"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker1253"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow1Lstart">
+ <path
+ transform="matrix(0.8,0,0,0.8,10,0)"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.00000003pt;stroke-opacity:1"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ id="path1251"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend"
+ style="overflow:visible"
+ inkscape:isstock="true"
+ inkscape:collect="always">
+ <path
+ id="path838"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lstart"
+ style="overflow:visible"
+ inkscape:isstock="true"
+ inkscape:collect="always">
+ <path
+ id="path835"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(0.8,0,0,0.8,10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path850"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Sstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Sstart"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path865"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(0.3,0,0,0.3,-0.69,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-2"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path862-3"
+ style="fill:#800080;fill-opacity:1;fill-rule:evenodd;stroke:#800080;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-2-5"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path862-3-9"
+ style="fill:#800080;fill-opacity:1;fill-rule:evenodd;stroke:#800080;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="1.979899"
+ inkscape:cx="747.52324"
+ inkscape:cy="319.84503"
+ inkscape:document-units="mm"
+ inkscape:current-layer="layer1"
+ showgrid="true"
+ inkscape:window-width="3840"
+ inkscape:window-height="2096"
+ inkscape:window-x="0"
+ inkscape:window-y="27"
+ inkscape:window-maximized="1"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0">
+ <inkscape:grid
+ type="xygrid"
+ id="grid815"
+ originx="15.282997"
+ originy="-184.54792" />
+ </sodipodi:namedview>
+ <metadata
+ id="metadata5">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="Layer 1"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(15.282998,-16.277083)">
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 15.875,27.125001 V 16.541666 H 26.458333 V 27.125001 H 177.27084 V 16.541666 h 10.58333 v 10.583335 h 29.10416"
+ id="path817"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="ccccccccc" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 15.875,37.708334 H 44.979166 V 48.291667 H 100.54167 V 37.708334 H 206.375 v 10.583333 h 10.58333"
+ id="path819"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 15.875,66.8125 h 97.89583 V 56.229167 h 7.9375 V 66.8125 h 92.60417"
+ id="path821"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 15.875,85.333334 c 0,0 132.29166,0 132.29166,0 V 74.75 h 15.875 v 10.583334 h 47.625"
+ id="path823"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 15.875,101.20833 H 187.85416 V 90.625 h 10.58334 v 10.58333 h 10.58333"
+ id="path825"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.52916667, 0.52916667;stroke-dashoffset:0;stroke-opacity:1"
+ d="M 100.54167,48.291667 V 111.79167"
+ id="path827"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.52916667, 0.52916667;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 113.77083,66.8125 v 44.97917"
+ id="path829"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.52916667, 0.52916667;stroke-dashoffset:0;stroke-opacity:1"
+ d="M 206.375,48.291667 V 109.14583"
+ id="path831"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#ff0000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Lstart);marker-end:url(#Arrow1Lend)"
+ d="m 100.54167,106.5 h 13.22916"
+ id="path833"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#ff0000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#marker1253);marker-end:url(#marker1311)"
+ d="M 113.77083,106.5 H 206.375"
+ id="path1243"
+ inkscape:connector-curvature="0" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="105.83333"
+ y="111.79166"
+ id="text1405"><tspan
+ sodipodi:role="line"
+ id="tspan1403"
+ x="105.83333"
+ y="111.79166"
+ style="stroke-width:0.26458332">To</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="145.52083"
+ y="111.79166"
+ id="text1409"><tspan
+ sodipodi:role="line"
+ id="tspan1407"
+ x="145.52083"
+ y="111.79166"
+ style="stroke-width:0.26458332">VStartup Period</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="156.01123"
+ y="78.71875"
+ id="text1413"><tspan
+ sodipodi:role="line"
+ x="156.01123"
+ y="78.71875"
+ style="font-weight:bold;text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ id="tspan1415">VUpdate</tspan><tspan
+ sodipodi:role="line"
+ x="156.01123"
+ y="82.6875"
+ style="font-weight:bold;text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ id="tspan1440">Width</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="173.77611"
+ y="92.703873"
+ id="text1413-3"><tspan
+ sodipodi:role="line"
+ id="tspan1411-6"
+ x="173.77611"
+ y="92.703873"
+ style="font-weight:bold;text-align:center;text-anchor:middle;stroke-width:0.26458332">VReady</tspan><tspan
+ sodipodi:role="line"
+ x="173.77611"
+ y="96.672623"
+ style="font-weight:bold;text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ id="tspan1415-7">Offset</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="135.78951"
+ y="70.78125"
+ id="text1413-5"><tspan
+ sodipodi:role="line"
+ x="135.78951"
+ y="70.78125"
+ style="font-weight:bold;text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ id="tspan1440-5">VUpdate</tspan><tspan
+ sodipodi:role="line"
+ x="135.78951"
+ y="74.75"
+ style="font-weight:bold;text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ id="tspan1465">Offset</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="137.39433"
+ y="48.291664"
+ id="text1479"><tspan
+ sodipodi:role="line"
+ id="tspan1477"
+ x="137.39433"
+ y="48.291664"
+ style="font-weight:bold;stroke-width:0.26458332">VSTARTUP_START</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="-5.4806676"
+ y="22.778271"
+ id="text1479-1"><tspan
+ sodipodi:role="line"
+ id="tspan1477-2"
+ x="-5.4806676"
+ y="22.778271"
+ style="font-weight:bold;font-size:4.93888903px;stroke-width:0.26458332">VSYNC</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="-9.3767252"
+ y="45.64584"
+ id="text1479-1-7"><tspan
+ sodipodi:role="line"
+ id="tspan1477-2-0"
+ x="-9.3767252"
+ y="45.64584"
+ style="font-weight:bold;font-size:5.64444447px;stroke-width:0.26458332">VBlank</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="-15.310558"
+ y="64.92263"
+ id="text1479-1-7-9"><tspan
+ sodipodi:role="line"
+ id="tspan1477-2-0-3"
+ x="-15.310558"
+ y="64.92263"
+ style="font-weight:bold;font-size:5.64444447px;stroke-width:0.26458332">VStartup</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="-14.17781"
+ y="85.144356"
+ id="text1479-1-7-9-6"><tspan
+ sodipodi:role="line"
+ id="tspan1477-2-0-3-0"
+ x="-14.17781"
+ y="85.144356"
+ style="font-weight:bold;font-size:5.64444447px;stroke-width:0.26458332">VUpdate</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="-11.052421"
+ y="101.39733"
+ id="text1479-1-7-9-6-6"><tspan
+ sodipodi:role="line"
+ id="tspan1477-2-0-3-0-2"
+ x="-11.052421"
+ y="101.39733"
+ style="font-weight:bold;font-size:5.64444447px;stroke-width:0.26458332">VReady</tspan></text>
+ <g
+ id="g5189"
+ transform="translate(269.875,-14.287499)">
+ <path
+ sodipodi:nodetypes="cc"
+ inkscape:connector-curvature="0"
+ id="path5143"
+ d="m -202.40625,45.645828 3.96875,-7.9375"
+ style="fill:none;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ sodipodi:nodetypes="cc"
+ inkscape:connector-curvature="0"
+ id="path5143-2"
+ d="m -199.76042,45.645828 3.96874,-7.937499"
+ style="fill:none;stroke:#000000;stroke-width:0.52916676;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ </g>
+ <g
+ id="g5189-3"
+ transform="translate(268.55209,7.9375003)">
+ <path
+ sodipodi:nodetypes="cc"
+ inkscape:connector-curvature="0"
+ id="path5143-6"
+ d="m -202.40625,45.645828 3.96875,-7.9375"
+ style="fill:none;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ sodipodi:nodetypes="cc"
+ inkscape:connector-curvature="0"
+ id="path5143-2-1"
+ d="m -199.76042,45.645828 3.96874,-7.937499"
+ style="fill:none;stroke:#000000;stroke-width:0.52916676;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ </g>
+ </g>
+</svg>
diff --git a/Documentation/gpu/amdgpu/display/index.rst b/Documentation/gpu/amdgpu/display/index.rst
new file mode 100644
index 000000000000..c1fb2fb3c710
--- /dev/null
+++ b/Documentation/gpu/amdgpu/display/index.rst
@@ -0,0 +1,31 @@
+.. _amdgpu-display-core:
+
+===================================
+drm/amd/display - Display Core (DC)
+===================================
+
+The AMD display engine is partially shared with other operating systems; for
+this reason, our Display Core Driver is divided into two pieces:
+
+1. **Display Core (DC)** contains the OS-agnostic components. Things like
+ hardware programming and resource management are handled here.
+2. **Display Manager (DM)** contains the OS-dependent components. Hooks to the
+ amdgpu base driver and DRM are implemented here.
+
+The display pipe is responsible for "scanning out" a rendered frame from the
+GPU memory (also called VRAM, FrameBuffer, etc.) to a display. In other words,
+it would:
+
+1. Read frame information from memory;
+2. Perform required transformation;
+3. Send pixel data to sink devices.
+
+If you want to learn more about our driver details, take a look at the table
+of contents below:
+
+.. toctree::
+
+ display-manager.rst
+ dc-debug.rst
+ dcn-overview.rst
+ dc-glossary.rst
diff --git a/Documentation/gpu/amdgpu/driver-core.rst b/Documentation/gpu/amdgpu/driver-core.rst
new file mode 100644
index 000000000000..ebf5932845a9
--- /dev/null
+++ b/Documentation/gpu/amdgpu/driver-core.rst
@@ -0,0 +1,182 @@
+============================
+ Core Driver Infrastructure
+============================
+
+GPU Hardware Structure
+======================
+
+Each ASIC is a collection of hardware blocks. We refer to them as
+"IPs" (Intellectual Property blocks). Each IP encapsulates certain
+functionality. IPs are versioned and can also be mixed and matched.
+E.g., you might have two different ASICs that both have System DMA (SDMA) 5.x IPs.
+The driver is arranged by IPs. There are driver components to handle
+the initialization and operation of each IP. There are also a bunch
+of smaller IPs that don't really need much, if any, driver interaction.
+Those end up getting lumped into the common stuff in the soc files.
+The soc files (e.g., vi.c, soc15.c, nv.c) contain code for aspects of
+the SoC itself rather than specific IPs. E.g., things like GPU resets
+and register access functions are SoC dependent.
+
+An APU contains more than just the CPU and GPU; it also contains all of
+the platform stuff (audio, usb, gpio, etc.). Also, a lot of
+components are shared between the CPU, platform, and the GPU (e.g.,
+SMU, PSP, etc.). Specific components (CPU, GPU, etc.) usually have
+their own interfaces to interact with those common components. For things
+like S0i3 there is a ton of coordination required across all the
+components, but that is probably a bit beyond the scope of this
+section.
+
+With respect to the GPU, we have the following major IPs:
+
+GMC (Graphics Memory Controller)
+ This was a dedicated IP on older pre-vega chips, but has since
+ become somewhat decentralized on vega and newer chips. They now
+ have dedicated memory hubs for specific IPs or groups of IPs. We
+ still treat it as a single component in the driver, however, since
+ the programming model is still pretty similar. This is how the
+ different IPs on the GPU get access to memory (VRAM or system memory).
+ It also provides the support for per process GPU virtual address
+ spaces.
+
+IH (Interrupt Handler)
+ This is the interrupt controller on the GPU. All of the IPs feed
+ their interrupts into this IP and it aggregates them into a set of
+ ring buffers that the driver can parse to handle interrupts from
+ different IPs.
+
+PSP (Platform Security Processor)
+ This handles security policy for the SoC and executes trusted
+ applications, and validates and loads firmware for other blocks.
+
+SMU (System Management Unit)
+ This is the power management microcontroller. It manages the entire
+ SoC. The driver interacts with it to control power management
+ features like clocks, voltages, power rails, etc.
+
+DCN (Display Controller Next)
+ This is the display controller. It handles the display hardware.
+ It is described in more details in :ref:`Display Core <amdgpu-display-core>`.
+
+SDMA (System DMA)
+ This is a multi-purpose DMA engine. The kernel driver uses it for
+ various things including paging and GPU page table updates. It's also
+ exposed to userspace for use by user mode drivers (OpenGL, Vulkan,
+ etc.)
+
+GC (Graphics and Compute)
+ This is the graphics and compute engine, i.e., the block that
+ encompasses the 3D pipeline and shader blocks. This is by far the
+ largest block on the GPU. The 3D pipeline has tons of sub-blocks. In
+ addition to that, it also contains the CP microcontrollers (ME, PFP,
+ CE, MEC) and the RLC microcontroller. It's exposed to userspace for
+ user mode drivers (OpenGL, Vulkan, OpenCL, etc.)
+
+VCN (Video Core Next)
+ This is the multi-media engine. It handles video and image encode and
+ decode. It's exposed to userspace for user mode drivers (VA-API,
+ OpenMAX, etc.)
+
+Graphics and Compute Microcontrollers
+-------------------------------------
+
+CP (Command Processor)
+ The name for the hardware block that encompasses the front end of the
+ GFX/Compute pipeline. Consists mainly of a bunch of microcontrollers
+ (PFP, ME, CE, MEC). The firmware that runs on these microcontrollers
+ provides the driver interface to interact with the GFX/Compute engine.
+
+ MEC (MicroEngine Compute)
+ This is the microcontroller that controls the compute queues on the
+ GFX/compute engine.
+
+ MES (MicroEngine Scheduler)
+ This is a new engine for managing queues. This is currently unused.
+
+RLC (RunList Controller)
+ This is another microcontroller in the GFX/Compute engine. It handles
+ power management related functionality within the GFX/Compute engine.
+ The name is a vestige of old hardware where it was originally added
+ and doesn't really have much relation to what the engine does now.
+
+Driver Structure
+================
+
+In general, the driver has a list of all of the IPs on a particular
+SoC and for things like init/fini/suspend/resume, more or less just
+walks the list and handles each IP.
+
+Some useful constructs:
+
+KIQ (Kernel Interface Queue)
+ This is a control queue used by the kernel driver to manage other gfx
+ and compute queues on the GFX/compute engine. You can use it to
+ map/unmap additional queues, etc.
+
+IB (Indirect Buffer)
+ A command buffer for a particular engine. Rather than writing
+ commands directly to the queue, you can write the commands into a
+ piece of memory and then put a pointer to the memory into the queue.
+ The hardware will then follow the pointer and execute the commands in
+ the memory, then return to the rest of the commands in the ring.
+
+.. _amdgpu_memory_domains:
+
+Memory Domains
+==============
+
+.. kernel-doc:: include/uapi/drm/amdgpu_drm.h
+ :doc: memory domains
+
+Buffer Objects
+==============
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+ :doc: amdgpu_object
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+ :internal:
+
+PRIME Buffer Sharing
+====================
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+ :doc: PRIME Buffer Sharing
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+ :internal:
+
+MMU Notifier
+============
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+ :doc: MMU Notifier
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+ :internal:
+
+AMDGPU Virtual Memory
+=====================
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+ :doc: GPUVM
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+ :internal:
+
+Interrupt Handling
+==================
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+ :doc: Interrupt Handling
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+ :internal:
+
+IP Blocks
+=========
+
+.. kernel-doc:: drivers/gpu/drm/amd/include/amd_shared.h
+ :doc: IP Blocks
+
+.. kernel-doc:: drivers/gpu/drm/amd/include/amd_shared.h
+ :identifiers: amd_ip_block_type amd_ip_funcs
diff --git a/Documentation/gpu/amdgpu/driver-misc.rst b/Documentation/gpu/amdgpu/driver-misc.rst
new file mode 100644
index 000000000000..e3d6b2fa2493
--- /dev/null
+++ b/Documentation/gpu/amdgpu/driver-misc.rst
@@ -0,0 +1,112 @@
+================================
+ Misc AMDGPU driver information
+================================
+
+GPU Product Information
+=======================
+
+Information about the GPU can be obtained on certain cards via sysfs.
+
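+Where present, these attributes can simply be read from the device's sysfs
+directory; the card index below is illustrative and system dependent::
+
+ cat /sys/class/drm/card0/device/product_name
+ cat /sys/class/drm/card0/device/serial_number
+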
+product_name
+------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+ :doc: product_name
+
+product_number
+--------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+ :doc: product_number
+
+serial_number
+-------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+ :doc: serial_number
+
+unique_id
+---------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+ :doc: unique_id
+
+GPU Memory Usage Information
+============================
+
+Various memory accounting information can be accessed via sysfs.
+
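+For example (again, the card index is illustrative and system dependent)::
+
+ cat /sys/class/drm/card0/device/mem_info_vram_total
+ cat /sys/class/drm/card0/device/mem_info_vram_used
+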
+mem_info_vram_total
+-------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+ :doc: mem_info_vram_total
+
+mem_info_vram_used
+------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+ :doc: mem_info_vram_used
+
+mem_info_vis_vram_total
+-----------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+ :doc: mem_info_vis_vram_total
+
+mem_info_vis_vram_used
+----------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+ :doc: mem_info_vis_vram_used
+
+mem_info_gtt_total
+------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+ :doc: mem_info_gtt_total
+
+mem_info_gtt_used
+-----------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+ :doc: mem_info_gtt_used
+
+PCIe Accounting Information
+===========================
+
+pcie_bw
+-------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+ :doc: pcie_bw
+
+pcie_replay_count
+-----------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+ :doc: pcie_replay_count
+
+GPU SmartShift Information
+==========================
+
+GPU SmartShift information is exposed via sysfs.
+
+smartshift_apu_power
+--------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+ :doc: smartshift_apu_power
+
+smartshift_dgpu_power
+---------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+ :doc: smartshift_dgpu_power
+
+smartshift_bias
+---------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+ :doc: smartshift_bias
diff --git a/Documentation/gpu/amdgpu/index.rst b/Documentation/gpu/amdgpu/index.rst
new file mode 100644
index 000000000000..a24e1cfa7407
--- /dev/null
+++ b/Documentation/gpu/amdgpu/index.rst
@@ -0,0 +1,17 @@
+==========================
+ drm/amdgpu AMDgpu driver
+==========================
+
+The drm/amdgpu driver supports all AMD Radeon GPUs based on the Graphics Core
+Next (GCN) architecture.
+
+.. toctree::
+
+ module-parameters
+ driver-core
+ display/index
+ xgmi
+ ras
+ thermal
+ driver-misc
+ amdgpu-glossary
diff --git a/Documentation/gpu/amdgpu/module-parameters.rst b/Documentation/gpu/amdgpu/module-parameters.rst
new file mode 100644
index 000000000000..ea538c8dda35
--- /dev/null
+++ b/Documentation/gpu/amdgpu/module-parameters.rst
@@ -0,0 +1,7 @@
+===================
+ Module Parameters
+===================
+
+The amdgpu driver supports the following module parameters:
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
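+
+As with any kernel module, these parameters can be set on the kernel command
+line (prefixed with ``amdgpu.``) or when loading the module. For example,
+assuming the ``dc`` parameter documented above, Display Core support can be
+forced on with either of the following (illustrative only)::
+
+ amdgpu.dc=1
+ modprobe amdgpu dc=1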
diff --git a/Documentation/gpu/amdgpu/ras.rst b/Documentation/gpu/amdgpu/ras.rst
new file mode 100644
index 000000000000..047f76e395cf
--- /dev/null
+++ b/Documentation/gpu/amdgpu/ras.rst
@@ -0,0 +1,62 @@
+====================
+ AMDGPU RAS Support
+====================
+
+The AMDGPU RAS interfaces are exposed via sysfs (for informational queries) and
+debugfs (for error injection).
+
+RAS debugfs/sysfs Control and Error Injection Interfaces
+========================================================
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+ :doc: AMDGPU RAS debugfs control interface
+
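+As a rough illustration of the control interface described above (the exact
+syntax, supported blocks, and debugfs location are defined by the kernel-doc
+and may vary per ASIC), an unrecoverable error injection into the UMC block
+could look like this::
+
+ echo "enable umc ue" > /sys/kernel/debug/dri/0/ras/ras_ctrl
+ echo "inject umc ue 0x0 0x0 0x0" > /sys/kernel/debug/dri/0/ras/ras_ctrl
+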
+RAS Reboot Behavior for Unrecoverable Errors
+============================================
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+ :doc: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
+
+RAS Error Count sysfs Interface
+===============================
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+ :doc: AMDGPU RAS sysfs Error Count Interface
+
+RAS EEPROM debugfs Interface
+============================
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+ :doc: AMDGPU RAS debugfs EEPROM table reset interface
+
+RAS VRAM Bad Pages sysfs Interface
+==================================
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+ :doc: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
+
+Sample Code
+===========
+Sample code for testing error injection can be found here:
+https://cgit.freedesktop.org/mesa/drm/tree/tests/amdgpu/ras_tests.c
+
+This is part of the libdrm amdgpu unit tests which cover several areas of the GPU.
+There are four sets of tests:
+
+RAS Basic Test
+
+The test verifies the RAS feature's enabled status and makes sure the necessary
+sysfs and debugfs files are present.
+
+RAS Query Test
+
+This test checks the RAS availability and enablement status for each supported IP block as well as
+the error counts.
+
+RAS Inject Test
+
+This test injects errors for each IP.
+
+RAS Disable Test
+
+This test disables RAS features for each IP block.
diff --git a/Documentation/gpu/amdgpu/thermal.rst b/Documentation/gpu/amdgpu/thermal.rst
new file mode 100644
index 000000000000..8aeb0186c9ef
--- /dev/null
+++ b/Documentation/gpu/amdgpu/thermal.rst
@@ -0,0 +1,65 @@
+===========================================
+ GPU Power/Thermal Controls and Monitoring
+===========================================
+
+HWMON Interfaces
+================
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+ :doc: hwmon
+
+GPU sysfs Power State Interfaces
+================================
+
+GPU power controls are exposed via sysfs files.
+
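+As a quick illustration (the card index is illustrative and the available
+files vary by ASIC), the current performance level can be inspected and
+switched to manual control as follows::
+
+ cat /sys/class/drm/card0/device/power_dpm_force_performance_level
+ echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
+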
+power_dpm_state
+---------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+ :doc: power_dpm_state
+
+power_dpm_force_performance_level
+---------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+ :doc: power_dpm_force_performance_level
+
+pp_table
+--------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+ :doc: pp_table
+
+pp_od_clk_voltage
+-----------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+ :doc: pp_od_clk_voltage
+
+pp_dpm_*
+--------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+ :doc: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
+
+pp_power_profile_mode
+---------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+ :doc: pp_power_profile_mode
+
+\*_busy_percent
+---------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+ :doc: gpu_busy_percent
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+ :doc: mem_busy_percent
+
+gpu_metrics
+-----------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+ :doc: gpu_metrics
diff --git a/Documentation/gpu/amdgpu/xgmi.rst b/Documentation/gpu/amdgpu/xgmi.rst
new file mode 100644
index 000000000000..23f2856f4524
--- /dev/null
+++ b/Documentation/gpu/amdgpu/xgmi.rst
@@ -0,0 +1,5 @@
+=====================
+ AMDGPU XGMI Support
+=====================
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
diff --git a/Documentation/gpu/drivers.rst b/Documentation/gpu/drivers.rst
index b4a0ed3ca961..3a52f48215a3 100644
--- a/Documentation/gpu/drivers.rst
+++ b/Documentation/gpu/drivers.rst
@@ -4,8 +4,7 @@ GPU Driver Documentation
.. toctree::
- amdgpu
- amdgpu-dc
+ amdgpu/index
i915
mcde
meson
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index f504d363fef5..da138dd39883 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -646,6 +646,17 @@ See drivers/gpu/drm/amd/display/TODO for tasks.
Contact: Harry Wentland, Alex Deucher
+vmwgfx: Replace hashtable with Linux' implementation
+----------------------------------------------------
+
+The vmwgfx driver uses its own hashtable implementation. Replace the
+code with Linux' implementation and update the callers. It's mostly a
+refactoring task, but the interfaces are different.
+
+Contact: Zack Rusin, Thomas Zimmermann <tzimmermann@suse.de>
+
+Level: Intermediate
+
Bootsplash
==========
diff --git a/Documentation/i2c/smbus-protocol.rst b/Documentation/i2c/smbus-protocol.rst
index 9e07e6bbe6a3..00d8e17d0aca 100644
--- a/Documentation/i2c/smbus-protocol.rst
+++ b/Documentation/i2c/smbus-protocol.rst
@@ -36,6 +36,8 @@ Key to symbols
=============== =============================================================
S Start condition
+Sr Repeated start condition, used to switch from write to
+ read mode.
P Stop condition
Rd/Wr (1 bit) Read/Write bit. Rd equals 1, Wr equals 0.
A, NA (1 bit) Acknowledge (ACK) and Not Acknowledge (NACK) bit
@@ -100,7 +102,7 @@ Implemented by i2c_smbus_read_byte_data()
This reads a single byte from a device, from a designated register.
The register is specified through the Comm byte::
- S Addr Wr [A] Comm [A] S Addr Rd [A] [Data] NA P
+ S Addr Wr [A] Comm [A] Sr Addr Rd [A] [Data] NA P
Functionality flag: I2C_FUNC_SMBUS_READ_BYTE_DATA
@@ -114,7 +116,7 @@ This operation is very like Read Byte; again, data is read from a
device, from a designated register that is specified through the Comm
byte. But this time, the data is a complete word (16 bits)::
- S Addr Wr [A] Comm [A] S Addr Rd [A] [DataLow] A [DataHigh] NA P
+ S Addr Wr [A] Comm [A] Sr Addr Rd [A] [DataLow] A [DataHigh] NA P
Functionality flag: I2C_FUNC_SMBUS_READ_WORD_DATA
@@ -164,7 +166,7 @@ This command selects a device register (through the Comm byte), sends
16 bits of data to it, and reads 16 bits of data in return::
S Addr Wr [A] Comm [A] DataLow [A] DataHigh [A]
- S Addr Rd [A] [DataLow] A [DataHigh] NA P
+ Sr Addr Rd [A] [DataLow] A [DataHigh] NA P
Functionality flag: I2C_FUNC_SMBUS_PROC_CALL
@@ -181,7 +183,7 @@ of data is specified by the device in the Count byte.
::
S Addr Wr [A] Comm [A]
- S Addr Rd [A] [Count] A [Data] A [Data] A ... A [Data] NA P
+ Sr Addr Rd [A] [Count] A [Data] A [Data] A ... A [Data] NA P
Functionality flag: I2C_FUNC_SMBUS_READ_BLOCK_DATA
@@ -212,7 +214,7 @@ This command selects a device register (through the Comm byte), sends
1 to 31 bytes of data to it, and reads 1 to 31 bytes of data in return::
S Addr Wr [A] Comm [A] Count [A] Data [A] ...
- S Addr Rd [A] [Count] A [Data] ... A P
+ Sr Addr Rd [A] [Count] A [Data] ... A P
Functionality flag: I2C_FUNC_SMBUS_BLOCK_PROC_CALL
@@ -300,7 +302,7 @@ This command reads a block of bytes from a device, from a
designated register that is specified through the Comm byte::
S Addr Wr [A] Comm [A]
- S Addr Rd [A] [Data] A [Data] A ... A [Data] NA P
+ Sr Addr Rd [A] [Data] A [Data] A ... A [Data] NA P
Functionality flag: I2C_FUNC_SMBUS_READ_I2C_BLOCK
diff --git a/Documentation/i2c/summary.rst b/Documentation/i2c/summary.rst
index 136c4e333be7..786c618ba3be 100644
--- a/Documentation/i2c/summary.rst
+++ b/Documentation/i2c/summary.rst
@@ -11,9 +11,11 @@ systems. Some systems use variants that don't meet branding requirements,
and so are not advertised as being I2C but come under different names,
e.g. TWI (Two Wire Interface), IIC.
-The official I2C specification is the `"I2C-bus specification and user
-manual" (UM10204) <https://www.nxp.com/docs/en/user-guide/UM10204.pdf>`_
-published by NXP Semiconductors.
+The latest official I2C specification is the `"I2C-bus specification and user
+manual" (UM10204) <https://www.nxp.com/webapp/Download?colCode=UM10204>`_
+published by NXP Semiconductors. However, you need to log in to the site to
+access the PDF. An older version of the specification (revision 6) is archived
+`here <https://web.archive.org/web/20210813122132/https://www.nxp.com/docs/en/user-guide/UM10204.pdf>`_.
SMBus (System Management Bus) is based on the I2C protocol, and is mostly
a subset of I2C protocols and signaling. Many I2C devices will work on an
diff --git a/Documentation/locking/locktypes.rst b/Documentation/locking/locktypes.rst
index ddada4a53749..4fd7b70fcde1 100644
--- a/Documentation/locking/locktypes.rst
+++ b/Documentation/locking/locktypes.rst
@@ -439,11 +439,9 @@ preemption. The following substitution works on both kernels::
spin_lock(&p->lock);
p->count += this_cpu_read(var2);
-On a non-PREEMPT_RT kernel migrate_disable() maps to preempt_disable()
-which makes the above code fully equivalent. On a PREEMPT_RT kernel
migrate_disable() ensures that the task is pinned on the current CPU which
in turn guarantees that the per-CPU access to var1 and var2 are staying on
-the same CPU.
+the same CPU while the task remains preemptible.
The migrate_disable() substitution is not valid for the following
scenario::
@@ -456,9 +454,8 @@ scenario::
p = this_cpu_ptr(&var1);
p->val = func2();
-While correct on a non-PREEMPT_RT kernel, this breaks on PREEMPT_RT because
-here migrate_disable() does not protect against reentrancy from a
-preempting task. A correct substitution for this case is::
+This breaks because migrate_disable() does not protect against reentrancy from
+a preempting task. A correct substitution for this case is::
func()
{
diff --git a/Documentation/networking/bonding.rst b/Documentation/networking/bonding.rst
index 31cfd7d674a6..c0a789b00806 100644
--- a/Documentation/networking/bonding.rst
+++ b/Documentation/networking/bonding.rst
@@ -196,11 +196,12 @@ ad_actor_sys_prio
ad_actor_system
In an AD system, this specifies the mac-address for the actor in
- protocol packet exchanges (LACPDUs). The value cannot be NULL or
- multicast. It is preferred to have the local-admin bit set for this
- mac but driver does not enforce it. If the value is not given then
- system defaults to using the masters' mac address as actors' system
- address.
+ protocol packet exchanges (LACPDUs). The value cannot be a multicast
+ address. If the all-zeroes MAC is specified, bonding will internally
+ use the MAC of the bond itself. It is preferred to have the
+ local-admin bit set for this MAC, but the driver does not enforce it. If
+ the value is not given, then the system defaults to using the master's
+ MAC address as the actor's system address.
This parameter has effect only in 802.3ad mode and is available through
SysFs interface.
diff --git a/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/overview.rst b/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/overview.rst
index d638b5a8aadd..199647729251 100644
--- a/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/overview.rst
+++ b/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/overview.rst
@@ -183,6 +183,7 @@ PHY and allows physical transmission and reception of Ethernet frames.
IRQ config, enable, reset
DPNI (Datapath Network Interface)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Contains TX/RX queues, network interface configuration, and RX buffer pool
configuration mechanisms. The TX/RX queues are in memory and are identified
by queue number.
diff --git a/Documentation/networking/device_drivers/ethernet/intel/ixgbe.rst b/Documentation/networking/device_drivers/ethernet/intel/ixgbe.rst
index f1d5233e5e51..0a233b17c664 100644
--- a/Documentation/networking/device_drivers/ethernet/intel/ixgbe.rst
+++ b/Documentation/networking/device_drivers/ethernet/intel/ixgbe.rst
@@ -440,6 +440,22 @@ NOTE: For 82599-based network connections, if you are enabling jumbo frames in
a virtual function (VF), jumbo frames must first be enabled in the physical
function (PF). The VF MTU setting cannot be larger than the PF MTU.
+NBASE-T Support
+---------------
+The ixgbe driver supports NBASE-T on some devices. However, the advertisement
+of NBASE-T speeds is suppressed by default, to accommodate broken network
+switches which cannot cope with advertised NBASE-T speeds. Use the ethtool
+command to enable advertising NBASE-T speeds on devices which support it::
+
+ ethtool -s eth? advertise 0x1800000001028
+
+On Linux systems with INTERFACES(5), this can be specified as a pre-up command
+in /etc/network/interfaces so that the interface is always brought up with
+NBASE-T support, e.g.::
+
+ iface eth? inet dhcp
+ pre-up ethtool -s eth? advertise 0x1800000001028 || true
+
Generic Receive Offload, aka GRO
--------------------------------
The driver supports the in-kernel software implementation of GRO. GRO has
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index c04431144f7a..2572eecc3e86 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -25,7 +25,8 @@ ip_default_ttl - INTEGER
ip_no_pmtu_disc - INTEGER
Disable Path MTU Discovery. If enabled in mode 1 and a
fragmentation-required ICMP is received, the PMTU to this
- destination will be set to min_pmtu (see below). You will need
+ destination will be set to the smallest of the old MTU to
+ this destination and min_pmtu (see below). You will need
to raise min_pmtu to the smallest interface MTU on your system
manually if you want to avoid locally generated fragments.
@@ -49,7 +50,8 @@ ip_no_pmtu_disc - INTEGER
Default: FALSE
min_pmtu - INTEGER
- default 552 - minimum discovered Path MTU
+ default 552 - minimum Path MTU. Unless this is changed manually,
+ each cached pmtu will never be lower than this setting.
ip_forward_use_pmtu - BOOLEAN
By default we don't trust protocol path MTUs while forwarding
diff --git a/Documentation/networking/ipvs-sysctl.rst b/Documentation/networking/ipvs-sysctl.rst
index 95ef56d62077..387fda80f05f 100644
--- a/Documentation/networking/ipvs-sysctl.rst
+++ b/Documentation/networking/ipvs-sysctl.rst
@@ -37,8 +37,7 @@ conn_reuse_mode - INTEGER
0: disable any special handling on port reuse. The new
connection will be delivered to the same real server that was
- servicing the previous connection. This will effectively
- disable expire_nodest_conn.
+ servicing the previous connection.
bit 1: enable rescheduling of new connections when it is safe.
That is, whenever expire_nodest_conn and for TCP sockets, when
diff --git a/Documentation/networking/timestamping.rst b/Documentation/networking/timestamping.rst
index a722eb30e014..f5809206eb93 100644
--- a/Documentation/networking/timestamping.rst
+++ b/Documentation/networking/timestamping.rst
@@ -486,8 +486,8 @@ of packets.
Drivers are free to use a more permissive configuration than the requested
configuration. It is expected that drivers should only implement directly the
most generic mode that can be supported. For example if the hardware can
-support HWTSTAMP_FILTER_V2_EVENT, then it should generally always upscale
-HWTSTAMP_FILTER_V2_L2_SYNC_MESSAGE, and so forth, as HWTSTAMP_FILTER_V2_EVENT
+support HWTSTAMP_FILTER_PTP_V2_EVENT, then it should generally always upscale
+HWTSTAMP_FILTER_PTP_V2_L2_SYNC, and so forth, as HWTSTAMP_FILTER_PTP_V2_EVENT
is more generic (and more useful to applications).
A driver which supports hardware time stamping shall update the struct
@@ -582,8 +582,8 @@ Time stamps for outgoing packets are to be generated as follows:
and hardware timestamping is not possible (SKBTX_IN_PROGRESS not set).
- As soon as the driver has sent the packet and/or obtained a
hardware time stamp for it, it passes the time stamp back by
- calling skb_hwtstamp_tx() with the original skb, the raw
- hardware time stamp. skb_hwtstamp_tx() clones the original skb and
+ calling skb_tstamp_tx() with the original skb, the raw
+ hardware time stamp. skb_tstamp_tx() clones the original skb and
adds the timestamps, therefore the original skb has to be freed now.
If obtaining the hardware time stamp somehow fails, then the driver
should not fall back to software time stamping. The rationale is that
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index b398b8576417..cf908d79666e 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -35,6 +35,7 @@ GNU make 3.81 make --version
binutils 2.23 ld -v
flex 2.5.35 flex --version
bison 2.0 bison --version
+pahole 1.16 pahole --version
util-linux 2.10o fdformat --version
kmod 13 depmod -V
e2fsprogs 1.41.4 e2fsck -V
@@ -108,6 +109,16 @@ Bison
Since Linux 4.16, the build system generates parsers
during build. This requires bison 2.0 or later.
+pahole:
+-------
+
+Since Linux 5.2, if CONFIG_DEBUG_INFO_BTF is selected, the build system
+generates BTF (BPF Type Format) from DWARF in vmlinux, and later also from
+kernel modules. This requires pahole v1.16 or later.
+
+It is found in the 'dwarves' or 'pahole' distro packages or from
+https://fedorapeople.org/~acme/dwarves/.
+
Perl
----
diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst
index da085d63af9b..6b3aaed66fba 100644
--- a/Documentation/process/submitting-patches.rst
+++ b/Documentation/process/submitting-patches.rst
@@ -14,7 +14,8 @@ works, see Documentation/process/development-process.rst. Also, read
Documentation/process/submit-checklist.rst
for a list of items to check before submitting code. If you are submitting
a driver, also read Documentation/process/submitting-drivers.rst; for device
-tree binding patches, read Documentation/process/submitting-patches.rst.
+tree binding patches, read
+Documentation/devicetree/bindings/submitting-patches.rst.
This documentation assumes that you're using ``git`` to prepare your patches.
If you're unfamiliar with ``git``, you would be well-advised to learn how to
diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst
index 0ea967d34583..d25335993e55 100644
--- a/Documentation/sound/hd-audio/models.rst
+++ b/Documentation/sound/hd-audio/models.rst
@@ -326,6 +326,8 @@ usi-headset
Headset support on USI machines
dual-codecs
Lenovo laptops with dual codecs
+alc285-hp-amp-init
+ HP laptops which require speaker amplifier initialization (ALC285)
ALC680
======
diff --git a/MAINTAINERS b/MAINTAINERS
index 53b859d10de6..71cc6a55903e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -966,6 +966,7 @@ F: drivers/gpu/drm/amd/include/kgd_kfd_interface.h
F: drivers/gpu/drm/amd/include/v9_structs.h
F: drivers/gpu/drm/amd/include/vi_structs.h
F: include/uapi/linux/kfd_ioctl.h
+F: include/uapi/linux/kfd_sysfs.h
AMD SPI DRIVER
M: Sanjay R Mehta <sanju.mehta@amd.com>
@@ -2263,6 +2264,15 @@ L: linux-iio@vger.kernel.org
S: Maintained
F: drivers/counter/microchip-tcb-capture.c
+ARM/MILBEAUT ARCHITECTURE
+M: Taichi Sugaya <sugaya.taichi@socionext.com>
+M: Takao Orito <orito.takao@socionext.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm/boot/dts/milbeaut*
+F: arch/arm/mach-milbeaut/
+N: milbeaut
+
ARM/MIOA701 MACHINE SUPPORT
M: Robert Jarzmik <robert.jarzmik@free.fr>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2729,10 +2739,11 @@ S: Maintained
F: drivers/memory/*emif*
ARM/TEXAS INSTRUMENT KEYSTONE ARCHITECTURE
+M: Nishanth Menon <nm@ti.com>
M: Santosh Shilimkar <ssantosh@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux.git
F: arch/arm/boot/dts/keystone-*
F: arch/arm/mach-keystone/
@@ -3056,7 +3067,7 @@ F: Documentation/devicetree/bindings/phy/phy-ath79-usb.txt
F: drivers/phy/qualcomm/phy-ath79-usb.c
ATHEROS ATH GENERIC UTILITIES
-M: Kalle Valo <kvalo@codeaurora.org>
+M: Kalle Valo <kvalo@kernel.org>
L: linux-wireless@vger.kernel.org
S: Supported
F: drivers/net/wireless/ath/*
@@ -3071,7 +3082,7 @@ W: https://wireless.wiki.kernel.org/en/users/Drivers/ath5k
F: drivers/net/wireless/ath/ath5k/
ATHEROS ATH6KL WIRELESS DRIVER
-M: Kalle Valo <kvalo@codeaurora.org>
+M: Kalle Valo <kvalo@kernel.org>
L: linux-wireless@vger.kernel.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath6kl
@@ -3570,13 +3581,14 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/b44.*
-BROADCOM B53 ETHERNET SWITCH DRIVER
+BROADCOM B53/SF2 ETHERNET SWITCH DRIVER
M: Florian Fainelli <f.fainelli@gmail.com>
L: netdev@vger.kernel.org
L: openwrt-devel@lists.openwrt.org (subscribers-only)
S: Supported
F: Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml
F: drivers/net/dsa/b53/*
+F: drivers/net/dsa/bcm_sf2*
F: include/linux/dsa/brcm.h
F: include/linux/platform_data/b53.h
@@ -6038,6 +6050,7 @@ F: drivers/gpu/drm/tiny/mi0283qt.c
DRM DRIVER FOR MSM ADRENO GPU
M: Rob Clark <robdclark@gmail.com>
M: Sean Paul <sean@poorly.run>
+R: Abhinav Kumar <quic_abhinavk@quicinc.com>
L: linux-arm-msm@vger.kernel.org
L: dri-devel@lists.freedesktop.org
L: freedreno@lists.freedesktop.org
@@ -6413,6 +6426,7 @@ L: dri-devel@lists.freedesktop.org
L: linux-renesas-soc@vger.kernel.org
S: Supported
T: git git://linuxtv.org/pinchartl/media drm/du/next
+F: Documentation/devicetree/bindings/display/bridge/renesas,dsi-csi2-tx.yaml
F: Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.yaml
F: Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml
F: Documentation/devicetree/bindings/display/renesas,du.yaml
@@ -9333,7 +9347,6 @@ S: Maintained
F: drivers/iio/pressure/dps310.c
INFINIBAND SUBSYSTEM
-M: Doug Ledford <dledford@redhat.com>
M: Jason Gunthorpe <jgg@nvidia.com>
L: linux-rdma@vger.kernel.org
S: Supported
@@ -9494,6 +9507,7 @@ INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
M: Jani Nikula <jani.nikula@linux.intel.com>
M: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
M: Rodrigo Vivi <rodrigo.vivi@intel.com>
+M: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
L: intel-gfx@lists.freedesktop.org
S: Supported
W: https://01.org/linuxgraphics/
@@ -10284,9 +10298,9 @@ F: lib/Kconfig.kcsan
F: scripts/Makefile.kcsan
KDUMP
-M: Dave Young <dyoung@redhat.com>
M: Baoquan He <bhe@redhat.com>
R: Vivek Goyal <vgoyal@redhat.com>
+R: Dave Young <dyoung@redhat.com>
L: kexec@lists.infradead.org
S: Maintained
W: http://lse.sourceforge.net/kdump/
@@ -12184,8 +12198,8 @@ F: drivers/net/ethernet/mellanox/mlx5/core/fpga/*
F: include/linux/mlx5/mlx5_ifc_fpga.h
MELLANOX ETHERNET SWITCH DRIVERS
-M: Jiri Pirko <jiri@nvidia.com>
M: Ido Schimmel <idosch@nvidia.com>
+M: Petr Machata <petrm@nvidia.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com
@@ -13253,7 +13267,7 @@ F: include/uapi/linux/if_*
F: include/uapi/linux/netdevice.h
NETWORKING DRIVERS (WIRELESS)
-M: Kalle Valo <kvalo@codeaurora.org>
+M: Kalle Valo <kvalo@kernel.org>
L: linux-wireless@vger.kernel.org
S: Maintained
Q: http://patchwork.kernel.org/project/linux-wireless/list/
@@ -14850,7 +14864,7 @@ PCIE DRIVER FOR MEDIATEK
M: Ryder Lee <ryder.lee@mediatek.com>
M: Jianjun Wang <jianjun.wang@mediatek.com>
L: linux-pci@vger.kernel.org
-L: linux-mediatek@lists.infradead.org
+L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: Documentation/devicetree/bindings/pci/mediatek*
F: drivers/pci/controller/*mediatek*
@@ -15709,7 +15723,7 @@ T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/tuners/qt1010*
QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
-M: Kalle Valo <kvalo@codeaurora.org>
+M: Kalle Valo <kvalo@kernel.org>
L: ath10k@lists.infradead.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath10k
@@ -15717,7 +15731,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
F: drivers/net/wireless/ath/ath10k/
QUALCOMM ATHEROS ATH11K WIRELESS DRIVER
-M: Kalle Valo <kvalo@codeaurora.org>
+M: Kalle Valo <kvalo@kernel.org>
L: ath11k@lists.infradead.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
@@ -15775,6 +15789,15 @@ S: Maintained
F: Documentation/devicetree/bindings/net/qcom,ethqos.txt
F: drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+QUALCOMM FASTRPC DRIVER
+M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+M: Amol Maheshwari <amahesh@qti.qualcomm.com>
+L: linux-arm-msm@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/misc/qcom,fastrpc.txt
+F: drivers/misc/fastrpc.c
+F: include/uapi/misc/fastrpc.h
+
QUALCOMM GENERIC INTERFACE I2C DRIVER
M: Akash Asthana <akashast@codeaurora.org>
M: Mukesh Savaliya <msavaliy@codeaurora.org>
@@ -15881,7 +15904,7 @@ F: Documentation/devicetree/bindings/media/*venus*
F: drivers/media/platform/qcom/venus/
QUALCOMM WCN36XX WIRELESS DRIVER
-M: Kalle Valo <kvalo@codeaurora.org>
+M: Kalle Valo <kvalo@kernel.org>
L: wcn36xx@lists.infradead.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/wcn36xx
@@ -15983,6 +16006,7 @@ F: arch/mips/generic/board-ranchu.c
RANDOM NUMBER DRIVER
M: "Theodore Ts'o" <tytso@mit.edu>
+M: Jason A. Donenfeld <Jason@zx2c4.com>
S: Maintained
F: drivers/char/random.c
@@ -16505,6 +16529,12 @@ T: git git://linuxtv.org/media_tree.git
F: Documentation/devicetree/bindings/media/allwinner,sun8i-a83t-de2-rotate.yaml
F: drivers/media/platform/sunxi/sun8i-rotate/
+RPMSG TTY DRIVER
+M: Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
+L: linux-remoteproc@vger.kernel.org
+S: Maintained
+F: drivers/tty/rpmsg_tty.c
+
RTL2830 MEDIA DRIVER
M: Antti Palosaari <crope@iki.fi>
L: linux-media@vger.kernel.org
@@ -16626,8 +16656,8 @@ W: http://www.ibm.com/developerworks/linux/linux390/
F: drivers/iommu/s390-iommu.c
S390 IUCV NETWORK LAYER
-M: Julian Wiedmann <jwi@linux.ibm.com>
-M: Karsten Graul <kgraul@linux.ibm.com>
+M: Alexandra Winter <wintera@linux.ibm.com>
+M: Wenjia Zhang <wenjia@linux.ibm.com>
L: linux-s390@vger.kernel.org
L: netdev@vger.kernel.org
S: Supported
@@ -16637,8 +16667,8 @@ F: include/net/iucv/
F: net/iucv/
S390 NETWORK DRIVERS
-M: Julian Wiedmann <jwi@linux.ibm.com>
-M: Karsten Graul <kgraul@linux.ibm.com>
+M: Alexandra Winter <wintera@linux.ibm.com>
+M: Wenjia Zhang <wenjia@linux.ibm.com>
L: linux-s390@vger.kernel.org
L: netdev@vger.kernel.org
S: Supported
@@ -17412,7 +17442,7 @@ F: drivers/video/fbdev/sm712*
SILVACO I3C DUAL-ROLE MASTER
M: Miquel Raynal <miquel.raynal@bootlin.com>
M: Conor Culhane <conor.culhane@silvaco.com>
-L: linux-i3c@lists.infradead.org
+L: linux-i3c@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/i3c/silvaco,i3c-master.yaml
F: drivers/i3c/master/svc-i3c-master.c
@@ -18498,6 +18528,7 @@ F: include/uapi/linux/pkt_sched.h
F: include/uapi/linux/tc_act/
F: include/uapi/linux/tc_ematch/
F: net/sched/
+F: tools/testing/selftests/tc-testing
TC90522 MEDIA DRIVER
M: Akihiro Tsukada <tskd08@gmail.com>
@@ -19046,11 +19077,12 @@ F: drivers/mmc/host/tifm_sd.c
F: include/linux/tifm.h
TI KEYSTONE MULTICORE NAVIGATOR DRIVERS
+M: Nishanth Menon <nm@ti.com>
M: Santosh Shilimkar <ssantosh@kernel.org>
L: linux-kernel@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux.git
F: drivers/soc/ti/*
TI LM49xxx FAMILY ASoC CODEC DRIVERS
@@ -21048,7 +21080,7 @@ S: Maintained
F: arch/x86/kernel/cpu/zhaoxin.c
ZONEFS FILESYSTEM
-M: Damien Le Moal <damien.lemoal@wdc.com>
+M: Damien Le Moal <damien.lemoal@opensource.wdc.com>
M: Naohiro Aota <naohiro.aota@wdc.com>
R: Johannes Thumshirn <jth@kernel.org>
L: linux-fsdevel@vger.kernel.org
diff --git a/Makefile b/Makefile
index daf95a574b08..16d7f83ac368 100644
--- a/Makefile
+++ b/Makefile
@@ -2,8 +2,8 @@
VERSION = 5
PATCHLEVEL = 16
SUBLEVEL = 0
-EXTRAVERSION = -rc2
-NAME = Trick or Treat
+EXTRAVERSION = -rc8
+NAME = Gobble Gobble
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
@@ -789,7 +789,7 @@ stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG) := -fstack-protector-strong
KBUILD_CFLAGS += $(stackp-flags-y)
KBUILD_CFLAGS-$(CONFIG_WERROR) += -Werror
-KBUILD_CFLAGS += $(KBUILD_CFLAGS-y) $(CONFIG_CC_IMPLICIT_FALLTHROUGH)
+KBUILD_CFLAGS += $(KBUILD_CFLAGS-y) $(CONFIG_CC_IMPLICIT_FALLTHROUGH:"%"=%)
ifdef CONFIG_CC_IS_CLANG
KBUILD_CPPFLAGS += -Qunused-arguments
@@ -1374,17 +1374,17 @@ endif
ifneq ($(dtstree),)
-%.dtb: dt_binding_check include/config/kernel.release scripts_dtc
- $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@ $(dtstree)/$*.dt.yaml
+%.dtb: include/config/kernel.release scripts_dtc
+ $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@
-%.dtbo: dt_binding_check include/config/kernel.release scripts_dtc
- $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@ $(dtstree)/$*.dt.yaml
+%.dtbo: include/config/kernel.release scripts_dtc
+ $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@
PHONY += dtbs dtbs_install dtbs_check
dtbs: include/config/kernel.release scripts_dtc
$(Q)$(MAKE) $(build)=$(dtstree)
-ifneq ($(filter dtbs_check %.dtb %.dtbo, $(MAKECMDGOALS)),)
+ifneq ($(filter dtbs_check, $(MAKECMDGOALS)),)
export CHECK_DTBS=y
dtbs: dt_binding_check
endif
diff --git a/arch/Kconfig b/arch/Kconfig
index 26b8ed11639d..d3c4ab249e9c 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -991,6 +991,16 @@ config HAVE_ARCH_COMPAT_MMAP_BASES
and vice-versa 32-bit applications to call 64-bit mmap().
Required for applications doing different bitness syscalls.
+config PAGE_SIZE_LESS_THAN_64KB
+ def_bool y
+ depends on !ARM64_64K_PAGES
+ depends on !IA64_PAGE_SIZE_64KB
+ depends on !PAGE_SIZE_64KB
+ depends on !PARISC_PAGE_SIZE_64KB
+ depends on !PPC_64K_PAGES
+ depends on !PPC_256K_PAGES
+ depends on !PAGE_SIZE_256KB
+
# This allows to use a set of generic functions to determine mmap base
# address by giving priority to top-down scheme only if the process
# is not in legacy mode (compat task, unlimited stack size or
diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl
index e4a041cd5715..ca5a32228cd6 100644
--- a/arch/alpha/kernel/syscalls/syscall.tbl
+++ b/arch/alpha/kernel/syscalls/syscall.tbl
@@ -488,3 +488,4 @@
556 common landlock_restrict_self sys_landlock_restrict_self
# 557 reserved for memfd_secret
558 common process_mrelease sys_process_mrelease
+559 common futex_waitv sys_futex_waitv
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
index e8c2c7469e10..e201b4b1655a 100644
--- a/arch/arc/include/asm/cacheflush.h
+++ b/arch/arc/include/asm/cacheflush.h
@@ -36,7 +36,6 @@ void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
void dma_cache_wback_inv(phys_addr_t start, unsigned long sz);
void dma_cache_inv(phys_addr_t start, unsigned long sz);
diff --git a/arch/arm/boot/dts/bcm2711.dtsi b/arch/arm/boot/dts/bcm2711.dtsi
index 3b60297af7f6..9e01dbca4a01 100644
--- a/arch/arm/boot/dts/bcm2711.dtsi
+++ b/arch/arm/boot/dts/bcm2711.dtsi
@@ -506,11 +506,17 @@
#address-cells = <3>;
#interrupt-cells = <1>;
#size-cells = <2>;
- interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "pcie", "msi";
interrupt-map-mask = <0x0 0x0 0x0 0x7>;
interrupt-map = <0 0 0 1 &gicv2 GIC_SPI 143
+ IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &gicv2 GIC_SPI 144
+ IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &gicv2 GIC_SPI 145
+ IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &gicv2 GIC_SPI 146
IRQ_TYPE_LEVEL_HIGH>;
msi-controller;
msi-parent = <&pcie0>;
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index d4f355015e3c..f69d2af3c1fa 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -242,6 +242,8 @@
gpio-controller;
#gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
};
pcie0: pcie@12000 {
@@ -408,7 +410,7 @@
i2c0: i2c@18009000 {
compatible = "brcm,iproc-i2c";
reg = <0x18009000 0x50>;
- interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
clock-frequency = <100000>;
diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
index b62a0dbb033f..ec6fba5ee8fd 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
@@ -309,6 +309,7 @@
ethphy: ethernet-phy@1 {
reg = <1>;
+ qca,clk-out-frequency = <125000000>;
};
};
};
diff --git a/arch/arm/boot/dts/imx6qp-prtwd3.dts b/arch/arm/boot/dts/imx6qp-prtwd3.dts
index 7648e8a02000..cf6571cc4682 100644
--- a/arch/arm/boot/dts/imx6qp-prtwd3.dts
+++ b/arch/arm/boot/dts/imx6qp-prtwd3.dts
@@ -178,6 +178,8 @@
label = "cpu";
ethernet = <&fec>;
phy-mode = "rgmii-id";
+ rx-internal-delay-ps = <2000>;
+ tx-internal-delay-ps = <2000>;
fixed-link {
speed = <100>;
diff --git a/arch/arm/boot/dts/imx6ull-pinfunc.h b/arch/arm/boot/dts/imx6ull-pinfunc.h
index eb025a9d4759..7328d4ef8559 100644
--- a/arch/arm/boot/dts/imx6ull-pinfunc.h
+++ b/arch/arm/boot/dts/imx6ull-pinfunc.h
@@ -82,6 +82,6 @@
#define MX6ULL_PAD_CSI_DATA04__ESAI_TX_FS 0x01F4 0x0480 0x0000 0x9 0x0
#define MX6ULL_PAD_CSI_DATA05__ESAI_TX_CLK 0x01F8 0x0484 0x0000 0x9 0x0
#define MX6ULL_PAD_CSI_DATA06__ESAI_TX5_RX0 0x01FC 0x0488 0x0000 0x9 0x0
-#define MX6ULL_PAD_CSI_DATA07__ESAI_T0 0x0200 0x048C 0x0000 0x9 0x0
+#define MX6ULL_PAD_CSI_DATA07__ESAI_TX0 0x0200 0x048C 0x0000 0x9 0x0
#endif /* __DTS_IMX6ULL_PINFUNC_H */
diff --git a/arch/arm/boot/dts/ls1021a-tsn.dts b/arch/arm/boot/dts/ls1021a-tsn.dts
index ff0ffb22768b..1ea32fff4120 100644
--- a/arch/arm/boot/dts/ls1021a-tsn.dts
+++ b/arch/arm/boot/dts/ls1021a-tsn.dts
@@ -91,6 +91,8 @@
/* Internal port connected to eth2 */
ethernet = <&enet2>;
phy-mode = "rgmii";
+ rx-internal-delay-ps = <0>;
+ tx-internal-delay-ps = <0>;
reg = <4>;
fixed-link {
diff --git a/arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts b/arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts
index 2b645642b935..2a745522404d 100644
--- a/arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts
+++ b/arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts
@@ -12,7 +12,7 @@
flash0: n25q00@0 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "n25q00aa";
+ compatible = "micron,mt25qu02g", "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <100000000>;
diff --git a/arch/arm/boot/dts/socfpga_arria5_socdk.dts b/arch/arm/boot/dts/socfpga_arria5_socdk.dts
index 90e676e7019f..1b02d46496a8 100644
--- a/arch/arm/boot/dts/socfpga_arria5_socdk.dts
+++ b/arch/arm/boot/dts/socfpga_arria5_socdk.dts
@@ -119,7 +119,7 @@
flash: flash@0 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "n25q256a";
+ compatible = "micron,n25q256a", "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <100000000>;
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts b/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
index 6f138b2b2616..51bb436784e2 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
@@ -124,7 +124,7 @@
flash0: n25q00@0 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "n25q00";
+ compatible = "micron,mt25qu02g", "jedec,spi-nor";
reg = <0>; /* chip select */
spi-max-frequency = <100000000>;
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
index c155ff02eb6e..cae9ddd5ed38 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
@@ -169,7 +169,7 @@
flash: flash@0 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "n25q00";
+ compatible = "micron,mt25qu02g", "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <100000000>;
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts b/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts
index 8d5d3996f6f2..ca18b959e655 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts
@@ -80,7 +80,7 @@
flash: flash@0 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "n25q256a";
+ compatible = "micron,n25q256a", "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <100000000>;
m25p,fast-read;
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts b/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts
index 99a71757cdf4..3f7aa7bf0863 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts
@@ -116,7 +116,7 @@
flash0: n25q512a@0 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "n25q512a";
+ compatible = "micron,n25q512a", "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <100000000>;
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
index a060718758b6..25874e1b9c82 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
@@ -224,7 +224,7 @@
n25q128@0 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "n25q128";
+ compatible = "micron,n25q128", "jedec,spi-nor";
reg = <0>; /* chip select */
spi-max-frequency = <100000000>;
m25p,fast-read;
@@ -241,7 +241,7 @@
n25q00@1 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "n25q00";
+ compatible = "micron,mt25qu02g", "jedec,spi-nor";
reg = <1>; /* chip select */
spi-max-frequency = <100000000>;
m25p,fast-read;
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index e68fb879e4f9..5e56288e343b 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -290,7 +290,6 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
*/
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
-void flush_dcache_folio(struct folio *folio);
#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
static inline void flush_kernel_vmap_range(void *addr, int size)
diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h
index a6f3b179e8a9..27218eabbf9a 100644
--- a/arch/arm/include/asm/efi.h
+++ b/arch/arm/include/asm/efi.h
@@ -17,7 +17,6 @@
#ifdef CONFIG_EFI
void efi_init(void);
-extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index deff286eb5ea..5cd057859fe9 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -596,11 +596,9 @@ call_fpe:
tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2
reteq lr
and r8, r0, #0x00000f00 @ mask out CP number
- THUMB( lsr r8, r8, #8 )
mov r7, #1
- add r6, r10, #TI_USED_CP
- ARM( strb r7, [r6, r8, lsr #8] ) @ set appropriate used_cp[]
- THUMB( strb r7, [r6, r8] ) @ set appropriate used_cp[]
+ add r6, r10, r8, lsr #8 @ add used_cp[] array offset first
+ strb r7, [r6, #TI_USED_CP] @ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
@ Test if we need to give access to iWMMXt coprocessors
ldr r5, [r10, #TI_FLAGS]
@@ -609,7 +607,7 @@ call_fpe:
bcs iwmmxt_task_enable
#endif
ARM( add pc, pc, r8, lsr #6 )
- THUMB( lsl r8, r8, #2 )
+ THUMB( lsr r8, r8, #6 )
THUMB( add pc, r8 )
nop
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index fadfee9e2b45..950bef83339f 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -114,6 +114,7 @@ ENTRY(secondary_startup)
add r12, r12, r10
ret r12
1: bl __after_proc_init
+ ldr r7, __secondary_data @ reload r7
ldr sp, [r7, #12] @ set up the stack pointer
ldr r0, [r7, #16] @ set up task pointer
mov fp, #0
diff --git a/arch/arm/mach-rockchip/platsmp.c b/arch/arm/mach-rockchip/platsmp.c
index d60856898d97..5ec58d004b7d 100644
--- a/arch/arm/mach-rockchip/platsmp.c
+++ b/arch/arm/mach-rockchip/platsmp.c
@@ -189,7 +189,7 @@ static int __init rockchip_smp_prepare_sram(struct device_node *node)
rockchip_boot_fn = __pa_symbol(secondary_startup);
/* copy the trampoline to sram, that runs during startup of the core */
- memcpy(sram_base_addr, &rockchip_secondary_trampoline, trampoline_sz);
+ memcpy_toio(sram_base_addr, &rockchip_secondary_trampoline, trampoline_sz);
flush_cache_all();
outer_clean_range(0, trampoline_sz);
diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h
index fc2608b18a0d..18f01190dcfd 100644
--- a/arch/arm/mach-socfpga/core.h
+++ b/arch/arm/mach-socfpga/core.h
@@ -33,7 +33,7 @@ extern void __iomem *sdr_ctl_base_addr;
u32 socfpga_sdram_self_refresh(u32 sdr_base);
extern unsigned int socfpga_sdram_self_refresh_sz;
-extern char secondary_trampoline, secondary_trampoline_end;
+extern char secondary_trampoline[], secondary_trampoline_end[];
extern unsigned long socfpga_cpu1start_addr;
diff --git a/arch/arm/mach-socfpga/platsmp.c b/arch/arm/mach-socfpga/platsmp.c
index fbb80b883e5d..201191cf68f3 100644
--- a/arch/arm/mach-socfpga/platsmp.c
+++ b/arch/arm/mach-socfpga/platsmp.c
@@ -20,14 +20,14 @@
static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
- int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
+ int trampoline_size = secondary_trampoline_end - secondary_trampoline;
if (socfpga_cpu1start_addr) {
/* This will put CPU #1 into reset. */
writel(RSTMGR_MPUMODRST_CPU1,
rst_manager_base_addr + SOCFPGA_RSTMGR_MODMPURST);
- memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
+ memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
writel(__pa_symbol(secondary_startup),
sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff));
@@ -45,12 +45,12 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
static int socfpga_a10_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
- int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
+ int trampoline_size = secondary_trampoline_end - secondary_trampoline;
if (socfpga_cpu1start_addr) {
writel(RSTMGR_MPUMODRST_CPU1, rst_manager_base_addr +
SOCFPGA_A10_RSTMGR_MODMPURST);
- memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
+ memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
writel(__pa_symbol(secondary_startup),
sys_manager_base_addr + (socfpga_cpu1start_addr & 0x00000fff));
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 1aa8b7073218..54e3910e8b9b 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -161,7 +161,6 @@ config ARCH_MEDIATEK
config ARCH_MESON
bool "Amlogic Platforms"
- select COMMON_CLK
help
This enables support for the arm64 based Amlogic SoCs
such as the s905, S905X/D, S912, A113X/D or S905X/D2
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts
index d13980ed7a79..7ec5ac850a0d 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts
@@ -69,7 +69,7 @@
pinctrl-0 = <&emac_rgmii_pins>;
phy-supply = <&reg_gmac_3v3>;
phy-handle = <&ext_rgmii_phy>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-axg-jethome-jethub-j100.dts b/arch/arm64/boot/dts/amlogic/meson-axg-jethome-jethub-j100.dts
index 52ebe371df26..561eec21b4de 100644
--- a/arch/arm64/boot/dts/amlogic/meson-axg-jethome-jethub-j100.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-axg-jethome-jethub-j100.dts
@@ -134,23 +134,23 @@
type = "critical";
};
};
- };
- cpu_cooling_maps: cooling-maps {
- map0 {
- trip = <&cpu_passive>;
- cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
- };
+ cpu_cooling_maps: cooling-maps {
+ map0 {
+ trip = <&cpu_passive>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
- map1 {
- trip = <&cpu_hot>;
- cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ map1 {
+ trip = <&cpu_hot>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
};
};
};
diff --git a/arch/arm64/boot/dts/apple/t8103-j274.dts b/arch/arm64/boot/dts/apple/t8103-j274.dts
index 33a80f9501dc..02c36301e985 100644
--- a/arch/arm64/boot/dts/apple/t8103-j274.dts
+++ b/arch/arm64/boot/dts/apple/t8103-j274.dts
@@ -60,7 +60,7 @@
&port02 {
bus-range = <3 3>;
- ethernet0: pci@0,0 {
+ ethernet0: ethernet@0,0 {
reg = <0x30000 0x0 0x0 0x0 0x0>;
/* To be filled by the loader */
local-mac-address = [00 10 18 00 00 00];
diff --git a/arch/arm64/boot/dts/apple/t8103.dtsi b/arch/arm64/boot/dts/apple/t8103.dtsi
index fc8b2bb06ffe..8b61e7fd3e9c 100644
--- a/arch/arm64/boot/dts/apple/t8103.dtsi
+++ b/arch/arm64/boot/dts/apple/t8103.dtsi
@@ -7,6 +7,7 @@
* Copyright The Asahi Linux Contributors
*/
+#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/apple-aic.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/pinctrl/apple.h>
@@ -143,6 +144,7 @@
apple,npins = <212>;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupt-parent = <&aic>;
interrupts = <AIC_IRQ 190 IRQ_TYPE_LEVEL_HIGH>,
<AIC_IRQ 191 IRQ_TYPE_LEVEL_HIGH>,
@@ -169,6 +171,7 @@
apple,npins = <42>;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupt-parent = <&aic>;
interrupts = <AIC_IRQ 268 IRQ_TYPE_LEVEL_HIGH>,
<AIC_IRQ 269 IRQ_TYPE_LEVEL_HIGH>,
@@ -189,6 +192,7 @@
apple,npins = <23>;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupt-parent = <&aic>;
interrupts = <AIC_IRQ 330 IRQ_TYPE_LEVEL_HIGH>,
<AIC_IRQ 331 IRQ_TYPE_LEVEL_HIGH>,
@@ -209,6 +213,7 @@
apple,npins = <16>;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupt-parent = <&aic>;
interrupts = <AIC_IRQ 391 IRQ_TYPE_LEVEL_HIGH>,
<AIC_IRQ 392 IRQ_TYPE_LEVEL_HIGH>,
@@ -281,7 +286,7 @@
port00: pci@0,0 {
device_type = "pci";
reg = <0x0 0x0 0x0 0x0 0x0>;
- reset-gpios = <&pinctrl_ap 152 0>;
+ reset-gpios = <&pinctrl_ap 152 GPIO_ACTIVE_LOW>;
max-link-speed = <2>;
#address-cells = <3>;
@@ -301,7 +306,7 @@
port01: pci@1,0 {
device_type = "pci";
reg = <0x800 0x0 0x0 0x0 0x0>;
- reset-gpios = <&pinctrl_ap 153 0>;
+ reset-gpios = <&pinctrl_ap 153 GPIO_ACTIVE_LOW>;
max-link-speed = <2>;
#address-cells = <3>;
@@ -321,7 +326,7 @@
port02: pci@2,0 {
device_type = "pci";
reg = <0x1000 0x0 0x0 0x0 0x0>;
- reset-gpios = <&pinctrl_ap 33 0>;
+ reset-gpios = <&pinctrl_ap 33 GPIO_ACTIVE_LOW>;
max-link-speed = <1>;
#address-cells = <3>;
diff --git a/arch/arm64/boot/dts/exynos/exynosautov9.dtsi b/arch/arm64/boot/dts/exynos/exynosautov9.dtsi
index 3e4727344b4a..a960c0bc2dba 100644
--- a/arch/arm64/boot/dts/exynos/exynosautov9.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynosautov9.dtsi
@@ -296,8 +296,7 @@
pinctrl-0 = <&ufs_rst_n &ufs_refclk_out>;
phys = <&ufs_0_phy>;
phy-names = "ufs-phy";
- samsung,sysreg = <&syscon_fsys2>;
- samsung,ufs-shareability-reg-offset = <0x710>;
+ samsung,sysreg = <&syscon_fsys2 0x710>;
status = "disabled";
};
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts b/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts
index 3063851c2fb9..d3f03dcbb8c3 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts
@@ -38,7 +38,6 @@
powerdn {
label = "External Power Down";
gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
- interrupts = <&gpio1 17 IRQ_TYPE_EDGE_FALLING>;
linux,code = <KEY_POWER>;
};
@@ -46,7 +45,6 @@
admin {
label = "ADMIN button";
gpios = <&gpio3 8 GPIO_ACTIVE_HIGH>;
- interrupts = <&gpio3 8 IRQ_TYPE_EDGE_RISING>;
linux,code = <KEY_WPS_BUTTON>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-bluebox3.dts b/arch/arm64/boot/dts/freescale/fsl-lx2160a-bluebox3.dts
index b21be03da0af..042c486bdda2 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a-bluebox3.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-bluebox3.dts
@@ -386,6 +386,8 @@
reg = <2>;
ethernet = <&dpmac17>;
phy-mode = "rgmii-id";
+ rx-internal-delay-ps = <2000>;
+ tx-internal-delay-ps = <2000>;
fixed-link {
speed = <1000>;
@@ -529,6 +531,8 @@
reg = <2>;
ethernet = <&dpmac18>;
phy-mode = "rgmii-id";
+ rx-internal-delay-ps = <2000>;
+ tx-internal-delay-ps = <2000>;
fixed-link {
speed = <1000>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
index dc8661ebd1f6..2433e6f2eda8 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
@@ -719,7 +719,7 @@
clock-names = "i2c";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(16)>;
- scl-gpio = <&gpio2 15 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>;
status = "disabled";
};
@@ -768,7 +768,7 @@
clock-names = "i2c";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(16)>;
- scl-gpio = <&gpio2 16 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&gpio2 16 GPIO_ACTIVE_HIGH>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
index 972766b67a15..71bf497f99c2 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
@@ -524,8 +524,6 @@
<&clk IMX8MQ_VIDEO_PLL1>,
<&clk IMX8MQ_VIDEO_PLL1_OUT>;
assigned-clock-rates = <0>, <0>, <0>, <594000000>;
- interconnects = <&noc IMX8MQ_ICM_LCDIF &noc IMX8MQ_ICS_DRAM>;
- interconnect-names = "dram";
status = "disabled";
port@0 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts
index 665b2e69455d..ea6820902ede 100644
--- a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts
@@ -97,7 +97,7 @@
regulator-max-microvolt = <3300000>;
regulator-always-on;
regulator-boot-on;
- vim-supply = <&vcc_io>;
+ vin-supply = <&vcc_io>;
};
vdd_core: vdd-core {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
index d5c7648c841d..f1fcc6b5b402 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
@@ -705,7 +705,6 @@
&sdhci {
bus-width = <8>;
mmc-hs400-1_8v;
- mmc-hs400-enhanced-strobe;
non-removable;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts b/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
index 63c7681843da..b6ac00f64613 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
@@ -276,6 +276,7 @@
clock-output-names = "xin32k", "rk808-clkout2";
pinctrl-names = "default";
pinctrl-0 = <&pmic_int_l>;
+ rockchip,system-power-controller;
vcc1-supply = <&vcc5v0_sys>;
vcc2-supply = <&vcc5v0_sys>;
vcc3-supply = <&vcc5v0_sys>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts b/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts
index 7c93f840bc64..e890166e7fd4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts
@@ -55,7 +55,7 @@
regulator-boot-on;
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
- vim-supply = <&vcc3v3_sys>;
+ vin-supply = <&vcc3v3_sys>;
};
vcc3v3_sys: vcc3v3-sys {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
index 98136c88fa49..6a434be62819 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
@@ -502,7 +502,7 @@
status = "okay";
bt656-supply = <&vcc_3v0>;
- audio-supply = <&vcc_3v0>;
+ audio-supply = <&vcc1v8_codec>;
sdmmc-supply = <&vcc_sdio>;
gpio1830-supply = <&vcc_3v0>;
};
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index d3e1825337be..ad55079abe47 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -14,7 +14,6 @@
#ifdef CONFIG_EFI
extern void efi_init(void);
-extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
#else
#define efi_init()
#endif
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index 347b0cc68f07..1494cfa8639b 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -12,6 +12,17 @@
#define HAVE_FUNCTION_GRAPH_FP_TEST
+/*
+ * HAVE_FUNCTION_GRAPH_RET_ADDR_PTR means that the architecture can provide a
+ * "return address pointer" which can be used to uniquely identify a return
+ * address which has been overwritten.
+ *
+ * On arm64 we use the address of the caller's frame record, which remains the
+ * same for the lifetime of the instrumented function, unlike the return
+ * address in the LR.
+ */
+#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
+
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
#define ARCH_SUPPORTS_FTRACE_OPS 1
#else
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index a39fcf318c77..01d47c5886dc 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -91,7 +91,7 @@
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
/* TCR_EL2 Registers bits */
-#define TCR_EL2_RES1 ((1 << 31) | (1 << 23))
+#define TCR_EL2_RES1 ((1U << 31) | (1 << 23))
#define TCR_EL2_TBI (1 << 20)
#define TCR_EL2_PS_SHIFT 16
#define TCR_EL2_PS_MASK (7 << TCR_EL2_PS_SHIFT)
@@ -276,7 +276,7 @@
#define CPTR_EL2_TFP_SHIFT 10
/* Hyp Coprocessor Trap Register */
-#define CPTR_EL2_TCPAC (1 << 31)
+#define CPTR_EL2_TCPAC (1U << 31)
#define CPTR_EL2_TAM (1 << 30)
#define CPTR_EL2_TTA (1 << 20)
#define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT)
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 8433a2058eb1..237224484d0f 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -76,7 +76,7 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
- VM_BUG_ON(mm != &init_mm);
+ VM_BUG_ON(mm && mm != &init_mm);
__pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE | PMD_TABLE_UXN);
}
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index a4e046ef4568..6564a01cc085 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -47,9 +47,6 @@ struct stack_info {
* @prev_type: The type of stack this frame record was on, or a synthetic
* value of STACK_TYPE_UNKNOWN. This is used to detect a
* transition from one stack to another.
- *
- * @graph: When FUNCTION_GRAPH_TRACER is selected, holds the index of a
- * replacement lr value in the ftrace graph stack.
*/
struct stackframe {
unsigned long fp;
@@ -57,9 +54,6 @@ struct stackframe {
DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
unsigned long prev_fp;
enum stack_type prev_type;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- int graph;
-#endif
#ifdef CONFIG_KRETPROBES
struct llist_node *kr_cur;
#endif
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 6e2e0b7031ab..3a5ff5e20586 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -281,12 +281,22 @@ do { \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
} while (0)
+/*
+ * We must not call into the scheduler between uaccess_ttbr0_enable() and
+ * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
+ * we must evaluate these outside of the critical section.
+ */
#define __raw_get_user(x, ptr, err) \
do { \
+ __typeof__(*(ptr)) __user *__rgu_ptr = (ptr); \
+ __typeof__(x) __rgu_val; \
__chk_user_ptr(ptr); \
+ \
uaccess_ttbr0_enable(); \
- __raw_get_mem("ldtr", x, ptr, err); \
+ __raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err); \
uaccess_ttbr0_disable(); \
+ \
+ (x) = __rgu_val; \
} while (0)
#define __get_user_error(x, ptr, err) \
@@ -310,14 +320,22 @@ do { \
#define get_user __get_user
+/*
+ * We must not call into the scheduler between __uaccess_enable_tco_async() and
+ * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
+ * functions, we must evaluate these outside of the critical section.
+ */
#define __get_kernel_nofault(dst, src, type, err_label) \
do { \
+ __typeof__(dst) __gkn_dst = (dst); \
+ __typeof__(src) __gkn_src = (src); \
int __gkn_err = 0; \
\
__uaccess_enable_tco_async(); \
- __raw_get_mem("ldr", *((type *)(dst)), \
- (__force type *)(src), __gkn_err); \
+ __raw_get_mem("ldr", *((type *)(__gkn_dst)), \
+ (__force type *)(__gkn_src), __gkn_err); \
__uaccess_disable_tco_async(); \
+ \
if (unlikely(__gkn_err)) \
goto err_label; \
} while (0)
@@ -351,11 +369,19 @@ do { \
} \
} while (0)
+/*
+ * We must not call into the scheduler between uaccess_ttbr0_enable() and
+ * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
+ * we must evaluate these outside of the critical section.
+ */
#define __raw_put_user(x, ptr, err) \
do { \
- __chk_user_ptr(ptr); \
+ __typeof__(*(ptr)) __user *__rpu_ptr = (ptr); \
+ __typeof__(*(ptr)) __rpu_val = (x); \
+ __chk_user_ptr(__rpu_ptr); \
+ \
uaccess_ttbr0_enable(); \
- __raw_put_mem("sttr", x, ptr, err); \
+ __raw_put_mem("sttr", __rpu_val, __rpu_ptr, err); \
uaccess_ttbr0_disable(); \
} while (0)
@@ -380,14 +406,22 @@ do { \
#define put_user __put_user
+/*
+ * We must not call into the scheduler between __uaccess_enable_tco_async() and
+ * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
+ * functions, we must evaluate these outside of the critical section.
+ */
#define __put_kernel_nofault(dst, src, type, err_label) \
do { \
+ __typeof__(dst) __pkn_dst = (dst); \
+ __typeof__(src) __pkn_src = (src); \
int __pkn_err = 0; \
\
__uaccess_enable_tco_async(); \
- __raw_put_mem("str", *((type *)(src)), \
- (__force type *)(dst), __pkn_err); \
+ __raw_put_mem("str", *((type *)(__pkn_src)), \
+ (__force type *)(__pkn_dst), __pkn_err); \
__uaccess_disable_tco_async(); \
+ \
if (unlikely(__pkn_err)) \
goto err_label; \
} while(0)
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index b3e4f9a088b1..8cf970d219f5 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -77,11 +77,17 @@
.endm
SYM_CODE_START(ftrace_regs_caller)
+#ifdef BTI_C
+ BTI_C
+#endif
ftrace_regs_entry 1
b ftrace_common
SYM_CODE_END(ftrace_regs_caller)
SYM_CODE_START(ftrace_caller)
+#ifdef BTI_C
+ BTI_C
+#endif
ftrace_regs_entry 0
b ftrace_common
SYM_CODE_END(ftrace_caller)
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index fc62dfe73f93..4506c4a90ac1 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -244,8 +244,6 @@ void arch_ftrace_update_code(int command)
* on the way back to parent. For this purpose, this function is called
* in _mcount() or ftrace_caller() to replace return address (*parent) on
* the call stack to return_to_handler.
- *
- * Note that @frame_pointer is used only for sanity check later.
*/
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
unsigned long frame_pointer)
@@ -263,8 +261,10 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
*/
old = *parent;
- if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
+ if (!function_graph_enter(old, self_addr, frame_pointer,
+ (void *)frame_pointer)) {
*parent = return_hooker;
+ }
}
#ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index 1038494135c8..6fb31c117ebe 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -147,7 +147,7 @@ int machine_kexec_post_load(struct kimage *kimage)
if (rc)
return rc;
kimage->arch.ttbr1 = __pa(trans_pgd);
- kimage->arch.zero_page = __pa(empty_zero_page);
+ kimage->arch.zero_page = __pa_symbol(empty_zero_page);
reloc_size = __relocate_new_kernel_end - __relocate_new_kernel_start;
memcpy(reloc_code, __relocate_new_kernel_start, reloc_size);
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
index 63634b4d72c1..59c648d51848 100644
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -149,6 +149,7 @@ int load_other_segments(struct kimage *image,
initrd_len, cmdline, 0);
if (!dtb) {
pr_err("Preparing for new dtb failed\n");
+ ret = -EINVAL;
goto out_err;
}
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index c30624fff6ac..94f83cd44e50 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -38,9 +38,6 @@ void start_backtrace(struct stackframe *frame, unsigned long fp,
{
frame->fp = fp;
frame->pc = pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame->graph = 0;
-#endif
#ifdef CONFIG_KRETPROBES
frame->kr_cur = NULL;
#endif
@@ -116,20 +113,23 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
frame->prev_fp = fp;
frame->prev_type = info.type;
+ frame->pc = ptrauth_strip_insn_pac(frame->pc);
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (tsk->ret_stack &&
- (ptrauth_strip_insn_pac(frame->pc) == (unsigned long)return_to_handler)) {
- struct ftrace_ret_stack *ret_stack;
+ (frame->pc == (unsigned long)return_to_handler)) {
+ unsigned long orig_pc;
/*
* This is a case where function graph tracer has
* modified a return address (LR) in a stack frame
* to hook a function return.
* So replace it to an original value.
*/
- ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
- if (WARN_ON_ONCE(!ret_stack))
+ orig_pc = ftrace_graph_ret_addr(tsk, NULL, frame->pc,
+ (void *)frame->fp);
+ if (WARN_ON_ONCE(frame->pc == orig_pc))
return -EINVAL;
- frame->pc = ret_stack->ret;
+ frame->pc = orig_pc;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_KRETPROBES
@@ -137,8 +137,6 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp, &frame->kr_cur);
#endif
- frame->pc = ptrauth_strip_insn_pac(frame->pc);
-
return 0;
}
NOKPROBE_SYMBOL(unwind_frame);
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 7a0af1d39303..96c5f3fb7838 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -403,6 +403,8 @@ typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);
static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
+static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
+
/*
* Allow the hypervisor to handle the exit with an exit handler if it has one.
*
@@ -429,6 +431,18 @@ static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
*/
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
+ /*
+ * Save PSTATE early so that we can evaluate the vcpu mode
+ * early on.
+ */
+ vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
+
+ /*
+ * Check whether we want to repaint the state one way or
+ * another.
+ */
+ early_exit_filter(vcpu, exit_code);
+
if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index de7e14c862e6..7ecca8b07851 100644
--- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -70,7 +70,12 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
{
ctxt->regs.pc = read_sysreg_el2(SYS_ELR);
- ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR);
+ /*
+ * Guest PSTATE gets saved at guest fixup time in all
+ * cases. We still need to handle the nVHE host side here.
+ */
+ if (!has_vhe() && ctxt->__hyp_running_vcpu)
+ ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR);
if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2);
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index c0e3fed26d93..d13115a12434 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -233,7 +233,7 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
* Returns false if the guest ran in AArch32 when it shouldn't have, and
 * thus should exit to the host, or true if the guest run loop can continue.
*/
-static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code)
+static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
{
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
@@ -248,10 +248,7 @@ static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code)
vcpu->arch.target = -1;
*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
*exit_code |= ARM_EXCEPTION_IL;
- return false;
}
-
- return true;
}
/* Switch to the guest for legacy non-VHE systems */
@@ -316,9 +313,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
/* Jump in the fire! */
exit_code = __guest_enter(vcpu);
- if (unlikely(!handle_aarch32_guest(vcpu, &exit_code)))
- break;
-
/* And we're baaack! */
} while (fixup_guest_exit(vcpu, &exit_code));
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 5a2cb5d9bc4b..fbb26b93c347 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -112,6 +112,10 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
return hyp_exit_handlers;
}
+static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+}
+
/* Switch to the guest for VHE systems running in EL2 */
static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
diff --git a/arch/csky/kernel/traps.c b/arch/csky/kernel/traps.c
index e5fbf8653a21..2020af88b636 100644
--- a/arch/csky/kernel/traps.c
+++ b/arch/csky/kernel/traps.c
@@ -209,7 +209,7 @@ asmlinkage void do_trap_illinsn(struct pt_regs *regs)
asmlinkage void do_trap_fpe(struct pt_regs *regs)
{
-#ifdef CONFIG_CPU_HAS_FP
+#ifdef CONFIG_CPU_HAS_FPU
return fpu_fpe(regs);
#else
do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->pc,
@@ -219,7 +219,7 @@ asmlinkage void do_trap_fpe(struct pt_regs *regs)
asmlinkage void do_trap_priv(struct pt_regs *regs)
{
-#ifdef CONFIG_CPU_HAS_FP
+#ifdef CONFIG_CPU_HAS_FPU
if (user_mode(regs) && fpu_libc_helper(regs))
return;
#endif
diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl
index 6fea1844fb95..707ae121f6d3 100644
--- a/arch/ia64/kernel/syscalls/syscall.tbl
+++ b/arch/ia64/kernel/syscalls/syscall.tbl
@@ -369,3 +369,4 @@
446 common landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
diff --git a/arch/m68k/include/asm/cacheflush_mm.h b/arch/m68k/include/asm/cacheflush_mm.h
index 8ab46625ddd3..1ac55e7b47f0 100644
--- a/arch/m68k/include/asm/cacheflush_mm.h
+++ b/arch/m68k/include/asm/cacheflush_mm.h
@@ -250,7 +250,6 @@ static inline void __flush_page_to_ram(void *vaddr)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
#define flush_dcache_page(page) __flush_page_to_ram(page_address(page))
-void flush_dcache_folio(struct folio *folio);
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_page(vma, page) __flush_page_to_ram(page_address(page))
diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl
index 7976dff8f879..45bc32a41b90 100644
--- a/arch/m68k/kernel/syscalls/syscall.tbl
+++ b/arch/m68k/kernel/syscalls/syscall.tbl
@@ -448,3 +448,4 @@
446 common landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl
index 6b0e11362bd2..2204bde3ce4a 100644
--- a/arch/microblaze/kernel/syscalls/syscall.tbl
+++ b/arch/microblaze/kernel/syscalls/syscall.tbl
@@ -454,3 +454,4 @@
446 common landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index de60ad190057..0215dc1529e9 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -3097,7 +3097,7 @@ config STACKTRACE_SUPPORT
config PGTABLE_LEVELS
int
default 4 if PAGE_SIZE_4KB && MIPS_VA_BITS_48
- default 3 if 64BIT && !PAGE_SIZE_64KB
+ default 3 if 64BIT && (!PAGE_SIZE_64KB || MIPS_VA_BITS_48)
default 2
config MIPS_AUTO_PFN_OFFSET
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index 2861a05c2e0c..f27cf31b4140 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -52,7 +52,7 @@ endif
vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o
-vmlinuzobjs-$(CONFIG_KERNEL_ZSTD) += $(obj)/bswapdi.o
+vmlinuzobjs-$(CONFIG_KERNEL_ZSTD) += $(obj)/bswapdi.o $(obj)/ashldi3.o
targets := $(notdir $(vmlinuzobjs-y))
diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
index f207388541d5..b3dc9c589442 100644
--- a/arch/mips/include/asm/cacheflush.h
+++ b/arch/mips/include/asm/cacheflush.h
@@ -61,8 +61,6 @@ static inline void flush_dcache_page(struct page *page)
SetPageDcacheDirty(page);
}
-void flush_dcache_folio(struct folio *folio);
-
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
diff --git a/arch/mips/include/asm/mach-ralink/spaces.h b/arch/mips/include/asm/mach-ralink/spaces.h
index 05d14c21c417..f7af11ea2d61 100644
--- a/arch/mips/include/asm/mach-ralink/spaces.h
+++ b/arch/mips/include/asm/mach-ralink/spaces.h
@@ -6,5 +6,7 @@
#define PCI_IOSIZE SZ_64K
#define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
+#define pci_remap_iospace pci_remap_iospace
+
#include <asm/mach-generic/spaces.h>
#endif
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 421231f55935..9ffc8192adae 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -20,10 +20,6 @@
#include <linux/list.h>
#include <linux/of.h>
-#ifdef CONFIG_PCI_DRIVERS_GENERIC
-#define pci_remap_iospace pci_remap_iospace
-#endif
-
#ifdef CONFIG_PCI_DRIVERS_LEGACY
/*
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index ac0e2cfc6d57..24a529c6c4be 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -1734,8 +1734,6 @@ static inline void decode_cpucfg(struct cpuinfo_mips *c)
static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
{
- decode_configs(c);
-
/* All Loongson processors covered here define ExcCode 16 as GSExc. */
c->options |= MIPS_CPU_GSEXCEX;
@@ -1796,6 +1794,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
panic("Unknown Loongson Processor ID!");
break;
}
+
+ decode_configs(c);
}
#else
static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) { }
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 376a6e2676e9..9f47a889b047 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -185,7 +185,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_puts(m, " tx39_cache");
if (cpu_has_octeon_cache)
seq_puts(m, " octeon_cache");
- if (cpu_has_fpu)
+ if (raw_cpu_has_fpu)
seq_puts(m, " fpu");
if (cpu_has_32fpr)
seq_puts(m, " 32fpr");
diff --git a/arch/mips/net/bpf_jit_comp.h b/arch/mips/net/bpf_jit_comp.h
index 6f3a7b07294b..a37fe20818eb 100644
--- a/arch/mips/net/bpf_jit_comp.h
+++ b/arch/mips/net/bpf_jit_comp.h
@@ -98,7 +98,7 @@ do { \
#define emit(...) __emit(__VA_ARGS__)
/* Workaround for R10000 ll/sc errata */
-#ifdef CONFIG_WAR_R10000
+#ifdef CONFIG_WAR_R10000_LLSC
#define LLSC_beqz beqzl
#else
#define LLSC_beqz beqz
diff --git a/arch/mips/pci/pci-generic.c b/arch/mips/pci/pci-generic.c
index 18eb8a453a86..d2d68bac3d25 100644
--- a/arch/mips/pci/pci-generic.c
+++ b/arch/mips/pci/pci-generic.c
@@ -47,6 +47,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
pci_read_bridge_bases(bus);
}
+#ifdef pci_remap_iospace
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
{
unsigned long vaddr;
@@ -60,3 +61,4 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
set_io_port_base(vaddr);
return 0;
}
+#endif
diff --git a/arch/nds32/include/asm/cacheflush.h b/arch/nds32/include/asm/cacheflush.h
index 3fc0bb7d6487..c2a222ebfa2a 100644
--- a/arch/nds32/include/asm/cacheflush.h
+++ b/arch/nds32/include/asm/cacheflush.h
@@ -27,7 +27,6 @@ void flush_cache_vunmap(unsigned long start, unsigned long end);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, void *src, int len);
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
diff --git a/arch/nios2/include/asm/cacheflush.h b/arch/nios2/include/asm/cacheflush.h
index 1999561b22aa..d0b71dd71287 100644
--- a/arch/nios2/include/asm/cacheflush.h
+++ b/arch/nios2/include/asm/cacheflush.h
@@ -29,7 +29,6 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
unsigned long pfn);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index b2188da09c73..011dc32fdb4d 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -85,11 +85,6 @@ config MMU
config STACK_GROWSUP
def_bool y
-config ARCH_DEFCONFIG
- string
- default "arch/parisc/configs/generic-32bit_defconfig" if !64BIT
- default "arch/parisc/configs/generic-64bit_defconfig" if 64BIT
-
config GENERIC_LOCKBREAK
bool
default y
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 8db4af4879d0..82d77f4b0d08 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -15,7 +15,12 @@
# Mike Shaver, Helge Deller and Martin K. Petersen
#
+ifdef CONFIG_PARISC_SELF_EXTRACT
+boot := arch/parisc/boot
+KBUILD_IMAGE := $(boot)/bzImage
+else
KBUILD_IMAGE := vmlinuz
+endif
NM = sh $(srctree)/arch/parisc/nm
CHECKFLAGS += -D__hppa__=1
diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig
index d2daeac2b217..1b8fd80cbe7f 100644
--- a/arch/parisc/configs/generic-64bit_defconfig
+++ b/arch/parisc/configs/generic-64bit_defconfig
@@ -1,7 +1,9 @@
CONFIG_LOCALVERSION="-64bit"
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_KERNEL_LZ4=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
@@ -35,6 +37,7 @@ CONFIG_MODVERSIONS=y
CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BINFMT_MISC=m
# CONFIG_COMPACTION is not set
+CONFIG_MEMORY_FAILURE=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -65,12 +68,15 @@ CONFIG_SCSI_ISCSI_ATTRS=y
CONFIG_SCSI_SRP_ATTRS=y
CONFIG_ISCSI_BOOT_SYSFS=y
CONFIG_SCSI_MPT2SAS=y
-CONFIG_SCSI_LASI700=m
+CONFIG_SCSI_LASI700=y
CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_ZALON=y
CONFIG_SCSI_QLA_ISCSI=m
CONFIG_SCSI_DH=y
CONFIG_ATA=y
+CONFIG_SATA_SIL=y
+CONFIG_SATA_SIS=y
+CONFIG_SATA_VIA=y
CONFIG_PATA_NS87415=y
CONFIG_PATA_SIL680=y
CONFIG_ATA_GENERIC=y
@@ -79,6 +85,7 @@ CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_RAID=m
CONFIG_DM_UEVENT=y
+CONFIG_DM_AUDIT=y
CONFIG_FUSION=y
CONFIG_FUSION_SPI=y
CONFIG_FUSION_SAS=y
@@ -196,10 +203,15 @@ CONFIG_FB_MATROX_G=y
CONFIG_FB_MATROX_I2C=y
CONFIG_FB_MATROX_MAVEN=y
CONFIG_FB_RADEON=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_CLUT224 is not set
CONFIG_HIDRAW=y
CONFIG_HID_PID=y
CONFIG_USB_HIDDEV=y
CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_UIO=y
CONFIG_UIO_PDRV_GENIRQ=m
CONFIG_UIO_AEC=m
diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
index 39e7985086f9..6d13ae236fcb 100644
--- a/arch/parisc/include/asm/assembly.h
+++ b/arch/parisc/include/asm/assembly.h
@@ -147,6 +147,17 @@
extrd,u \r, 63-(\sa), 64-(\sa), \t
.endm
+ /* Extract unsigned for 32- and 64-bit
+ * The extru instruction leaves the most significant 32 bits of the
+ * target register in an undefined state on PA 2.0 systems. */
+ .macro extru_safe r, p, len, t
+#ifdef CONFIG_64BIT
+ extrd,u \r, 32+(\p), \len, \t
+#else
+ extru \r, \p, \len, \t
+#endif
+ .endm
+
/* load 32-bit 'value' into 'reg' compensating for the ldil
* sign-extension when running in wide mode.
* WARNING!! neither 'value' nor 'reg' can be expressions
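To make the intent of the new extru_safe macro concrete, here is a small user-space model of the two extract instructions (a sketch for illustration only, not kernel code): extru/extrd,u take a bit position counted from the most significant bit, so a field that sits at position p in the low 32 bits sits at position 32+p in a 64-bit register, which is exactly the adjustment the macro applies under CONFIG_64BIT.

#include <assert.h>
#include <stdint.h>

/* Model of "extru r,p,len,t" on a 32-bit register: extract the field of
 * 'len' bits whose rightmost bit is at big-endian position 'p' (0 = MSB). */
static uint64_t extru32(uint32_t r, int p, int len)
{
	return (r >> (31 - p)) & ((1ULL << len) - 1);
}

/* Model of "extrd,u r,p,len,t" on a 64-bit register (0 = MSB, 63 = LSB). */
static uint64_t extrd_u64(uint64_t r, int p, int len)
{
	return (r >> (63 - p)) & ((1ULL << len) - 1);
}

int main(void)
{
	uint64_t r = 0x123456789abcdef0ULL;
	int p = 28, len = 8;

	/* The 64-bit form at position 32+p sees the same field as the 32-bit
	 * form applied to the low word; the upper word never participates,
	 * which is the guarantee extru alone cannot give on PA 2.0. */
	assert(extrd_u64(r, 32 + p, len) == extru32((uint32_t)r, p, len));
	return 0;
}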
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index da0cd4b3a28f..859b8a34adcf 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -50,7 +50,6 @@ void invalidate_kernel_vmap_range(void *vaddr, int size);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h
index 70cf8f0a7617..9cd4dd6e63ad 100644
--- a/arch/parisc/include/asm/futex.h
+++ b/arch/parisc/include/asm/futex.h
@@ -14,7 +14,7 @@ static inline void
_futex_spin_lock(u32 __user *uaddr)
{
extern u32 lws_lock_start[];
- long index = ((long)uaddr & 0x3f8) >> 1;
+ long index = ((long)uaddr & 0x7f8) >> 1;
arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
preempt_disable();
arch_spin_lock(s);
@@ -24,7 +24,7 @@ static inline void
_futex_spin_unlock(u32 __user *uaddr)
{
extern u32 lws_lock_start[];
- long index = ((long)uaddr & 0x3f8) >> 1;
+ long index = ((long)uaddr & 0x7f8) >> 1;
arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
arch_spin_unlock(s);
preempt_enable();
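The arithmetic behind the widened mask can be checked in isolation. The following stand-alone sketch (not kernel code; the slot counts are inferred from the mask widths) shows that 0x3f8 only selects 128 distinct lock slots while 0x7f8 selects 256, matching the eight hash bits the LWS code in syscall.S extracts from the same address.

#include <stdint.h>
#include <stdio.h>

static long lws_index(uintptr_t uaddr, long mask)
{
	/* same expression as _futex_spin_lock(): low address bits, shifted
	 * so the result indexes the u32 lws_lock_start[] array */
	return (uaddr & mask) >> 1;
}

int main(void)
{
	/* number of distinct 8-byte-aligned values each mask admits */
	printf("0x3f8 -> %ld slots\n", (0x3f8L >> 3) + 1);	/* 128 */
	printf("0x7f8 -> %ld slots\n", (0x7f8L >> 3) + 1);	/* 256 */

	printf("index for 0x12345678: old %ld, new %ld\n",
	       lws_index(0x12345678, 0x3f8), lws_index(0x12345678, 0x7f8));
	return 0;
}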
diff --git a/arch/parisc/install.sh b/arch/parisc/install.sh
index 056d588befdd..70d3cffb0251 100644
--- a/arch/parisc/install.sh
+++ b/arch/parisc/install.sh
@@ -39,6 +39,7 @@ verify "$3"
if [ -n "${INSTALLKERNEL}" ]; then
if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
+ if [ -x /usr/sbin/${INSTALLKERNEL} ]; then exec /usr/sbin/${INSTALLKERNEL} "$@"; fi
fi
# Default install
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 88c188a965d8..6e9cdb269862 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -366,17 +366,9 @@
*/
.macro L2_ptep pmd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
- extru \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
+ extru_safe \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
-# if defined(CONFIG_64BIT)
- extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
- #else
- # if PAGE_SIZE > 4096
- extru \va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
- # else
- extru \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
- # endif
-# endif
+ extru_safe \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#endif
dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
#if CONFIG_PGTABLE_LEVELS < 3
@@ -386,7 +378,7 @@
bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
SHLREG \pmd,PxD_VALUE_SHIFT,\pmd
- extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
+ extru_safe \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
.endm
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 4fb3b6a993bf..65c88ca7a7ac 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -472,7 +472,7 @@ lws_start:
extrd,u %r1,PSW_W_BIT,1,%r1
/* sp must be aligned on 4, so deposit the W bit setting into
* the bottom of sp temporarily */
- or,ev %r1,%r30,%r30
+ or,od %r1,%r30,%r30
/* Clip LWS number to a 32-bit value for 32-bit processes */
depdi 0, 31, 32, %r20
@@ -566,7 +566,7 @@ lws_compare_and_swap:
ldo R%lws_lock_start(%r20), %r28
/* Extract eight bits from r26 and hash lock (Bits 3-11) */
- extru %r26, 28, 8, %r20
+ extru_safe %r26, 28, 8, %r20
/* Find lock to use, the hash is either one of 0 to
15, multiplied by 16 (keep it 16-byte aligned)
@@ -751,7 +751,7 @@ cas2_lock_start:
ldo R%lws_lock_start(%r20), %r28
/* Extract eight bits from r26 and hash lock (Bits 3-11) */
- extru %r26, 28, 8, %r20
+ extru_safe %r26, 28, 8, %r20
/* Find lock to use, the hash is either one of 0 to
15, multiplied by 16 (keep it 16-byte aligned)
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 9fb1e794831b..061119a56fbe 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -249,30 +249,16 @@ void __init time_init(void)
static int __init init_cr16_clocksource(void)
{
/*
- * The cr16 interval timers are not syncronized across CPUs on
- * different sockets, so mark them unstable and lower rating on
- * multi-socket SMP systems.
+ * The cr16 interval timers are not synchronized across CPUs, even if

+ * they share the same socket.
*/
if (num_online_cpus() > 1 && !running_on_qemu) {
- int cpu;
- unsigned long cpu0_loc;
- cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
-
- for_each_online_cpu(cpu) {
- if (cpu == 0)
- continue;
- if ((cpu0_loc != 0) &&
- (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc))
- continue;
-
- /* mark sched_clock unstable */
- clear_sched_clock_stable();
-
- clocksource_cr16.name = "cr16_unstable";
- clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
- clocksource_cr16.rating = 0;
- break;
- }
+ /* mark sched_clock unstable */
+ clear_sched_clock_stable();
+
+ clocksource_cr16.name = "cr16_unstable";
+ clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
+ clocksource_cr16.rating = 0;
}
/* register at clocksource framework */
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index b11fb26ce299..892b7fc8f3c4 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -730,6 +730,8 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
}
mmap_read_unlock(current->mm);
}
+ /* CPU could not fetch instruction, so clear stale IIR value. */
+ regs->iir = 0xbaadf00d;
fallthrough;
case 27:
/* Data memory protection ID trap */
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 3d208afd15bc..2769eb991f58 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -57,8 +57,6 @@ SECTIONS
{
. = KERNEL_BINARY_TEXT_START;
- _stext = .; /* start of kernel text, includes init code & data */
-
__init_begin = .;
HEAD_TEXT_SECTION
MLONGCALL_DISCARD(INIT_TEXT_SECTION(8))
@@ -82,6 +80,7 @@ SECTIONS
/* freed after init ends here */
_text = .; /* Text and read-only data */
+ _stext = .;
MLONGCALL_KEEP(INIT_TEXT_SECTION(8))
.text ALIGN(PAGE_SIZE) : {
TEXT_TEXT
diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
index 6b1ec9e3541b..349c4a820231 100644
--- a/arch/powerpc/kernel/head_32.h
+++ b/arch/powerpc/kernel/head_32.h
@@ -202,11 +202,11 @@ vmap_stack_overflow:
mfspr r1, SPRN_SPRG_THREAD
lwz r1, TASK_CPU - THREAD(r1)
slwi r1, r1, 3
- addis r1, r1, emergency_ctx@ha
+ addis r1, r1, emergency_ctx-PAGE_OFFSET@ha
#else
- lis r1, emergency_ctx@ha
+ lis r1, emergency_ctx-PAGE_OFFSET@ha
#endif
- lwz r1, emergency_ctx@l(r1)
+ lwz r1, emergency_ctx-PAGE_OFFSET@l(r1)
addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE
EXCEPTION_PROLOG_2 0 vmap_stack_overflow
prepare_transfer_to_handler
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 6baa676e7cb6..5d77d3f5fbb5 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -422,11 +422,17 @@ static inline int create_stub(const Elf64_Shdr *sechdrs,
const char *name)
{
long reladdr;
+ func_desc_t desc;
+ int i;
if (is_mprofile_ftrace_call(name))
return create_ftrace_stub(entry, addr, me);
- memcpy(entry->jump, ppc64_stub_insns, sizeof(ppc64_stub_insns));
+ for (i = 0; i < sizeof(ppc64_stub_insns) / sizeof(u32); i++) {
+ if (patch_instruction(&entry->jump[i],
+ ppc_inst(ppc64_stub_insns[i])))
+ return 0;
+ }
/* Stub uses address relative to r2. */
reladdr = (unsigned long)entry - my_r2(sechdrs, me);
@@ -437,10 +443,24 @@ static inline int create_stub(const Elf64_Shdr *sechdrs,
}
pr_debug("Stub %p get data from reladdr %li\n", entry, reladdr);
- entry->jump[0] |= PPC_HA(reladdr);
- entry->jump[1] |= PPC_LO(reladdr);
- entry->funcdata = func_desc(addr);
- entry->magic = STUB_MAGIC;
+ if (patch_instruction(&entry->jump[0],
+ ppc_inst(entry->jump[0] | PPC_HA(reladdr))))
+ return 0;
+
+ if (patch_instruction(&entry->jump[1],
+ ppc_inst(entry->jump[1] | PPC_LO(reladdr))))
+ return 0;
+
+ // func_desc_t is 8 bytes if ABIv2, else 16 bytes
+ desc = func_desc(addr);
+ for (i = 0; i < sizeof(func_desc_t) / sizeof(u32); i++) {
+ if (patch_instruction(((u32 *)&entry->funcdata) + i,
+ ppc_inst(((u32 *)(&desc))[i])))
+ return 0;
+ }
+
+ if (patch_instruction(&entry->magic, ppc_inst(STUB_MAGIC)))
+ return 0;
return 1;
}
@@ -495,8 +515,11 @@ static int restore_r2(const char *name, u32 *instruction, struct module *me)
me->name, *instruction, instruction);
return 0;
}
+
/* ld r2,R2_STACK_OFFSET(r1) */
- *instruction = PPC_INST_LD_TOC;
+ if (patch_instruction(instruction, ppc_inst(PPC_INST_LD_TOC)))
+ return 0;
+
return 1;
}
@@ -636,9 +659,12 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
}
/* Only replace bits 2 through 26 */
- *(uint32_t *)location
- = (*(uint32_t *)location & ~0x03fffffc)
+ value = (*(uint32_t *)location & ~0x03fffffc)
| (value & 0x03fffffc);
+
+ if (patch_instruction((u32 *)location, ppc_inst(value)))
+ return -EFAULT;
+
break;
case R_PPC64_REL64:
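The pattern used in this file, where each 32-bit word of the stub is written through patch_instruction() rather than a plain memcpy() or store, can be sketched in user space as follows. This is an illustration only: patch_word() is a hypothetical stand-in for the kernel primitive, which can fail when the text mapping cannot be written.

#include <stddef.h>
#include <stdint.h>

/* hypothetical stand-in for patch_instruction(): returns 0 on success */
static int patch_word(uint32_t *loc, uint32_t insn)
{
	*loc = insn;	/* the real primitive copes with read-only text and flushes caches */
	return 0;
}

/* mirror of the create_stub() convention: return 1 on success, 0 on failure */
static int copy_words_patched(uint32_t *dst, const uint32_t *src, size_t bytes)
{
	for (size_t i = 0; i < bytes / sizeof(uint32_t); i++)
		if (patch_word(&dst[i], src[i]))
			return 0;
	return 1;
}

The same error convention is why every patch_instruction() call in the hunks above bails out with "return 0" instead of continuing with a partially written stub.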
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index 7bef917cc84e..15109af9d075 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -528,3 +528,4 @@
446 common landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index fcf4760a3a0e..70b7a8f97153 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -695,6 +695,7 @@ static void flush_guest_tlb(struct kvm *kvm)
"r" (0) : "memory");
}
asm volatile("ptesync": : :"memory");
+ // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
} else {
for (set = 0; set < kvm->arch.tlb_sets; ++set) {
@@ -705,7 +706,9 @@ static void flush_guest_tlb(struct kvm *kvm)
rb += PPC_BIT(51); /* increment set number */
}
asm volatile("ptesync": : :"memory");
- asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
+ // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
}
}
diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
index bf251191e78d..32bfb215c485 100644
--- a/arch/powerpc/mm/ptdump/ptdump.c
+++ b/arch/powerpc/mm/ptdump/ptdump.c
@@ -183,7 +183,7 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
pte_t pte = __pte(st->current_flags);
- if (!IS_ENABLED(CONFIG_PPC_DEBUG_WX) || !st->check_wx)
+ if (!IS_ENABLED(CONFIG_DEBUG_WX) || !st->check_wx)
return;
if (!pte_write(pte) || !pte_exec(pte))
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 83f4a6389a28..d7081e9af65c 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -220,7 +220,7 @@ static int smp_85xx_start_cpu(int cpu)
local_irq_save(flags);
hard_irq_disable();
- if (qoriq_pm_ops)
+ if (qoriq_pm_ops && qoriq_pm_ops->cpu_up_prepare)
qoriq_pm_ops->cpu_up_prepare(cpu);
/* if cpu is not spinning, reset it */
@@ -292,7 +292,7 @@ static int smp_85xx_kick_cpu(int nr)
booting_thread_hwid = cpu_thread_in_core(nr);
primary = cpu_first_thread_sibling(nr);
- if (qoriq_pm_ops)
+ if (qoriq_pm_ops && qoriq_pm_ops->cpu_up_prepare)
qoriq_pm_ops->cpu_up_prepare(nr);
/*
diff --git a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
index ba304d4c455c..ced0d4e47938 100644
--- a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
+++ b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
@@ -76,6 +76,7 @@
spi-max-frequency = <20000000>;
voltage-ranges = <3300 3300>;
disable-wp;
+ gpios = <&gpio 11 GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
index 4f66919215f6..6bfa1f24d3de 100644
--- a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
+++ b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
@@ -2,6 +2,7 @@
/* Copyright (c) 2020 SiFive, Inc */
#include "fu740-c000.dtsi"
+#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
/* Clock frequency (in Hz) of the PCB crystal for rtcclk */
@@ -54,10 +55,21 @@
temperature-sensor@4c {
compatible = "ti,tmp451";
reg = <0x4c>;
+ vcc-supply = <&vdd_bpro>;
interrupt-parent = <&gpio>;
interrupts = <6 IRQ_TYPE_LEVEL_LOW>;
};
+ eeprom@54 {
+ compatible = "microchip,24c02", "atmel,24c02";
+ reg = <0x54>;
+ vcc-supply = <&vdd_bpro>;
+ label = "board-id";
+ pagesize = <16>;
+ read-only;
+ size = <256>;
+ };
+
pmic@58 {
compatible = "dlg,da9063";
reg = <0x58>;
@@ -65,48 +77,44 @@
interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
- regulators {
- vdd_bcore1: bcore1 {
- regulator-min-microvolt = <900000>;
- regulator-max-microvolt = <900000>;
- regulator-min-microamp = <5000000>;
- regulator-max-microamp = <5000000>;
- regulator-always-on;
- };
+ onkey {
+ compatible = "dlg,da9063-onkey";
+ };
- vdd_bcore2: bcore2 {
- regulator-min-microvolt = <900000>;
- regulator-max-microvolt = <900000>;
- regulator-min-microamp = <5000000>;
- regulator-max-microamp = <5000000>;
+ rtc {
+ compatible = "dlg,da9063-rtc";
+ };
+
+ wdt {
+ compatible = "dlg,da9063-watchdog";
+ };
+
+ regulators {
+ vdd_bcore: bcores-merged {
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-min-microamp = <4800000>;
+ regulator-max-microamp = <4800000>;
regulator-always-on;
};
vdd_bpro: bpro {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
- regulator-min-microamp = <2500000>;
- regulator-max-microamp = <2500000>;
+ regulator-min-microamp = <2400000>;
+ regulator-max-microamp = <2400000>;
regulator-always-on;
};
vdd_bperi: bperi {
- regulator-min-microvolt = <1050000>;
- regulator-max-microvolt = <1050000>;
+ regulator-min-microvolt = <1060000>;
+ regulator-max-microvolt = <1060000>;
regulator-min-microamp = <1500000>;
regulator-max-microamp = <1500000>;
regulator-always-on;
};
- vdd_bmem: bmem {
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- regulator-min-microamp = <3000000>;
- regulator-max-microamp = <3000000>;
- regulator-always-on;
- };
-
- vdd_bio: bio {
+ vdd_bmem_bio: bmem-bio-merged {
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
regulator-min-microamp = <3000000>;
@@ -117,86 +125,66 @@
vdd_ldo1: ldo1 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
- regulator-min-microamp = <100000>;
- regulator-max-microamp = <100000>;
regulator-always-on;
};
vdd_ldo2: ldo2 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
- regulator-min-microamp = <200000>;
- regulator-max-microamp = <200000>;
regulator-always-on;
};
vdd_ldo3: ldo3 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-min-microamp = <200000>;
- regulator-max-microamp = <200000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
regulator-always-on;
};
vdd_ldo4: ldo4 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-min-microamp = <200000>;
- regulator-max-microamp = <200000>;
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
regulator-always-on;
};
vdd_ldo5: ldo5 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-min-microamp = <100000>;
- regulator-max-microamp = <100000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
regulator-always-on;
};
vdd_ldo6: ldo6 {
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-min-microamp = <200000>;
- regulator-max-microamp = <200000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
regulator-always-on;
};
vdd_ldo7: ldo7 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-min-microamp = <200000>;
- regulator-max-microamp = <200000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
regulator-always-on;
};
vdd_ldo8: ldo8 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-min-microamp = <200000>;
- regulator-max-microamp = <200000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
regulator-always-on;
};
vdd_ld09: ldo9 {
regulator-min-microvolt = <1050000>;
regulator-max-microvolt = <1050000>;
- regulator-min-microamp = <200000>;
- regulator-max-microamp = <200000>;
+ regulator-always-on;
};
vdd_ldo10: ldo10 {
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <1000000>;
- regulator-min-microamp = <300000>;
- regulator-max-microamp = <300000>;
+ regulator-always-on;
};
vdd_ldo11: ldo11 {
regulator-min-microvolt = <2500000>;
regulator-max-microvolt = <2500000>;
- regulator-min-microamp = <300000>;
- regulator-max-microamp = <300000>;
regulator-always-on;
};
};
@@ -223,6 +211,7 @@
spi-max-frequency = <20000000>;
voltage-ranges = <3300 3300>;
disable-wp;
+ gpios = <&gpio 15 GPIO_ACTIVE_LOW>;
};
};
@@ -245,4 +234,8 @@
&gpio {
status = "okay";
+ gpio-line-names = "J29.1", "PMICNTB", "PMICSHDN", "J8.1", "J8.3",
+ "PCIe_PWREN", "THERM", "UBRDG_RSTN", "PCIe_PERSTN",
+ "ULPI_RSTN", "J8.2", "UHUB_RSTN", "GEMGXL_RST", "J8.4",
+ "EN_VDD_SD", "SD_CD";
};
diff --git a/arch/riscv/include/asm/efi.h b/arch/riscv/include/asm/efi.h
index 49b398fe99f1..cc4f6787f937 100644
--- a/arch/riscv/include/asm/efi.h
+++ b/arch/riscv/include/asm/efi.h
@@ -13,7 +13,6 @@
#ifdef CONFIG_EFI
extern void efi_init(void);
-extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
#else
#define efi_init()
#endif
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 25ba21f98504..2639b9ee48f9 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -12,14 +12,12 @@
#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
+#include <asm/csr.h>
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_timer.h>
-#ifdef CONFIG_64BIT
-#define KVM_MAX_VCPUS (1U << 16)
-#else
-#define KVM_MAX_VCPUS (1U << 9)
-#endif
+#define KVM_MAX_VCPUS \
+ ((HGATP_VMID_MASK >> HGATP_VMID_SHIFT) + 1)
#define KVM_HALT_POLL_NS_DEFAULT 500000
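For illustration, the new limit is simply the number of representable VMIDs. A stand-alone sketch with the field width spelled out (the 14-bit RV64 VMID width and its position come from the RISC-V privileged specification, not from this patch):

#include <stdio.h>

#define HGATP_VMID_SHIFT	44
#define HGATP_VMID_MASK		(0x3FFFULL << HGATP_VMID_SHIFT)	/* 14-bit VMID on RV64 */

int main(void)
{
	unsigned long long max_vcpus = (HGATP_VMID_MASK >> HGATP_VMID_SHIFT) + 1;

	printf("KVM_MAX_VCPUS = %llu\n", max_vcpus);	/* 16384 on RV64 */
	return 0;
}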
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index d81bae8eb55e..fc058ff5f4b6 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -453,6 +453,12 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot)
{
+ gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
+ phys_addr_t size = slot->npages << PAGE_SHIFT;
+
+ spin_lock(&kvm->mmu_lock);
+ stage2_unmap_range(kvm, gpa, size, false);
+ spin_unlock(&kvm->mmu_lock);
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index fd825097cf04..e45cc27716de 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -117,6 +117,7 @@ CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
+CONFIG_NET_SWITCHDEV=y
CONFIG_SMC=m
CONFIG_SMC_DIAG=m
CONFIG_INET=y
@@ -403,7 +404,6 @@ CONFIG_DEVTMPFS=y
CONFIG_CONNECTOR=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
@@ -476,6 +476,7 @@ CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_VXLAN=m
CONFIG_BAREUDP=m
+CONFIG_AMT=m
CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
@@ -489,6 +490,7 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ASIX is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_BROCADE is not set
@@ -510,6 +512,7 @@ CONFIG_NLMON=m
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
CONFIG_MLX5_CORE_EN=y
+CONFIG_MLX5_ESWITCH=y
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MICROCHIP is not set
# CONFIG_NET_VENDOR_MICROSEMI is not set
@@ -571,6 +574,7 @@ CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=m
CONFIG_DIAG288_WATCHDOG=m
+# CONFIG_DRM_DEBUG_MODESET_LOCK is not set
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
@@ -775,12 +779,14 @@ CONFIG_CRC4=m
CONFIG_CRC7=m
CONFIG_CRC8=m
CONFIG_RANDOM32_SELFTEST=y
+CONFIG_XZ_DEC_MICROLZMA=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_DEBUG_INFO_BTF=y
CONFIG_GDB_SCRIPTS=y
CONFIG_HEADERS_INSTALL=y
CONFIG_DEBUG_SECTION_MISMATCH=y
@@ -807,6 +813,7 @@ CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
CONFIG_DEBUG_PER_CPU_MAPS=y
CONFIG_KFENCE=y
+CONFIG_KFENCE_STATIC_KEYS=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_PANIC_ON_OOPS=y
CONFIG_DETECT_HUNG_TASK=y
@@ -842,6 +849,7 @@ CONFIG_FTRACE_STARTUP_TEST=y
CONFIG_SAMPLES=y
CONFIG_SAMPLE_TRACE_PRINTK=m
CONFIG_SAMPLE_FTRACE_DIRECT=m
+CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m
CONFIG_DEBUG_ENTRY=y
CONFIG_CIO_INJECT=y
CONFIG_KUNIT=m
@@ -860,7 +868,7 @@ CONFIG_FAIL_FUNCTION=y
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
CONFIG_LKDTM=m
CONFIG_TEST_MIN_HEAP=y
-CONFIG_KPROBES_SANITY_TEST=y
+CONFIG_KPROBES_SANITY_TEST=m
CONFIG_RBTREE_TEST=y
CONFIG_INTERVAL_TREE_TEST=m
CONFIG_PERCPU_TEST=m
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index c9c3cedff2d8..1c750bfca2d8 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -109,6 +109,7 @@ CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
+CONFIG_NET_SWITCHDEV=y
CONFIG_SMC=m
CONFIG_SMC_DIAG=m
CONFIG_INET=y
@@ -394,7 +395,6 @@ CONFIG_DEVTMPFS=y
CONFIG_CONNECTOR=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
@@ -467,6 +467,7 @@ CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_VXLAN=m
CONFIG_BAREUDP=m
+CONFIG_AMT=m
CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
@@ -480,6 +481,7 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ASIX is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_BROCADE is not set
@@ -501,6 +503,7 @@ CONFIG_NLMON=m
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
CONFIG_MLX5_CORE_EN=y
+CONFIG_MLX5_ESWITCH=y
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MICROCHIP is not set
# CONFIG_NET_VENDOR_MICROSEMI is not set
@@ -762,12 +765,14 @@ CONFIG_PRIME_NUMBERS=m
CONFIG_CRC4=m
CONFIG_CRC7=m
CONFIG_CRC8=m
+CONFIG_XZ_DEC_MICROLZMA=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_DEBUG_INFO_BTF=y
CONFIG_GDB_SCRIPTS=y
CONFIG_DEBUG_SECTION_MISMATCH=y
CONFIG_MAGIC_SYSRQ=y
@@ -792,9 +797,11 @@ CONFIG_HIST_TRIGGERS=y
CONFIG_SAMPLES=y
CONFIG_SAMPLE_TRACE_PRINTK=m
CONFIG_SAMPLE_FTRACE_DIRECT=m
+CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m
CONFIG_KUNIT=m
CONFIG_KUNIT_DEBUGFS=y
CONFIG_LKDTM=m
+CONFIG_KPROBES_SANITY_TEST=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BPF=m
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index aceccf3b9a88..eed3b9acfa71 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -65,9 +65,11 @@ CONFIG_ZFCP=y
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_LSM="yama,loadpin,safesetid,integrity"
# CONFIG_ZLIB_DFLTCC is not set
+CONFIG_XZ_DEC_MICROLZMA=y
CONFIG_PRINTK_TIME=y
# CONFIG_SYMBOLIC_ERRNAME is not set
CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_BTF=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
CONFIG_PANIC_ON_OOPS=y
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index e4dc64cc9c55..287bb88f7698 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -14,12 +14,13 @@
/* I/O Map */
#define ZPCI_IOMAP_SHIFT 48
-#define ZPCI_IOMAP_ADDR_BASE 0x8000000000000000UL
+#define ZPCI_IOMAP_ADDR_SHIFT 62
+#define ZPCI_IOMAP_ADDR_BASE (1UL << ZPCI_IOMAP_ADDR_SHIFT)
#define ZPCI_IOMAP_ADDR_OFF_MASK ((1UL << ZPCI_IOMAP_SHIFT) - 1)
#define ZPCI_IOMAP_MAX_ENTRIES \
- ((ULONG_MAX - ZPCI_IOMAP_ADDR_BASE + 1) / (1UL << ZPCI_IOMAP_SHIFT))
+ (1UL << (ZPCI_IOMAP_ADDR_SHIFT - ZPCI_IOMAP_SHIFT))
#define ZPCI_IOMAP_ADDR_IDX_MASK \
- (~ZPCI_IOMAP_ADDR_OFF_MASK - ZPCI_IOMAP_ADDR_BASE)
+ ((ZPCI_IOMAP_ADDR_BASE - 1) & ~ZPCI_IOMAP_ADDR_OFF_MASK)
struct zpci_iomap_entry {
u32 fh;
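The reworked constants can be sanity-checked with ordinary arithmetic. A stand-alone sketch for an LP64 host (illustration only), showing that the map base is the single bit 62, that 2^(62-48) = 16384 entries fit below it, and that the index mask covers bits 48..61:

#include <stdio.h>

#define ZPCI_IOMAP_SHIFT		48
#define ZPCI_IOMAP_ADDR_SHIFT		62
#define ZPCI_IOMAP_ADDR_BASE		(1UL << ZPCI_IOMAP_ADDR_SHIFT)
#define ZPCI_IOMAP_ADDR_OFF_MASK	((1UL << ZPCI_IOMAP_SHIFT) - 1)
#define ZPCI_IOMAP_MAX_ENTRIES		(1UL << (ZPCI_IOMAP_ADDR_SHIFT - ZPCI_IOMAP_SHIFT))
#define ZPCI_IOMAP_ADDR_IDX_MASK	((ZPCI_IOMAP_ADDR_BASE - 1) & ~ZPCI_IOMAP_ADDR_OFF_MASK)

int main(void)
{
	printf("base       = 0x%lx\n", ZPCI_IOMAP_ADDR_BASE);		/* bit 62 */
	printf("entries    = %lu\n", ZPCI_IOMAP_MAX_ENTRIES);		/* 16384 */
	printf("index mask = 0x%lx\n", ZPCI_IOMAP_ADDR_IDX_MASK);	/* bits 48..61 */
	return 0;
}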
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 5510c7d10ddc..21d62d8b6b9a 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -290,7 +290,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
return;
regs = ftrace_get_regs(fregs);
- preempt_disable_notrace();
p = get_kprobe((kprobe_opcode_t *)ip);
if (unlikely(!p) || kprobe_disabled(p))
goto out;
@@ -318,7 +317,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
}
__this_cpu_write(current_kprobe, NULL);
out:
- preempt_enable_notrace();
ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 0df83ecaa2e0..cb7099682340 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -138,7 +138,7 @@ void noinstr do_io_irq(struct pt_regs *regs)
struct pt_regs *old_regs = set_irq_regs(regs);
int from_idle;
- irq_enter();
+ irq_enter_rcu();
if (user_mode(regs)) {
update_timer_sys();
@@ -158,7 +158,8 @@ void noinstr do_io_irq(struct pt_regs *regs)
do_irq_async(regs, IO_INTERRUPT);
} while (MACHINE_IS_LPAR && irq_pending(regs));
- irq_exit();
+ irq_exit_rcu();
+
set_irq_regs(old_regs);
irqentry_exit(regs, state);
@@ -172,7 +173,7 @@ void noinstr do_ext_irq(struct pt_regs *regs)
struct pt_regs *old_regs = set_irq_regs(regs);
int from_idle;
- irq_enter();
+ irq_enter_rcu();
if (user_mode(regs)) {
update_timer_sys();
@@ -190,7 +191,7 @@ void noinstr do_ext_irq(struct pt_regs *regs)
do_irq_async(regs, EXT_INTERRUPT);
- irq_exit();
+ irq_exit_rcu();
set_irq_regs(old_regs);
irqentry_exit(regs, state);
diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c
index 9975ad200d74..8f43575a4dd3 100644
--- a/arch/s390/kernel/machine_kexec_file.c
+++ b/arch/s390/kernel/machine_kexec_file.c
@@ -7,6 +7,8 @@
* Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com>
*/
+#define pr_fmt(fmt) "kexec: " fmt
+
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kexec.h>
@@ -290,8 +292,16 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
const Elf_Shdr *relsec,
const Elf_Shdr *symtab)
{
+ const char *strtab, *name, *shstrtab;
+ const Elf_Shdr *sechdrs;
Elf_Rela *relas;
int i, r_type;
+ int ret;
+
+ /* String & section header string table */
+ sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
+ strtab = (char *)pi->ehdr + sechdrs[symtab->sh_link].sh_offset;
+ shstrtab = (char *)pi->ehdr + sechdrs[pi->ehdr->e_shstrndx].sh_offset;
relas = (void *)pi->ehdr + relsec->sh_offset;
@@ -304,15 +314,27 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
sym = (void *)pi->ehdr + symtab->sh_offset;
sym += ELF64_R_SYM(relas[i].r_info);
- if (sym->st_shndx == SHN_UNDEF)
+ if (sym->st_name)
+ name = strtab + sym->st_name;
+ else
+ name = shstrtab + sechdrs[sym->st_shndx].sh_name;
+
+ if (sym->st_shndx == SHN_UNDEF) {
+ pr_err("Undefined symbol: %s\n", name);
return -ENOEXEC;
+ }
- if (sym->st_shndx == SHN_COMMON)
+ if (sym->st_shndx == SHN_COMMON) {
+ pr_err("symbol '%s' in common section\n", name);
return -ENOEXEC;
+ }
if (sym->st_shndx >= pi->ehdr->e_shnum &&
- sym->st_shndx != SHN_ABS)
+ sym->st_shndx != SHN_ABS) {
+ pr_err("Invalid section %d for symbol %s\n",
+ sym->st_shndx, name);
return -ENOEXEC;
+ }
loc = pi->purgatory_buf;
loc += section->sh_offset;
@@ -326,7 +348,15 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
addr = section->sh_addr + relas[i].r_offset;
r_type = ELF64_R_TYPE(relas[i].r_info);
- arch_kexec_do_relocs(r_type, loc, val, addr);
+
+ if (r_type == R_390_PLT32DBL)
+ r_type = R_390_PC32DBL;
+
+ ret = arch_kexec_do_relocs(r_type, loc, val, addr);
+ if (ret) {
+ pr_err("Unknown rela relocation: %d\n", r_type);
+ return -ENOEXEC;
+ }
}
return 0;
}
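The diagnostics added above need a printable name for each relocated symbol; section symbols usually have st_name == 0, so the name falls back to the section header string table. A minimal user-space sketch of that lookup (illustration only, using <elf.h> types rather than the kexec purgatory structures, and assuming the whole ELF image is mapped contiguously starting at the header):

#include <elf.h>

static const char *sym_display_name(const Elf64_Ehdr *ehdr,
				    const Elf64_Shdr *sechdrs,
				    const Elf64_Shdr *symtab,
				    const Elf64_Sym *sym)
{
	const char *strtab   = (const char *)ehdr + sechdrs[symtab->sh_link].sh_offset;
	const char *shstrtab = (const char *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;

	/* named symbol: use the symbol string table; otherwise fall back
	 * to the name of the section the symbol refers to */
	return sym->st_name ? strtab + sym->st_name
			    : shstrtab + sechdrs[sym->st_shndx].sh_name;
}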
diff --git a/arch/s390/lib/test_unwind.c b/arch/s390/lib/test_unwind.c
index cfc5f5557c06..bc7973359ae2 100644
--- a/arch/s390/lib/test_unwind.c
+++ b/arch/s390/lib/test_unwind.c
@@ -173,10 +173,11 @@ static noinline int unwindme_func4(struct unwindme *u)
}
/*
- * trigger specification exception
+ * Trigger operation exception; use insn notation to bypass
+ * llvm's integrated assembler sanity checks.
*/
asm volatile(
- " mvcl %%r1,%%r1\n"
+ " .insn e,0x0000\n" /* illegal opcode */
"0: nopr %%r7\n"
EX_TABLE(0b, 0b)
:);
diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index c7a97f32432f..481a664287e2 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -43,7 +43,6 @@ extern void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
extern void flush_icache_range(unsigned long start, unsigned long end);
#define flush_icache_user_range flush_icache_range
extern void flush_icache_page(struct vm_area_struct *vma,
diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl
index 208f131659c5..d9539d28bdaa 100644
--- a/arch/sh/kernel/syscalls/syscall.tbl
+++ b/arch/sh/kernel/syscalls/syscall.tbl
@@ -451,3 +451,4 @@
446 common landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl
index c37764dc764d..46adabcb1720 100644
--- a/arch/sparc/kernel/syscalls/syscall.tbl
+++ b/arch/sparc/kernel/syscalls/syscall.tbl
@@ -494,3 +494,4 @@
446 common landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 7399327d1eff..5c2ccb85f2ef 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1932,6 +1932,7 @@ config EFI
depends on ACPI
select UCS2_STRING
select EFI_RUNTIME_WRAPPERS
+ select ARCH_USE_MEMREMAP_PROT
help
This enables the kernel to use EFI runtime services that are
available (such as the EFI variable services).
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index e38a4cf795d9..97b1f84bb53f 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -574,6 +574,10 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
ud2
1:
#endif
+#ifdef CONFIG_XEN_PV
+ ALTERNATIVE "", "jmp xenpv_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
+#endif
+
POP_REGS pop_rdi=0
/*
@@ -890,6 +894,7 @@ SYM_CODE_START_LOCAL(paranoid_entry)
.Lparanoid_entry_checkgs:
/* EBX = 1 -> kernel GSBASE active, no restore required */
movl $1, %ebx
+
/*
* The kernel-enforced convention is a negative GSBASE indicates
* a kernel value. No SWAPGS needed on entry and exit.
@@ -897,21 +902,14 @@ SYM_CODE_START_LOCAL(paranoid_entry)
movl $MSR_GS_BASE, %ecx
rdmsr
testl %edx, %edx
- jns .Lparanoid_entry_swapgs
- ret
+ js .Lparanoid_kernel_gsbase
-.Lparanoid_entry_swapgs:
+ /* EBX = 0 -> SWAPGS required on exit */
+ xorl %ebx, %ebx
swapgs
+.Lparanoid_kernel_gsbase:
- /*
- * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
- * unconditional CR3 write, even in the PTI case. So do an lfence
- * to prevent GS speculation, regardless of whether PTI is enabled.
- */
FENCE_SWAPGS_KERNEL_ENTRY
-
- /* EBX = 0 -> SWAPGS required on exit */
- xorl %ebx, %ebx
ret
SYM_CODE_END(paranoid_entry)
@@ -993,11 +991,6 @@ SYM_CODE_START_LOCAL(error_entry)
pushq %r12
ret
-.Lerror_entry_done_lfence:
- FENCE_SWAPGS_KERNEL_ENTRY
-.Lerror_entry_done:
- ret
-
/*
* There are two places in the kernel that can potentially fault with
* usergs. Handle them here. B stepping K8s sometimes report a
@@ -1020,8 +1013,14 @@ SYM_CODE_START_LOCAL(error_entry)
* .Lgs_change's error handler with kernel gsbase.
*/
SWAPGS
- FENCE_SWAPGS_USER_ENTRY
- jmp .Lerror_entry_done
+
+ /*
+ * Issue an LFENCE to prevent GS speculation, regardless of whether it is a
+ * kernel or user gsbase.
+ */
+.Lerror_entry_done_lfence:
+ FENCE_SWAPGS_KERNEL_ENTRY
+ ret
.Lbstep_iret:
/* Fix truncated RIP */
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 4d0b126835b8..63158fd55856 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -197,8 +197,6 @@ static inline bool efi_runtime_supported(void)
extern void parse_efi_setup(u64 phys_addr, u32 data_len);
-extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
-
extern void efi_thunk_runtime_setup(void);
efi_status_t efi_set_virtual_address_map(unsigned long memory_map_size,
unsigned long descriptor_size,
diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
index 6053674f9132..c2767a6a387e 100644
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -102,12 +102,6 @@ extern void switch_fpu_return(void);
*/
extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);
-/*
- * Tasks that are not using SVA have mm->pasid set to zero to note that they
- * will not have the valid bit set in MSR_IA32_PASID while they are running.
- */
-#define PASID_DISABLED 0
-
/* Trap handling */
extern int fpu__exception_code(struct fpu *fpu, int trap_nr);
extern void fpu_sync_fpstate(struct fpu *fpu);
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 5a0bcf8b78d7..048b6d5aff50 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -108,7 +108,7 @@
#define INTEL_FAM6_ALDERLAKE 0x97 /* Golden Cove / Gracemont */
#define INTEL_FAM6_ALDERLAKE_L 0x9A /* Golden Cove / Gracemont */
-#define INTEL_FAM6_RAPTOR_LAKE 0xB7
+#define INTEL_FAM6_RAPTORLAKE 0xB7
/* "Small Core" Processors (Atom) */
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index cefe1d81e2e8..9e50da3ed01a 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -47,6 +47,7 @@ KVM_X86_OP(set_dr7)
KVM_X86_OP(cache_reg)
KVM_X86_OP(get_rflags)
KVM_X86_OP(set_rflags)
+KVM_X86_OP(get_if_flag)
KVM_X86_OP(tlb_flush_all)
KVM_X86_OP(tlb_flush_current)
KVM_X86_OP_NULL(tlb_remote_flush)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6ac61f85e07b..555f4de47ef2 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -97,7 +97,7 @@
KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_TLB_FLUSH_CURRENT KVM_ARCH_REQ(26)
#define KVM_REQ_TLB_FLUSH_GUEST \
- KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_NO_WAKEUP)
+ KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_APF_READY KVM_ARCH_REQ(28)
#define KVM_REQ_MSR_FILTER_CHANGED KVM_ARCH_REQ(29)
#define KVM_REQ_UPDATE_CPU_DIRTY_LOGGING \
@@ -1036,6 +1036,7 @@ struct kvm_x86_msr_filter {
#define APICV_INHIBIT_REASON_PIT_REINJ 4
#define APICV_INHIBIT_REASON_X2APIC 5
#define APICV_INHIBIT_REASON_BLOCKIRQ 6
+#define APICV_INHIBIT_REASON_ABSENT 7
struct kvm_arch {
unsigned long n_used_mmu_pages;
@@ -1348,6 +1349,7 @@ struct kvm_x86_ops {
void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
+ bool (*get_if_flag)(struct kvm_vcpu *vcpu);
void (*tlb_flush_all)(struct kvm_vcpu *vcpu);
void (*tlb_flush_current)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/include/asm/pkru.h b/arch/x86/include/asm/pkru.h
index 4cd49afa0ca4..74f0a2d34ffd 100644
--- a/arch/x86/include/asm/pkru.h
+++ b/arch/x86/include/asm/pkru.h
@@ -4,8 +4,8 @@
#include <asm/cpufeature.h>
-#define PKRU_AD_BIT 0x1
-#define PKRU_WD_BIT 0x2
+#define PKRU_AD_BIT 0x1u
+#define PKRU_WD_BIT 0x2u
#define PKRU_BITS_PER_PKEY 2
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
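The "u" suffix matters once the disable bits are shifted up to the top protection key: with a plain signed constant, 0x2 << 30 shifts into the sign bit of int (formally undefined behaviour) and then sign-extends when widened, while the unsigned form is simply bit 31. A small stand-alone illustration (the 16-key layout is an assumption mirroring 2 bits per key; this is not the kernel's pkru helper):

#include <stdint.h>
#include <stdio.h>

#define PKRU_WD_BIT_SIGNED	0x2
#define PKRU_WD_BIT_UNSIGNED	0x2u
#define PKRU_BITS_PER_PKEY	2

int main(void)
{
	int pkey = 15;	/* highest protection key */

	/* 0x2 << 30 lands in the sign bit of int (formally undefined) and
	 * sign-extends when converted to a 64-bit value */
	uint64_t bad  = (uint64_t)(PKRU_WD_BIT_SIGNED   << (pkey * PKRU_BITS_PER_PKEY));
	/* 0x2u << 30 is a well-defined unsigned value: just bit 31 */
	uint64_t good = (uint64_t)(PKRU_WD_BIT_UNSIGNED << (pkey * PKRU_BITS_PER_PKEY));

	printf("signed constant:   0x%llx\n", (unsigned long long)bad);
	printf("unsigned constant: 0x%llx\n", (unsigned long long)good);
	return 0;
}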
diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
index 2cef6c5a52c2..6acaf5af0a3d 100644
--- a/arch/x86/include/asm/sev-common.h
+++ b/arch/x86/include/asm/sev-common.h
@@ -73,4 +73,15 @@
#define GHCB_RESP_CODE(v) ((v) & GHCB_MSR_INFO_MASK)
+/*
+ * Error codes related to GHCB input that can be communicated back to the guest
+ * by setting the lower 32-bits of the GHCB SW_EXITINFO1 field to 2.
+ */
+#define GHCB_ERR_NOT_REGISTERED 1
+#define GHCB_ERR_INVALID_USAGE 2
+#define GHCB_ERR_INVALID_SCRATCH_AREA 3
+#define GHCB_ERR_MISSING_INPUT 4
+#define GHCB_ERR_INVALID_INPUT 5
+#define GHCB_ERR_INVALID_EVENT 6
+
#endif
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 0575f5863b7f..e5e0fe10c692 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -281,13 +281,13 @@ HYPERVISOR_callback_op(int cmd, void *arg)
return _hypercall2(int, callback_op, cmd, arg);
}
-static inline int
+static __always_inline int
HYPERVISOR_set_debugreg(int reg, unsigned long value)
{
return _hypercall2(int, set_debugreg, reg, value);
}
-static inline unsigned long
+static __always_inline unsigned long
HYPERVISOR_get_debugreg(int reg)
{
return _hypercall1(unsigned long, get_debugreg, reg);
diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
index 4957f59deb40..5adab895127e 100644
--- a/arch/x86/include/asm/xen/hypervisor.h
+++ b/arch/x86/include/asm/xen/hypervisor.h
@@ -64,6 +64,7 @@ void xen_arch_unregister_cpu(int num);
#ifdef CONFIG_PVH
void __init xen_pvh_init(struct boot_params *boot_params);
+void __init mem_map_via_hcall(struct boot_params *boot_params_p);
#endif
#endif /* _ASM_X86_XEN_HYPERVISOR_H */
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index d5958278eba6..91d4b6de58ab 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -118,7 +118,7 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame,
struct fpstate *fpstate)
{
struct xregs_state __user *x = buf;
- struct _fpx_sw_bytes sw_bytes;
+ struct _fpx_sw_bytes sw_bytes = {};
u32 xfeatures;
int err;
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index c410be738ae7..e04f5e6eb33f 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -713,9 +713,6 @@ static void __init early_reserve_memory(void)
early_reserve_initrd();
- if (efi_enabled(EFI_BOOT))
- efi_memblock_x86_reserve_range();
-
memblock_x86_reserve_range_setup_data();
reserve_ibft_region();
@@ -742,28 +739,6 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
return 0;
}
-static char *prepare_command_line(void)
-{
-#ifdef CONFIG_CMDLINE_BOOL
-#ifdef CONFIG_CMDLINE_OVERRIDE
- strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
-#else
- if (builtin_cmdline[0]) {
- /* append boot loader cmdline to builtin */
- strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
- strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
- strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
- }
-#endif
-#endif
-
- strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
-
- parse_early_param();
-
- return command_line;
-}
-
/*
* Determine if we were loaded by an EFI loader. If so, then we have also been
* passed the efi memmap, systab, etc., so we should use these data structures
@@ -853,23 +828,6 @@ void __init setup_arch(char **cmdline_p)
x86_init.oem.arch_setup();
/*
- * x86_configure_nx() is called before parse_early_param() (called by
- * prepare_command_line()) to detect whether hardware doesn't support
- * NX (so that the early EHCI debug console setup can safely call
- * set_fixmap()). It may then be called again from within noexec_setup()
- * during parsing early parameters to honor the respective command line
- * option.
- */
- x86_configure_nx();
-
- /*
- * This parses early params and it needs to run before
- * early_reserve_memory() because latter relies on such settings
- * supplied as early params.
- */
- *cmdline_p = prepare_command_line();
-
- /*
* Do some memory reservations *before* memory is added to memblock, so
* memblock allocations won't overwrite it.
*
@@ -902,6 +860,36 @@ void __init setup_arch(char **cmdline_p)
bss_resource.start = __pa_symbol(__bss_start);
bss_resource.end = __pa_symbol(__bss_stop)-1;
+#ifdef CONFIG_CMDLINE_BOOL
+#ifdef CONFIG_CMDLINE_OVERRIDE
+ strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+#else
+ if (builtin_cmdline[0]) {
+ /* append boot loader cmdline to builtin */
+ strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
+ strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+ strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+ }
+#endif
+#endif
+
+ strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+ *cmdline_p = command_line;
+
+ /*
+ * x86_configure_nx() is called before parse_early_param() to detect
+ * whether hardware doesn't support NX (so that the early EHCI debug
+ * console setup can safely call set_fixmap()). It may then be called
+ * again from within noexec_setup() during parsing early parameters
+ * to honor the respective command line option.
+ */
+ x86_configure_nx();
+
+ parse_early_param();
+
+ if (efi_enabled(EFI_BOOT))
+ efi_memblock_x86_reserve_range();
+
#ifdef CONFIG_MEMORY_HOTPLUG
/*
* Memory used by the kernel cannot be hot-removed because Linux
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index 74f0ec955384..a9fc2ac7a8bd 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -294,11 +294,6 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
char *dst, char *buf, size_t size)
{
unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;
- char __user *target = (char __user *)dst;
- u64 d8;
- u32 d4;
- u16 d2;
- u8 d1;
/*
* This function uses __put_user() independent of whether kernel or user
@@ -320,26 +315,42 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
* instructions here would cause infinite nesting.
*/
switch (size) {
- case 1:
+ case 1: {
+ u8 d1;
+ u8 __user *target = (u8 __user *)dst;
+
memcpy(&d1, buf, 1);
if (__put_user(d1, target))
goto fault;
break;
- case 2:
+ }
+ case 2: {
+ u16 d2;
+ u16 __user *target = (u16 __user *)dst;
+
memcpy(&d2, buf, 2);
if (__put_user(d2, target))
goto fault;
break;
- case 4:
+ }
+ case 4: {
+ u32 d4;
+ u32 __user *target = (u32 __user *)dst;
+
memcpy(&d4, buf, 4);
if (__put_user(d4, target))
goto fault;
break;
- case 8:
+ }
+ case 8: {
+ u64 d8;
+ u64 __user *target = (u64 __user *)dst;
+
memcpy(&d8, buf, 8);
if (__put_user(d8, target))
goto fault;
break;
+ }
default:
WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
return ES_UNSUPPORTED;
@@ -362,11 +373,6 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
char *src, char *buf, size_t size)
{
unsigned long error_code = X86_PF_PROT;
- char __user *s = (char __user *)src;
- u64 d8;
- u32 d4;
- u16 d2;
- u8 d1;
/*
* This function uses __get_user() independent of whether kernel or user
@@ -388,26 +394,41 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
* instructions here would cause infinite nesting.
*/
switch (size) {
- case 1:
+ case 1: {
+ u8 d1;
+ u8 __user *s = (u8 __user *)src;
+
if (__get_user(d1, s))
goto fault;
memcpy(buf, &d1, 1);
break;
- case 2:
+ }
+ case 2: {
+ u16 d2;
+ u16 __user *s = (u16 __user *)src;
+
if (__get_user(d2, s))
goto fault;
memcpy(buf, &d2, 2);
break;
- case 4:
+ }
+ case 4: {
+ u32 d4;
+ u32 __user *s = (u32 __user *)src;
+
if (__get_user(d4, s))
goto fault;
memcpy(buf, &d4, 4);
break;
- case 8:
+ }
+ case 8: {
+ u64 d8;
+ u64 __user *s = (u64 __user *)src;
if (__get_user(d8, s))
goto fault;
memcpy(buf, &d8, 8);
break;
+ }
default:
WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
return ES_UNSUPPORTED;
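The point of the reshuffle in vc_write_mem()/vc_read_mem() is that __put_user() and __get_user() derive the access width from the pointer type, so each case now builds a pointer of exactly the emulated size instead of going through a char pointer. A rough user-space analogue (not the kernel helpers; plain typed stores stand in for __put_user()):

#include <stdint.h>
#include <string.h>

static int write_sized(void *dst, const void *buf, size_t size)
{
	switch (size) {
	case 1: { uint8_t  v; memcpy(&v, buf, 1); *(volatile uint8_t  *)dst = v; break; }
	case 2: { uint16_t v; memcpy(&v, buf, 2); *(volatile uint16_t *)dst = v; break; }
	case 4: { uint32_t v; memcpy(&v, buf, 4); *(volatile uint32_t *)dst = v; break; }
	case 8: { uint64_t v; memcpy(&v, buf, 8); *(volatile uint64_t *)dst = v; break; }
	default:
		return -1;	/* the kernel warns and returns ES_UNSUPPORTED here */
	}
	return 0;
}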
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ac2909f0cab3..617012f4619f 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -579,6 +579,17 @@ static struct sched_domain_topology_level x86_numa_in_package_topology[] = {
{ NULL, },
};
+static struct sched_domain_topology_level x86_hybrid_topology[] = {
+#ifdef CONFIG_SCHED_SMT
+ { cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
+#endif
+#ifdef CONFIG_SCHED_MC
+ { cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) },
+#endif
+ { cpu_cpu_mask, SD_INIT_NAME(DIE) },
+ { NULL, },
+};
+
static struct sched_domain_topology_level x86_topology[] = {
#ifdef CONFIG_SCHED_SMT
{ cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
@@ -1469,8 +1480,11 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
calculate_max_logical_packages();
+ /* XXX for now assume numa-in-package and hybrid don't overlap */
if (x86_has_numa_in_package)
set_sched_topology(x86_numa_in_package_topology);
+ if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
+ set_sched_topology(x86_hybrid_topology);
nmi_selftest();
impress_friends();
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 2e076a459a0c..a698196377be 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1180,6 +1180,12 @@ void mark_tsc_unstable(char *reason)
EXPORT_SYMBOL_GPL(mark_tsc_unstable);
+static void __init tsc_disable_clocksource_watchdog(void)
+{
+ clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+ clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+}
+
static void __init check_system_tsc_reliable(void)
{
#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
@@ -1196,6 +1202,23 @@ static void __init check_system_tsc_reliable(void)
#endif
if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
tsc_clocksource_reliable = 1;
+
+ /*
+ * Disable the clocksource watchdog when the system has:
+ * - TSC running at constant frequency
+ * - TSC which does not stop in C-States
+	 *	- the TSC_ADJUST register which allows detecting even minimal
+ * modifications
+ * - not more than two sockets. As the number of sockets cannot be
+ * evaluated at the early boot stage where this has to be
+ * invoked, check the number of online memory nodes as a
+	 *	  fallback solution which is a reasonable estimate.
+ */
+ if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
+ boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
+ boot_cpu_has(X86_FEATURE_TSC_ADJUST) &&
+ nr_online_nodes <= 2)
+ tsc_disable_clocksource_watchdog();
}
/*
@@ -1387,9 +1410,6 @@ static int __init init_tsc_clocksource(void)
if (tsc_unstable)
goto unreg;
- if (tsc_clocksource_reliable || no_tsc_watchdog)
- clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
-
if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
@@ -1527,7 +1547,7 @@ void __init tsc_init(void)
}
if (tsc_clocksource_reliable || no_tsc_watchdog)
- clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+ tsc_disable_clocksource_watchdog();
clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
detect_art();
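The watchdog-disable policy added earlier in this file condenses into one predicate; the following is a sketch of the decision only, not the kernel code, with the CPU feature flags passed in as plain booleans:

#include <stdbool.h>

/* Returns true when the clocksource watchdog can be switched off for the TSC. */
static bool tsc_watchdog_can_be_disabled(bool constant_tsc, bool nonstop_tsc,
					 bool has_tsc_adjust, int nr_online_nodes)
{
	return constant_tsc && nonstop_tsc && has_tsc_adjust &&
	       nr_online_nodes <= 2;	/* node count as a proxy for socket count */
}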
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 50a4515fe0ad..9452dc9664b5 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -30,6 +30,7 @@ struct tsc_adjust {
};
static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
+static struct timer_list tsc_sync_check_timer;
/*
* TSC's on different sockets may be reset asynchronously.
@@ -77,6 +78,46 @@ void tsc_verify_tsc_adjust(bool resume)
}
}
+/*
+ * Normally the tsc_sync will be checked every time the system enters idle
+ * state, but there is still a caveat that a system won't enter idle,
+ * either because it's too busy or configured purposely to not enter
+ * idle.
+ *
+ * So setup a periodic timer (every 10 minutes) to make sure the check
+ * is always on.
+ */
+
+#define SYNC_CHECK_INTERVAL (HZ * 600)
+
+static void tsc_sync_check_timer_fn(struct timer_list *unused)
+{
+ int next_cpu;
+
+ tsc_verify_tsc_adjust(false);
+
+ /* Run the check for all onlined CPUs in turn */
+ next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
+ if (next_cpu >= nr_cpu_ids)
+ next_cpu = cpumask_first(cpu_online_mask);
+
+ tsc_sync_check_timer.expires += SYNC_CHECK_INTERVAL;
+ add_timer_on(&tsc_sync_check_timer, next_cpu);
+}
+
+static int __init start_sync_check_timer(void)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_TSC_ADJUST) || tsc_clocksource_reliable)
+ return 0;
+
+ timer_setup(&tsc_sync_check_timer, tsc_sync_check_timer_fn, 0);
+ tsc_sync_check_timer.expires = jiffies + SYNC_CHECK_INTERVAL;
+ add_timer(&tsc_sync_check_timer);
+
+ return 0;
+}
+late_initcall(start_sync_check_timer);
+
static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
unsigned int cpu, bool bootcpu)
{
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 5e19e6e4c2ce..8d8c1cc7cb53 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1922,11 +1922,13 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;
+ if (all_cpus)
+ goto check_and_send_ipi;
+
if (!sparse_banks_len)
goto ret_success;
- if (!all_cpus &&
- kvm_read_guest(kvm,
+ if (kvm_read_guest(kvm,
hc->ingpa + offsetof(struct hv_send_ipi_ex,
vp_set.bank_contents),
sparse_banks,
@@ -1934,6 +1936,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
return HV_STATUS_INVALID_HYPERCALL_INPUT;
}
+check_and_send_ipi:
if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
index e66e620c3bed..539333ac4b38 100644
--- a/arch/x86/kvm/ioapic.h
+++ b/arch/x86/kvm/ioapic.h
@@ -81,7 +81,6 @@ struct kvm_ioapic {
unsigned long irq_states[IOAPIC_NUM_PINS];
struct kvm_io_device dev;
struct kvm *kvm;
- void (*ack_notifier)(void *opaque, int irq);
spinlock_t lock;
struct rtc_status rtc_status;
struct delayed_work eoi_inject;
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index 650642b18d15..c2d7cfe82d00 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -56,7 +56,6 @@ struct kvm_pic {
struct kvm_io_device dev_master;
struct kvm_io_device dev_slave;
struct kvm_io_device dev_elcr;
- void (*ack_notifier)(void *opaque, int irq);
unsigned long irq_states[PIC_NUM_PINS];
};
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 759952dd1222..f206fc35deff 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -707,7 +707,7 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
{
int highest_irr;
- if (apic->vcpu->arch.apicv_active)
+ if (kvm_x86_ops.sync_pir_to_irr)
highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
else
highest_irr = apic_find_highest_irr(apic);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 3be9beea838d..fcdf3f8bb59a 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1582,7 +1582,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
if (is_tdp_mmu_enabled(kvm))
- flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
+ flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
return flush;
}
@@ -1936,7 +1936,11 @@ static void mmu_audit_disable(void) { }
static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
- return sp->role.invalid ||
+ if (sp->role.invalid)
+ return true;
+
+	/* TDP MMU pages do not use the MMU generation. */
+ return !sp->tdp_mmu_page &&
unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
}
@@ -2173,10 +2177,10 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato
iterator->shadow_addr = root;
iterator->level = vcpu->arch.mmu->shadow_root_level;
- if (iterator->level == PT64_ROOT_4LEVEL &&
+ if (iterator->level >= PT64_ROOT_4LEVEL &&
vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
!vcpu->arch.mmu->direct_map)
- --iterator->level;
+ iterator->level = PT32E_ROOT_LEVEL;
if (iterator->level == PT32E_ROOT_LEVEL) {
/*
@@ -3976,6 +3980,34 @@ out_retry:
return true;
}
+/*
+ * Returns true if the page fault is stale and needs to be retried, i.e. if the
+ * root was invalidated by a memslot update or a relevant mmu_notifier fired.
+ */
+static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
+ struct kvm_page_fault *fault, int mmu_seq)
+{
+ struct kvm_mmu_page *sp = to_shadow_page(vcpu->arch.mmu->root_hpa);
+
+ /* Special roots, e.g. pae_root, are not backed by shadow pages. */
+ if (sp && is_obsolete_sp(vcpu->kvm, sp))
+ return true;
+
+ /*
+ * Roots without an associated shadow page are considered invalid if
+ * there is a pending request to free obsolete roots. The request is
+ * only a hint that the current root _may_ be obsolete and needs to be
+ * reloaded, e.g. if the guest frees a PGD that KVM is tracking as a
+ * previous root, then __kvm_mmu_prepare_zap_page() signals all vCPUs
+ * to reload even if no vCPU is actively using the root.
+ */
+ if (!sp && kvm_test_request(KVM_REQ_MMU_RELOAD, vcpu))
+ return true;
+
+ return fault->slot &&
+ mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva);
+}
+
static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
@@ -4013,8 +4045,9 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
else
write_lock(&vcpu->kvm->mmu_lock);
- if (fault->slot && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva))
+ if (is_page_fault_stale(vcpu, fault, mmu_seq))
goto out_unlock;
+
r = make_mmu_pages_available(vcpu);
if (r)
goto out_unlock;
@@ -4855,7 +4888,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
struct kvm_mmu *context = &vcpu->arch.guest_mmu;
struct kvm_mmu_role_regs regs = {
.cr0 = cr0,
- .cr4 = cr4,
+ .cr4 = cr4 & ~X86_CR4_PKE,
.efer = efer,
};
union kvm_mmu_role new_role;
@@ -4919,7 +4952,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
context->direct_map = false;
update_permission_bitmask(context, true);
- update_pkru_bitmask(context);
+ context->pkru_mask = 0;
reset_rsvds_bits_mask_ept(vcpu, context, execonly);
reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
}
@@ -5025,6 +5058,14 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
/*
* Invalidate all MMU roles to force them to reinitialize as CPUID
* information is factored into reserved bit calculations.
+ *
+ * Correctly handling multiple vCPU models (with respect to paging and
+ * physical address properties) in a single VM would require tracking
+ * all relevant CPUID information in kvm_mmu_page_role. That is very
+ * undesirable as it would increase the memory requirements for
+ * gfn_track (see struct kvm_mmu_page_role comments). For now that
+ * problem is swept under the rug; KVM's CPUID API is horrific and
+ * it's all but impossible to solve it without introducing a new API.
*/
vcpu->arch.root_mmu.mmu_role.ext.valid = 0;
vcpu->arch.guest_mmu.mmu_role.ext.valid = 0;
@@ -5032,24 +5073,10 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
kvm_mmu_reset_context(vcpu);
/*
- * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
- * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
- * tracked in kvm_mmu_page_role. As a result, KVM may miss guest page
- * faults due to reusing SPs/SPTEs. Alert userspace, but otherwise
- * sweep the problem under the rug.
- *
- * KVM's horrific CPUID ABI makes the problem all but impossible to
- * solve, as correctly handling multiple vCPU models (with respect to
- * paging and physical address properties) in a single VM would require
- * tracking all relevant CPUID information in kvm_mmu_page_role. That
- * is very undesirable as it would double the memory requirements for
- * gfn_track (see struct kvm_mmu_page_role comments), and in practice
- * no sane VMM mucks with the core vCPU model on the fly.
+ * Changing guest CPUID after KVM_RUN is forbidden, see the comment in
+ * kvm_arch_vcpu_ioctl().
*/
- if (vcpu->arch.last_vmentry_cpu != -1) {
- pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} after KVM_RUN may cause guest instability\n");
- pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} will fail after KVM_RUN starting with Linux 5.16\n");
- }
+ KVM_BUG_ON(vcpu->arch.last_vmentry_cpu != -1, vcpu->kvm);
}
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
@@ -5369,7 +5396,7 @@ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
- kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE);
+ kvm_mmu_invalidate_gva(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE);
++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
@@ -5854,8 +5881,6 @@ restart:
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
const struct kvm_memory_slot *slot)
{
- bool flush = false;
-
if (kvm_memslots_have_rmaps(kvm)) {
write_lock(&kvm->mmu_lock);
/*
@@ -5863,17 +5888,14 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
* logging at a 4k granularity and never creates collapsible
* 2m SPTEs during dirty logging.
*/
- flush = slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
- if (flush)
+ if (slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true))
kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
write_unlock(&kvm->mmu_lock);
}
if (is_tdp_mmu_enabled(kvm)) {
read_lock(&kvm->mmu_lock);
- flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
- if (flush)
- kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
+ kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
read_unlock(&kvm->mmu_lock);
}
}
@@ -6182,23 +6204,46 @@ void kvm_mmu_module_exit(void)
mmu_audit_disable();
}
+/*
+ * Calculate the effective recovery period, accounting for '0' meaning "let KVM
+ * select a halving time of 1 hour". Returns true if recovery is enabled.
+ */
+static bool calc_nx_huge_pages_recovery_period(uint *period)
+{
+ /*
+ * Use READ_ONCE to get the params, this may be called outside of the
+ * param setters, e.g. by the kthread to compute its next timeout.
+ */
+ bool enabled = READ_ONCE(nx_huge_pages);
+ uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
+
+ if (!enabled || !ratio)
+ return false;
+
+ *period = READ_ONCE(nx_huge_pages_recovery_period_ms);
+ if (!*period) {
+ /* Make sure the period is not less than one second. */
+ ratio = min(ratio, 3600u);
+ *period = 60 * 60 * 1000 / ratio;
+ }
+ return true;
+}
+
static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp)
{
bool was_recovery_enabled, is_recovery_enabled;
uint old_period, new_period;
int err;
- was_recovery_enabled = nx_huge_pages_recovery_ratio;
- old_period = nx_huge_pages_recovery_period_ms;
+ was_recovery_enabled = calc_nx_huge_pages_recovery_period(&old_period);
err = param_set_uint(val, kp);
if (err)
return err;
- is_recovery_enabled = nx_huge_pages_recovery_ratio;
- new_period = nx_huge_pages_recovery_period_ms;
+ is_recovery_enabled = calc_nx_huge_pages_recovery_period(&new_period);
- if (READ_ONCE(nx_huge_pages) && is_recovery_enabled &&
+ if (is_recovery_enabled &&
(!was_recovery_enabled || old_period > new_period)) {
struct kvm *kvm;
@@ -6262,18 +6307,13 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
static long get_nx_lpage_recovery_timeout(u64 start_time)
{
- uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
- uint period = READ_ONCE(nx_huge_pages_recovery_period_ms);
+ bool enabled;
+ uint period;
- if (!period && ratio) {
- /* Make sure the period is not less than one second. */
- ratio = min(ratio, 3600u);
- period = 60 * 60 * 1000 / ratio;
- }
+ enabled = calc_nx_huge_pages_recovery_period(&period);
- return READ_ONCE(nx_huge_pages) && ratio
- ? start_time + msecs_to_jiffies(period) - get_jiffies_64()
- : MAX_SCHEDULE_TIMEOUT;
+ return enabled ? start_time + msecs_to_jiffies(period) - get_jiffies_64()
+ : MAX_SCHEDULE_TIMEOUT;
}
static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
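As a rough, standalone illustration of the arithmetic now centralized in calc_nx_huge_pages_recovery_period() above (plain variables stand in for the module params and READ_ONCE(); the toy names are invented), a ratio of 60 with no explicit period yields 60*60*1000/60 = 60000 ms:

#include <stdbool.h>
#include <stdio.h>

/* Userspace model of the period derivation: a period of 0 means "derive the
 * period from the ratio so a full sweep takes roughly an hour". */
static bool toy_calc_recovery_period(bool enabled, unsigned int ratio,
				     unsigned int period_ms, unsigned int *period)
{
	if (!enabled || !ratio)
		return false;

	*period = period_ms;
	if (!*period) {
		/* Clamp the ratio so the derived period is at least one second. */
		if (ratio > 3600u)
			ratio = 3600u;
		*period = 60 * 60 * 1000 / ratio;
	}
	return true;
}

int main(void)
{
	unsigned int period;

	/* ratio=60 and no explicit period: 3600000 / 60 = 60000 ms. */
	if (toy_calc_recovery_period(true, 60, 0, &period))
		printf("recovery period = %u ms\n", period);
	return 0;
}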
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index f87d36898c44..708a5d297fe1 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -911,7 +911,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
r = RET_PF_RETRY;
write_lock(&vcpu->kvm->mmu_lock);
- if (fault->slot && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva))
+
+ if (is_page_fault_stale(vcpu, fault, mmu_seq))
goto out_unlock;
kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
diff --git a/arch/x86/kvm/mmu/tdp_iter.c b/arch/x86/kvm/mmu/tdp_iter.c
index b3ed302c1a35..caa96c270b95 100644
--- a/arch/x86/kvm/mmu/tdp_iter.c
+++ b/arch/x86/kvm/mmu/tdp_iter.c
@@ -26,6 +26,7 @@ static gfn_t round_gfn_for_level(gfn_t gfn, int level)
*/
void tdp_iter_restart(struct tdp_iter *iter)
{
+ iter->yielded = false;
iter->yielded_gfn = iter->next_last_level_gfn;
iter->level = iter->root_level;
@@ -160,6 +161,11 @@ static bool try_step_up(struct tdp_iter *iter)
*/
void tdp_iter_next(struct tdp_iter *iter)
{
+ if (iter->yielded) {
+ tdp_iter_restart(iter);
+ return;
+ }
+
if (try_step_down(iter))
return;
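The new iter->yielded flag moves the restart out of the yield helper: tdp_mmu_iter_cond_resched() (further down, in tdp_mmu.c) only records that the walk yielded, the caller skips to the next iteration, and tdp_iter_next() restarts the walk itself, while the yielded_gfn check still guarantees forward progress. A toy, kernel-independent sketch of that caller/iterator contract (all names and the integer "position" are invented; the real iterator resumes from next_last_level_gfn):

#include <stdbool.h>
#include <stdio.h>

struct toy_iter {
	int pos;		/* stands in for the current gfn */
	int resume_pos;		/* stands in for next_last_level_gfn */
	int yielded_at;		/* stands in for yielded_gfn */
	bool yielded;
};

static void toy_iter_restart(struct toy_iter *it)
{
	it->yielded = false;
	it->pos = it->resume_pos;
}

static void toy_iter_next(struct toy_iter *it)
{
	if (it->yielded) {
		toy_iter_restart(it);
		return;
	}
	it->pos++;
	it->resume_pos = it->pos;
}

/* Stands in for tdp_mmu_iter_cond_resched(): pretend the lock must be
 * dropped at every position that is a multiple of three. */
static bool toy_cond_resched(struct toy_iter *it)
{
	/* Ensure forward progress has been made before yielding again. */
	if (it->pos == it->yielded_at)
		return false;
	if (it->pos % 3 == 0) {
		it->yielded_at = it->pos;
		it->yielded = true;	/* caller must 'continue' */
		return true;
	}
	return false;
}

int main(void)
{
	struct toy_iter it = { .resume_pos = 0, .yielded_at = -1 };

	for (toy_iter_restart(&it); it.pos < 5; toy_iter_next(&it)) {
		if (toy_cond_resched(&it))
			continue;	/* next() will restart the walk */
		printf("visit %d\n", it.pos);
	}
	return 0;
}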
diff --git a/arch/x86/kvm/mmu/tdp_iter.h b/arch/x86/kvm/mmu/tdp_iter.h
index b1748b988d3a..e19cabbcb65c 100644
--- a/arch/x86/kvm/mmu/tdp_iter.h
+++ b/arch/x86/kvm/mmu/tdp_iter.h
@@ -45,6 +45,12 @@ struct tdp_iter {
* iterator walks off the end of the paging structure.
*/
bool valid;
+ /*
+ * True if KVM dropped mmu_lock and yielded in the middle of a walk, in
+ * which case tdp_iter_next() needs to restart the walk at the root
+ * level instead of advancing to the next entry.
+ */
+ bool yielded;
};
/*
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index a54c3491af42..1beb4ca90560 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -317,9 +317,6 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
int level = sp->role.level;
gfn_t base_gfn = sp->gfn;
- u64 old_child_spte;
- u64 *sptep;
- gfn_t gfn;
int i;
trace_kvm_mmu_prepare_zap_page(sp);
@@ -327,8 +324,9 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
tdp_mmu_unlink_page(kvm, sp, shared);
for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
- sptep = rcu_dereference(pt) + i;
- gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
+ u64 *sptep = rcu_dereference(pt) + i;
+ gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
+ u64 old_child_spte;
if (shared) {
/*
@@ -374,7 +372,7 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
shared);
}
- kvm_flush_remote_tlbs_with_address(kvm, gfn,
+ kvm_flush_remote_tlbs_with_address(kvm, base_gfn,
KVM_PAGES_PER_HPAGE(level + 1));
call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
@@ -504,6 +502,8 @@ static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
struct tdp_iter *iter,
u64 new_spte)
{
+ WARN_ON_ONCE(iter->yielded);
+
lockdep_assert_held_read(&kvm->mmu_lock);
/*
@@ -577,6 +577,8 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
u64 new_spte, bool record_acc_track,
bool record_dirty_log)
{
+ WARN_ON_ONCE(iter->yielded);
+
lockdep_assert_held_write(&kvm->mmu_lock);
/*
@@ -642,18 +644,19 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
* If this function should yield and flush is set, it will perform a remote
* TLB flush before yielding.
*
- * If this function yields, it will also reset the tdp_iter's walk over the
- * paging structure and the calling function should skip to the next
- * iteration to allow the iterator to continue its traversal from the
- * paging structure root.
+ * If this function yields, iter->yielded is set and the caller must skip to
+ * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
+ * over the paging structures to allow the iterator to continue its traversal
+ * from the paging structure root.
*
- * Return true if this function yielded and the iterator's traversal was reset.
- * Return false if a yield was not needed.
+ * Returns true if this function yielded.
*/
-static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
- struct tdp_iter *iter, bool flush,
- bool shared)
+static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
+ struct tdp_iter *iter,
+ bool flush, bool shared)
{
+ WARN_ON(iter->yielded);
+
/* Ensure forward progress has been made before yielding. */
if (iter->next_last_level_gfn == iter->yielded_gfn)
return false;
@@ -673,12 +676,10 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
WARN_ON(iter->gfn > iter->next_last_level_gfn);
- tdp_iter_restart(iter);
-
- return true;
+ iter->yielded = true;
}
- return false;
+ return iter->yielded;
}
/*
@@ -1033,9 +1034,9 @@ bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
{
struct kvm_mmu_page *root;
- for_each_tdp_mmu_root(kvm, root, range->slot->as_id)
- flush |= zap_gfn_range(kvm, root, range->start, range->end,
- range->may_block, flush, false);
+ for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false)
+ flush = zap_gfn_range(kvm, root, range->start, range->end,
+ range->may_block, flush, false);
return flush;
}
@@ -1364,10 +1365,9 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
* Clear leaf entries which could be replaced by large mappings, for
* GFNs within the slot.
*/
-static bool zap_collapsible_spte_range(struct kvm *kvm,
+static void zap_collapsible_spte_range(struct kvm *kvm,
struct kvm_mmu_page *root,
- const struct kvm_memory_slot *slot,
- bool flush)
+ const struct kvm_memory_slot *slot)
{
gfn_t start = slot->base_gfn;
gfn_t end = start + slot->npages;
@@ -1378,10 +1378,8 @@ static bool zap_collapsible_spte_range(struct kvm *kvm,
tdp_root_for_each_pte(iter, root, start, end) {
retry:
- if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
- flush = false;
+ if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
continue;
- }
if (!is_shadow_present_pte(iter.old_spte) ||
!is_last_spte(iter.old_spte, iter.level))
@@ -1393,6 +1391,7 @@ retry:
pfn, PG_LEVEL_NUM))
continue;
+ /* Note, a successful atomic zap also does a remote TLB flush. */
if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
/*
* The iter must explicitly re-read the SPTE because
@@ -1401,30 +1400,24 @@ retry:
iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
goto retry;
}
- flush = true;
}
rcu_read_unlock();
-
- return flush;
}
/*
* Clear non-leaf entries (and free associated page tables) which could
* be replaced by large mappings, for GFNs within the slot.
*/
-bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
- const struct kvm_memory_slot *slot,
- bool flush)
+void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
+ const struct kvm_memory_slot *slot)
{
struct kvm_mmu_page *root;
lockdep_assert_held_read(&kvm->mmu_lock);
for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
- flush = zap_collapsible_spte_range(kvm, root, slot, flush);
-
- return flush;
+ zap_collapsible_spte_range(kvm, root, slot);
}
/*
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 476b133544dd..3899004a5d91 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -64,9 +64,8 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn, unsigned long mask,
bool wrprot);
-bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
- const struct kvm_memory_slot *slot,
- bool flush);
+void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
+ const struct kvm_memory_slot *slot);
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
struct kvm_memory_slot *slot, gfn_t gfn,
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index affc0ea98d30..8f9af7b7dbbe 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -900,6 +900,7 @@ out:
bool svm_check_apicv_inhibit_reasons(ulong bit)
{
ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
+ BIT(APICV_INHIBIT_REASON_ABSENT) |
BIT(APICV_INHIBIT_REASON_HYPERV) |
BIT(APICV_INHIBIT_REASON_NESTED) |
BIT(APICV_INHIBIT_REASON_IRQWIN) |
@@ -989,16 +990,18 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ int cpu = get_cpu();
+ WARN_ON(cpu != vcpu->cpu);
svm->avic_is_running = is_run;
- if (!kvm_vcpu_apicv_active(vcpu))
- return;
-
- if (is_run)
- avic_vcpu_load(vcpu, vcpu->cpu);
- else
- avic_vcpu_put(vcpu);
+ if (kvm_vcpu_apicv_active(vcpu)) {
+ if (is_run)
+ avic_vcpu_load(vcpu, cpu);
+ else
+ avic_vcpu_put(vcpu);
+ }
+ put_cpu();
}
void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 871c426ec389..b4095dfeeee6 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -281,7 +281,7 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;
pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
- pmu->reserved_bits = 0xffffffff00200000ull;
+ pmu->reserved_bits = 0xfffffff000280000ull;
pmu->version = 1;
/* not applicable to AMD; but clean them to prevent any fall out */
pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 21ac0a5de4e0..7656a2c5662a 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1543,28 +1543,50 @@ static bool is_cmd_allowed_from_mirror(u32 cmd_id)
return false;
}
-static int sev_lock_for_migration(struct kvm *kvm)
+static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
+ struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;
+ int r = -EBUSY;
+
+ if (dst_kvm == src_kvm)
+ return -EINVAL;
/*
- * Bail if this VM is already involved in a migration to avoid deadlock
- * between two VMs trying to migrate to/from each other.
+ * Bail if these VMs are already involved in a migration to avoid
+ * deadlock between two VMs trying to migrate to/from each other.
*/
- if (atomic_cmpxchg_acquire(&sev->migration_in_progress, 0, 1))
+ if (atomic_cmpxchg_acquire(&dst_sev->migration_in_progress, 0, 1))
return -EBUSY;
- mutex_lock(&kvm->lock);
+ if (atomic_cmpxchg_acquire(&src_sev->migration_in_progress, 0, 1))
+ goto release_dst;
+ r = -EINTR;
+ if (mutex_lock_killable(&dst_kvm->lock))
+ goto release_src;
+ if (mutex_lock_killable(&src_kvm->lock))
+ goto unlock_dst;
return 0;
+
+unlock_dst:
+ mutex_unlock(&dst_kvm->lock);
+release_src:
+ atomic_set_release(&src_sev->migration_in_progress, 0);
+release_dst:
+ atomic_set_release(&dst_sev->migration_in_progress, 0);
+ return r;
}
-static void sev_unlock_after_migration(struct kvm *kvm)
+static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
+ struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;
- mutex_unlock(&kvm->lock);
- atomic_set_release(&sev->migration_in_progress, 0);
+ mutex_unlock(&dst_kvm->lock);
+ mutex_unlock(&src_kvm->lock);
+ atomic_set_release(&dst_sev->migration_in_progress, 0);
+ atomic_set_release(&src_sev->migration_in_progress, 0);
}
@@ -1607,14 +1629,15 @@ static void sev_migrate_from(struct kvm_sev_info *dst,
dst->asid = src->asid;
dst->handle = src->handle;
dst->pages_locked = src->pages_locked;
+ dst->enc_context_owner = src->enc_context_owner;
src->asid = 0;
src->active = false;
src->handle = 0;
src->pages_locked = 0;
+ src->enc_context_owner = NULL;
- INIT_LIST_HEAD(&dst->regions_list);
- list_replace_init(&src->regions_list, &dst->regions_list);
+ list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list);
}
static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
@@ -1666,15 +1689,6 @@ int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
bool charged = false;
int ret;
- ret = sev_lock_for_migration(kvm);
- if (ret)
- return ret;
-
- if (sev_guest(kvm)) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
source_kvm_file = fget(source_fd);
if (!file_is_kvm(source_kvm_file)) {
ret = -EBADF;
@@ -1682,16 +1696,26 @@ int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
}
source_kvm = source_kvm_file->private_data;
- ret = sev_lock_for_migration(source_kvm);
+ ret = sev_lock_two_vms(kvm, source_kvm);
if (ret)
goto out_fput;
- if (!sev_guest(source_kvm)) {
+ if (sev_guest(kvm) || !sev_guest(source_kvm)) {
ret = -EINVAL;
- goto out_source;
+ goto out_unlock;
}
src_sev = &to_kvm_svm(source_kvm)->sev_info;
+
+ /*
+ * VMs mirroring src's encryption context rely on it to keep the
+ * ASID allocated, but below we are clearing src_sev->asid.
+ */
+ if (src_sev->num_mirrored_vms) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
dst_sev->misc_cg = get_current_misc_cg();
cg_cleanup_sev = dst_sev;
if (dst_sev->misc_cg != src_sev->misc_cg) {
@@ -1728,13 +1752,11 @@ out_dst_cgroup:
sev_misc_cg_uncharge(cg_cleanup_sev);
put_misc_cg(cg_cleanup_sev->misc_cg);
cg_cleanup_sev->misc_cg = NULL;
-out_source:
- sev_unlock_after_migration(source_kvm);
+out_unlock:
+ sev_unlock_two_vms(kvm, source_kvm);
out_fput:
if (source_kvm_file)
fput(source_kvm_file);
-out_unlock:
- sev_unlock_after_migration(kvm);
return ret;
}
@@ -1953,76 +1975,60 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
{
struct file *source_kvm_file;
struct kvm *source_kvm;
- struct kvm_sev_info source_sev, *mirror_sev;
+ struct kvm_sev_info *source_sev, *mirror_sev;
int ret;
source_kvm_file = fget(source_fd);
if (!file_is_kvm(source_kvm_file)) {
ret = -EBADF;
- goto e_source_put;
+ goto e_source_fput;
}
source_kvm = source_kvm_file->private_data;
- mutex_lock(&source_kvm->lock);
-
- if (!sev_guest(source_kvm)) {
- ret = -EINVAL;
- goto e_source_unlock;
- }
+ ret = sev_lock_two_vms(kvm, source_kvm);
+ if (ret)
+ goto e_source_fput;
- /* Mirrors of mirrors should work, but let's not get silly */
- if (is_mirroring_enc_context(source_kvm) || source_kvm == kvm) {
+ /*
+ * Mirrors of mirrors should work, but let's not get silly. Also
+ * disallow out-of-band SEV/SEV-ES init if the target is already an
+ * SEV guest, or if vCPUs have been created. KVM relies on vCPUs being
+ * created after SEV/SEV-ES initialization, e.g. to init intercepts.
+ */
+ if (sev_guest(kvm) || !sev_guest(source_kvm) ||
+ is_mirroring_enc_context(source_kvm) || kvm->created_vcpus) {
ret = -EINVAL;
- goto e_source_unlock;
+ goto e_unlock;
}
- memcpy(&source_sev, &to_kvm_svm(source_kvm)->sev_info,
- sizeof(source_sev));
-
/*
* The mirror kvm holds an enc_context_owner ref so its asid can't
* disappear until we're done with it
*/
+ source_sev = &to_kvm_svm(source_kvm)->sev_info;
kvm_get_kvm(source_kvm);
-
- fput(source_kvm_file);
- mutex_unlock(&source_kvm->lock);
- mutex_lock(&kvm->lock);
-
- /*
- * Disallow out-of-band SEV/SEV-ES init if the target is already an
- * SEV guest, or if vCPUs have been created. KVM relies on vCPUs being
- * created after SEV/SEV-ES initialization, e.g. to init intercepts.
- */
- if (sev_guest(kvm) || kvm->created_vcpus) {
- ret = -EINVAL;
- goto e_mirror_unlock;
- }
+ source_sev->num_mirrored_vms++;
/* Set enc_context_owner and copy its encryption context over */
mirror_sev = &to_kvm_svm(kvm)->sev_info;
mirror_sev->enc_context_owner = source_kvm;
mirror_sev->active = true;
- mirror_sev->asid = source_sev.asid;
- mirror_sev->fd = source_sev.fd;
- mirror_sev->es_active = source_sev.es_active;
- mirror_sev->handle = source_sev.handle;
+ mirror_sev->asid = source_sev->asid;
+ mirror_sev->fd = source_sev->fd;
+ mirror_sev->es_active = source_sev->es_active;
+ mirror_sev->handle = source_sev->handle;
+ INIT_LIST_HEAD(&mirror_sev->regions_list);
+ ret = 0;
+
/*
* Do not copy ap_jump_table. Since the mirror does not share the same
* KVM contexts as the original, and they may have different
* memory-views.
*/
- mutex_unlock(&kvm->lock);
- return 0;
-
-e_mirror_unlock:
- mutex_unlock(&kvm->lock);
- kvm_put_kvm(source_kvm);
- return ret;
-e_source_unlock:
- mutex_unlock(&source_kvm->lock);
-e_source_put:
+e_unlock:
+ sev_unlock_two_vms(kvm, source_kvm);
+e_source_fput:
if (source_kvm_file)
fput(source_kvm_file);
return ret;
@@ -2034,17 +2040,24 @@ void sev_vm_destroy(struct kvm *kvm)
struct list_head *head = &sev->regions_list;
struct list_head *pos, *q;
+ WARN_ON(sev->num_mirrored_vms);
+
if (!sev_guest(kvm))
return;
/* If this is a mirror_kvm release the enc_context_owner and skip sev cleanup */
if (is_mirroring_enc_context(kvm)) {
- kvm_put_kvm(sev->enc_context_owner);
+ struct kvm *owner_kvm = sev->enc_context_owner;
+ struct kvm_sev_info *owner_sev = &to_kvm_svm(owner_kvm)->sev_info;
+
+ mutex_lock(&owner_kvm->lock);
+ if (!WARN_ON(!owner_sev->num_mirrored_vms))
+ owner_sev->num_mirrored_vms--;
+ mutex_unlock(&owner_kvm->lock);
+ kvm_put_kvm(owner_kvm);
return;
}
- mutex_lock(&kvm->lock);
-
/*
* Ensure that all guest tagged cache entries are flushed before
* releasing the pages back to the system for use. CLFLUSH will
@@ -2064,8 +2077,6 @@ void sev_vm_destroy(struct kvm *kvm)
}
}
- mutex_unlock(&kvm->lock);
-
sev_unbind_asid(kvm, sev->handle);
sev_asid_free(sev);
}
@@ -2249,7 +2260,7 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
__free_page(virt_to_page(svm->sev_es.vmsa));
if (svm->sev_es.ghcb_sa_free)
- kfree(svm->sev_es.ghcb_sa);
+ kvfree(svm->sev_es.ghcb_sa);
}
static void dump_ghcb(struct vcpu_svm *svm)
@@ -2341,24 +2352,29 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}
-static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
+static bool sev_es_validate_vmgexit(struct vcpu_svm *svm)
{
struct kvm_vcpu *vcpu;
struct ghcb *ghcb;
- u64 exit_code = 0;
+ u64 exit_code;
+ u64 reason;
ghcb = svm->sev_es.ghcb;
- /* Only GHCB Usage code 0 is supported */
- if (ghcb->ghcb_usage)
- goto vmgexit_err;
-
/*
- * Retrieve the exit code now even though is may not be marked valid
+ * Retrieve the exit code now even though it may not be marked valid
* as it could help with debugging.
*/
exit_code = ghcb_get_sw_exit_code(ghcb);
+ /* Only GHCB Usage code 0 is supported */
+ if (ghcb->ghcb_usage) {
+ reason = GHCB_ERR_INVALID_USAGE;
+ goto vmgexit_err;
+ }
+
+ reason = GHCB_ERR_MISSING_INPUT;
+
if (!ghcb_sw_exit_code_is_valid(ghcb) ||
!ghcb_sw_exit_info_1_is_valid(ghcb) ||
!ghcb_sw_exit_info_2_is_valid(ghcb))
@@ -2437,30 +2453,34 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
case SVM_VMGEXIT_UNSUPPORTED_EVENT:
break;
default:
+ reason = GHCB_ERR_INVALID_EVENT;
goto vmgexit_err;
}
- return 0;
+ return true;
vmgexit_err:
vcpu = &svm->vcpu;
- if (ghcb->ghcb_usage) {
+ if (reason == GHCB_ERR_INVALID_USAGE) {
vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
ghcb->ghcb_usage);
+ } else if (reason == GHCB_ERR_INVALID_EVENT) {
+ vcpu_unimpl(vcpu, "vmgexit: exit code %#llx is not valid\n",
+ exit_code);
} else {
- vcpu_unimpl(vcpu, "vmgexit: exit reason %#llx is not valid\n",
+ vcpu_unimpl(vcpu, "vmgexit: exit code %#llx input is not valid\n",
exit_code);
dump_ghcb(svm);
}
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
- vcpu->run->internal.ndata = 2;
- vcpu->run->internal.data[0] = exit_code;
- vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
+ /* Clear the valid entries fields */
+ memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
+
+ ghcb_set_sw_exit_info_1(ghcb, 2);
+ ghcb_set_sw_exit_info_2(ghcb, reason);
- return -EINVAL;
+ return false;
}
void sev_es_unmap_ghcb(struct vcpu_svm *svm)
@@ -2482,7 +2502,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm)
svm->sev_es.ghcb_sa_sync = false;
}
- kfree(svm->sev_es.ghcb_sa);
+ kvfree(svm->sev_es.ghcb_sa);
svm->sev_es.ghcb_sa = NULL;
svm->sev_es.ghcb_sa_free = false;
}
@@ -2530,14 +2550,14 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
if (!scratch_gpa_beg) {
pr_err("vmgexit: scratch gpa not provided\n");
- return false;
+ goto e_scratch;
}
scratch_gpa_end = scratch_gpa_beg + len;
if (scratch_gpa_end < scratch_gpa_beg) {
pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
len, scratch_gpa_beg);
- return false;
+ goto e_scratch;
}
if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {
@@ -2555,7 +2575,7 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
scratch_gpa_end > ghcb_scratch_end) {
pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
scratch_gpa_beg, scratch_gpa_end);
- return false;
+ goto e_scratch;
}
scratch_va = (void *)svm->sev_es.ghcb;
@@ -2568,18 +2588,18 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
if (len > GHCB_SCRATCH_AREA_LIMIT) {
pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",
len, GHCB_SCRATCH_AREA_LIMIT);
- return false;
+ goto e_scratch;
}
- scratch_va = kzalloc(len, GFP_KERNEL_ACCOUNT);
+ scratch_va = kvzalloc(len, GFP_KERNEL_ACCOUNT);
if (!scratch_va)
- return false;
+ goto e_scratch;
if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
/* Unable to copy scratch area from guest */
pr_err("vmgexit: kvm_read_guest for scratch area failed\n");
- kfree(scratch_va);
- return false;
+ kvfree(scratch_va);
+ goto e_scratch;
}
/*
@@ -2596,6 +2616,12 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
svm->sev_es.ghcb_sa_len = len;
return true;
+
+e_scratch:
+ ghcb_set_sw_exit_info_1(ghcb, 2);
+ ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);
+
+ return false;
}
static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
@@ -2646,7 +2672,7 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID);
if (!ret) {
- ret = -EINVAL;
+ /* Error, keep GHCB MSR value as-is */
break;
}
@@ -2682,10 +2708,13 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
GHCB_MSR_TERM_REASON_POS);
pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
reason_set, reason_code);
- fallthrough;
+
+ ret = -EINVAL;
+ break;
}
default:
- ret = -EINVAL;
+ /* Error, keep GHCB MSR value as-is */
+ break;
}
trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,
@@ -2709,14 +2738,18 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
if (!ghcb_gpa) {
vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");
- return -EINVAL;
+
+ /* Without a GHCB, just return right back to the guest */
+ return 1;
}
if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
/* Unable to map GHCB from guest */
vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
ghcb_gpa);
- return -EINVAL;
+
+ /* Without a GHCB, just return right back to the guest */
+ return 1;
}
svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
@@ -2726,15 +2759,14 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
exit_code = ghcb_get_sw_exit_code(ghcb);
- ret = sev_es_validate_vmgexit(svm);
- if (ret)
- return ret;
+ if (!sev_es_validate_vmgexit(svm))
+ return 1;
sev_es_sync_from_ghcb(svm);
ghcb_set_sw_exit_info_1(ghcb, 0);
ghcb_set_sw_exit_info_2(ghcb, 0);
- ret = -EINVAL;
+ ret = 1;
switch (exit_code) {
case SVM_VMGEXIT_MMIO_READ:
if (!setup_vmgexit_scratch(svm, true, control->exit_info_2))
@@ -2775,20 +2807,17 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
default:
pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
control->exit_info_1);
- ghcb_set_sw_exit_info_1(ghcb, 1);
- ghcb_set_sw_exit_info_2(ghcb,
- X86_TRAP_UD |
- SVM_EVTINJ_TYPE_EXEPT |
- SVM_EVTINJ_VALID);
+ ghcb_set_sw_exit_info_1(ghcb, 2);
+ ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_INPUT);
}
- ret = 1;
break;
}
case SVM_VMGEXIT_UNSUPPORTED_EVENT:
vcpu_unimpl(vcpu,
"vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
control->exit_info_1, control->exit_info_2);
+ ret = -EINVAL;
break;
default:
ret = svm_invoke_exit_handler(vcpu, exit_code);
@@ -2810,7 +2839,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
return -EINVAL;
if (!setup_vmgexit_scratch(svm, in, bytes))
- return -EINVAL;
+ return 1;
return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa,
count, in);
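sev_lock_two_vms() claims both VMs' migration_in_progress flags with cmpxchg before sleeping on either kvm->lock, so two VMs racing to migrate to/from (or mirror) each other back off with -EBUSY instead of deadlocking on each other's mutex, and mutex_lock_killable() lets a signal break a waiter out. A self-contained sketch of just the claim/release ordering, using C11 atomics in place of the kernel primitives (toy types; the mutexes and error codes are omitted):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_vm {
	atomic_int migration_in_progress;
	/* the real code also takes kvm->lock with mutex_lock_killable() */
};

/* Claim both VMs' flags before sleeping on any lock, so two VMs racing to
 * migrate to/from each other back off instead of deadlocking. */
static bool toy_lock_two_vms(struct toy_vm *dst, struct toy_vm *src)
{
	int zero = 0;

	if (dst == src)
		return false;
	if (!atomic_compare_exchange_strong(&dst->migration_in_progress, &zero, 1))
		return false;
	zero = 0;
	if (!atomic_compare_exchange_strong(&src->migration_in_progress, &zero, 1)) {
		atomic_store(&dst->migration_in_progress, 0);
		return false;
	}
	return true;
}

static void toy_unlock_two_vms(struct toy_vm *dst, struct toy_vm *src)
{
	atomic_store(&dst->migration_in_progress, 0);
	atomic_store(&src->migration_in_progress, 0);
}

int main(void)
{
	struct toy_vm a = { 0 }, b = { 0 };

	if (toy_lock_two_vms(&a, &b)) {
		printf("claimed both VMs\n");
		/* a concurrent toy_lock_two_vms(&b, &a) would have failed here */
		toy_unlock_two_vms(&a, &b);
	}
	return 0;
}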
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 5630c241d5f6..5151efa424ac 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1585,6 +1585,15 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
to_svm(vcpu)->vmcb->save.rflags = rflags;
}
+static bool svm_get_if_flag(struct kvm_vcpu *vcpu)
+{
+ struct vmcb *vmcb = to_svm(vcpu)->vmcb;
+
+ return sev_es_guest(vcpu->kvm)
+ ? vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK
+ : kvm_get_rflags(vcpu) & X86_EFLAGS_IF;
+}
+
static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
switch (reg) {
@@ -3568,14 +3577,7 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
if (!gif_set(svm))
return true;
- if (sev_es_guest(vcpu->kvm)) {
- /*
- * SEV-ES guests to not expose RFLAGS. Use the VMCB interrupt mask
- * bit to determine the state of the IF flag.
- */
- if (!(vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK))
- return true;
- } else if (is_guest_mode(vcpu)) {
+ if (is_guest_mode(vcpu)) {
/* As long as interrupts are being delivered... */
if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF)
@@ -3586,7 +3588,7 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
if (nested_exit_on_intr(svm))
return false;
} else {
- if (!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
+ if (!svm_get_if_flag(vcpu))
return true;
}
@@ -4621,6 +4623,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.cache_reg = svm_cache_reg,
.get_rflags = svm_get_rflags,
.set_rflags = svm_set_rflags,
+ .get_if_flag = svm_get_if_flag,
.tlb_flush_all = svm_flush_tlb,
.tlb_flush_current = svm_flush_tlb,
@@ -4651,7 +4654,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.load_eoi_exitmap = svm_load_eoi_exitmap,
.hwapic_irr_update = svm_hwapic_irr_update,
.hwapic_isr_update = svm_hwapic_isr_update,
- .sync_pir_to_irr = kvm_lapic_find_highest_irr,
.apicv_post_state_restore = avic_post_state_restore,
.set_tss_addr = svm_set_tss_addr,
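The new get_if_flag hook lets post_kvm_run_save() report a meaningful kvm_run->if_flag even for SEV-ES guests, where RFLAGS is not visible and the VMCB interrupt mask is the source of truth. A minimal sketch of that callback split, with invented toy types standing in for the vCPU and the vendor ops:

#include <stdbool.h>
#include <stdio.h>

struct toy_vcpu {
	bool state_protected;	/* SEV-ES: register state is not visible to KVM */
	bool rflags_if;		/* would-be RFLAGS.IF */
	bool guest_int_mask;	/* would-be SVM_GUEST_INTERRUPT_MASK in int_state */
};

/* Vendor "get_if_flag" callback: SEV-ES guests report the VMCB interrupt
 * mask, everyone else reports RFLAGS.IF. */
static bool toy_get_if_flag(const struct toy_vcpu *vcpu)
{
	return vcpu->state_protected ? vcpu->guest_int_mask : vcpu->rflags_if;
}

int main(void)
{
	struct toy_vcpu sev_es = { .state_protected = true, .guest_int_mask = true };
	struct toy_vcpu legacy = { .rflags_if = true };

	/* Mirrors post_kvm_run_save(): if_flag now comes from the callback. */
	printf("sev-es if_flag=%d, legacy if_flag=%d\n",
	       toy_get_if_flag(&sev_es), toy_get_if_flag(&legacy));
	return 0;
}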
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 5faad3dc10e2..1c7306c370fa 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -79,6 +79,7 @@ struct kvm_sev_info {
struct list_head regions_list; /* List of registered regions */
u64 ap_jump_table; /* SEV-ES AP Jump Table address */
struct kvm *enc_context_owner; /* Owner of copied encryption context */
+ unsigned long num_mirrored_vms; /* Number of VMs sharing this ASID */
struct misc_cg *misc_cg; /* For misc cgroup accounting */
atomic_t migration_in_progress;
};
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 1e2f66951566..9c941535f78c 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -1162,29 +1162,26 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
WARN_ON(!enable_vpid);
/*
- * If VPID is enabled and used by vmc12, but L2 does not have a unique
- * TLB tag (ASID), i.e. EPT is disabled and KVM was unable to allocate
- * a VPID for L2, flush the current context as the effective ASID is
- * common to both L1 and L2.
- *
- * Defer the flush so that it runs after vmcs02.EPTP has been set by
- * KVM_REQ_LOAD_MMU_PGD (if nested EPT is enabled) and to avoid
- * redundant flushes further down the nested pipeline.
- *
- * If a TLB flush isn't required due to any of the above, and vpid12 is
- * changing then the new "virtual" VPID (vpid12) will reuse the same
- * "real" VPID (vpid02), and so needs to be flushed. There's no direct
- * mapping between vpid02 and vpid12, vpid02 is per-vCPU and reused for
- * all nested vCPUs. Remember, a flush on VM-Enter does not invalidate
- * guest-physical mappings, so there is no need to sync the nEPT MMU.
+ * VPID is enabled and in use by vmcs12. If vpid12 is changing, then
+ * emulate a guest TLB flush as KVM does not track vpid12 history nor
+ * is the VPID incorporated into the MMU context. I.e. KVM must assume
+ * that the new vpid12 has never been used and thus represents a new
+ * guest ASID that cannot have entries in the TLB.
*/
- if (!nested_has_guest_tlb_tag(vcpu)) {
- kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
- } else if (is_vmenter &&
- vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
+ if (is_vmenter && vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
vmx->nested.last_vpid = vmcs12->virtual_processor_id;
- vpid_sync_context(nested_get_vpid02(vcpu));
+ kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
+ return;
}
+
+ /*
+ * If VPID is enabled and used by vmcs12, and vpid12 is not changing, but
+ * L2 does not have a unique TLB tag (ASID), i.e. EPT is disabled and
+ * KVM was unable to allocate a VPID for L2, flush the current context
+ * as the effective ASID is common to both L1 and L2.
+ */
+ if (!nested_has_guest_tlb_tag(vcpu))
+ kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}
static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
@@ -2594,8 +2591,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
- vmcs12->guest_ia32_perf_global_ctrl)))
+ vmcs12->guest_ia32_perf_global_ctrl))) {
+ *entry_failure_code = ENTRY_FAIL_DEFAULT;
return -EINVAL;
+ }
kvm_rsp_write(vcpu, vmcs12->guest_rsp);
kvm_rip_write(vcpu, vmcs12->guest_rip);
@@ -3344,8 +3343,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
};
u32 failed_index;
- if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
- kvm_vcpu_flush_tlb_current(vcpu);
+ kvm_service_local_tlb_flush_requests(vcpu);
evaluate_pending_interrupts = exec_controls_get(vmx) &
(CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING);
@@ -4502,9 +4500,8 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
(void)nested_get_evmcs_page(vcpu);
}
- /* Service the TLB flush request for L2 before switching to L1. */
- if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
- kvm_vcpu_flush_tlb_current(vcpu);
+ /* Service pending TLB flush requests for L2 before switching to L1. */
+ kvm_service_local_tlb_flush_requests(vcpu);
/*
* VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between
@@ -4857,6 +4854,7 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
if (!vmx->nested.cached_vmcs12)
goto out_cached_vmcs12;
+ vmx->nested.shadow_vmcs12_cache.gpa = INVALID_GPA;
vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
if (!vmx->nested.cached_shadow_vmcs12)
goto out_cached_shadow_vmcs12;
@@ -5289,8 +5287,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
struct gfn_to_hva_cache *ghc = &vmx->nested.vmcs12_cache;
struct vmcs_hdr hdr;
- if (ghc->gpa != vmptr &&
- kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, vmptr, VMCS12_SIZE)) {
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, vmptr, VMCS12_SIZE)) {
/*
* Reads from an unbacked page return all 1s,
* which means that the 32 bits located at the
diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c
index 5f81ef092bd4..1c94783b5a54 100644
--- a/arch/x86/kvm/vmx/posted_intr.c
+++ b/arch/x86/kvm/vmx/posted_intr.c
@@ -5,6 +5,7 @@
#include <asm/cpu.h>
#include "lapic.h"
+#include "irq.h"
#include "posted_intr.h"
#include "trace.h"
#include "vmx.h"
@@ -77,13 +78,18 @@ after_clear_sn:
pi_set_on(pi_desc);
}
+static bool vmx_can_use_vtd_pi(struct kvm *kvm)
+{
+ return irqchip_in_kernel(kvm) && enable_apicv &&
+ kvm_arch_has_assigned_device(kvm) &&
+ irq_remapping_cap(IRQ_POSTING_CAP);
+}
+
void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
{
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
- if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP) ||
- !kvm_vcpu_apicv_active(vcpu))
+ if (!vmx_can_use_vtd_pi(vcpu->kvm))
return;
/* Set SN when the vCPU is preempted */
@@ -141,9 +147,7 @@ int pi_pre_block(struct kvm_vcpu *vcpu)
struct pi_desc old, new;
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
- if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP) ||
- !kvm_vcpu_apicv_active(vcpu))
+ if (!vmx_can_use_vtd_pi(vcpu->kvm))
return 0;
WARN_ON(irqs_disabled());
@@ -270,9 +274,7 @@ int pi_update_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq,
struct vcpu_data vcpu_info;
int idx, ret = 0;
- if (!kvm_arch_has_assigned_device(kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP) ||
- !kvm_vcpu_apicv_active(kvm->vcpus[0]))
+ if (!vmx_can_use_vtd_pi(kvm))
return 0;
idx = srcu_read_lock(&kvm->irq_srcu);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index ba66c171d951..0dbf94eb954f 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1363,6 +1363,11 @@ void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
vmx->emulation_required = vmx_emulation_required(vcpu);
}
+static bool vmx_get_if_flag(struct kvm_vcpu *vcpu)
+{
+ return vmx_get_rflags(vcpu) & X86_EFLAGS_IF;
+}
+
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
@@ -2646,15 +2651,6 @@ int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
if (!loaded_vmcs->msr_bitmap)
goto out_vmcs;
memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
-
- if (IS_ENABLED(CONFIG_HYPERV) &&
- static_branch_unlikely(&enable_evmcs) &&
- (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
- struct hv_enlightened_vmcs *evmcs =
- (struct hv_enlightened_vmcs *)loaded_vmcs->vmcs;
-
- evmcs->hv_enlightenments_control.msr_bitmap = 1;
- }
}
memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state));
@@ -2918,6 +2914,13 @@ static void vmx_flush_tlb_all(struct kvm_vcpu *vcpu)
}
}
+static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu)
+{
+ if (is_guest_mode(vcpu))
+ return nested_get_vpid02(vcpu);
+ return to_vmx(vcpu)->vpid;
+}
+
static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *mmu = vcpu->arch.mmu;
@@ -2930,31 +2933,29 @@ static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
if (enable_ept)
ept_sync_context(construct_eptp(vcpu, root_hpa,
mmu->shadow_root_level));
- else if (!is_guest_mode(vcpu))
- vpid_sync_context(to_vmx(vcpu)->vpid);
else
- vpid_sync_context(nested_get_vpid02(vcpu));
+ vpid_sync_context(vmx_get_current_vpid(vcpu));
}
static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
{
/*
- * vpid_sync_vcpu_addr() is a nop if vmx->vpid==0, see the comment in
+ * vpid_sync_vcpu_addr() is a nop if vpid==0, see the comment in
* vmx_flush_tlb_guest() for an explanation of why this is ok.
*/
- vpid_sync_vcpu_addr(to_vmx(vcpu)->vpid, addr);
+ vpid_sync_vcpu_addr(vmx_get_current_vpid(vcpu), addr);
}
static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
{
/*
- * vpid_sync_context() is a nop if vmx->vpid==0, e.g. if enable_vpid==0
- * or a vpid couldn't be allocated for this vCPU. VM-Enter and VM-Exit
- * are required to flush GVA->{G,H}PA mappings from the TLB if vpid is
+ * vpid_sync_context() is a nop if vpid==0, e.g. if enable_vpid==0 or a
+ * vpid couldn't be allocated for this vCPU. VM-Enter and VM-Exit are
+ * required to flush GVA->{G,H}PA mappings from the TLB if vpid is
* disabled (VM-Enter with vpid enabled and vpid==0 is disallowed),
* i.e. no explicit INVVPID is necessary.
*/
- vpid_sync_context(to_vmx(vcpu)->vpid);
+ vpid_sync_context(vmx_get_current_vpid(vcpu));
}
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu)
@@ -3963,8 +3964,7 @@ static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
if (pi_test_and_set_on(&vmx->pi_desc))
return 0;
- if (vcpu != kvm_get_running_vcpu() &&
- !kvm_vcpu_trigger_posted_interrupt(vcpu, false))
+ if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
kvm_vcpu_kick(vcpu);
return 0;
@@ -5881,18 +5881,14 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
vmx_flush_pml_buffer(vcpu);
/*
- * We should never reach this point with a pending nested VM-Enter, and
- * more specifically emulation of L2 due to invalid guest state (see
- * below) should never happen as that means we incorrectly allowed a
- * nested VM-Enter with an invalid vmcs12.
+ * KVM should never reach this point with a pending nested VM-Enter.
+ * More specifically, short-circuiting VM-Entry to emulate L2 due to
+ * invalid guest state should never happen as that means KVM knowingly
+ * allowed a nested VM-Enter with an invalid vmcs12. More below.
*/
if (KVM_BUG_ON(vmx->nested.nested_run_pending, vcpu->kvm))
return -EIO;
- /* If guest state is invalid, start emulating */
- if (vmx->emulation_required)
- return handle_invalid_guest_state(vcpu);
-
if (is_guest_mode(vcpu)) {
/*
* PML is never enabled when running L2, bail immediately if a
@@ -5914,10 +5910,30 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
*/
nested_mark_vmcs12_pages_dirty(vcpu);
+ /*
+ * Synthesize a triple fault if L2 state is invalid. In normal
+ * operation, nested VM-Enter rejects any attempt to enter L2
+ * with invalid state. However, those checks are skipped if
+ * state is being stuffed via RSM or KVM_SET_NESTED_STATE. If
+ * L2 state is invalid, it means either L1 modified SMRAM state
+ * or userspace provided bad state. Synthesize TRIPLE_FAULT as
+ * doing so is architecturally allowed in the RSM case, and is
+ * the least awful solution for the userspace case without
+ * risking false positives.
+ */
+ if (vmx->emulation_required) {
+ nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0);
+ return 1;
+ }
+
if (nested_vmx_reflect_vmexit(vcpu))
return 1;
}
+ /* If guest state is invalid, start emulating. L2 is handled above. */
+ if (vmx->emulation_required)
+ return handle_invalid_guest_state(vcpu);
+
if (exit_reason.failed_vmentry) {
dump_vmcs(vcpu);
vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
@@ -6262,9 +6278,9 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int max_irr;
- bool max_irr_updated;
+ bool got_posted_interrupt;
- if (KVM_BUG_ON(!vcpu->arch.apicv_active, vcpu->kvm))
+ if (KVM_BUG_ON(!enable_apicv, vcpu->kvm))
return -EIO;
if (pi_test_on(&vmx->pi_desc)) {
@@ -6274,22 +6290,33 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
* But on x86 this is just a compiler barrier anyway.
*/
smp_mb__after_atomic();
- max_irr_updated =
+ got_posted_interrupt =
kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr);
-
- /*
- * If we are running L2 and L1 has a new pending interrupt
- * which can be injected, this may cause a vmexit or it may
- * be injected into L2. Either way, this interrupt will be
- * processed via KVM_REQ_EVENT, not RVI, because we do not use
- * virtual interrupt delivery to inject L1 interrupts into L2.
- */
- if (is_guest_mode(vcpu) && max_irr_updated)
- kvm_make_request(KVM_REQ_EVENT, vcpu);
} else {
max_irr = kvm_lapic_find_highest_irr(vcpu);
+ got_posted_interrupt = false;
}
- vmx_hwapic_irr_update(vcpu, max_irr);
+
+ /*
+ * Newly recognized interrupts are injected via either virtual interrupt
+ * delivery (RVI) or KVM_REQ_EVENT. Virtual interrupt delivery is
+ * disabled in two cases:
+ *
+ * 1) If L2 is running and the vCPU has a new pending interrupt. If L1
+ * wants to exit on interrupts, KVM_REQ_EVENT is needed to synthesize a
+ * VM-Exit to L1. If L1 doesn't want to exit, the interrupt is injected
+ * into L2, but KVM doesn't use virtual interrupt delivery to inject
+ * interrupts into L2, and so KVM_REQ_EVENT is again needed.
+ *
+ * 2) If APICv is disabled for this vCPU, assigned devices may still
+ * attempt to post interrupts. The posted interrupt vector will cause
+ * a VM-Exit and the subsequent entry will call sync_pir_to_irr.
+ */
+ if (!is_guest_mode(vcpu) && kvm_vcpu_apicv_active(vcpu))
+ vmx_set_rvi(max_irr);
+ else if (got_posted_interrupt)
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+
return max_irr;
}
@@ -6601,9 +6628,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
* consistency check VM-Exit due to invalid guest state and bail.
*/
if (unlikely(vmx->emulation_required)) {
-
- /* We don't emulate invalid state of a nested guest */
- vmx->fail = is_guest_mode(vcpu);
+ vmx->fail = 0;
vmx->exit_reason.full = EXIT_REASON_INVALID_STATE;
vmx->exit_reason.failed_vmentry = 1;
@@ -6826,6 +6851,19 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
if (err < 0)
goto free_pml;
+ /*
+ * Use Hyper-V 'Enlightened MSR Bitmap' feature when KVM runs as a
+ * nested (L1) hypervisor and Hyper-V in L0 supports it. Enable the
+ * feature only for vmcs01; KVM currently isn't equipped to realize any
+ * performance benefits from enabling it for vmcs02.
+ */
+ if (IS_ENABLED(CONFIG_HYPERV) && static_branch_unlikely(&enable_evmcs) &&
+ (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
+ struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
+
+ evmcs->hv_enlightenments_control.msr_bitmap = 1;
+ }
+
/* The MSR bitmap starts with all ones */
bitmap_fill(vmx->shadow_msr_intercept.read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
@@ -7509,6 +7547,7 @@ static void hardware_unsetup(void)
static bool vmx_check_apicv_inhibit_reasons(ulong bit)
{
ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
+ BIT(APICV_INHIBIT_REASON_ABSENT) |
BIT(APICV_INHIBIT_REASON_HYPERV) |
BIT(APICV_INHIBIT_REASON_BLOCKIRQ);
@@ -7558,6 +7597,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.cache_reg = vmx_cache_reg,
.get_rflags = vmx_get_rflags,
.set_rflags = vmx_set_rflags,
+ .get_if_flag = vmx_get_if_flag,
.tlb_flush_all = vmx_flush_tlb_all,
.tlb_flush_current = vmx_flush_tlb_current,
@@ -7761,10 +7801,10 @@ static __init int hardware_setup(void)
ple_window_shrink = 0;
}
- if (!cpu_has_vmx_apicv()) {
+ if (!cpu_has_vmx_apicv())
enable_apicv = 0;
+ if (!enable_apicv)
vmx_x86_ops.sync_pir_to_irr = NULL;
- }
if (cpu_has_vmx_tsc_scaling()) {
kvm_has_tsc_control = true;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5a403d92833f..e50e97ac4408 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -890,7 +890,8 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
!load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)))
return 1;
- if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
+ if (!(cr0 & X86_CR0_PG) &&
+ (is_64_bit_mode(vcpu) || kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)))
return 1;
static_call(kvm_x86_set_cr0)(vcpu, cr0);
@@ -1330,7 +1331,7 @@ static const u32 msrs_to_save_all[] = {
MSR_IA32_UMWAIT_CONTROL,
MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
- MSR_ARCH_PERFMON_FIXED_CTR0 + 2, MSR_ARCH_PERFMON_FIXED_CTR0 + 3,
+ MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
@@ -3258,6 +3259,29 @@ static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
static_call(kvm_x86_tlb_flush_guest)(vcpu);
}
+
+static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
+{
+ ++vcpu->stat.tlb_flush;
+ static_call(kvm_x86_tlb_flush_current)(vcpu);
+}
+
+/*
+ * Service "local" TLB flush requests, which are specific to the current MMU
+ * context. In addition to the generic event handling in vcpu_enter_guest(),
+ * TLB flushes that are targeted at an MMU context also need to be serviced
+ * prior to nested VM-Enter/VM-Exit.
+ */
+void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu)
+{
+ if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
+ kvm_vcpu_flush_tlb_current(vcpu);
+
+ if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
+ kvm_vcpu_flush_tlb_guest(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_service_local_tlb_flush_requests);
+
static void record_steal_time(struct kvm_vcpu *vcpu)
{
struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
@@ -3389,7 +3413,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!msr_info->host_initiated)
return 1;
- if (guest_cpuid_has(vcpu, X86_FEATURE_PDCM) && kvm_get_msr_feature(&msr_ent))
+ if (kvm_get_msr_feature(&msr_ent))
return 1;
if (data & ~msr_ent.data)
return 1;
@@ -4133,6 +4157,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_SGX_ATTRIBUTE:
#endif
case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM:
+ case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM:
case KVM_CAP_SREGS2:
case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
case KVM_CAP_VCPU_ATTRIBUTES:
@@ -4448,8 +4473,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
struct kvm_lapic_state *s)
{
- if (vcpu->arch.apicv_active)
- static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+ static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
return kvm_apic_get_state(vcpu, s);
}
@@ -5124,6 +5148,17 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
struct kvm_cpuid __user *cpuid_arg = argp;
struct kvm_cpuid cpuid;
+ /*
+ * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
+ * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc. aren't
+ * tracked in kvm_mmu_page_role. As a result, KVM may miss guest page
+ * faults due to reusing SPs/SPTEs. In practice no sane VMM mucks with
+ * the core vCPU model on the fly, so fail.
+ */
+ r = -EINVAL;
+ if (vcpu->arch.last_vmentry_cpu != -1)
+ goto out;
+
r = -EFAULT;
if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
goto out;
@@ -5134,6 +5169,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
struct kvm_cpuid2 __user *cpuid_arg = argp;
struct kvm_cpuid2 cpuid;
+ /*
+ * KVM_SET_CPUID{,2} after KVM_RUN is forbidden, see the comment in
+ * KVM_SET_CPUID case above.
+ */
+ r = -EINVAL;
+ if (vcpu->arch.last_vmentry_cpu != -1)
+ goto out;
+
r = -EFAULT;
if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
goto out;
@@ -5698,6 +5741,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
smp_wmb();
kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
+ kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT);
r = 0;
split_irqchip_unlock:
mutex_unlock(&kvm->lock);
@@ -6078,6 +6122,7 @@ set_identity_unlock:
/* Write kvm->irq_routing before enabling irqchip_in_kernel. */
smp_wmb();
kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
+ kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT);
create_irqchip_unlock:
mutex_unlock(&kvm->lock);
break;
@@ -7077,7 +7122,13 @@ static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
unsigned short port, void *val, unsigned int count)
{
if (vcpu->arch.pio.count) {
- /* Complete previous iteration. */
+ /*
+ * Complete a previous iteration that required userspace I/O.
+ * Note, @count isn't guaranteed to match pio.count as userspace
+ * can modify ECX before rerunning the vCPU. Ignore any such
+ * shenanigans as KVM doesn't support modifying the rep count,
+ * and the emulator ensures @count doesn't overflow the buffer.
+ */
} else {
int r = __emulator_pio_in(vcpu, size, port, count);
if (!r)
@@ -7086,7 +7137,6 @@ static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
/* Results already available, fall through. */
}
- WARN_ON(count != vcpu->arch.pio.count);
complete_emulator_pio_in(vcpu, val);
return 1;
}
@@ -8776,10 +8826,9 @@ static void kvm_apicv_init(struct kvm *kvm)
{
init_rwsem(&kvm->arch.apicv_update_lock);
- if (enable_apicv)
- clear_bit(APICV_INHIBIT_REASON_DISABLE,
- &kvm->arch.apicv_inhibit_reasons);
- else
+ set_bit(APICV_INHIBIT_REASON_ABSENT,
+ &kvm->arch.apicv_inhibit_reasons);
+ if (!enable_apicv)
set_bit(APICV_INHIBIT_REASON_DISABLE,
&kvm->arch.apicv_inhibit_reasons);
}
@@ -8952,14 +9001,7 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
{
struct kvm_run *kvm_run = vcpu->run;
- /*
- * if_flag is obsolete and useless, so do not bother
- * setting it for SEV-ES guests. Userspace can just
- * use kvm_run->ready_for_interrupt_injection.
- */
- kvm_run->if_flag = !vcpu->arch.guest_state_protected
- && (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
-
+ kvm_run->if_flag = static_call(kvm_x86_get_if_flag)(vcpu);
kvm_run->cr8 = kvm_get_cr8(vcpu);
kvm_run->apic_base = kvm_get_apic_base(vcpu);
@@ -9528,8 +9570,7 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
if (irqchip_split(vcpu->kvm))
kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
else {
- if (vcpu->arch.apicv_active)
- static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+ static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
if (ioapic_in_kernel(vcpu->kvm))
kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
}
@@ -9648,10 +9689,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
/* Flushing all ASIDs flushes the current ASID... */
kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}
- if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
- kvm_vcpu_flush_tlb_current(vcpu);
- if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
- kvm_vcpu_flush_tlb_guest(vcpu);
+ kvm_service_local_tlb_flush_requests(vcpu);
if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
@@ -9802,10 +9840,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
/*
* This handles the case where a posted interrupt was
- * notified with kvm_vcpu_kick.
+ * notified with kvm_vcpu_kick. Assigned devices can
+ * use the POSTED_INTR_VECTOR even if APICv is disabled,
+ * so do it even if APICv is disabled on this vCPU.
*/
- if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
- static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+ if (kvm_lapic_enabled(vcpu))
+ static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
if (kvm_vcpu_exit_request(vcpu)) {
vcpu->mode = OUTSIDE_GUEST_MODE;
@@ -9849,8 +9889,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
break;
- if (vcpu->arch.apicv_active)
- static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+ if (kvm_lapic_enabled(vcpu))
+ static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
if (unlikely(kvm_vcpu_exit_request(vcpu))) {
exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 997669ae9caa..4abcd8d9836d 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -103,6 +103,7 @@ static inline unsigned int __shrink_ple_window(unsigned int val,
#define MSR_IA32_CR_PAT_DEFAULT 0x0007040600070406ULL
+void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
int kvm_check_nested_events(struct kvm_vcpu *vcpu);
static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
@@ -185,12 +186,6 @@ static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}
-static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
-{
- ++vcpu->stat.tlb_flush;
- static_call(kvm_x86_tlb_flush_current)(vcpu);
-}
-
static inline int is_pae(struct kvm_vcpu *vcpu)
{
return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 726700fabca6..bafe36e69227 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1252,19 +1252,54 @@ st: if (is_imm8(insn->off))
case BPF_LDX | BPF_MEM | BPF_DW:
case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
- /* test src_reg, src_reg */
- maybe_emit_mod(&prog, src_reg, src_reg, true); /* always 1 byte */
- EMIT2(0x85, add_2reg(0xC0, src_reg, src_reg));
- /* jne start_of_ldx */
- EMIT2(X86_JNE, 0);
+ /* Though the verifier prevents negative insn->off in BPF_PROBE_MEM,
+ * add abs(insn->off) to the limit to make sure that a negative
+ * offset won't be an issue.
+ * insn->off is s16, so it won't affect valid pointers.
+ */
+ u64 limit = TASK_SIZE_MAX + PAGE_SIZE + abs(insn->off);
+ u8 *end_of_jmp1, *end_of_jmp2;
+
+ /* Conservatively check that src_reg + insn->off is a kernel address:
+ * 1. src_reg + insn->off >= limit
+ * 2. src_reg + insn->off doesn't become small positive.
+ * Cannot do src_reg + insn->off >= limit in one branch,
+ * since it needs two spare registers, but JIT has only one.
+ */
+
+ /* movabsq r11, limit */
+ EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG));
+ EMIT((u32)limit, 4);
+ EMIT(limit >> 32, 4);
+ /* cmp src_reg, r11 */
+ maybe_emit_mod(&prog, src_reg, AUX_REG, true);
+ EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG));
+ /* if unsigned '<' goto end_of_jmp2 */
+ EMIT2(X86_JB, 0);
+ end_of_jmp1 = prog;
+
+ /* mov r11, src_reg */
+ emit_mov_reg(&prog, true, AUX_REG, src_reg);
+ /* add r11, insn->off */
+ maybe_emit_1mod(&prog, AUX_REG, true);
+ EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
+ /* jmp if not carry to start_of_ldx
+ * Otherwise ERR_PTR(-EINVAL) + 128 will be the user addr
+ * that has to be rejected.
+ */
+ EMIT2(0x73 /* JNC */, 0);
+ end_of_jmp2 = prog;
+
/* xor dst_reg, dst_reg */
emit_mov_imm32(&prog, false, dst_reg, 0);
/* jmp byte_after_ldx */
EMIT2(0xEB, 0);
- /* populate jmp_offset for JNE above */
- temp[4] = prog - temp - 5 /* sizeof(test + jne) */;
+ /* populate jmp_offset for JB above to jump to xor dst_reg */
+ end_of_jmp1[-1] = end_of_jmp2 - end_of_jmp1;
+ /* populate jmp_offset for JNC above to jump to start_of_ldx */
start_of_ldx = prog;
+ end_of_jmp2[-1] = start_of_ldx - end_of_jmp2;
}
emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
@@ -1305,7 +1340,7 @@ st: if (is_imm8(insn->off))
* End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
* of 4 bytes will be ignored and rbx will be zero inited.
*/
- ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8);
+ ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8);
}
break;
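
As a rough C equivalent of the cmp/jb + add/jnc sequence emitted above (a sketch only; probe_addr_ok() is a made-up helper, not part of the patch; TASK_SIZE_MAX and PAGE_SIZE come from the usual arch headers):

	#include <linux/types.h>
	#include <linux/kernel.h>	/* abs() */

	static bool probe_addr_ok(u64 src, s32 off)
	{
		u64 limit = TASK_SIZE_MAX + PAGE_SIZE + abs(off);

		if (src < limit)	/* below the kernel range: zero dst_reg */
			return false;
		if (src + off < src)	/* 64-bit wrap, e.g. ERR_PTR(-EINVAL) + off */
			return false;
		return true;		/* safe to perform the load */
	}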
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index b15ebfe40a73..b0b848d6933a 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -277,7 +277,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
return;
}
- new = early_memremap(data.phys_map, data.size);
+ new = early_memremap_prot(data.phys_map, data.size,
+ pgprot_val(pgprot_encrypted(FIXMAP_PAGE_NORMAL)));
if (!new) {
pr_err("Failed to map new boot services memmap\n");
return;
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index 4a3da7592b99..38d24d2ab38b 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -72,6 +72,7 @@ static void __init setup_real_mode(void)
#ifdef CONFIG_X86_64
u64 *trampoline_pgd;
u64 efer;
+ int i;
#endif
base = (unsigned char *)real_mode_header;
@@ -128,8 +129,17 @@ static void __init setup_real_mode(void)
trampoline_header->flags = 0;
trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
+
+ /* Map the real mode stub as virtual == physical */
trampoline_pgd[0] = trampoline_pgd_entry.pgd;
- trampoline_pgd[511] = init_top_pgt[511].pgd;
+
+ /*
+ * Include the entirety of the kernel mapping into the trampoline
+ * PGD. This way, all mappings present in the normal kernel page
+ * tables are usable while running on trampoline_pgd.
+ */
+ for (i = pgd_index(__PAGE_OFFSET); i < PTRS_PER_PGD; i++)
+ trampoline_pgd[i] = init_top_pgt[i].pgd;
#endif
sme_sev_setup_real_mode(trampoline_header);
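
Worked example, assuming 4-level paging and the default non-KASLR __PAGE_OFFSET of 0xffff888000000000: pgd_index(__PAGE_OFFSET) = (0xffff888000000000 >> 39) & 511 = 273, so the loop above copies PGD slots 273 through 511, i.e. the entire kernel half of the address space, into the trampoline page table.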
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index c736cf2ac76b..e2c5b296120d 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -68,7 +68,7 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
"(__parainstructions|__alt_instructions)(_end)?|"
"(__iommu_table|__apicdrivers|__smp_locks)(_end)?|"
"__(start|end)_pci_.*|"
-#if CONFIG_FW_LOADER_BUILTIN
+#if CONFIG_FW_LOADER
"__(start|end)_builtin_fw|"
#endif
"__(start|stop)___ksymtab(_gpl)?|"
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 220dd9678494..444d824775f6 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/linkage.h>
+#include <../entry/calling.h>
.pushsection .noinstr.text, "ax"
/*
@@ -193,6 +194,25 @@ SYM_CODE_START(xen_iret)
SYM_CODE_END(xen_iret)
/*
+ * XEN pv doesn't use a trampoline stack; PER_CPU_VAR(cpu_tss_rw + TSS_sp0) is
+ * also the kernel stack. Reusing swapgs_restore_regs_and_return_to_usermode()
+ * in XEN pv would cause %rsp to move up to the top of the kernel stack and
+ * leave the IRET frame below %rsp, which could be corrupted if an #NMI
+ * interrupts. Having swapgs_restore_regs_and_return_to_usermode() push the
+ * IRET frame at the same address would also be pointless.
+ */
+SYM_CODE_START(xenpv_restore_regs_and_return_to_usermode)
+ UNWIND_HINT_REGS
+ POP_REGS
+
+ /* stackleak_erase() can work safely on the kernel stack. */
+ STACKLEAK_ERASE_NOCLOBBER
+
+ addq $8, %rsp /* skip regs->orig_ax */
+ jmp xen_iret
+SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode)
+
+/*
* Xen handles syscall callbacks much like ordinary exceptions, which
* means we have:
* - kernel gs
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
index a8a041609c5d..7b4359312c25 100644
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -121,7 +121,6 @@ void flush_cache_page(struct vm_area_struct*,
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *);
-void flush_dcache_folio(struct folio *);
void local_flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
@@ -138,9 +137,7 @@ void local_flush_cache_page(struct vm_area_struct *vma,
#define flush_cache_vunmap(start,end) do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
#define flush_dcache_page(page) do { } while (0)
-static inline void flush_dcache_folio(struct folio *folio) { }
#define flush_icache_range local_flush_icache_range
#define flush_cache_page(vma, addr, pfn) do { } while (0)
diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl
index 104b327f8ac9..3e3e1a506bed 100644
--- a/arch/xtensa/kernel/syscalls/syscall.tbl
+++ b/arch/xtensa/kernel/syscalls/syscall.tbl
@@ -419,3 +419,4 @@
446 common landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
diff --git a/block/bdev.c b/block/bdev.c
index b4dab2fb6a74..b1d087e5e205 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -753,8 +753,7 @@ struct block_device *blkdev_get_no_open(dev_t dev)
if (!bdev)
return NULL;
- if ((bdev->bd_disk->flags & GENHD_FL_HIDDEN) ||
- !try_module_get(bdev->bd_disk->fops->owner)) {
+ if ((bdev->bd_disk->flags & GENHD_FL_HIDDEN)) {
put_device(&bdev->bd_device);
return NULL;
}
@@ -764,7 +763,6 @@ struct block_device *blkdev_get_no_open(dev_t dev)
void blkdev_put_no_open(struct block_device *bdev)
{
- module_put(bdev->bd_disk->fops->owner);
put_device(&bdev->bd_device);
}
@@ -820,12 +818,14 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
ret = -ENXIO;
if (!disk_live(disk))
goto abort_claiming;
+ if (!try_module_get(disk->fops->owner))
+ goto abort_claiming;
if (bdev_is_partition(bdev))
ret = blkdev_get_part(bdev, mode);
else
ret = blkdev_get_whole(bdev, mode);
if (ret)
- goto abort_claiming;
+ goto put_module;
if (mode & FMODE_EXCL) {
bd_finish_claiming(bdev, holder);
@@ -847,7 +847,8 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
if (unblock_events)
disk_unblock_events(disk);
return bdev;
-
+put_module:
+ module_put(disk->fops->owner);
abort_claiming:
if (mode & FMODE_EXCL)
bd_abort_claiming(bdev, holder);
@@ -956,6 +957,7 @@ void blkdev_put(struct block_device *bdev, fmode_t mode)
blkdev_put_whole(bdev, mode);
mutex_unlock(&disk->open_mutex);
+ module_put(disk->fops->owner);
blkdev_put_no_open(bdev);
}
EXPORT_SYMBOL(blkdev_put);
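
A minimal caller-side sketch (example_read_open_close() and devt are placeholders; error handling trimmed): the gendisk module reference is now taken inside blkdev_get_by_dev() and released in blkdev_put(), so the pairing visible to callers is unchanged.

	#include <linux/blkdev.h>
	#include <linux/err.h>

	static int example_read_open_close(dev_t devt)
	{
		struct block_device *bdev;

		bdev = blkdev_get_by_dev(devt, FMODE_READ, NULL); /* takes fops->owner */
		if (IS_ERR(bdev))
			return PTR_ERR(bdev);

		/* ... submit I/O against bdev ... */

		blkdev_put(bdev, FMODE_READ);			  /* drops fops->owner */
		return 0;
	}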
diff --git a/block/blk-core.c b/block/blk-core.c
index f0f38ca8e22f..1378d084c770 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1017,6 +1017,7 @@ EXPORT_SYMBOL(submit_bio);
/**
* bio_poll - poll for BIO completions
* @bio: bio to poll for
+ * @iob: batches of IO
* @flags: BLK_POLL_* flags that control the behavior
*
* Poll for completions on queue associated with the bio. Returns number of
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index a5b37cc65b17..769b64394298 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -2311,7 +2311,14 @@ static void ioc_timer_fn(struct timer_list *timer)
hwm = current_hweight_max(iocg);
new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
usage, &now);
- if (new_hwi < hwm) {
+ /*
+ * The donation calculation assumes hweight_after_donation
+ * is positive, a condition that a donor w/ hwa < 2 can't
+ * meet. Don't bother with donation if hwa is below 2; it's
+ * not going to make a meaningful difference anyway.
+ */
+ if (new_hwi < hwm && hwa >= 2) {
iocg->hweight_donating = hwa;
iocg->hweight_after_donation = new_hwi;
list_add(&iocg->surplus_list, &surpluses);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 8799fa73ef34..8874a63ae952 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -860,13 +860,14 @@ void blk_mq_end_request_batch(struct io_comp_batch *iob)
if (iob->need_ts)
__blk_mq_end_request_acct(rq, now);
+ rq_qos_done(rq->q, rq);
+
WRITE_ONCE(rq->state, MQ_RQ_IDLE);
if (!refcount_dec_and_test(&rq->ref))
continue;
blk_crypto_free_request(rq);
blk_pm_mark_last_busy(rq);
- rq_qos_done(rq->q, rq);
if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
if (cur_hctx)
diff --git a/block/fops.c b/block/fops.c
index ad732a36f9b3..0da147edbd18 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -15,6 +15,7 @@
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
+#include <linux/module.h>
#include "blk.h"
static inline struct inode *bdev_file_inode(struct file *file)
@@ -340,8 +341,7 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
} else {
ret = bio_iov_iter_get_pages(bio, iter);
if (unlikely(ret)) {
- bio->bi_status = BLK_STS_IOERR;
- bio_endio(bio);
+ bio_put(bio);
return ret;
}
}
diff --git a/block/ioprio.c b/block/ioprio.c
index 313c14a70bbd..6f01d35a5145 100644
--- a/block/ioprio.c
+++ b/block/ioprio.c
@@ -220,6 +220,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
pgrp = task_pgrp(current);
else
pgrp = find_vpid(who);
+ read_lock(&tasklist_lock);
do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
tmpio = get_task_ioprio(p);
if (tmpio < 0)
@@ -229,6 +230,8 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
else
ret = ioprio_best(ret, tmpio);
} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
+ read_unlock(&tasklist_lock);
+
break;
case IOPRIO_WHO_USER:
uid = make_kuid(current_user_ns(), who);
diff --git a/drivers/Makefile b/drivers/Makefile
index be5d40ae1488..a110338c860c 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -41,8 +41,7 @@ obj-$(CONFIG_DMADEVICES) += dma/
# SOC specific infrastructure drivers.
obj-y += soc/
-obj-$(CONFIG_VIRTIO) += virtio/
-obj-$(CONFIG_VIRTIO_PCI_LIB) += virtio/
+obj-y += virtio/
obj-$(CONFIG_VDPA) += vdpa/
obj-$(CONFIG_XEN) += xen/
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index a85c351589be..b62c87b8ce4a 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -998,7 +998,14 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
{
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
- struct cpc_register_resource *reg = &cpc_desc->cpc_regs[reg_idx];
+ struct cpc_register_resource *reg;
+
+ if (!cpc_desc) {
+ pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
+ return -ENODEV;
+ }
+
+ reg = &cpc_desc->cpc_regs[reg_idx];
if (CPC_IN_PCC(reg)) {
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index e312ebaed8db..2366f54d8e9c 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -1084,21 +1084,17 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
* Returns parent node of an ACPI device or data firmware node or %NULL if
* not available.
*/
-struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode)
+static struct fwnode_handle *
+acpi_node_get_parent(const struct fwnode_handle *fwnode)
{
if (is_acpi_data_node(fwnode)) {
/* All data nodes have parent pointer so just return that */
return to_acpi_data_node(fwnode)->parent;
} else if (is_acpi_device_node(fwnode)) {
- acpi_handle handle, parent_handle;
-
- handle = to_acpi_device_node(fwnode)->handle;
- if (ACPI_SUCCESS(acpi_get_parent(handle, &parent_handle))) {
- struct acpi_device *adev;
+ struct device *dev = to_acpi_device_node(fwnode)->dev.parent;
- if (!acpi_bus_get_device(parent_handle, &adev))
- return acpi_fwnode_handle(adev);
- }
+ if (dev)
+ return acpi_fwnode_handle(to_acpi_device(dev));
}
return NULL;
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 49fb74196d02..c75fb600740c 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2710,7 +2710,7 @@ static void binder_transaction(struct binder_proc *proc,
t->from = thread;
else
t->from = NULL;
- t->sender_euid = proc->cred->euid;
+ t->sender_euid = task_euid(proc->tsk);
t->to_proc = target_proc;
t->to_thread = target_thread;
t->code = tr->code;
@@ -4422,23 +4422,20 @@ static int binder_thread_release(struct binder_proc *proc,
__release(&t->lock);
/*
- * If this thread used poll, make sure we remove the waitqueue
- * from any epoll data structures holding it with POLLFREE.
- * waitqueue_active() is safe to use here because we're holding
- * the inner lock.
+ * If this thread used poll, make sure we remove the waitqueue from any
+ * poll data structures holding it.
*/
- if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
- waitqueue_active(&thread->wait)) {
- wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
- }
+ if (thread->looper & BINDER_LOOPER_STATE_POLL)
+ wake_up_pollfree(&thread->wait);
binder_inner_proc_unlock(thread->proc);
/*
- * This is needed to avoid races between wake_up_poll() above and
- * and ep_remove_waitqueue() called for other reasons (eg the epoll file
- * descriptor being closed); ep_remove_waitqueue() holds an RCU read
- * lock, so we can be sure it's done after calling synchronize_rcu().
+ * This is needed to avoid races between wake_up_pollfree() above and
+ * someone else removing the last entry from the queue for other reasons
+ * (e.g. ep_remove_wait_queue() being called due to an epoll file
+ * descriptor being closed). Such other users hold an RCU read lock, so
+ * we can be sure they're done after we call synchronize_rcu().
*/
if (thread->looper & BINDER_LOOPER_STATE_POLL)
synchronize_rcu();
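
In isolation, the teardown contract looks like this (a sketch with a made-up struct foo, mirroring the change above): flush poll waiters with wake_up_pollfree(), wait for RCU readers, and only then free the memory holding the waitqueue.

	#include <linux/wait.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo {				/* hypothetical object */
		wait_queue_head_t wait;
	};

	static void foo_destroy(struct foo *f)
	{
		wake_up_pollfree(&f->wait);	/* detach any epoll/poll waiters */
		synchronize_rcu();		/* waiters hold rcu_read_lock() */
		kfree(f);			/* now the waitqueue memory can go */
	}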
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 340515f54498..47bc74a8c7b6 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -671,7 +671,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
if (buffer->async_transaction) {
- alloc->free_async_space += size + sizeof(struct binder_buffer);
+ alloc->free_async_space += buffer_size + sizeof(struct binder_buffer);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_free_buf size %zd async free %zd\n",
diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c
index 50b56cd0039d..e9c7c07fd84c 100644
--- a/drivers/ata/ahci_ceva.c
+++ b/drivers/ata/ahci_ceva.c
@@ -94,6 +94,7 @@ struct ceva_ahci_priv {
static unsigned int ceva_ahci_read_id(struct ata_device *dev,
struct ata_taskfile *tf, u16 *id)
{
+ __le16 *__id = (__le16 *)id;
u32 err_mask;
err_mask = ata_do_dev_read_id(dev, tf, id);
@@ -103,7 +104,7 @@ static unsigned int ceva_ahci_read_id(struct ata_device *dev,
* Since CEVA controller does not support device sleep feature, we
* need to clear DEVSLP (bit 8) in word78 of the IDENTIFY DEVICE data.
*/
- id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
+ __id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
return 0;
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 59ad8c979cb3..aba0c67d1bd6 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3920,6 +3920,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA },
/* Odd clown on sil3726/4726 PMPs */
{ "Config Disk", NULL, ATA_HORKAGE_DISABLE },
+ /* Similar story with ASMedia 1092 */
+ { "ASMT109x- Config", NULL, ATA_HORKAGE_DISABLE },
/* Weird ATAPI devices */
{ "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 5b78e86e3459..b9c77885b872 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -827,7 +827,7 @@ static ssize_t ata_scsi_lpm_show(struct device *dev,
if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names))
return -EINVAL;
- return snprintf(buf, PAGE_SIZE, "%s\n",
+ return sysfs_emit(buf, "%s\n",
ata_lpm_policy_names[ap->target_lpm_policy]);
}
DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 1b84d5526d77..313e9475507b 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2859,8 +2859,19 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
goto invalid_fld;
}
- if (ata_is_ncq(tf->protocol) && (cdb[2 + cdb_offset] & 0x3) == 0)
- tf->protocol = ATA_PROT_NCQ_NODATA;
+ if ((cdb[2 + cdb_offset] & 0x3) == 0) {
+ /*
+ * When T_LENGTH is zero (No data is transferred), dir should
+ * be DMA_NONE.
+ */
+ if (scmd->sc_data_direction != DMA_NONE) {
+ fp = 2 + cdb_offset;
+ goto invalid_fld;
+ }
+
+ if (ata_is_ncq(tf->protocol))
+ tf->protocol = ATA_PROT_NCQ_NODATA;
+ }
/* enable LBA */
tf->flags |= ATA_TFLAG_LBA;
diff --git a/drivers/ata/pata_falcon.c b/drivers/ata/pata_falcon.c
index 121635aa8c00..823c88622e34 100644
--- a/drivers/ata/pata_falcon.c
+++ b/drivers/ata/pata_falcon.c
@@ -55,14 +55,14 @@ static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc,
/* Transfer multiple of 2 bytes */
if (rw == READ) {
if (swap)
- raw_insw_swapw((u16 *)data_addr, (u16 *)buf, words);
+ raw_insw_swapw(data_addr, (u16 *)buf, words);
else
- raw_insw((u16 *)data_addr, (u16 *)buf, words);
+ raw_insw(data_addr, (u16 *)buf, words);
} else {
if (swap)
- raw_outsw_swapw((u16 *)data_addr, (u16 *)buf, words);
+ raw_outsw_swapw(data_addr, (u16 *)buf, words);
else
- raw_outsw((u16 *)data_addr, (u16 *)buf, words);
+ raw_outsw(data_addr, (u16 *)buf, words);
}
/* Transfer trailing byte, if any. */
@@ -74,16 +74,16 @@ static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc,
if (rw == READ) {
if (swap)
- raw_insw_swapw((u16 *)data_addr, (u16 *)pad, 1);
+ raw_insw_swapw(data_addr, (u16 *)pad, 1);
else
- raw_insw((u16 *)data_addr, (u16 *)pad, 1);
+ raw_insw(data_addr, (u16 *)pad, 1);
*buf = pad[0];
} else {
pad[0] = *buf;
if (swap)
- raw_outsw_swapw((u16 *)data_addr, (u16 *)pad, 1);
+ raw_outsw_swapw(data_addr, (u16 *)pad, 1);
else
- raw_outsw((u16 *)data_addr, (u16 *)pad, 1);
+ raw_outsw(data_addr, (u16 *)pad, 1);
}
words++;
}
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index e5838b23c9e0..3b31a4f596d8 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1394,6 +1394,14 @@ static int sata_fsl_init_controller(struct ata_host *host)
return 0;
}
+static void sata_fsl_host_stop(struct ata_host *host)
+{
+ struct sata_fsl_host_priv *host_priv = host->private_data;
+
+ iounmap(host_priv->hcr_base);
+ kfree(host_priv);
+}
+
/*
* scsi mid-layer and libata interface structures
*/
@@ -1426,6 +1434,8 @@ static struct ata_port_operations sata_fsl_ops = {
.port_start = sata_fsl_port_start,
.port_stop = sata_fsl_port_stop,
+ .host_stop = sata_fsl_host_stop,
+
.pmp_attach = sata_fsl_pmp_attach,
.pmp_detach = sata_fsl_pmp_detach,
};
@@ -1480,9 +1490,9 @@ static int sata_fsl_probe(struct platform_device *ofdev)
host_priv->ssr_base = ssr_base;
host_priv->csr_base = csr_base;
- irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
- if (!irq) {
- dev_err(&ofdev->dev, "invalid irq from platform\n");
+ irq = platform_get_irq(ofdev, 0);
+ if (irq < 0) {
+ retval = irq;
goto error_exit_with_cleanup;
}
host_priv->irq = irq;
@@ -1557,10 +1567,6 @@ static int sata_fsl_remove(struct platform_device *ofdev)
ata_host_detach(host);
- irq_dispose_mapping(host_priv->irq);
- iounmap(host_priv->hcr_base);
- kfree(host_priv);
-
return 0;
}
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 304accde365c..6d309e4971b6 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -37,7 +37,7 @@ struct charlcd_priv {
bool must_clear;
/* contains the LCD config state */
- unsigned long int flags;
+ unsigned long flags;
/* Current escape sequence and it's length or -1 if outside */
struct {
@@ -578,6 +578,9 @@ static int charlcd_init(struct charlcd *lcd)
* Since charlcd_init_display() needs to write data, we have to
* enable mark the LCD initialized just before.
*/
+ if (WARN_ON(!lcd->ops->init_display))
+ return -EINVAL;
+
ret = lcd->ops->init_display(lcd);
if (ret)
return ret;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index f4d0c555de29..04ea92cbd9cf 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1902,7 +1902,7 @@ int dpm_prepare(pm_message_t state)
device_block_probing();
mutex_lock(&dpm_list_mtx);
- while (!list_empty(&dpm_list)) {
+ while (!list_empty(&dpm_list) && !error) {
struct device *dev = to_device(dpm_list.next);
get_device(dev);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index a154cab6cd98..c3a36cfaa855 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -2103,7 +2103,7 @@ static int loop_control_remove(int idx)
int ret;
if (idx < 0) {
- pr_warn("deleting an unspecified loop device is not supported.\n");
+ pr_warn_once("deleting an unspecified loop device is not supported.\n");
return -EINVAL;
}
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 97bf051a50ce..6ae38776e30e 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -316,7 +316,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
struct request *req = bd->rq;
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
unsigned long flags;
- unsigned int num;
+ int num;
int qid = hctx->queue_num;
bool notify = false;
blk_status_t status;
@@ -1049,7 +1049,6 @@ static struct virtio_driver virtio_blk = {
.feature_table_size = ARRAY_SIZE(features),
.feature_table_legacy = features_legacy,
.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
- .suppress_used_validation = true,
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8e3983e456f3..286cf1afad78 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1512,9 +1512,12 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
unsigned long flags;
struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
struct blkfront_info *info = rinfo->dev_info;
+ unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
- if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
+ if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
+ xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
return IRQ_HANDLED;
+ }
spin_lock_irqsave(&rinfo->ring_lock, flags);
again:
@@ -1530,6 +1533,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
unsigned long id;
unsigned int op;
+ eoiflag = 0;
+
RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
id = bret.id;
@@ -1646,6 +1651,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
+ xen_irq_lateeoi(irq, eoiflag);
+
return IRQ_HANDLED;
err:
@@ -1653,6 +1660,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
+ /* No EOI in order to avoid further interrupts. */
+
pr_alert("%s disabled for further use\n", info->gd->disk_name);
return IRQ_HANDLED;
}
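
The lateeoi contract the driver now follows, reduced to a sketch (example_lateeoi_handler() is illustrative, not from the patch): the handler must issue the EOI itself, passing XEN_EOI_FLAG_SPURIOUS when it found nothing to do so the event-channel core can throttle interrupt storms.

	#include <linux/interrupt.h>
	#include <xen/events.h>

	static irqreturn_t example_lateeoi_handler(int irq, void *dev_id)
	{
		unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
		bool did_work = false;

		/* ... consume ring responses, setting did_work for each one ... */

		if (did_work)
			eoiflag = 0;		/* real work was done: plain EOI */

		xen_irq_lateeoi(irq, eoiflag);	/* the handler owns the EOI */
		return IRQ_HANDLED;
	}

Such a handler is registered with bind_evtchn_to_irqhandler_lateeoi() rather than bind_evtchn_to_irqhandler(), as in the hunk below.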
@@ -1692,8 +1701,8 @@ static int setup_blkring(struct xenbus_device *dev,
if (err)
goto fail;
- err = bind_evtchn_to_irqhandler(rinfo->evtchn, blkif_interrupt, 0,
- "blkif", rinfo);
+ err = bind_evtchn_to_irqhandler_lateeoi(rinfo->evtchn, blkif_interrupt,
+ 0, "blkif", rinfo);
if (err <= 0) {
xenbus_dev_fatal(dev, err,
"bind_evtchn_to_irqhandler failed");
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 08d7953ec5f1..25071126995b 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1853,12 +1853,14 @@ static const struct block_device_operations zram_devops = {
.owner = THIS_MODULE
};
+#ifdef CONFIG_ZRAM_WRITEBACK
static const struct block_device_operations zram_wb_devops = {
.open = zram_open,
.submit_bio = zram_submit_bio,
.swap_slot_free_notify = zram_slot_free_notify,
.owner = THIS_MODULE
};
+#endif
static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
index fb99e3727155..547e6e769546 100644
--- a/drivers/bus/mhi/core/pm.c
+++ b/drivers/bus/mhi/core/pm.c
@@ -881,7 +881,7 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
}
EXPORT_SYMBOL_GPL(mhi_pm_suspend);
-int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
+static int __mhi_pm_resume(struct mhi_controller *mhi_cntrl, bool force)
{
struct mhi_chan *itr, *tmp;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
@@ -898,8 +898,12 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
return -EIO;
- if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3)
- return -EINVAL;
+ if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3) {
+ dev_warn(dev, "Resuming from non M3 state (%s)\n",
+ TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)));
+ if (!force)
+ return -EINVAL;
+ }
/* Notify clients about exiting LPM */
list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
@@ -940,8 +944,19 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
return 0;
}
+
+int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
+{
+ return __mhi_pm_resume(mhi_cntrl, false);
+}
EXPORT_SYMBOL_GPL(mhi_pm_resume);
+int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl)
+{
+ return __mhi_pm_resume(mhi_cntrl, true);
+}
+EXPORT_SYMBOL_GPL(mhi_pm_resume_force);
+
int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
{
int ret;
diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
index 59a4896a8030..4c577a731709 100644
--- a/drivers/bus/mhi/pci_generic.c
+++ b/drivers/bus/mhi/pci_generic.c
@@ -20,7 +20,7 @@
#define MHI_PCI_DEFAULT_BAR_NUM 0
-#define MHI_POST_RESET_DELAY_MS 500
+#define MHI_POST_RESET_DELAY_MS 2000
#define HEALTH_CHECK_PERIOD (HZ * 2)
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 6f225dddc74f..4566e730ef2b 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -687,11 +687,11 @@ err_clk_disable:
static void sunxi_rsb_hw_exit(struct sunxi_rsb *rsb)
{
- /* Keep the clock and PM reference counts consistent. */
- if (pm_runtime_status_suspended(rsb->dev))
- pm_runtime_resume(rsb->dev);
reset_control_assert(rsb->rstc);
- clk_disable_unprepare(rsb->clk);
+
+ /* Keep the clock and PM reference counts consistent. */
+ if (!pm_runtime_status_suspended(rsb->dev))
+ clk_disable_unprepare(rsb->clk);
}
static int __maybe_unused sunxi_rsb_runtime_suspend(struct device *dev)
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
index ed3c4c42fc23..d68d05d5d383 100644
--- a/drivers/char/agp/parisc-agp.c
+++ b/drivers/char/agp/parisc-agp.c
@@ -281,7 +281,7 @@ agp_ioc_init(void __iomem *ioc_regs)
return 0;
}
-static int
+static int __init
lba_find_capability(int cap)
{
struct _parisc_agp_info *info = &parisc_agp_info;
@@ -366,7 +366,7 @@ fail:
return error;
}
-static int
+static int __init
find_quicksilver(struct device *dev, void *data)
{
struct parisc_device **lba = data;
@@ -378,7 +378,7 @@ find_quicksilver(struct device *dev, void *data)
return 0;
}
-static int
+static int __init
parisc_agp_init(void)
{
extern struct sba_device *sba_list;
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index deed355422f4..c59265146e9c 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -191,6 +191,8 @@ struct ipmi_user {
struct work_struct remove_work;
};
+static struct workqueue_struct *remove_work_wq;
+
static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
__acquires(user->release_barrier)
{
@@ -1297,7 +1299,7 @@ static void free_user(struct kref *ref)
struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
/* SRCU cleanup must happen in task context. */
- schedule_work(&user->remove_work);
+ queue_work(remove_work_wq, &user->remove_work);
}
static void _ipmi_destroy_user(struct ipmi_user *user)
@@ -3029,7 +3031,7 @@ cleanup_bmc_device(struct kref *ref)
* with removing the device attributes while reading a device
* attribute.
*/
- schedule_work(&bmc->remove_work);
+ queue_work(remove_work_wq, &bmc->remove_work);
}
/*
@@ -3918,9 +3920,11 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
/* We didn't find a user, deliver an error response. */
ipmi_inc_stat(intf, unhandled_commands);
- msg->data[0] = ((netfn + 1) << 2) | (msg->rsp[4] & 0x3);
- msg->data[1] = msg->rsp[2];
- msg->data[2] = msg->rsp[4] & ~0x3;
+ msg->data[0] = (netfn + 1) << 2;
+ msg->data[0] |= msg->rsp[2] & 0x3; /* rqLUN */
+ msg->data[1] = msg->rsp[1]; /* Addr */
+ msg->data[2] = msg->rsp[2] & ~0x3; /* rqSeq */
+ msg->data[2] |= msg->rsp[0] & 0x3; /* rsLUN */
msg->data[3] = cmd;
msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE;
msg->data_size = 5;
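
As a worked example of the packing above (values assumed for illustration): a command arriving with NetFN 0x06 (App request) and rqLUN 0 produces an error response whose first byte is ((0x06 + 1) << 2) | 0 = 0x1c, i.e. NetFN 0x07 with the requester's LUN in the low two bits.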
@@ -4455,13 +4459,24 @@ return_unspecified:
msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
msg->rsp_size = 3;
} else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
- /* commands must have at least 3 bytes, responses 4. */
- if (is_cmd && (msg->rsp_size < 3)) {
+ /* commands must have at least 4 bytes, responses 5. */
+ if (is_cmd && (msg->rsp_size < 4)) {
ipmi_inc_stat(intf, invalid_commands);
goto out;
}
- if (!is_cmd && (msg->rsp_size < 4))
- goto return_unspecified;
+ if (!is_cmd && (msg->rsp_size < 5)) {
+ ipmi_inc_stat(intf, invalid_ipmb_responses);
+ /* Construct a valid error response. */
+ msg->rsp[0] = msg->data[0] & 0xfc; /* NetFN */
+ msg->rsp[0] |= (1 << 2); /* Make it a response */
+ msg->rsp[0] |= msg->data[2] & 3; /* rqLUN */
+ msg->rsp[1] = msg->data[1]; /* Addr */
+ msg->rsp[2] = msg->data[2] & 0xfc; /* rqSeq */
+ msg->rsp[2] |= msg->data[0] & 0x3; /* rsLUN */
+ msg->rsp[3] = msg->data[3]; /* Cmd */
+ msg->rsp[4] = IPMI_ERR_UNSPECIFIED;
+ msg->rsp_size = 5;
+ }
} else if ((msg->data_size >= 2)
&& (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
&& (msg->data[1] == IPMI_SEND_MSG_CMD)
@@ -5031,6 +5046,7 @@ struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
if (rv) {
rv->done = free_smi_msg;
rv->user_data = NULL;
+ rv->type = IPMI_SMI_MSG_TYPE_NORMAL;
atomic_inc(&smi_msg_inuse_count);
}
return rv;
@@ -5376,7 +5392,16 @@ static int ipmi_init_msghandler(void)
if (initialized)
goto out;
- init_srcu_struct(&ipmi_interfaces_srcu);
+ rv = init_srcu_struct(&ipmi_interfaces_srcu);
+ if (rv)
+ goto out;
+
+ remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
+ if (!remove_work_wq) {
+ pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
+ rv = -ENOMEM;
+ goto out_wq;
+ }
timer_setup(&ipmi_timer, ipmi_timeout, 0);
mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
@@ -5385,6 +5410,9 @@ static int ipmi_init_msghandler(void)
initialized = true;
+out_wq:
+ if (rv)
+ cleanup_srcu_struct(&ipmi_interfaces_srcu);
out:
mutex_unlock(&ipmi_interfaces_mutex);
return rv;
@@ -5408,6 +5436,8 @@ static void __exit cleanup_ipmi(void)
int count;
if (initialized) {
+ destroy_workqueue(remove_work_wq);
+
atomic_notifier_chain_unregister(&panic_notifier_list,
&panic_block);
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 0c62e578749e..48aab77abebf 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -1659,6 +1659,9 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
}
+ ssif_info->client = client;
+ i2c_set_clientdata(client, ssif_info);
+
rv = ssif_check_and_remove(client, ssif_info);
/* If rv is 0 and addr source is not SI_ACPI, continue probing */
if (!rv && ssif_info->addr_source == SI_ACPI) {
@@ -1679,9 +1682,6 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
ipmi_addr_src_to_str(ssif_info->addr_source),
client->addr, client->adapter->name, slave_addr);
- ssif_info->client = client;
- i2c_set_clientdata(client, ssif_info);
-
/* Now check for system interface capabilities */
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD;
@@ -1881,6 +1881,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
dev_err(&ssif_info->client->dev,
"Unable to start IPMI SSIF: %d\n", rv);
+ i2c_set_clientdata(client, NULL);
kfree(ssif_info);
}
kfree(resp);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index f467d63bbf1e..566ee2c78709 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -3418,6 +3418,14 @@ static int __clk_core_init(struct clk_core *core)
clk_prepare_lock();
+ /*
+ * Set hw->core after grabbing the prepare_lock to synchronize with
+ * callers of clk_core_fill_parent_index() where we treat hw->core
+ * being NULL as the clk not being registered yet. This is crucial so
+ * that clks aren't parented until their parent is fully registered.
+ */
+ core->hw->core = core;
+
ret = clk_pm_runtime_get(core);
if (ret)
goto unlock;
@@ -3582,8 +3590,10 @@ static int __clk_core_init(struct clk_core *core)
out:
clk_pm_runtime_put(core);
unlock:
- if (ret)
+ if (ret) {
hlist_del_init(&core->child_node);
+ core->hw->core = NULL;
+ }
clk_prepare_unlock();
@@ -3847,7 +3857,6 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
core->num_parents = init->num_parents;
core->min_rate = 0;
core->max_rate = ULONG_MAX;
- hw->core = core;
ret = clk_core_populate_parent_map(core, init);
if (ret)
@@ -3865,7 +3874,7 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
goto fail_create_clk;
}
- clk_core_link_consumer(hw->core, hw->clk);
+ clk_core_link_consumer(core, hw->clk);
ret = __clk_core_init(core);
if (!ret)
diff --git a/drivers/clk/imx/clk-imx8qxp-lpcg.c b/drivers/clk/imx/clk-imx8qxp-lpcg.c
index d3e905cf867d..b23758083ce5 100644
--- a/drivers/clk/imx/clk-imx8qxp-lpcg.c
+++ b/drivers/clk/imx/clk-imx8qxp-lpcg.c
@@ -370,7 +370,7 @@ static struct platform_driver imx8qxp_lpcg_clk_driver = {
.probe = imx8qxp_lpcg_clk_probe,
};
-builtin_platform_driver(imx8qxp_lpcg_clk_driver);
+module_platform_driver(imx8qxp_lpcg_clk_driver);
MODULE_AUTHOR("Aisheng Dong <aisheng.dong@nxp.com>");
MODULE_DESCRIPTION("NXP i.MX8QXP LPCG clock driver");
diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c
index c53a688d8ccc..40a2efb1329b 100644
--- a/drivers/clk/imx/clk-imx8qxp.c
+++ b/drivers/clk/imx/clk-imx8qxp.c
@@ -308,7 +308,7 @@ static struct platform_driver imx8qxp_clk_driver = {
},
.probe = imx8qxp_clk_probe,
};
-builtin_platform_driver(imx8qxp_clk_driver);
+module_platform_driver(imx8qxp_clk_driver);
MODULE_AUTHOR("Aisheng Dong <aisheng.dong@nxp.com>");
MODULE_DESCRIPTION("NXP i.MX8QXP clock driver");
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index eaedcceb766f..8f65b9bdafce 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -1429,6 +1429,15 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops);
void clk_trion_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config)
{
+ /*
+ * If the bootloader left the PLL enabled it's likely that there are
+ * RCGs that will lock up if we disable the PLL below.
+ */
+ if (trion_pll_is_enabled(pll, regmap)) {
+ pr_debug("Trion PLL is already enabled, skipping configuration\n");
+ return;
+ }
+
clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l);
regmap_write(regmap, PLL_CAL_L_VAL(pll), TRION_PLL_CAL_VAL);
clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha);
diff --git a/drivers/clk/qcom/clk-regmap-mux.c b/drivers/clk/qcom/clk-regmap-mux.c
index b2d00b451963..45d9cca28064 100644
--- a/drivers/clk/qcom/clk-regmap-mux.c
+++ b/drivers/clk/qcom/clk-regmap-mux.c
@@ -28,7 +28,7 @@ static u8 mux_get_parent(struct clk_hw *hw)
val &= mask;
if (mux->parent_map)
- return qcom_find_src_index(hw, mux->parent_map, val);
+ return qcom_find_cfg_index(hw, mux->parent_map, val);
return val;
}
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index 0932e019dd12..75f09e6e057e 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -69,6 +69,18 @@ int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, u8 src)
}
EXPORT_SYMBOL_GPL(qcom_find_src_index);
+int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map, u8 cfg)
+{
+ int i, num_parents = clk_hw_get_num_parents(hw);
+
+ for (i = 0; i < num_parents; i++)
+ if (cfg == map[i].cfg)
+ return i;
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(qcom_find_cfg_index);
+
struct regmap *
qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc)
{
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index bb39a7e106d8..9c8f7b798d9f 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -49,6 +49,8 @@ extern void
qcom_pll_set_fsm_mode(struct regmap *m, u32 reg, u8 bias_count, u8 lock_count);
extern int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map,
u8 src);
+extern int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map,
+ u8 cfg);
extern int qcom_cc_register_board_clk(struct device *dev, const char *path,
const char *name, unsigned long rate);
diff --git a/drivers/clk/qcom/gcc-sm6125.c b/drivers/clk/qcom/gcc-sm6125.c
index 543cfab7561f..431b55bb0d2f 100644
--- a/drivers/clk/qcom/gcc-sm6125.c
+++ b/drivers/clk/qcom/gcc-sm6125.c
@@ -1121,7 +1121,7 @@ static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
.name = "gcc_sdcc1_apps_clk_src",
.parent_data = gcc_parent_data_1,
.num_parents = ARRAY_SIZE(gcc_parent_data_1),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -1143,7 +1143,7 @@ static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
.name = "gcc_sdcc1_ice_core_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_floor_ops,
+ .ops = &clk_rcg2_ops,
},
};
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index d52f976dc875..d5cb372f0901 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -543,8 +543,8 @@ static void __init of_syscon_icst_setup(struct device_node *np)
regclk = icst_clk_setup(NULL, &icst_desc, name, parent_name, map, ctype);
if (IS_ERR(regclk)) {
- kfree(name);
pr_err("error setting up syscon ICST clock %s\n", name);
+ kfree(name);
return;
}
of_clk_add_provider(np, of_clk_src_simple_get, regclk);
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 9a04eacc4412..1ecd52f903b8 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -394,8 +394,13 @@ EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);
-static void erratum_set_next_event_generic(const int access, unsigned long evt,
- struct clock_event_device *clk)
+/*
+ * Force the inlining of this function so that the register accesses
+ * can be themselves correctly inlined.
+ */
+static __always_inline
+void erratum_set_next_event_generic(const int access, unsigned long evt,
+ struct clock_event_device *clk)
{
unsigned long ctrl;
u64 cval;
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index 3819ef5b7098..3245eb0c602d 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -47,7 +47,7 @@ static int __init timer_get_base_and_rate(struct device_node *np,
pr_warn("pclk for %pOFn is present, but could not be activated\n",
np);
- if (!of_property_read_u32(np, "clock-freq", rate) &&
+ if (!of_property_read_u32(np, "clock-freq", rate) ||
!of_property_read_u32(np, "clock-frequency", rate))
return 0;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index e338d2f010fe..096c3848fa41 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1004,10 +1004,9 @@ static struct kobj_type ktype_cpufreq = {
.release = cpufreq_sysfs_release,
};
-static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
+static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
+ struct device *dev)
{
- struct device *dev = get_cpu_device(cpu);
-
if (unlikely(!dev))
return;
@@ -1296,8 +1295,9 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
if (policy->max_freq_req) {
/*
- * CPUFREQ_CREATE_POLICY notification is sent only after
- * successfully adding max_freq_req request.
+ * Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY
+ * notification, since CPUFREQ_CREATE_POLICY notification was
+ * sent after adding max_freq_req earlier.
*/
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_REMOVE_POLICY, policy);
@@ -1391,7 +1391,7 @@ static int cpufreq_online(unsigned int cpu)
if (new_policy) {
for_each_cpu(j, policy->related_cpus) {
per_cpu(cpufreq_cpu_data, j) = policy;
- add_cpu_dev_symlink(policy, j);
+ add_cpu_dev_symlink(policy, j, get_cpu_device(j));
}
policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
@@ -1565,7 +1565,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
/* Create sysfs link on CPU registration */
policy = per_cpu(cpufreq_cpu_data, cpu);
if (policy)
- add_cpu_dev_symlink(policy, cpu);
+ add_cpu_dev_symlink(policy, cpu, dev);
return 0;
}
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 815df3daae9d..dec2a5649ac1 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -338,6 +338,8 @@ static void intel_pstste_sched_itmt_work_fn(struct work_struct *work)
static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn);
+#define CPPC_MAX_PERF U8_MAX
+
static void intel_pstate_set_itmt_prio(int cpu)
{
struct cppc_perf_caps cppc_perf;
@@ -349,6 +351,14 @@ static void intel_pstate_set_itmt_prio(int cpu)
return;
/*
+ * On some systems with overclocking enabled, CPPC.highest_perf is hardcoded to 0xff.
+ * In this case we can't use CPPC.highest_perf to enable ITMT, so
+ * look at MSR_HWP_CAPABILITIES bits [8:0] to decide instead.
+ */
+ if (cppc_perf.highest_perf == CPPC_MAX_PERF)
+ cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));
+
+ /*
* The priorities can be set regardless of whether or not
* sched_set_itmt_support(true) has been called and it is valid to
* update them at any time after it has been called.
@@ -1006,6 +1016,12 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
*/
value &= ~GENMASK_ULL(31, 24);
value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
+ /*
+ * However, make sure that EPP will be set to "performance" when
+ * the CPU is brought back online again and the "performance"
+ * scaling algorithm is still in effect.
+ */
+ cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN;
}
/*
@@ -2353,6 +2369,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
X86_MATCH(BROADWELL_D, core_funcs),
X86_MATCH(BROADWELL_X, core_funcs),
X86_MATCH(SKYLAKE_X, core_funcs),
+ X86_MATCH(ICELAKE_X, core_funcs),
{}
};
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
index fa768f10635f..fd29861526d6 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -211,6 +211,12 @@ static u32 uof_get_ae_mask(u32 obj_num)
return adf_4xxx_fw_config[obj_num].ae_mask;
}
+static u32 get_vf2pf_sources(void __iomem *pmisc_addr)
+{
+ /* For the moment do not report vf2pf sources */
+ return 0;
+}
+
void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &adf_4xxx_class;
@@ -254,6 +260,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
hw_data->set_msix_rttable = set_msix_default_rttable;
hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
hw_data->enable_pfvf_comms = pfvf_comms_disabled;
+ hw_data->get_vf2pf_sources = get_vf2pf_sources;
hw_data->disable_iov = adf_disable_sriov;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index d3fbd950be94..3e07f961e2f3 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -104,7 +104,11 @@ static bool dma_fence_array_signaled(struct dma_fence *fence)
{
struct dma_fence_array *array = to_dma_fence_array(fence);
- return atomic_read(&array->num_pending) <= 0;
+ if (atomic_read(&array->num_pending) > 0)
+ return false;
+
+ dma_fence_array_clear_pending_error(array);
+ return true;
}
static void dma_fence_array_release(struct dma_fence *fence)
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index ff3c0558b3b8..4deea75c0b9c 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -305,8 +305,7 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
if (old)
i = old->shared_count;
- if (fence)
- dma_fence_get(fence);
+ dma_fence_get(fence);
write_seqcount_begin(&obj->seq);
/* write_seqcount_begin provides the necessary memory barrier */
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index f57a39ddd063..ab7fd896d2c4 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -290,7 +290,7 @@ static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
int i;
table = &buffer->sg_table;
- for_each_sg(table->sgl, sg, table->nents, i) {
+ for_each_sgtable_sg(table, sg, i) {
struct page *page = sg_page(sg);
__free_pages(page, compound_order(page));
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index cd0d745eb071..33baf1591a49 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -373,7 +373,7 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
struct axi_dma_desc *first)
{
u32 priority = chan->chip->dw->hdata->priority[chan->id];
- struct axi_dma_chan_config config;
+ struct axi_dma_chan_config config = {};
u32 irq_mask;
u8 lms = 0; /* Select AXI0 master for LLI fetching */
@@ -391,7 +391,7 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC;
config.prior = priority;
config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;
- config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;
+ config.hs_sel_src = DWAXIDMAC_HS_SEL_HW;
switch (chan->direction) {
case DMA_MEM_TO_DEV:
dw_axi_dma_set_byte_halfword(chan, true);
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index 198f6cd8ac1b..cee7aa231d7b 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -187,17 +187,9 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
/* DMA configuration */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (!err) {
+ if (err) {
pci_err(pdev, "DMA mask 64 set failed\n");
return err;
- } else {
- pci_err(pdev, "DMA mask 64 set failed\n");
-
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- pci_err(pdev, "DMA mask 32 set failed\n");
- return err;
- }
}
/* Data structure allocation */
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 17f2f8a31b63..cf2c8bc4f147 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -137,10 +137,10 @@ halt:
INIT_WORK(&idxd->work, idxd_device_reinit);
queue_work(idxd->wq, &idxd->work);
} else {
- spin_lock(&idxd->dev_lock);
idxd->state = IDXD_DEV_HALTED;
idxd_wqs_quiesce(idxd);
idxd_wqs_unmap_portal(idxd);
+ spin_lock(&idxd->dev_lock);
idxd_device_clear_state(idxd);
dev_err(&idxd->pdev->dev,
"idxd halted, need %s.\n",
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index de76fb4abac2..83452fbbb168 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -106,6 +106,7 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
{
struct idxd_desc *d, *t, *found = NULL;
struct llist_node *head;
+ LIST_HEAD(flist);
desc->completion->status = IDXD_COMP_DESC_ABORT;
/*
@@ -120,7 +121,11 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
found = desc;
continue;
}
- list_add_tail(&desc->list, &ie->work_list);
+
+ if (d->completion->status)
+ list_add_tail(&d->list, &flist);
+ else
+ list_add_tail(&d->list, &ie->work_list);
}
}
@@ -130,6 +135,17 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
if (found)
complete_desc(found, IDXD_COMPLETE_ABORT);
+
+ /*
+ * complete_desc() will return the desc to the allocator and the desc
+ * can be acquired by a different process, which may modify desc->list.
+ * Delete the desc from the list so the list traversal does not get
+ * corrupted by the other process.
+ */
+ list_for_each_entry_safe(d, t, &flist, list) {
+ list_del_init(&d->list);
+ complete_desc(d, IDXD_COMPLETE_NORMAL);
+ }
}
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c
index 962b6e05287b..d95c421877fb 100644
--- a/drivers/dma/st_fdma.c
+++ b/drivers/dma/st_fdma.c
@@ -874,4 +874,4 @@ MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver");
MODULE_AUTHOR("Ludovic.barre <Ludovic.barre@st.com>");
MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
-MODULE_ALIAS("platform: " DRIVER_NAME);
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 041d8e32d630..6e56d1cef5ee 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -4534,45 +4534,60 @@ static int udma_setup_resources(struct udma_dev *ud)
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
if (IS_ERR(rm_res)) {
bitmap_zero(ud->tchan_map, ud->tchan_cnt);
+ irq_res.sets = 1;
} else {
bitmap_fill(ud->tchan_map, ud->tchan_cnt);
for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->tchan_map,
&rm_res->desc[i], "tchan");
+ irq_res.sets = rm_res->sets;
}
- irq_res.sets = rm_res->sets;
/* rchan and matching default flow ranges */
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
if (IS_ERR(rm_res)) {
bitmap_zero(ud->rchan_map, ud->rchan_cnt);
+ irq_res.sets++;
} else {
bitmap_fill(ud->rchan_map, ud->rchan_cnt);
for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->rchan_map,
&rm_res->desc[i], "rchan");
+ irq_res.sets += rm_res->sets;
}
- irq_res.sets += rm_res->sets;
irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
+ if (!irq_res.desc)
+ return -ENOMEM;
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
- for (i = 0; i < rm_res->sets; i++) {
- irq_res.desc[i].start = rm_res->desc[i].start;
- irq_res.desc[i].num = rm_res->desc[i].num;
- irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
- irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
+ if (IS_ERR(rm_res)) {
+ irq_res.desc[0].start = 0;
+ irq_res.desc[0].num = ud->tchan_cnt;
+ i = 1;
+ } else {
+ for (i = 0; i < rm_res->sets; i++) {
+ irq_res.desc[i].start = rm_res->desc[i].start;
+ irq_res.desc[i].num = rm_res->desc[i].num;
+ irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
+ irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
+ }
}
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
- for (j = 0; j < rm_res->sets; j++, i++) {
- if (rm_res->desc[j].num) {
- irq_res.desc[i].start = rm_res->desc[j].start +
- ud->soc_data->oes.udma_rchan;
- irq_res.desc[i].num = rm_res->desc[j].num;
- }
- if (rm_res->desc[j].num_sec) {
- irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
- ud->soc_data->oes.udma_rchan;
- irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+ if (IS_ERR(rm_res)) {
+ irq_res.desc[i].start = 0;
+ irq_res.desc[i].num = ud->rchan_cnt;
+ } else {
+ for (j = 0; j < rm_res->sets; j++, i++) {
+ if (rm_res->desc[j].num) {
+ irq_res.desc[i].start = rm_res->desc[j].start +
+ ud->soc_data->oes.udma_rchan;
+ irq_res.desc[i].num = rm_res->desc[j].num;
+ }
+ if (rm_res->desc[j].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
+ ud->soc_data->oes.udma_rchan;
+ irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+ }
}
}
ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
@@ -4690,14 +4705,15 @@ static int bcdma_setup_resources(struct udma_dev *ud)
rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
if (IS_ERR(rm_res)) {
bitmap_zero(ud->bchan_map, ud->bchan_cnt);
+ irq_res.sets++;
} else {
bitmap_fill(ud->bchan_map, ud->bchan_cnt);
for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->bchan_map,
&rm_res->desc[i],
"bchan");
+ irq_res.sets += rm_res->sets;
}
- irq_res.sets += rm_res->sets;
}
/* tchan ranges */
@@ -4705,14 +4721,15 @@ static int bcdma_setup_resources(struct udma_dev *ud)
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
if (IS_ERR(rm_res)) {
bitmap_zero(ud->tchan_map, ud->tchan_cnt);
+ irq_res.sets += 2;
} else {
bitmap_fill(ud->tchan_map, ud->tchan_cnt);
for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->tchan_map,
&rm_res->desc[i],
"tchan");
+ irq_res.sets += rm_res->sets * 2;
}
- irq_res.sets += rm_res->sets * 2;
}
/* rchan ranges */
@@ -4720,47 +4737,72 @@ static int bcdma_setup_resources(struct udma_dev *ud)
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
if (IS_ERR(rm_res)) {
bitmap_zero(ud->rchan_map, ud->rchan_cnt);
+ irq_res.sets += 2;
} else {
bitmap_fill(ud->rchan_map, ud->rchan_cnt);
for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->rchan_map,
&rm_res->desc[i],
"rchan");
+ irq_res.sets += rm_res->sets * 2;
}
- irq_res.sets += rm_res->sets * 2;
}
irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
+ if (!irq_res.desc)
+ return -ENOMEM;
if (ud->bchan_cnt) {
rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
- for (i = 0; i < rm_res->sets; i++) {
- irq_res.desc[i].start = rm_res->desc[i].start +
- oes->bcdma_bchan_ring;
- irq_res.desc[i].num = rm_res->desc[i].num;
+ if (IS_ERR(rm_res)) {
+ irq_res.desc[0].start = oes->bcdma_bchan_ring;
+ irq_res.desc[0].num = ud->bchan_cnt;
+ i = 1;
+ } else {
+ for (i = 0; i < rm_res->sets; i++) {
+ irq_res.desc[i].start = rm_res->desc[i].start +
+ oes->bcdma_bchan_ring;
+ irq_res.desc[i].num = rm_res->desc[i].num;
+ }
}
}
if (ud->tchan_cnt) {
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
- for (j = 0; j < rm_res->sets; j++, i += 2) {
- irq_res.desc[i].start = rm_res->desc[j].start +
- oes->bcdma_tchan_data;
- irq_res.desc[i].num = rm_res->desc[j].num;
-
- irq_res.desc[i + 1].start = rm_res->desc[j].start +
- oes->bcdma_tchan_ring;
- irq_res.desc[i + 1].num = rm_res->desc[j].num;
+ if (IS_ERR(rm_res)) {
+ irq_res.desc[i].start = oes->bcdma_tchan_data;
+ irq_res.desc[i].num = ud->tchan_cnt;
+ irq_res.desc[i + 1].start = oes->bcdma_tchan_ring;
+ irq_res.desc[i + 1].num = ud->tchan_cnt;
+ i += 2;
+ } else {
+ for (j = 0; j < rm_res->sets; j++, i += 2) {
+ irq_res.desc[i].start = rm_res->desc[j].start +
+ oes->bcdma_tchan_data;
+ irq_res.desc[i].num = rm_res->desc[j].num;
+
+ irq_res.desc[i + 1].start = rm_res->desc[j].start +
+ oes->bcdma_tchan_ring;
+ irq_res.desc[i + 1].num = rm_res->desc[j].num;
+ }
}
}
if (ud->rchan_cnt) {
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
- for (j = 0; j < rm_res->sets; j++, i += 2) {
- irq_res.desc[i].start = rm_res->desc[j].start +
- oes->bcdma_rchan_data;
- irq_res.desc[i].num = rm_res->desc[j].num;
-
- irq_res.desc[i + 1].start = rm_res->desc[j].start +
- oes->bcdma_rchan_ring;
- irq_res.desc[i + 1].num = rm_res->desc[j].num;
+ if (IS_ERR(rm_res)) {
+ irq_res.desc[i].start = oes->bcdma_rchan_data;
+ irq_res.desc[i].num = ud->rchan_cnt;
+ irq_res.desc[i + 1].start = oes->bcdma_rchan_ring;
+ irq_res.desc[i + 1].num = ud->rchan_cnt;
+ i += 2;
+ } else {
+ for (j = 0; j < rm_res->sets; j++, i += 2) {
+ irq_res.desc[i].start = rm_res->desc[j].start +
+ oes->bcdma_rchan_data;
+ irq_res.desc[i].num = rm_res->desc[j].num;
+
+ irq_res.desc[i + 1].start = rm_res->desc[j].start +
+ oes->bcdma_rchan_ring;
+ irq_res.desc[i + 1].num = rm_res->desc[j].num;
+ }
}
}
@@ -4858,39 +4900,54 @@ static int pktdma_setup_resources(struct udma_dev *ud)
if (IS_ERR(rm_res)) {
/* all rflows are assigned exclusively to Linux */
bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
+ irq_res.sets = 1;
} else {
bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->rflow_in_use,
&rm_res->desc[i], "rflow");
+ irq_res.sets = rm_res->sets;
}
- irq_res.sets = rm_res->sets;
/* tflow ranges */
rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
if (IS_ERR(rm_res)) {
/* all tflows are assigned exclusively to Linux */
bitmap_zero(ud->tflow_map, ud->tflow_cnt);
+ irq_res.sets++;
} else {
bitmap_fill(ud->tflow_map, ud->tflow_cnt);
for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->tflow_map,
&rm_res->desc[i], "tflow");
+ irq_res.sets += rm_res->sets;
}
- irq_res.sets += rm_res->sets;
irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
+ if (!irq_res.desc)
+ return -ENOMEM;
rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
- for (i = 0; i < rm_res->sets; i++) {
- irq_res.desc[i].start = rm_res->desc[i].start +
- oes->pktdma_tchan_flow;
- irq_res.desc[i].num = rm_res->desc[i].num;
+ if (IS_ERR(rm_res)) {
+ irq_res.desc[0].start = oes->pktdma_tchan_flow;
+ irq_res.desc[0].num = ud->tflow_cnt;
+ i = 1;
+ } else {
+ for (i = 0; i < rm_res->sets; i++) {
+ irq_res.desc[i].start = rm_res->desc[i].start +
+ oes->pktdma_tchan_flow;
+ irq_res.desc[i].num = rm_res->desc[i].num;
+ }
}
rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
- for (j = 0; j < rm_res->sets; j++, i++) {
- irq_res.desc[i].start = rm_res->desc[j].start +
- oes->pktdma_rchan_flow;
- irq_res.desc[i].num = rm_res->desc[j].num;
+ if (IS_ERR(rm_res)) {
+ irq_res.desc[i].start = oes->pktdma_rchan_flow;
+ irq_res.desc[i].num = ud->rflow_cnt;
+ } else {
+ for (j = 0; j < rm_res->sets; j++, i++) {
+ irq_res.desc[i].start = rm_res->desc[j].start +
+ oes->pktdma_rchan_flow;
+ irq_res.desc[i].num = rm_res->desc[j].num;
+ }
}
ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
kfree(irq_res.desc);
diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
index de416f9e7921..f5219334fd3a 100644
--- a/drivers/firmware/arm_scmi/base.c
+++ b/drivers/firmware/arm_scmi/base.c
@@ -34,6 +34,12 @@ struct scmi_msg_resp_base_attributes {
__le16 reserved;
};
+struct scmi_msg_resp_base_discover_agent {
+ __le32 agent_id;
+ u8 name[SCMI_MAX_STR_SIZE];
+};
+
+
struct scmi_msg_base_error_notify {
__le32 event_control;
#define BASE_TP_NOTIFY_ALL BIT(0)
@@ -225,18 +231,21 @@ static int scmi_base_discover_agent_get(const struct scmi_protocol_handle *ph,
int id, char *name)
{
int ret;
+ struct scmi_msg_resp_base_discover_agent *agent_info;
struct scmi_xfer *t;
ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_AGENT,
- sizeof(__le32), SCMI_MAX_STR_SIZE, &t);
+ sizeof(__le32), sizeof(*agent_info), &t);
if (ret)
return ret;
put_unaligned_le32(id, t->tx.buf);
ret = ph->xops->do_xfer(ph, t);
- if (!ret)
- strlcpy(name, t->rx.buf, SCMI_MAX_STR_SIZE);
+ if (!ret) {
+ agent_info = t->rx.buf;
+ strlcpy(name, agent_info->name, SCMI_MAX_STR_SIZE);
+ }
ph->xops->xfer_put(ph, t);
diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
index 4371fdcd5a73..581d34c95769 100644
--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
+++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
@@ -138,9 +138,7 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
scmi_pd_data->domains = domains;
scmi_pd_data->num_domains = num_domains;
- of_genpd_add_provider_onecell(np, scmi_pd_data);
-
- return 0;
+ return of_genpd_add_provider_onecell(np, scmi_pd_data);
}
static const struct scmi_device_id scmi_id_table[] = {
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index 308471586381..cdbb287bd8bc 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -637,7 +637,7 @@ static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph,
if (ret)
return ret;
- put_unaligned_le32(cpu_to_le32(sensor_id), t->tx.buf);
+ put_unaligned_le32(sensor_id, t->tx.buf);
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
struct sensors_info *si = ph->get_priv(ph);
diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c
index 11e8efb71375..87039c5c03fd 100644
--- a/drivers/firmware/arm_scmi/virtio.c
+++ b/drivers/firmware/arm_scmi/virtio.c
@@ -82,7 +82,8 @@ static bool scmi_vio_have_vq_rx(struct virtio_device *vdev)
}
static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
- struct scmi_vio_msg *msg)
+ struct scmi_vio_msg *msg,
+ struct device *dev)
{
struct scatterlist sg_in;
int rc;
@@ -94,8 +95,7 @@ static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC);
if (rc)
- dev_err_once(vioch->cinfo->dev,
- "failed to add to virtqueue (%d)\n", rc);
+ dev_err_once(dev, "failed to add to virtqueue (%d)\n", rc);
else
virtqueue_kick(vioch->vqueue);
@@ -108,7 +108,7 @@ static void scmi_finalize_message(struct scmi_vio_channel *vioch,
struct scmi_vio_msg *msg)
{
if (vioch->is_rx) {
- scmi_vio_feed_vq_rx(vioch, msg);
+ scmi_vio_feed_vq_rx(vioch, msg, vioch->cinfo->dev);
} else {
/* Here IRQs are assumed to be already disabled by the caller */
spin_lock(&vioch->lock);
@@ -269,7 +269,7 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
list_add_tail(&msg->list, &vioch->free_list);
spin_unlock_irqrestore(&vioch->lock, flags);
} else {
- scmi_vio_feed_vq_rx(vioch, msg);
+ scmi_vio_feed_vq_rx(vioch, msg, cinfo->dev);
}
}
diff --git a/drivers/firmware/arm_scmi/voltage.c b/drivers/firmware/arm_scmi/voltage.c
index a5048956a0be..ac08e819088b 100644
--- a/drivers/firmware/arm_scmi/voltage.c
+++ b/drivers/firmware/arm_scmi/voltage.c
@@ -156,7 +156,7 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph,
int cnt;
cmd->domain_id = cpu_to_le32(v->id);
- cmd->level_index = desc_index;
+ cmd->level_index = cpu_to_le32(desc_index);
ret = ph->xops->do_xfer(ph, tl);
if (ret)
break;
diff --git a/drivers/firmware/scpi_pm_domain.c b/drivers/firmware/scpi_pm_domain.c
index 51201600d789..800673910b51 100644
--- a/drivers/firmware/scpi_pm_domain.c
+++ b/drivers/firmware/scpi_pm_domain.c
@@ -16,7 +16,6 @@ struct scpi_pm_domain {
struct generic_pm_domain genpd;
struct scpi_ops *ops;
u32 domain;
- char name[30];
};
/*
@@ -110,8 +109,13 @@ static int scpi_pm_domain_probe(struct platform_device *pdev)
scpi_pd->domain = i;
scpi_pd->ops = scpi_ops;
- sprintf(scpi_pd->name, "%pOFn.%d", np, i);
- scpi_pd->genpd.name = scpi_pd->name;
+ scpi_pd->genpd.name = devm_kasprintf(dev, GFP_KERNEL,
+ "%pOFn.%d", np, i);
+ if (!scpi_pd->genpd.name) {
+ dev_err(dev, "Failed to allocate genpd name:%pOFn.%d\n",
+ np, i);
+ continue;
+ }
scpi_pd->genpd.power_off = scpi_pd_power_off;
scpi_pd->genpd.power_on = scpi_pd_power_on;
diff --git a/drivers/firmware/smccc/soc_id.c b/drivers/firmware/smccc/soc_id.c
index 581aa5e9b077..dd7c3d5e8b0b 100644
--- a/drivers/firmware/smccc/soc_id.c
+++ b/drivers/firmware/smccc/soc_id.c
@@ -50,7 +50,7 @@ static int __init smccc_soc_init(void)
arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_SOC_ID, &res);
- if (res.a0 == SMCCC_RET_NOT_SUPPORTED) {
+ if ((int)res.a0 == SMCCC_RET_NOT_SUPPORTED) {
pr_info("ARCH_SOC_ID not implemented, skipping ....\n");
return 0;
}
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
index 6d66fe03fb6a..fd89899aeeed 100644
--- a/drivers/firmware/tegra/bpmp-debugfs.c
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -77,13 +77,14 @@ static const char *get_filename(struct tegra_bpmp *bpmp,
const char *root_path, *filename = NULL;
char *root_path_buf;
size_t root_len;
+ size_t root_path_buf_len = 512;
- root_path_buf = kzalloc(512, GFP_KERNEL);
+ root_path_buf = kzalloc(root_path_buf_len, GFP_KERNEL);
if (!root_path_buf)
goto out;
root_path = dentry_path(bpmp->debugfs_mirror, root_path_buf,
- sizeof(root_path_buf));
+ root_path_buf_len);
if (IS_ERR(root_path))
goto out;
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
index 026903e3ef54..08b9e2cf4f2d 100644
--- a/drivers/gpio/gpio-dln2.c
+++ b/drivers/gpio/gpio-dln2.c
@@ -46,6 +46,7 @@
struct dln2_gpio {
struct platform_device *pdev;
struct gpio_chip gpio;
+ struct irq_chip irqchip;
/*
* Cache pin direction to save us one transfer, since the hardware has
@@ -383,15 +384,6 @@ static void dln2_irq_bus_unlock(struct irq_data *irqd)
mutex_unlock(&dln2->irq_lock);
}
-static struct irq_chip dln2_gpio_irqchip = {
- .name = "dln2-irq",
- .irq_mask = dln2_irq_mask,
- .irq_unmask = dln2_irq_unmask,
- .irq_set_type = dln2_irq_set_type,
- .irq_bus_lock = dln2_irq_bus_lock,
- .irq_bus_sync_unlock = dln2_irq_bus_unlock,
-};
-
static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
const void *data, int len)
{
@@ -473,8 +465,15 @@ static int dln2_gpio_probe(struct platform_device *pdev)
dln2->gpio.direction_output = dln2_gpio_direction_output;
dln2->gpio.set_config = dln2_gpio_set_config;
+ dln2->irqchip.name = "dln2-irq",
+ dln2->irqchip.irq_mask = dln2_irq_mask,
+ dln2->irqchip.irq_unmask = dln2_irq_unmask,
+ dln2->irqchip.irq_set_type = dln2_irq_set_type,
+ dln2->irqchip.irq_bus_lock = dln2_irq_bus_lock,
+ dln2->irqchip.irq_bus_sync_unlock = dln2_irq_bus_unlock,
+
girq = &dln2->gpio.irq;
- girq->chip = &dln2_gpio_irqchip;
+ girq->chip = &dln2->irqchip;
/* The event comes from the outside so no parent handler */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/gpio/gpio-virtio.c b/drivers/gpio/gpio-virtio.c
index 84f96b78f32a..9f4941bc5760 100644
--- a/drivers/gpio/gpio-virtio.c
+++ b/drivers/gpio/gpio-virtio.c
@@ -100,11 +100,7 @@ static int _virtio_gpio_req(struct virtio_gpio *vgpio, u16 type, u16 gpio,
virtqueue_kick(vgpio->request_vq);
mutex_unlock(&vgpio->lock);
- if (!wait_for_completion_timeout(&line->completion, HZ)) {
- dev_err(dev, "GPIO operation timed out\n");
- ret = -ETIMEDOUT;
- goto out;
- }
+ wait_for_completion(&line->completion);
if (unlikely(res->status != VIRTIO_GPIO_STATUS_OK)) {
dev_err(dev, "GPIO request failed: %d\n", gpio);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index ae29fcba9e19..b1f22e457fd0 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -217,13 +217,6 @@ config DRM_GEM_CMA_HELPER
help
Choose this if you need the GEM CMA helper functions
-config DRM_KMS_CMA_HELPER
- bool
- depends on DRM
- select DRM_GEM_CMA_HELPER
- help
- Choose this if you need the KMS CMA helper functions
-
config DRM_GEM_SHMEM_HELPER
tristate
depends on DRM && MMU
@@ -395,6 +388,8 @@ source "drivers/gpu/drm/xlnx/Kconfig"
source "drivers/gpu/drm/gud/Kconfig"
+source "drivers/gpu/drm/sprd/Kconfig"
+
config DRM_HYPERV
tristate "DRM Support for Hyper-V synthetic video device"
depends on DRM && PCI && MMU && HYPERV
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index fa16d3e0bbdc..301a44dc18e3 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -6,7 +6,7 @@
drm-y := drm_aperture.o drm_auth.o drm_cache.o \
drm_file.o drm_gem.o drm_ioctl.o \
drm_drv.o \
- drm_sysfs.o drm_hashtab.o drm_mm.o \
+ drm_sysfs.o drm_mm.o \
drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o drm_displayid.o \
drm_trace_points.o drm_prime.o \
drm_vma_manager.o \
@@ -20,8 +20,8 @@ drm-y := drm_aperture.o drm_auth.o drm_cache.o \
drm_managed.o drm_vblank_work.o
drm-$(CONFIG_DRM_LEGACY) += drm_agpsupport.o drm_bufs.o drm_context.o drm_dma.o \
- drm_irq.o drm_legacy_misc.o drm_lock.o drm_memory.o \
- drm_scatter.o drm_vm.o
+ drm_hashtab.o drm_irq.o drm_legacy_misc.o drm_lock.o \
+ drm_memory.o drm_scatter.o drm_vm.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
@@ -36,6 +36,7 @@ obj-$(CONFIG_DRM_DP_AUX_BUS) += drm_dp_aux_bus.o
obj-$(CONFIG_DRM_NOMODESET) += drm_nomodeset.o
drm_cma_helper-y := drm_gem_cma_helper.o
+drm_cma_helper-$(CONFIG_DRM_KMS_HELPER) += drm_fb_cma_helper.o
obj-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_cma_helper.o
drm_shmem_helper-y := drm_gem_shmem_helper.o
@@ -60,7 +61,6 @@ drm_kms_helper-y := drm_bridge_connector.o drm_crtc_helper.o drm_dp_helper.o \
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
-drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
drm_kms_helper-$(CONFIG_DRM_DP_CEC) += drm_dp_cec.o
@@ -134,3 +134,4 @@ obj-$(CONFIG_DRM_TIDSS) += tidss/
obj-y += xlnx/
obj-y += gud/
obj-$(CONFIG_DRM_HYPERV) += hyperv/
+obj-$(CONFIG_DRM_SPRD) += sprd/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 653726588956..7fedbb725e17 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -45,7 +45,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_atombios.o atombios_crtc.o amdgpu_connectors.o \
atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_gart.o \
amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \
- amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \
+ amdgpu_gem.o amdgpu_ring.o \
amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \
atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 9f017663ac50..4f771f9eb0e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -812,6 +812,7 @@ struct amd_powerplay {
#define AMDGPU_RESET_MAGIC_NUM 64
#define AMDGPU_MAX_DF_PERFMONS 4
+#define AMDGPU_PRODUCT_NAME_LEN 64
struct amdgpu_device {
struct device *dev;
struct pci_dev *pdev;
@@ -1082,7 +1083,7 @@ struct amdgpu_device {
/* Chip product information */
char product_number[16];
- char product_name[32];
+ char product_name[AMDGPU_PRODUCT_NAME_LEN];
char serial[20];
atomic_t throttling_logging_enabled;
@@ -1095,7 +1096,9 @@ struct amdgpu_device {
pci_channel_state_t pci_channel_state;
struct amdgpu_reset_control *reset_cntl;
- uint32_t ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
+ uint32_t ip_versions[MAX_HWIP][HWIP_MAX_INSTANCE];
+
+ bool ram_is_direct_mapped;
};
static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
@@ -1316,6 +1319,8 @@ void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
+void amdgpu_device_halt(struct amdgpu_device *adev);
+
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
void amdgpu_register_atpx_handler(void);
@@ -1359,8 +1364,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
int amdgpu_enable_vblank_kms(struct drm_crtc *crtc);
void amdgpu_disable_vblank_kms(struct drm_crtc *crtc);
-long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg);
int amdgpu_info_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 7077f21f0021..776a947b45df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -72,7 +72,7 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
if (!kfd_initialized)
return;
- adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev, vf);
+ adev->kfd.dev = kgd2kfd_probe(adev, vf);
if (adev->kfd.dev)
amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
@@ -233,19 +233,16 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
return r;
}
-void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
+void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
if (amdgpu_device_should_recover_gpu(adev))
amdgpu_device_gpu_recover(adev, NULL);
}
-int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
void **mem_obj, uint64_t *gpu_addr,
void **cpu_ptr, bool cp_mqd_gfx9)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct amdgpu_bo *bo = NULL;
struct amdgpu_bo_param bp;
int r;
@@ -314,7 +311,7 @@ allocate_mem_reserve_bo_failed:
return r;
}
-void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
+void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj)
{
struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;
@@ -325,10 +322,9 @@ void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
amdgpu_bo_unref(&(bo));
}
-int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size,
+int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
void **mem_obj)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct amdgpu_bo *bo = NULL;
struct amdgpu_bo_user *ubo;
struct amdgpu_bo_param bp;
@@ -355,18 +351,16 @@ int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size,
return 0;
}
-void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj)
+void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj)
{
struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;
amdgpu_bo_unref(&bo);
}
-uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
+uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev,
enum kgd_engine_type type)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
switch (type) {
case KGD_ENGINE_PFP:
return adev->gfx.pfp_fw_version;
@@ -399,11 +393,9 @@ uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
return 0;
}
-void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
+void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
struct kfd_local_mem_info *mem_info)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
memset(mem_info, 0, sizeof(*mem_info));
mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
@@ -428,19 +420,15 @@ void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
mem_info->mem_clk_max = 100;
}
-uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd)
+uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
if (adev->gfx.funcs->get_gpu_clock_counter)
return adev->gfx.funcs->get_gpu_clock_counter(adev);
return 0;
}
-uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
+uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
/* the sclk is in quantas of 10kHz */
if (amdgpu_sriov_vf(adev))
return adev->clock.default_sclk / 100;
@@ -450,9 +438,8 @@ uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
return 100;
}
-void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
+void amdgpu_amdkfd_get_cu_info(struct amdgpu_device *adev, struct kfd_cu_info *cu_info)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct amdgpu_cu_info acu_info = adev->gfx.cu_info;
memset(cu_info, 0, sizeof(*cu_info));
@@ -473,13 +460,12 @@ void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
cu_info->lds_size = acu_info.lds_size;
}
-int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
- struct kgd_dev **dma_buf_kgd,
+int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
+ struct amdgpu_device **dmabuf_adev,
uint64_t *bo_size, void *metadata_buffer,
size_t buffer_size, uint32_t *metadata_size,
uint32_t *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct dma_buf *dma_buf;
struct drm_gem_object *obj;
struct amdgpu_bo *bo;
@@ -507,8 +493,8 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
goto out_put;
r = 0;
- if (dma_buf_kgd)
- *dma_buf_kgd = (struct kgd_dev *)adev;
+ if (dmabuf_adev)
+ *dmabuf_adev = adev;
if (bo_size)
*bo_size = amdgpu_bo_size(bo);
if (metadata_buffer)
@@ -528,32 +514,18 @@ out_put:
return r;
}
-uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
+uint64_t amdgpu_amdkfd_get_vram_usage(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
return amdgpu_vram_mgr_usage(vram_man);
}
-uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
- return adev->gmc.xgmi.hive_id;
-}
-
-uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd)
+uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
+ struct amdgpu_device *src)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
- return adev->unique_id;
-}
-
-uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src)
-{
- struct amdgpu_device *peer_adev = (struct amdgpu_device *)src;
- struct amdgpu_device *adev = (struct amdgpu_device *)dst;
+ struct amdgpu_device *peer_adev = src;
+ struct amdgpu_device *adev = dst;
int ret = amdgpu_xgmi_get_hops_count(adev, peer_adev);
if (ret < 0) {
@@ -565,16 +537,18 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
return (uint8_t)ret;
}
-int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct kgd_dev *dst, struct kgd_dev *src, bool is_min)
+int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
+ struct amdgpu_device *src,
+ bool is_min)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)dst, *peer_adev;
+ struct amdgpu_device *adev = dst, *peer_adev;
int num_links;
if (adev->asic_type != CHIP_ALDEBARAN)
return 0;
if (src)
- peer_adev = (struct amdgpu_device *)src;
+ peer_adev = src;
/* num links returns 0 for indirect peers since indirect route is unknown. */
num_links = is_min ? 1 : amdgpu_xgmi_get_num_links(adev, peer_adev);
@@ -589,9 +563,8 @@ int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct kgd_dev *dst, struct kgd_dev
return (num_links * 16 * 25000)/BITS_PER_BYTE;
}
-int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min)
+int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)dev;
int num_lanes_shift = (is_min ? ffs(adev->pm.pcie_mlw_mask) :
fls(adev->pm.pcie_mlw_mask)) - 1;
int gen_speed_shift = (is_min ? ffs(adev->pm.pcie_gen_mask &
@@ -647,39 +620,11 @@ int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min)
return (num_lanes_factor * gen_speed_mbits_factor)/BITS_PER_BYTE;
}
-uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
- return adev->rmmio_remap.bus_addr;
-}
-
-uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
- return adev->gds.gws_size;
-}
-
-uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
- return adev->rev_id;
-}
-
-int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
- return adev->gmc.noretry;
-}
-
-int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
+int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
+ enum kgd_engine_type engine,
uint32_t vmid, uint64_t gpu_addr,
uint32_t *ib_cmd, uint32_t ib_len)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct amdgpu_ring *ring;
@@ -730,10 +675,8 @@ err:
return ret;
}
-void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
+void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
amdgpu_dpm_switch_power_profile(adev,
PP_SMC_POWER_PROFILE_COMPUTE,
!idle);
@@ -747,10 +690,9 @@ bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
return false;
}
-int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid)
+int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev,
+ uint16_t vmid)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
if (adev->family == AMDGPU_FAMILY_AI) {
int i;
@@ -763,10 +705,9 @@ int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid)
return 0;
}
-int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid,
- enum TLB_FLUSH_TYPE flush_type)
+int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+ uint16_t pasid, enum TLB_FLUSH_TYPE flush_type)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
bool all_hub = false;
if (adev->family == AMDGPU_FAMILY_AI)
@@ -775,21 +716,18 @@ int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid,
return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub);
}
-bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
+bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
return adev->have_atomics_support;
}
-void amdgpu_amdkfd_ras_poison_consumption_handler(struct kgd_dev *kgd)
+void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, bool reset)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct ras_err_data err_data = {0, 0, 0, NULL};
/* CPU MCA will handle page retirement if connected_to_cpu is 1 */
if (!adev->gmc.xgmi.connected_to_cpu)
- amdgpu_umc_process_ras_data_cb(adev, &err_data, NULL);
- else
- amdgpu_amdkfd_gpu_reset(kgd);
+ amdgpu_umc_poison_handler(adev, &err_data, reset);
+ else if (reset)
+ amdgpu_amdkfd_gpu_reset(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index a15a4787c7ee..61f899e54fd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -144,14 +144,16 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev);
-int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
+int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
+ enum kgd_engine_type engine,
uint32_t vmid, uint64_t gpu_addr,
uint32_t *ib_cmd, uint32_t ib_len);
-void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
-bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd);
-int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid);
-int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid,
- enum TLB_FLUSH_TYPE flush_type);
+void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle);
+bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev);
+int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev,
+ uint16_t vmid);
+int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+ uint16_t pasid, enum TLB_FLUSH_TYPE flush_type);
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
@@ -159,7 +161,7 @@ int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev);
int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);
-void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
+void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev);
int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
int queue_bit);
@@ -198,37 +200,36 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
}
#endif
/* Shared API */
-int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
void **mem_obj, uint64_t *gpu_addr,
void **cpu_ptr, bool mqd_gfx9);
-void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
-int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size, void **mem_obj);
-void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj);
+void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj);
+int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
+ void **mem_obj);
+void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj);
int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem);
int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem);
-uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
+uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev,
enum kgd_engine_type type);
-void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
+void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
struct kfd_local_mem_info *mem_info);
-uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd);
+uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev);
-uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
-void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
-int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
- struct kgd_dev **dmabuf_kgd,
+uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev);
+void amdgpu_amdkfd_get_cu_info(struct amdgpu_device *adev,
+ struct kfd_cu_info *cu_info);
+int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
+ struct amdgpu_device **dmabuf_adev,
uint64_t *bo_size, void *metadata_buffer,
size_t buffer_size, uint32_t *metadata_size,
uint32_t *flags);
-uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
-uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
-uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd);
-uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
-uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
-uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd);
-int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd);
-uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
-int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct kgd_dev *dst, struct kgd_dev *src, bool is_min);
-int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min);
+uint64_t amdgpu_amdkfd_get_vram_usage(struct amdgpu_device *adev);
+uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
+ struct amdgpu_device *src);
+int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
+ struct amdgpu_device *src,
+ bool is_min);
+int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min);
/* Read user wptr from a specified user address space with page fault
* disabled. The memory must be pinned and mapped to the hardware when
@@ -258,45 +259,55 @@ int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min);
(&((struct amdgpu_fpriv *) \
((struct drm_file *)(drm_priv))->driver_priv)->vm)
-int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
+int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
struct file *filp, u32 pasid,
void **process_info,
struct dma_fence **ef);
-void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv);
+void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
+ void *drm_priv);
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv);
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
- struct kgd_dev *kgd, uint64_t va, uint64_t size,
+ struct amdgpu_device *adev, uint64_t va, uint64_t size,
void *drm_priv, struct kgd_mem **mem,
uint64_t *offset, uint32_t flags);
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
- struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
+ struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
uint64_t *size);
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
- struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, bool *table_freed);
+ struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
+ bool *table_freed);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
- struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
+ struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_sync_memory(
- struct kgd_dev *kgd, struct kgd_mem *mem, bool intr);
-int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
+ struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
+int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct amdgpu_device *adev,
struct kgd_mem *mem, void **kptr, uint64_t *size);
-void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_dev *kgd, struct kgd_mem *mem);
+void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct amdgpu_device *adev,
+ struct kgd_mem *mem);
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
struct dma_fence **ef);
-int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
+int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
struct kfd_vm_fault_info *info);
-int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
+int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
struct dma_buf *dmabuf,
uint64_t va, void *drm_priv,
struct kgd_mem **mem, uint64_t *size,
uint64_t *mmap_offset);
-int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
+int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
struct tile_config *config);
-void amdgpu_amdkfd_ras_poison_consumption_handler(struct kgd_dev *kgd);
+void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
+ bool reset);
#if IS_ENABLED(CONFIG_HSA_AMD)
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
+
+/**
+ * @amdgpu_amdkfd_release_notify() - Notify KFD when GEM object is released
+ *
+ * Allows KFD to release its resources associated with the GEM object.
+ */
void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo);
void amdgpu_amdkfd_reserve_system_mem(uint64_t size);
#else
@@ -324,7 +335,7 @@ int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
#if IS_ENABLED(CONFIG_HSA_AMD)
int kgd2kfd_init(void);
void kgd2kfd_exit(void);
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf);
+struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
struct drm_device *ddev,
const struct kgd2kfd_shared_resources *gpu_resources);
@@ -348,7 +359,7 @@ static inline void kgd2kfd_exit(void)
}
static inline
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf)
+struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
{
return NULL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 5a7f680bcb3f..abe93b3ff765 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -57,11 +57,6 @@
(*dump)[i++][1] = RREG32(addr); \
} while (0)
-static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
-{
- return (struct amdgpu_device *)kgd;
-}
-
static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
{
return (struct v9_sdma_mqd *)mqd;
@@ -123,10 +118,9 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
return sdma_rlc_reg_offset;
}
-int kgd_arcturus_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+int kgd_arcturus_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
unsigned long end_jiffies;
@@ -193,11 +187,10 @@ int kgd_arcturus_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
return 0;
}
-int kgd_arcturus_hqd_sdma_dump(struct kgd_dev *kgd,
+int kgd_arcturus_hqd_sdma_dump(struct amdgpu_device *adev,
uint32_t engine_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
engine_id, queue_id);
uint32_t i = 0, reg;
@@ -225,9 +218,9 @@ int kgd_arcturus_hqd_sdma_dump(struct kgd_dev *kgd,
return 0;
}
-bool kgd_arcturus_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+bool kgd_arcturus_hqd_sdma_is_occupied(struct amdgpu_device *adev,
+ void *mqd)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
@@ -244,10 +237,9 @@ bool kgd_arcturus_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
return false;
}
-int kgd_arcturus_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+int kgd_arcturus_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
unsigned int utimeout)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
uint32_t temp;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h
index ce08131b7b5f..756c1a5679c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h
@@ -20,11 +20,12 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-int kgd_arcturus_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+int kgd_arcturus_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm);
-int kgd_arcturus_hqd_sdma_dump(struct kgd_dev *kgd,
+int kgd_arcturus_hqd_sdma_dump(struct amdgpu_device *adev,
uint32_t engine_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs);
-bool kgd_arcturus_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
-int kgd_arcturus_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+bool kgd_arcturus_hqd_sdma_is_occupied(struct amdgpu_device *adev,
+ void *mqd);
+int kgd_arcturus_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
unsigned int utimeout);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 960acf68150a..7b7f4b2764c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -39,37 +39,26 @@ enum hqd_dequeue_request_type {
SAVE_WAVES
};
-static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
-{
- return (struct amdgpu_device *)kgd;
-}
-
-static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
uint32_t queue, uint32_t vmid)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
mutex_lock(&adev->srbm_mutex);
nv_grbm_select(adev, mec, pipe, queue, vmid);
}
-static void unlock_srbm(struct kgd_dev *kgd)
+static void unlock_srbm(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
nv_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
-static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
- lock_srbm(kgd, mec, pipe, queue_id, 0);
+ lock_srbm(adev, mec, pipe, queue_id, 0);
}
static uint64_t get_queue_mask(struct amdgpu_device *adev,
@@ -81,33 +70,29 @@ static uint64_t get_queue_mask(struct amdgpu_device *adev,
return 1ull << bit;
}
-static void release_queue(struct kgd_dev *kgd)
+static void release_queue(struct amdgpu_device *adev)
{
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
uint32_t sh_mem_config,
uint32_t sh_mem_ape1_base,
uint32_t sh_mem_ape1_limit,
uint32_t sh_mem_bases)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
- lock_srbm(kgd, 0, 0, 0, vmid);
+ lock_srbm(adev, 0, 0, 0, vmid);
WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
/* APE1 no longer exists on GFX9 */
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
+static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
unsigned int vmid)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
/*
* We have to assume that there is no outstanding mapping.
* The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
@@ -150,22 +135,21 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
* but still works
*/
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
+static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t mec;
uint32_t pipe;
mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
- lock_srbm(kgd, mec, pipe, 0, 0);
+ lock_srbm(adev, mec, pipe, 0, 0);
WREG32_SOC15(GC, 0, mmCPC_INT_CNTL,
CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
- unlock_srbm(kgd);
+ unlock_srbm(adev);
return 0;
}
@@ -218,12 +202,11 @@ static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
return (struct v10_sdma_mqd *)mqd;
}
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm)
+static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t __user *wptr, uint32_t wptr_shift,
+ uint32_t wptr_mask, struct mm_struct *mm)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_compute_mqd *m;
uint32_t *mqd_hqd;
uint32_t reg, hqd_base, data;
@@ -231,7 +214,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
m = get_mqd(mqd);
pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
mqd_hqd = &m->cp_mqd_base_addr_lo;
@@ -296,16 +279,15 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, data);
- release_queue(kgd);
+ release_queue(adev);
return 0;
}
-static int kgd_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
+static int kgd_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t doorbell_off)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
struct v10_compute_mqd *m;
uint32_t mec, pipe;
@@ -313,7 +295,7 @@ static int kgd_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
m = get_mqd(mqd);
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
@@ -349,16 +331,15 @@ static int kgd_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
out_unlock:
spin_unlock(&adev->gfx.kiq.ring_lock);
- release_queue(kgd);
+ release_queue(adev);
return r;
}
-static int kgd_hqd_dump(struct kgd_dev *kgd,
+static int kgd_hqd_dump(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t i = 0, reg;
#define HQD_N_REGS 56
#define DUMP_REG(addr) do { \
@@ -372,13 +353,13 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
if (*dump == NULL)
return -ENOMEM;
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
DUMP_REG(reg);
- release_queue(kgd);
+ release_queue(adev);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
@@ -386,10 +367,9 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
return 0;
}
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
unsigned long end_jiffies;
@@ -456,11 +436,10 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
return 0;
}
-static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
uint32_t engine_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
engine_id, queue_id);
uint32_t i = 0, reg;
@@ -488,15 +467,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
return 0;
}
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id)
+static bool kgd_hqd_is_occupied(struct amdgpu_device *adev,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t act;
bool retval = false;
uint32_t low, high;
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
if (act) {
low = lower_32_bits(queue_address >> 8);
@@ -506,13 +485,12 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI))
retval = true;
}
- release_queue(kgd);
+ release_queue(adev);
return retval;
}
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
@@ -529,12 +507,11 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
return false;
}
-static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd,
enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
enum hqd_dequeue_request_type type;
unsigned long end_jiffies;
uint32_t temp;
@@ -548,7 +525,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
int retry;
#endif
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
if (m->cp_hqd_vmid == 0)
WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);
@@ -633,20 +610,19 @@ loop:
break;
if (time_after(jiffies, end_jiffies)) {
pr_err("cp queue preemption time out.\n");
- release_queue(kgd);
+ release_queue(adev);
return -ETIME;
}
usleep_range(500, 1000);
}
- release_queue(kgd);
+ release_queue(adev);
return 0;
}
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
unsigned int utimeout)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
uint32_t temp;
@@ -683,11 +659,10 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
return 0;
}
-static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
uint8_t vmid, uint16_t *p_pasid)
{
uint32_t value;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ vmid);
@@ -696,12 +671,12 @@ static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
-static int kgd_address_watch_disable(struct kgd_dev *kgd)
+static int kgd_address_watch_disable(struct amdgpu_device *adev)
{
return 0;
}
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
+static int kgd_address_watch_execute(struct amdgpu_device *adev,
unsigned int watch_point_id,
uint32_t cntl_val,
uint32_t addr_hi,
@@ -710,11 +685,10 @@ static int kgd_address_watch_execute(struct kgd_dev *kgd,
return 0;
}
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
+static int kgd_wave_control_execute(struct amdgpu_device *adev,
uint32_t gfx_index_val,
uint32_t sq_cmd)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t data = 0;
mutex_lock(&adev->grbm_idx_mutex);
@@ -735,18 +709,16 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd,
return 0;
}
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+static uint32_t kgd_address_watch_get_offset(struct amdgpu_device *adev,
unsigned int watch_point_id,
unsigned int reg_offset)
{
return 0;
}
-static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base)
+static void set_vm_context_page_table_base(struct amdgpu_device *adev,
+ uint32_t vmid, uint64_t page_table_base)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
pr_err("trying to set page table base for wrong VMID %u\n",
vmid);
@@ -757,12 +729,10 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
}
-static void program_trap_handler_settings(struct kgd_dev *kgd,
+static void program_trap_handler_settings(struct amdgpu_device *adev,
uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
- lock_srbm(kgd, 0, 0, 0, vmid);
+ lock_srbm(adev, 0, 0, 0, vmid);
/*
* Program TBA registers
@@ -781,7 +751,7 @@ static void program_trap_handler_settings(struct kgd_dev *kgd,
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
upper_32_bits(tma_addr >> 8));
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index dac0d751d5af..1f37d3574001 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -38,37 +38,26 @@ enum hqd_dequeue_request_type {
SAVE_WAVES
};
-static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
-{
- return (struct amdgpu_device *)kgd;
-}
-
-static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
uint32_t queue, uint32_t vmid)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
mutex_lock(&adev->srbm_mutex);
nv_grbm_select(adev, mec, pipe, queue, vmid);
}
-static void unlock_srbm(struct kgd_dev *kgd)
+static void unlock_srbm(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
nv_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
-static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
- lock_srbm(kgd, mec, pipe, queue_id, 0);
+ lock_srbm(adev, mec, pipe, queue_id, 0);
}
static uint64_t get_queue_mask(struct amdgpu_device *adev,
@@ -80,34 +69,30 @@ static uint64_t get_queue_mask(struct amdgpu_device *adev,
return 1ull << bit;
}
-static void release_queue(struct kgd_dev *kgd)
+static void release_queue(struct amdgpu_device *adev)
{
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
-static void program_sh_mem_settings_v10_3(struct kgd_dev *kgd, uint32_t vmid,
+static void program_sh_mem_settings_v10_3(struct amdgpu_device *adev, uint32_t vmid,
uint32_t sh_mem_config,
uint32_t sh_mem_ape1_base,
uint32_t sh_mem_ape1_limit,
uint32_t sh_mem_bases)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
- lock_srbm(kgd, 0, 0, 0, vmid);
+ lock_srbm(adev, 0, 0, 0, vmid);
WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
/* APE1 no longer exists on GFX9 */
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
/* ATC is defeatured on Sienna_Cichlid */
-static int set_pasid_vmid_mapping_v10_3(struct kgd_dev *kgd, unsigned int pasid,
+static int set_pasid_vmid_mapping_v10_3(struct amdgpu_device *adev, unsigned int pasid,
unsigned int vmid)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
uint32_t value = pasid << IH_VMID_0_LUT__PASID__SHIFT;
/* Mapping vmid to pasid also for IH block */
@@ -118,22 +103,21 @@ static int set_pasid_vmid_mapping_v10_3(struct kgd_dev *kgd, unsigned int pasid,
return 0;
}
-static int init_interrupts_v10_3(struct kgd_dev *kgd, uint32_t pipe_id)
+static int init_interrupts_v10_3(struct amdgpu_device *adev, uint32_t pipe_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t mec;
uint32_t pipe;
mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
- lock_srbm(kgd, mec, pipe, 0, 0);
+ lock_srbm(adev, mec, pipe, 0, 0);
WREG32_SOC15(GC, 0, mmCPC_INT_CNTL,
CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
- unlock_srbm(kgd);
+ unlock_srbm(adev);
return 0;
}
@@ -188,12 +172,11 @@ static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
return (struct v10_sdma_mqd *)mqd;
}
-static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm)
+static int hqd_load_v10_3(struct amdgpu_device *adev, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t __user *wptr, uint32_t wptr_shift,
+ uint32_t wptr_mask, struct mm_struct *mm)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_compute_mqd *m;
uint32_t *mqd_hqd;
uint32_t reg, hqd_base, data;
@@ -201,7 +184,7 @@ static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
m = get_mqd(mqd);
pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
/* HIQ is set during driver init period with vmid set to 0*/
if (m->cp_hqd_vmid == 0) {
@@ -281,16 +264,15 @@ static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, data);
- release_queue(kgd);
+ release_queue(adev);
return 0;
}
-static int hiq_mqd_load_v10_3(struct kgd_dev *kgd, void *mqd,
+static int hiq_mqd_load_v10_3(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t doorbell_off)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
struct v10_compute_mqd *m;
uint32_t mec, pipe;
@@ -298,7 +280,7 @@ static int hiq_mqd_load_v10_3(struct kgd_dev *kgd, void *mqd,
m = get_mqd(mqd);
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
@@ -334,16 +316,15 @@ static int hiq_mqd_load_v10_3(struct kgd_dev *kgd, void *mqd,
out_unlock:
spin_unlock(&adev->gfx.kiq.ring_lock);
- release_queue(kgd);
+ release_queue(adev);
return r;
}
-static int hqd_dump_v10_3(struct kgd_dev *kgd,
+static int hqd_dump_v10_3(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t i = 0, reg;
#define HQD_N_REGS 56
#define DUMP_REG(addr) do { \
@@ -357,13 +338,13 @@ static int hqd_dump_v10_3(struct kgd_dev *kgd,
if (*dump == NULL)
return -ENOMEM;
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
DUMP_REG(reg);
- release_queue(kgd);
+ release_queue(adev);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
@@ -371,10 +352,9 @@ static int hqd_dump_v10_3(struct kgd_dev *kgd,
return 0;
}
-static int hqd_sdma_load_v10_3(struct kgd_dev *kgd, void *mqd,
+static int hqd_sdma_load_v10_3(struct amdgpu_device *adev, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
unsigned long end_jiffies;
@@ -441,11 +421,10 @@ static int hqd_sdma_load_v10_3(struct kgd_dev *kgd, void *mqd,
return 0;
}
-static int hqd_sdma_dump_v10_3(struct kgd_dev *kgd,
+static int hqd_sdma_dump_v10_3(struct amdgpu_device *adev,
uint32_t engine_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
engine_id, queue_id);
uint32_t i = 0, reg;
@@ -473,15 +452,15 @@ static int hqd_sdma_dump_v10_3(struct kgd_dev *kgd,
return 0;
}
-static bool hqd_is_occupied_v10_3(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id)
+static bool hqd_is_occupied_v10_3(struct amdgpu_device *adev,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t act;
bool retval = false;
uint32_t low, high;
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
if (act) {
low = lower_32_bits(queue_address >> 8);
@@ -491,13 +470,13 @@ static bool hqd_is_occupied_v10_3(struct kgd_dev *kgd, uint64_t queue_address,
high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI))
retval = true;
}
- release_queue(kgd);
+ release_queue(adev);
return retval;
}
-static bool hqd_sdma_is_occupied_v10_3(struct kgd_dev *kgd, void *mqd)
+static bool hqd_sdma_is_occupied_v10_3(struct amdgpu_device *adev,
+ void *mqd)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
@@ -514,18 +493,17 @@ static bool hqd_sdma_is_occupied_v10_3(struct kgd_dev *kgd, void *mqd)
return false;
}
-static int hqd_destroy_v10_3(struct kgd_dev *kgd, void *mqd,
+static int hqd_destroy_v10_3(struct amdgpu_device *adev, void *mqd,
enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
enum hqd_dequeue_request_type type;
unsigned long end_jiffies;
uint32_t temp;
struct v10_compute_mqd *m = get_mqd(mqd);
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
if (m->cp_hqd_vmid == 0)
WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);
@@ -555,20 +533,19 @@ static int hqd_destroy_v10_3(struct kgd_dev *kgd, void *mqd,
if (time_after(jiffies, end_jiffies)) {
pr_err("cp queue pipe %d queue %d preemption failed\n",
pipe_id, queue_id);
- release_queue(kgd);
+ release_queue(adev);
return -ETIME;
}
usleep_range(500, 1000);
}
- release_queue(kgd);
+ release_queue(adev);
return 0;
}
-static int hqd_sdma_destroy_v10_3(struct kgd_dev *kgd, void *mqd,
+static int hqd_sdma_destroy_v10_3(struct amdgpu_device *adev, void *mqd,
unsigned int utimeout)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
uint32_t temp;
@@ -606,12 +583,12 @@ static int hqd_sdma_destroy_v10_3(struct kgd_dev *kgd, void *mqd,
}
-static int address_watch_disable_v10_3(struct kgd_dev *kgd)
+static int address_watch_disable_v10_3(struct amdgpu_device *adev)
{
return 0;
}
-static int address_watch_execute_v10_3(struct kgd_dev *kgd,
+static int address_watch_execute_v10_3(struct amdgpu_device *adev,
unsigned int watch_point_id,
uint32_t cntl_val,
uint32_t addr_hi,
@@ -620,11 +597,10 @@ static int address_watch_execute_v10_3(struct kgd_dev *kgd,
return 0;
}
-static int wave_control_execute_v10_3(struct kgd_dev *kgd,
+static int wave_control_execute_v10_3(struct amdgpu_device *adev,
uint32_t gfx_index_val,
uint32_t sq_cmd)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t data = 0;
mutex_lock(&adev->grbm_idx_mutex);
@@ -645,28 +621,24 @@ static int wave_control_execute_v10_3(struct kgd_dev *kgd,
return 0;
}
-static uint32_t address_watch_get_offset_v10_3(struct kgd_dev *kgd,
+static uint32_t address_watch_get_offset_v10_3(struct amdgpu_device *adev,
unsigned int watch_point_id,
unsigned int reg_offset)
{
return 0;
}
-static void set_vm_context_page_table_base_v10_3(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base)
+static void set_vm_context_page_table_base_v10_3(struct amdgpu_device *adev,
+ uint32_t vmid, uint64_t page_table_base)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
/* SDMA is on gfxhub as well for Navi1* series */
adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
}
-static void program_trap_handler_settings_v10_3(struct kgd_dev *kgd,
+static void program_trap_handler_settings_v10_3(struct amdgpu_device *adev,
uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
- lock_srbm(kgd, 0, 0, 0, vmid);
+ lock_srbm(adev, 0, 0, 0, vmid);
/*
* Program TBA registers
@@ -685,15 +657,14 @@ static void program_trap_handler_settings_v10_3(struct kgd_dev *kgd,
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
upper_32_bits(tma_addr >> 8));
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
#if 0
-uint32_t enable_debug_trap_v10_3(struct kgd_dev *kgd,
+uint32_t enable_debug_trap_v10_3(struct amdgpu_device *adev,
uint32_t trap_debug_wave_launch_mode,
uint32_t vmid)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t data = 0;
uint32_t orig_wave_cntl_value;
uint32_t orig_stall_vmid;
@@ -720,10 +691,8 @@ uint32_t enable_debug_trap_v10_3(struct kgd_dev *kgd,
return 0;
}
-uint32_t disable_debug_trap_v10_3(struct kgd_dev *kgd)
+uint32_t disable_debug_trap_v10_3(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
mutex_lock(&adev->grbm_idx_mutex);
WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
@@ -733,11 +702,10 @@ uint32_t disable_debug_trap_v10_3(struct kgd_dev *kgd)
return 0;
}
-uint32_t set_wave_launch_trap_override_v10_3(struct kgd_dev *kgd,
+uint32_t set_wave_launch_trap_override_v10_3(struct amdgpu_device *adev,
uint32_t trap_override,
uint32_t trap_mask)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t data = 0;
mutex_lock(&adev->grbm_idx_mutex);
@@ -762,11 +730,10 @@ uint32_t set_wave_launch_trap_override_v10_3(struct kgd_dev *kgd,
return 0;
}
-uint32_t set_wave_launch_mode_v10_3(struct kgd_dev *kgd,
+uint32_t set_wave_launch_mode_v10_3(struct amdgpu_device *adev,
uint8_t wave_launch_mode,
uint32_t vmid)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t data = 0;
bool is_stall_mode;
bool is_mode_set;
@@ -805,16 +772,14 @@ uint32_t set_wave_launch_mode_v10_3(struct kgd_dev *kgd,
* sem_rearm_wait_time -- Wait Count for Semaphore re-arm.
* deq_retry_wait_time -- Wait Count for Global Wave Syncs.
*/
-void get_iq_wait_times_v10_3(struct kgd_dev *kgd,
+void get_iq_wait_times_v10_3(struct amdgpu_device *adev,
uint32_t *wait_times)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
*wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2));
}
-void build_grace_period_packet_info_v10_3(struct kgd_dev *kgd,
+void build_grace_period_packet_info_v10_3(struct amdgpu_device *adev,
uint32_t wait_times,
uint32_t grace_period,
uint32_t *reg_offset,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index b91d27e39bad..36528dad7684 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -82,68 +82,54 @@ union TCP_WATCH_CNTL_BITS {
float f32All;
};
-static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
-{
- return (struct amdgpu_device *)kgd;
-}
-
-static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
uint32_t queue, uint32_t vmid)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
mutex_lock(&adev->srbm_mutex);
WREG32(mmSRBM_GFX_CNTL, value);
}
-static void unlock_srbm(struct kgd_dev *kgd)
+static void unlock_srbm(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
WREG32(mmSRBM_GFX_CNTL, 0);
mutex_unlock(&adev->srbm_mutex);
}
-static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
- lock_srbm(kgd, mec, pipe, queue_id, 0);
+ lock_srbm(adev, mec, pipe, queue_id, 0);
}
-static void release_queue(struct kgd_dev *kgd)
+static void release_queue(struct amdgpu_device *adev)
{
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
uint32_t sh_mem_config,
uint32_t sh_mem_ape1_base,
uint32_t sh_mem_ape1_limit,
uint32_t sh_mem_bases)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
- lock_srbm(kgd, 0, 0, 0, vmid);
+ lock_srbm(adev, 0, 0, 0, vmid);
WREG32(mmSH_MEM_CONFIG, sh_mem_config);
WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
WREG32(mmSH_MEM_BASES, sh_mem_bases);
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
+static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
unsigned int vmid)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
/*
* We have to assume that there is no outstanding mapping.
* The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
@@ -165,21 +151,20 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
return 0;
}
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
+static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t mec;
uint32_t pipe;
mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
- lock_srbm(kgd, mec, pipe, 0, 0);
+ lock_srbm(adev, mec, pipe, 0, 0);
WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
- unlock_srbm(kgd);
+ unlock_srbm(adev);
return 0;
}
@@ -207,12 +192,11 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
return (struct cik_sdma_rlc_registers *)mqd;
}
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm)
+static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t __user *wptr, uint32_t wptr_shift,
+ uint32_t wptr_mask, struct mm_struct *mm)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_mqd *m;
uint32_t *mqd_hqd;
uint32_t reg, wptr_val, data;
@@ -220,7 +204,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
m = get_mqd(mqd);
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
/* HQD registers extend from CP_MQD_BASE_ADDR to CP_MQD_CONTROL. */
mqd_hqd = &m->cp_mqd_base_addr_lo;
@@ -239,25 +223,24 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
* release srbm_mutex to avoid circular dependency between
* srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
*/
- release_queue(kgd);
+ release_queue(adev);
valid_wptr = read_user_wptr(mm, wptr, wptr_val);
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
if (valid_wptr)
WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
WREG32(mmCP_HQD_ACTIVE, data);
- release_queue(kgd);
+ release_queue(adev);
return 0;
}
-static int kgd_hqd_dump(struct kgd_dev *kgd,
+static int kgd_hqd_dump(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t i = 0, reg;
#define HQD_N_REGS (35+4)
#define DUMP_REG(addr) do { \
@@ -271,7 +254,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
if (*dump == NULL)
return -ENOMEM;
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
@@ -281,7 +264,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
DUMP_REG(reg);
- release_queue(kgd);
+ release_queue(adev);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
@@ -289,10 +272,9 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
return 0;
}
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
unsigned long end_jiffies;
uint32_t sdma_rlc_reg_offset;
@@ -345,11 +327,10 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
return 0;
}
-static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
uint32_t engine_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
uint32_t i = 0, reg;
@@ -372,15 +353,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
return 0;
}
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id)
+static bool kgd_hqd_is_occupied(struct amdgpu_device *adev,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t act;
bool retval = false;
uint32_t low, high;
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
act = RREG32(mmCP_HQD_ACTIVE);
if (act) {
low = lower_32_bits(queue_address >> 8);
@@ -390,13 +371,12 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
high == RREG32(mmCP_HQD_PQ_BASE_HI))
retval = true;
}
- release_queue(kgd);
+ release_queue(adev);
return retval;
}
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
@@ -412,12 +392,11 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
return false;
}
-static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd,
enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t temp;
enum hqd_dequeue_request_type type;
unsigned long flags, end_jiffies;
@@ -426,7 +405,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
if (amdgpu_in_reset(adev))
return -EIO;
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
switch (reset_type) {
@@ -504,20 +483,19 @@ loop:
break;
if (time_after(jiffies, end_jiffies)) {
pr_err("cp queue preemption time out\n");
- release_queue(kgd);
+ release_queue(adev);
return -ETIME;
}
usleep_range(500, 1000);
}
- release_queue(kgd);
+ release_queue(adev);
return 0;
}
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
unsigned int utimeout)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
uint32_t sdma_rlc_reg_offset;
uint32_t temp;
@@ -551,9 +529,8 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
return 0;
}
-static int kgd_address_watch_disable(struct kgd_dev *kgd)
+static int kgd_address_watch_disable(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
union TCP_WATCH_CNTL_BITS cntl;
unsigned int i;
@@ -571,13 +548,12 @@ static int kgd_address_watch_disable(struct kgd_dev *kgd)
return 0;
}
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
+static int kgd_address_watch_execute(struct amdgpu_device *adev,
unsigned int watch_point_id,
uint32_t cntl_val,
uint32_t addr_hi,
uint32_t addr_lo)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
union TCP_WATCH_CNTL_BITS cntl;
cntl.u32All = cntl_val;
@@ -602,11 +578,10 @@ static int kgd_address_watch_execute(struct kgd_dev *kgd,
return 0;
}
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
+static int kgd_wave_control_execute(struct amdgpu_device *adev,
uint32_t gfx_index_val,
uint32_t sq_cmd)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t data;
mutex_lock(&adev->grbm_idx_mutex);
@@ -627,18 +602,17 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd,
return 0;
}
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+static uint32_t kgd_address_watch_get_offset(struct amdgpu_device *adev,
unsigned int watch_point_id,
unsigned int reg_offset)
{
return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
}
-static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
uint8_t vmid, uint16_t *p_pasid)
{
uint32_t value;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
@@ -646,21 +620,17 @@ static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
-static void set_scratch_backing_va(struct kgd_dev *kgd,
+static void set_scratch_backing_va(struct amdgpu_device *adev,
uint64_t va, uint32_t vmid)
{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
- lock_srbm(kgd, 0, 0, 0, vmid);
+ lock_srbm(adev, 0, 0, 0, vmid);
WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
-static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base)
+static void set_vm_context_page_table_base(struct amdgpu_device *adev,
+ uint32_t vmid, uint64_t page_table_base)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
pr_err("trying to set page table base for wrong VMID\n");
return;
@@ -676,10 +646,8 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
* @vmid: vmid pointer
* read vmid from register (CIK).
*/
-static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd)
+static uint32_t read_vmid_from_vmfault_reg(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
uint32_t status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 5ce0ce704a21..52832cd69a93 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -39,68 +39,54 @@ enum hqd_dequeue_request_type {
RESET_WAVES
};
-static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
-{
- return (struct amdgpu_device *)kgd;
-}
-
-static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
uint32_t queue, uint32_t vmid)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
mutex_lock(&adev->srbm_mutex);
WREG32(mmSRBM_GFX_CNTL, value);
}
-static void unlock_srbm(struct kgd_dev *kgd)
+static void unlock_srbm(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
WREG32(mmSRBM_GFX_CNTL, 0);
mutex_unlock(&adev->srbm_mutex);
}
-static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
- lock_srbm(kgd, mec, pipe, queue_id, 0);
+ lock_srbm(adev, mec, pipe, queue_id, 0);
}
-static void release_queue(struct kgd_dev *kgd)
+static void release_queue(struct amdgpu_device *adev)
{
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
uint32_t sh_mem_config,
uint32_t sh_mem_ape1_base,
uint32_t sh_mem_ape1_limit,
uint32_t sh_mem_bases)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
- lock_srbm(kgd, 0, 0, 0, vmid);
+ lock_srbm(adev, 0, 0, 0, vmid);
WREG32(mmSH_MEM_CONFIG, sh_mem_config);
WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
WREG32(mmSH_MEM_BASES, sh_mem_bases);
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
+static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
unsigned int vmid)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
/*
* We have to assume that there is no outstanding mapping.
* The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
@@ -123,21 +109,20 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
return 0;
}
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
+static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t mec;
uint32_t pipe;
mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
- lock_srbm(kgd, mec, pipe, 0, 0);
+ lock_srbm(adev, mec, pipe, 0, 0);
WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
- unlock_srbm(kgd);
+ unlock_srbm(adev);
return 0;
}
@@ -165,12 +150,11 @@ static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
return (struct vi_sdma_mqd *)mqd;
}
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm)
+static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t __user *wptr, uint32_t wptr_shift,
+ uint32_t wptr_mask, struct mm_struct *mm)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct vi_mqd *m;
uint32_t *mqd_hqd;
uint32_t reg, wptr_val, data;
@@ -178,7 +162,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
m = get_mqd(mqd);
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
/* HIQ is set during driver init period with vmid set to 0*/
if (m->cp_hqd_vmid == 0) {
@@ -206,7 +190,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
* on ASICs that do not support context-save.
* EOP writes/reads can start anywhere in the ring.
*/
- if (get_amdgpu_device(kgd)->asic_type != CHIP_TONGA) {
+ if (adev->asic_type != CHIP_TONGA) {
WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
@@ -226,25 +210,24 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
* release srbm_mutex to avoid circular dependency between
* srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
*/
- release_queue(kgd);
+ release_queue(adev);
valid_wptr = read_user_wptr(mm, wptr, wptr_val);
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
if (valid_wptr)
WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
WREG32(mmCP_HQD_ACTIVE, data);
- release_queue(kgd);
+ release_queue(adev);
return 0;
}
-static int kgd_hqd_dump(struct kgd_dev *kgd,
+static int kgd_hqd_dump(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t i = 0, reg;
#define HQD_N_REGS (54+4)
#define DUMP_REG(addr) do { \
@@ -258,7 +241,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
if (*dump == NULL)
return -ENOMEM;
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
@@ -268,7 +251,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_DONES; reg++)
DUMP_REG(reg);
- release_queue(kgd);
+ release_queue(adev);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
@@ -276,10 +259,9 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
return 0;
}
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct vi_sdma_mqd *m;
unsigned long end_jiffies;
uint32_t sdma_rlc_reg_offset;
@@ -331,11 +313,10 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
return 0;
}
-static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
uint32_t engine_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
uint32_t i = 0, reg;
@@ -367,15 +348,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
return 0;
}
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id)
+static bool kgd_hqd_is_occupied(struct amdgpu_device *adev,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t act;
bool retval = false;
uint32_t low, high;
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
act = RREG32(mmCP_HQD_ACTIVE);
if (act) {
low = lower_32_bits(queue_address >> 8);
@@ -385,13 +366,12 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
high == RREG32(mmCP_HQD_PQ_BASE_HI))
retval = true;
}
- release_queue(kgd);
+ release_queue(adev);
return retval;
}
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct vi_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
@@ -407,12 +387,11 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
return false;
}
-static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd,
enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t temp;
enum hqd_dequeue_request_type type;
unsigned long flags, end_jiffies;
@@ -422,7 +401,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
if (amdgpu_in_reset(adev))
return -EIO;
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
if (m->cp_hqd_vmid == 0)
WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0);
@@ -502,20 +481,19 @@ loop:
break;
if (time_after(jiffies, end_jiffies)) {
pr_err("cp queue preemption time out.\n");
- release_queue(kgd);
+ release_queue(adev);
return -ETIME;
}
usleep_range(500, 1000);
}
- release_queue(kgd);
+ release_queue(adev);
return 0;
}
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
unsigned int utimeout)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct vi_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
uint32_t temp;
@@ -549,11 +527,10 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
return 0;
}
-static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
uint8_t vmid, uint16_t *p_pasid)
{
uint32_t value;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
@@ -561,12 +538,12 @@ static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
-static int kgd_address_watch_disable(struct kgd_dev *kgd)
+static int kgd_address_watch_disable(struct amdgpu_device *adev)
{
return 0;
}
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
+static int kgd_address_watch_execute(struct amdgpu_device *adev,
unsigned int watch_point_id,
uint32_t cntl_val,
uint32_t addr_hi,
@@ -575,11 +552,10 @@ static int kgd_address_watch_execute(struct kgd_dev *kgd,
return 0;
}
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
+static int kgd_wave_control_execute(struct amdgpu_device *adev,
uint32_t gfx_index_val,
uint32_t sq_cmd)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t data = 0;
mutex_lock(&adev->grbm_idx_mutex);
@@ -600,28 +576,24 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd,
return 0;
}
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+static uint32_t kgd_address_watch_get_offset(struct amdgpu_device *adev,
unsigned int watch_point_id,
unsigned int reg_offset)
{
return 0;
}
-static void set_scratch_backing_va(struct kgd_dev *kgd,
+static void set_scratch_backing_va(struct amdgpu_device *adev,
uint64_t va, uint32_t vmid)
{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
- lock_srbm(kgd, 0, 0, 0, vmid);
+ lock_srbm(adev, 0, 0, 0, vmid);
WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
-static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base)
+static void set_vm_context_page_table_base(struct amdgpu_device *adev,
+ uint32_t vmid, uint64_t page_table_base)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
pr_err("trying to set page table base for wrong VMID\n");
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index bcc1cbeb8799..1abf662a0e91 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -46,37 +46,26 @@ enum hqd_dequeue_request_type {
SAVE_WAVES
};
-static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
-{
- return (struct amdgpu_device *)kgd;
-}
-
-static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
uint32_t queue, uint32_t vmid)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
mutex_lock(&adev->srbm_mutex);
soc15_grbm_select(adev, mec, pipe, queue, vmid);
}
-static void unlock_srbm(struct kgd_dev *kgd)
+static void unlock_srbm(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
soc15_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
-static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
- lock_srbm(kgd, mec, pipe, queue_id, 0);
+ lock_srbm(adev, mec, pipe, queue_id, 0);
}
static uint64_t get_queue_mask(struct amdgpu_device *adev,
@@ -88,33 +77,29 @@ static uint64_t get_queue_mask(struct amdgpu_device *adev,
return 1ull << bit;
}
-static void release_queue(struct kgd_dev *kgd)
+static void release_queue(struct amdgpu_device *adev)
{
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
-void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
uint32_t sh_mem_config,
uint32_t sh_mem_ape1_base,
uint32_t sh_mem_ape1_limit,
uint32_t sh_mem_bases)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
- lock_srbm(kgd, 0, 0, 0, vmid);
+ lock_srbm(adev, 0, 0, 0, vmid);
WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
/* APE1 no longer exists on GFX9 */
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
-int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
+int kgd_gfx_v9_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
unsigned int vmid)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
/*
* We have to assume that there is no outstanding mapping.
* The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
@@ -171,22 +156,21 @@ int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
* but still works
*/
-int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
+int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t mec;
uint32_t pipe;
mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
- lock_srbm(kgd, mec, pipe, 0, 0);
+ lock_srbm(adev, mec, pipe, 0, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL),
+ WREG32_SOC15(GC, 0, mmCPC_INT_CNTL,
CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
- unlock_srbm(kgd);
+ unlock_srbm(adev);
return 0;
}
@@ -233,19 +217,18 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
return (struct v9_sdma_mqd *)mqd;
}
-int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm)
+int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t __user *wptr, uint32_t wptr_shift,
+ uint32_t wptr_mask, struct mm_struct *mm)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_mqd *m;
uint32_t *mqd_hqd;
uint32_t reg, hqd_base, data;
m = get_mqd(mqd);
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
mqd_hqd = &m->cp_mqd_base_addr_lo;
@@ -296,7 +279,7 @@ int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
lower_32_bits((uintptr_t)wptr));
WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
upper_32_bits((uintptr_t)wptr));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
+ WREG32_SOC15(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1,
(uint32_t)get_queue_mask(adev, pipe_id, queue_id));
}
@@ -308,16 +291,15 @@ int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data);
- release_queue(kgd);
+ release_queue(adev);
return 0;
}
-int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
+int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t doorbell_off)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
struct v9_mqd *m;
uint32_t mec, pipe;
@@ -325,7 +307,7 @@ int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
m = get_mqd(mqd);
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
@@ -361,16 +343,15 @@ int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
out_unlock:
spin_unlock(&adev->gfx.kiq.ring_lock);
- release_queue(kgd);
+ release_queue(adev);
return r;
}
-int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd,
+int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t i = 0, reg;
#define HQD_N_REGS 56
#define DUMP_REG(addr) do { \
@@ -384,13 +365,13 @@ int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd,
if (*dump == NULL)
return -ENOMEM;
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
DUMP_REG(reg);
- release_queue(kgd);
+ release_queue(adev);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
@@ -398,10 +379,9 @@ int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd,
return 0;
}
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
unsigned long end_jiffies;
@@ -468,11 +448,10 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
return 0;
}
-static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
uint32_t engine_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
engine_id, queue_id);
uint32_t i = 0, reg;
@@ -500,31 +479,30 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
return 0;
}
-bool kgd_gfx_v9_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id)
+bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t act;
bool retval = false;
uint32_t low, high;
- acquire_queue(kgd, pipe_id, queue_id);
- act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
+ acquire_queue(adev, pipe_id, queue_id);
+ act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
if (act) {
low = lower_32_bits(queue_address >> 8);
high = upper_32_bits(queue_address >> 8);
- if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) &&
- high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI)))
+ if (low == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE) &&
+ high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI))
retval = true;
}
- release_queue(kgd);
+ release_queue(adev);
return retval;
}
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
@@ -541,12 +519,11 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
return false;
}
-int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd,
enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
enum hqd_dequeue_request_type type;
unsigned long end_jiffies;
uint32_t temp;
@@ -555,7 +532,7 @@ int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd,
if (amdgpu_in_reset(adev))
return -EIO;
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
if (m->cp_hqd_vmid == 0)
WREG32_FIELD15_RLC(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);
@@ -579,25 +556,24 @@ int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd,
end_jiffies = (utimeout * HZ / 1000) + jiffies;
while (true) {
- temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
+ temp = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
break;
if (time_after(jiffies, end_jiffies)) {
pr_err("cp queue preemption time out.\n");
- release_queue(kgd);
+ release_queue(adev);
return -ETIME;
}
usleep_range(500, 1000);
}
- release_queue(kgd);
+ release_queue(adev);
return 0;
}
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
unsigned int utimeout)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
uint32_t temp;
@@ -634,11 +610,10 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
return 0;
}
-bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
uint8_t vmid, uint16_t *p_pasid)
{
uint32_t value;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ vmid);
@@ -647,12 +622,12 @@ bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
-int kgd_gfx_v9_address_watch_disable(struct kgd_dev *kgd)
+int kgd_gfx_v9_address_watch_disable(struct amdgpu_device *adev)
{
return 0;
}
-int kgd_gfx_v9_address_watch_execute(struct kgd_dev *kgd,
+int kgd_gfx_v9_address_watch_execute(struct amdgpu_device *adev,
unsigned int watch_point_id,
uint32_t cntl_val,
uint32_t addr_hi,
@@ -661,17 +636,16 @@ int kgd_gfx_v9_address_watch_execute(struct kgd_dev *kgd,
return 0;
}
-int kgd_gfx_v9_wave_control_execute(struct kgd_dev *kgd,
+int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev,
uint32_t gfx_index_val,
uint32_t sq_cmd)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t data = 0;
mutex_lock(&adev->grbm_idx_mutex);
WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, gfx_index_val);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd);
+ WREG32_SOC15(GC, 0, mmSQ_CMD, sq_cmd);
data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
INSTANCE_BROADCAST_WRITES, 1);
@@ -686,18 +660,16 @@ int kgd_gfx_v9_wave_control_execute(struct kgd_dev *kgd,
return 0;
}
-uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
+uint32_t kgd_gfx_v9_address_watch_get_offset(struct amdgpu_device *adev,
unsigned int watch_point_id,
unsigned int reg_offset)
{
return 0;
}
-void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
+void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev,
uint32_t vmid, uint64_t page_table_base)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
pr_err("trying to set page table base for wrong VMID %u\n",
vmid);
@@ -750,7 +722,7 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
pipe_idx = queue_idx / adev->gfx.mec.num_queue_per_pipe;
queue_slot = queue_idx % adev->gfx.mec.num_queue_per_pipe;
soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0);
- reg_val = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_CSQ_WF_ACTIVE_COUNT_0) +
+ reg_val = RREG32_SOC15_IP(GC, SOC15_REG_OFFSET(GC, 0, mmSPI_CSQ_WF_ACTIVE_COUNT_0) +
queue_slot);
*wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK;
if (*wave_cnt != 0)
@@ -804,7 +776,7 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
*
* Reading registers referenced above involves programming GRBM appropriately
*/
-void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid,
+void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
int *pasid_wave_cnt, int *max_waves_per_cu)
{
int qidx;
@@ -818,10 +790,8 @@ void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid,
int pasid_tmp;
int max_queue_cnt;
int vmid_wave_cnt = 0;
- struct amdgpu_device *adev;
DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES);
- adev = get_amdgpu_device(kgd);
lock_spi_csq_mutexes(adev);
soc15_grbm_select(adev, 1, 0, 0, 0);
@@ -839,8 +809,7 @@ void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid,
for (sh_idx = 0; sh_idx < sh_cnt; sh_idx++) {
gfx_v9_0_select_se_sh(adev, se_idx, sh_idx, 0xffffffff);
- queue_map = RREG32(SOC15_REG_OFFSET(GC, 0,
- mmSPI_CSQ_WF_ACTIVE_STATUS));
+ queue_map = RREG32_SOC15(GC, 0, mmSPI_CSQ_WF_ACTIVE_STATUS);
/*
* Assumption: queue map encodes following schema: four
@@ -882,30 +851,28 @@ void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid,
adev->gfx.cu_info.max_waves_per_simd;
}
-void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd,
+void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev,
uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
- lock_srbm(kgd, 0, 0, 0, vmid);
+ lock_srbm(adev, 0, 0, 0, vmid);
/*
* Program TBA registers
*/
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO),
+ WREG32_SOC15(GC, 0, mmSQ_SHADER_TBA_LO,
lower_32_bits(tba_addr >> 8));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI),
+ WREG32_SOC15(GC, 0, mmSQ_SHADER_TBA_HI,
upper_32_bits(tba_addr >> 8));
/*
* Program TMA registers
*/
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO),
+ WREG32_SOC15(GC, 0, mmSQ_SHADER_TMA_LO,
lower_32_bits(tma_addr >> 8));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
+ WREG32_SOC15(GC, 0, mmSQ_SHADER_TMA_HI,
upper_32_bits(tma_addr >> 8));
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index c63591106879..24be49df26fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -22,48 +22,49 @@
-void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
uint32_t sh_mem_config,
uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
uint32_t sh_mem_bases);
-int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
+int kgd_gfx_v9_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
unsigned int vmid);
-int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
-int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id);
+int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr,
uint32_t wptr_shift, uint32_t wptr_mask,
struct mm_struct *mm);
-int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
+int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t doorbell_off);
-int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd,
+int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs);
-bool kgd_gfx_v9_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id);
-int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id);
+int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd,
enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id);
-int kgd_gfx_v9_address_watch_disable(struct kgd_dev *kgd);
-int kgd_gfx_v9_address_watch_execute(struct kgd_dev *kgd,
+int kgd_gfx_v9_address_watch_disable(struct amdgpu_device *adev);
+int kgd_gfx_v9_address_watch_execute(struct amdgpu_device *adev,
unsigned int watch_point_id,
uint32_t cntl_val,
uint32_t addr_hi,
uint32_t addr_lo);
-int kgd_gfx_v9_wave_control_execute(struct kgd_dev *kgd,
+int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev,
uint32_t gfx_index_val,
uint32_t sq_cmd);
-uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
+uint32_t kgd_gfx_v9_address_watch_get_offset(struct amdgpu_device *adev,
unsigned int watch_point_id,
unsigned int reg_offset);
-bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
uint8_t vmid, uint16_t *p_pasid);
-void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
+void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev,
uint32_t vmid, uint64_t page_table_base);
-void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid,
+void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
int *pasid_wave_cnt, int *max_waves_per_cu);
-void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd,
+void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev,
uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 71acd577803e..f9bab963a948 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -60,12 +60,6 @@ static const char * const domain_bit_to_string[] = {
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
-
-static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
-{
- return (struct amdgpu_device *)kgd;
-}
-
static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
struct kgd_mem *mem)
{
@@ -126,8 +120,19 @@ static size_t amdgpu_amdkfd_acc_size(uint64_t size)
PAGE_ALIGN(size);
}
+/**
+ * amdgpu_amdkfd_reserve_mem_limit() - Decrease available memory by size
+ * of buffer including any reserved for control structures
+ *
+ * @adev: Device to which the allocated BO belongs
+ * @size: Size of buffer, in bytes, encapsulated by the BO. This should be
+ * equivalent to amdgpu_bo_size(BO)
+ * @alloc_flag: Flag used in allocating the BO
+ *
+ * Return: -ENOMEM on error, 0 otherwise
+ */
static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
- uint64_t size, u32 domain, bool sg)
+ uint64_t size, u32 alloc_flag)
{
uint64_t reserved_for_pt =
ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
@@ -137,20 +142,24 @@ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
acc_size = amdgpu_amdkfd_acc_size(size);
vram_needed = 0;
- if (domain == AMDGPU_GEM_DOMAIN_GTT) {
- /* TTM GTT memory */
+ if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
system_mem_needed = acc_size + size;
ttm_mem_needed = acc_size + size;
- } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
- /* Userptr */
+ } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
+ system_mem_needed = acc_size;
+ ttm_mem_needed = acc_size;
+ vram_needed = size;
+ } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
system_mem_needed = acc_size + size;
ttm_mem_needed = acc_size;
- } else {
- /* VRAM and SG */
+ } else if (alloc_flag &
+ (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
+ KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
system_mem_needed = acc_size;
ttm_mem_needed = acc_size;
- if (domain == AMDGPU_GEM_DOMAIN_VRAM)
- vram_needed = size;
+ } else {
+ pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
+ return -ENOMEM;
}
spin_lock(&kfd_mem_limit.mem_limit_lock);
@@ -166,64 +175,72 @@ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
(adev->kfd.vram_used + vram_needed >
adev->gmc.real_vram_size - reserved_for_pt)) {
ret = -ENOMEM;
- } else {
- kfd_mem_limit.system_mem_used += system_mem_needed;
- kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
- adev->kfd.vram_used += vram_needed;
+ goto release;
}
+ /* Update memory accounting by decreasing available system
+ * memory, TTM memory and GPU memory as computed above
+ */
+ adev->kfd.vram_used += vram_needed;
+ kfd_mem_limit.system_mem_used += system_mem_needed;
+ kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
+
+release:
spin_unlock(&kfd_mem_limit.mem_limit_lock);
return ret;
}
static void unreserve_mem_limit(struct amdgpu_device *adev,
- uint64_t size, u32 domain, bool sg)
+ uint64_t size, u32 alloc_flag)
{
size_t acc_size;
acc_size = amdgpu_amdkfd_acc_size(size);
spin_lock(&kfd_mem_limit.mem_limit_lock);
- if (domain == AMDGPU_GEM_DOMAIN_GTT) {
+
+ if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
kfd_mem_limit.system_mem_used -= (acc_size + size);
kfd_mem_limit.ttm_mem_used -= (acc_size + size);
- } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
+ } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
+ kfd_mem_limit.system_mem_used -= acc_size;
+ kfd_mem_limit.ttm_mem_used -= acc_size;
+ adev->kfd.vram_used -= size;
+ } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
kfd_mem_limit.system_mem_used -= (acc_size + size);
kfd_mem_limit.ttm_mem_used -= acc_size;
- } else {
+ } else if (alloc_flag &
+ (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
+ KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
kfd_mem_limit.system_mem_used -= acc_size;
kfd_mem_limit.ttm_mem_used -= acc_size;
- if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
- adev->kfd.vram_used -= size;
- WARN_ONCE(adev->kfd.vram_used < 0,
- "kfd VRAM memory accounting unbalanced");
- }
+ } else {
+ pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
+ goto release;
}
- WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
- "kfd system memory accounting unbalanced");
+
+ WARN_ONCE(adev->kfd.vram_used < 0,
+ "KFD VRAM memory accounting unbalanced");
WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
- "kfd TTM memory accounting unbalanced");
+ "KFD TTM memory accounting unbalanced");
+ WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
+ "KFD system memory accounting unbalanced");
+release:
spin_unlock(&kfd_mem_limit.mem_limit_lock);
}
void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- u32 domain = bo->preferred_domains;
- bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
-
- if (bo->flags & AMDGPU_AMDKFD_CREATE_USERPTR_BO) {
- domain = AMDGPU_GEM_DOMAIN_CPU;
- sg = false;
- }
+ u32 alloc_flags = bo->kfd_bo->alloc_flags;
+ u64 size = amdgpu_bo_size(bo);
- unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
+ unreserve_mem_limit(adev, size, alloc_flags);
kfree(bo->kfd_bo);
}
-
/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
* reservation object.
*
@@ -646,12 +663,6 @@ kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
if (IS_ERR(gobj))
return PTR_ERR(gobj);
- /* Import takes an extra reference on the dmabuf. Drop it now to
- * avoid leaking it. We only need the one reference in
- * kgd_mem->dmabuf.
- */
- dma_buf_put(mem->dmabuf);
-
*bo = gem_to_amdgpu_bo(gobj);
(*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
(*bo)->parent = amdgpu_bo_ref(mem->bo);
@@ -697,10 +708,12 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
va + bo_size, vm);
- if (adev == bo_adev || (mem->domain == AMDGPU_GEM_DOMAIN_VRAM &&
- amdgpu_xgmi_same_hive(adev, bo_adev))) {
- /* Mappings on the local GPU and VRAM mappings in the
- * local hive share the original BO
+ if (adev == bo_adev ||
+ (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && adev->ram_is_direct_mapped) ||
+ (mem->domain == AMDGPU_GEM_DOMAIN_VRAM && amdgpu_xgmi_same_hive(adev, bo_adev))) {
+ /* Mappings on the local GPU, or VRAM mappings in the
+ * local hive, or userptr mappings in IOMMU direct-map mode
+ * share the original BO
*/
attachment[i]->type = KFD_MEM_ATT_SHARED;
bo[i] = mem->bo;
@@ -1278,12 +1291,60 @@ create_evict_fence_fail:
return ret;
}
-int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
+/**
+ * amdgpu_amdkfd_gpuvm_pin_bo() - Pins a BO using the following criteria
+ * @bo: Handle of buffer object being pinned
+ * @domain: Domain into which BO should be pinned
+ *
+ * - USERPTR BOs are UNPINNABLE and will return error
+ * - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
+ * PIN count incremented. It is valid to PIN a BO multiple times
+ *
+ * Return: Zero if pinning succeeded, non-zero in case of error.
+ */
+static int amdgpu_amdkfd_gpuvm_pin_bo(struct amdgpu_bo *bo, u32 domain)
+{
+ int ret = 0;
+
+ ret = amdgpu_bo_reserve(bo, false);
+ if (unlikely(ret))
+ return ret;
+
+ ret = amdgpu_bo_pin_restricted(bo, domain, 0, 0);
+ if (ret)
+ pr_err("Error in Pinning BO to domain: %d\n", domain);
+
+ amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
+ amdgpu_bo_unreserve(bo);
+
+ return ret;
+}
+
+/**
+ * amdgpu_amdkfd_gpuvm_unpin_bo() - Unpins a BO using the following criteria
+ * @bo: Handle of buffer object being unpinned
+ *
+ * - Is an illegal request for USERPTR BOs and is ignored
+ * - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
+ * PIN count decremented. Calls to UNPIN must balance calls to PIN
+ */
+static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo)
+{
+ int ret = 0;
+
+ ret = amdgpu_bo_reserve(bo, false);
+ if (unlikely(ret))
+ return;
+
+ amdgpu_bo_unpin(bo);
+ amdgpu_bo_unreserve(bo);
+}
+
+int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
struct file *filp, u32 pasid,
void **process_info,
struct dma_fence **ef)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct amdgpu_fpriv *drv_priv;
struct amdgpu_vm *avm;
int ret;
@@ -1359,12 +1420,12 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
}
}
-void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv)
+void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
+ void *drm_priv)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct amdgpu_vm *avm;
- if (WARN_ON(!kgd || !drm_priv))
+ if (WARN_ON(!adev || !drm_priv))
return;
avm = drm_priv_to_vm(drm_priv);
@@ -1392,17 +1453,16 @@ uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
}
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
- struct kgd_dev *kgd, uint64_t va, uint64_t size,
+ struct amdgpu_device *adev, uint64_t va, uint64_t size,
void *drm_priv, struct kgd_mem **mem,
uint64_t *offset, uint32_t flags)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
enum ttm_bo_type bo_type = ttm_bo_type_device;
struct sg_table *sg = NULL;
uint64_t user_addr = 0;
struct amdgpu_bo *bo;
- struct drm_gem_object *gobj;
+ struct drm_gem_object *gobj = NULL;
u32 domain, alloc_domain;
u64 alloc_flags;
int ret;
@@ -1460,7 +1520,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
amdgpu_sync_create(&(*mem)->sync);
- ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
+ ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, flags);
if (ret) {
pr_debug("Insufficient memory\n");
goto err_reserve_limit;
@@ -1501,6 +1561,15 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
ret = init_user_pages(*mem, user_addr);
if (ret)
goto allocate_init_user_pages_failed;
+ } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
+ KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
+ ret = amdgpu_amdkfd_gpuvm_pin_bo(bo, AMDGPU_GEM_DOMAIN_GTT);
+ if (ret) {
+ pr_err("Pinning MMIO/DOORBELL BO during ALLOC FAILED\n");
+ goto err_pin_bo;
+ }
+ bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
+ bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
}
if (offset)
@@ -1509,17 +1578,20 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
return 0;
allocate_init_user_pages_failed:
+err_pin_bo:
remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
drm_vma_node_revoke(&gobj->vma_node, drm_priv);
err_node_allow:
- drm_gem_object_put(gobj);
/* Don't unreserve system mem limit twice */
goto err_reserve_limit;
err_bo_create:
- unreserve_mem_limit(adev, size, alloc_domain, !!sg);
+ unreserve_mem_limit(adev, size, flags);
err_reserve_limit:
mutex_destroy(&(*mem)->lock);
- kfree(*mem);
+ if (gobj)
+ drm_gem_object_put(gobj);
+ else
+ kfree(*mem);
err:
if (sg) {
sg_free_table(sg);
@@ -1529,7 +1601,7 @@ err:
}
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
- struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
+ struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
uint64_t *size)
{
struct amdkfd_process_info *process_info = mem->process_info;
@@ -1542,6 +1614,14 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
bool is_imported = false;
mutex_lock(&mem->lock);
+
+ /* Unpin MMIO/DOORBELL BOs that were pinned during allocation */
+ if (mem->alloc_flags &
+ (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
+ KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
+ amdgpu_amdkfd_gpuvm_unpin_bo(mem->bo);
+ }
+
mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
is_imported = mem->is_imported;
mutex_unlock(&mem->lock);
@@ -1621,10 +1701,9 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
}
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
- struct kgd_dev *kgd, struct kgd_mem *mem,
+ struct amdgpu_device *adev, struct kgd_mem *mem,
void *drm_priv, bool *table_freed)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
int ret;
struct amdgpu_bo *bo;
@@ -1751,7 +1830,7 @@ out:
}
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
- struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
+ struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv)
{
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
struct amdkfd_process_info *process_info = avm->process_info;
@@ -1812,7 +1891,7 @@ out:
}
int amdgpu_amdkfd_gpuvm_sync_memory(
- struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
+ struct amdgpu_device *adev, struct kgd_mem *mem, bool intr)
{
struct amdgpu_sync sync;
int ret;
@@ -1828,7 +1907,7 @@ int amdgpu_amdkfd_gpuvm_sync_memory(
return ret;
}
-int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
+int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct amdgpu_device *adev,
struct kgd_mem *mem, void **kptr, uint64_t *size)
{
int ret;
@@ -1884,7 +1963,8 @@ bo_reserve_failed:
return ret;
}
-void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_dev *kgd, struct kgd_mem *mem)
+void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct amdgpu_device *adev,
+ struct kgd_mem *mem)
{
struct amdgpu_bo *bo = mem->bo;
@@ -1894,12 +1974,9 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_dev *kgd, struct kg
amdgpu_bo_unreserve(bo);
}
-int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
- struct kfd_vm_fault_info *mem)
+int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
+ struct kfd_vm_fault_info *mem)
{
- struct amdgpu_device *adev;
-
- adev = (struct amdgpu_device *)kgd;
if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
*mem = *adev->gmc.vm_fault_info;
mb();
@@ -1908,13 +1985,12 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
return 0;
}
-int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
+int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
struct dma_buf *dma_buf,
uint64_t va, void *drm_priv,
struct kgd_mem **mem, uint64_t *size,
uint64_t *mmap_offset)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
struct drm_gem_object *obj;
struct amdgpu_bo *bo;
@@ -2541,11 +2617,9 @@ int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
}
/* Returns GPU-specific tiling mode information */
-int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
+int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
struct tile_config *config)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
config->gb_addr_config = adev->gfx.config.gb_addr_config;
config->tile_config_ptr = adev->gfx.config.tile_mode_array;
config->num_tile_configs =
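
The reserve and unreserve hunks above switch the KFD memory-limit bookkeeping from the TTM placement domain to the allocation flags passed in from user space, so GTT, VRAM, userptr and doorbell/MMIO buffers each get their own accounting rule and anything else is rejected. A minimal standalone C model of that flag-driven accounting is sketched below; the flag values, the acc_size term and the struct are placeholders, not the kernel definitions:

#include <stdint.h>
#include <stdio.h>

#define ALLOC_FLAG_VRAM     (1u << 0)   /* placeholder for KFD_IOC_ALLOC_MEM_FLAGS_VRAM */
#define ALLOC_FLAG_GTT      (1u << 1)
#define ALLOC_FLAG_USERPTR  (1u << 2)
#define ALLOC_FLAG_DOORBELL (1u << 3)
#define ALLOC_FLAG_MMIO     (1u << 4)

struct mem_limits {
	int64_t system_used;
	int64_t ttm_used;
	int64_t vram_used;
};

/* Returns 0 on success, -1 for an unknown flag combination. */
static int reserve(struct mem_limits *l, uint64_t size, uint64_t acc, uint32_t flags)
{
	if (flags & ALLOC_FLAG_GTT) {
		l->system_used += acc + size;
		l->ttm_used += acc + size;
	} else if (flags & ALLOC_FLAG_VRAM) {
		l->system_used += acc;
		l->ttm_used += acc;
		l->vram_used += size;
	} else if (flags & ALLOC_FLAG_USERPTR) {
		l->system_used += acc + size;
		l->ttm_used += acc;
	} else if (flags & (ALLOC_FLAG_DOORBELL | ALLOC_FLAG_MMIO)) {
		l->system_used += acc;
		l->ttm_used += acc;
	} else {
		return -1;
	}
	return 0;
}

int main(void)
{
	struct mem_limits l = { 0 };

	reserve(&l, 1 << 20, 4096, ALLOC_FLAG_VRAM);
	printf("system %lld ttm %lld vram %lld\n",
	       (long long)l.system_used, (long long)l.ttm_used, (long long)l.vram_used);
	return 0;
}

The unreserve path is simply the mirror image, subtracting the same deltas under the same lock, which is why the WARN_ONCE checks for negative counters now cover VRAM, TTM and system memory alike.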
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 96b7bb13a2dd..12a6b1c99c93 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1569,6 +1569,18 @@ void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
WREG32(adev->bios_scratch_reg_offset + 3, tmp);
}
+void amdgpu_atombios_scratch_regs_set_backlight_level(struct amdgpu_device *adev,
+ u32 backlight_level)
+{
+ u32 tmp = RREG32(adev->bios_scratch_reg_offset + 2);
+
+ tmp &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
+ tmp |= (backlight_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) &
+ ATOM_S2_CURRENT_BL_LEVEL_MASK;
+
+ WREG32(adev->bios_scratch_reg_offset + 2, tmp);
+}
+
bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev)
{
u32 tmp = RREG32(adev->bios_scratch_reg_offset + 7);
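
amdgpu_atombios_scratch_regs_set_backlight_level() above is a plain read-modify-write of one bitfield in a BIOS scratch register. The standalone sketch below shows the same mask-and-shift pattern; the mask, shift and register variable are invented stand-ins rather than the ATOM_S2_* definitions:

#include <stdint.h>
#include <stdio.h>

#define BL_LEVEL_MASK  0x0000ff00u   /* placeholder, not ATOM_S2_CURRENT_BL_LEVEL_MASK */
#define BL_LEVEL_SHIFT 8

static uint32_t scratch_reg;         /* stands in for the MMIO scratch register */

static void set_backlight_level(uint32_t level)
{
	uint32_t tmp = scratch_reg;                        /* read */

	tmp &= ~BL_LEVEL_MASK;                             /* clear the field */
	tmp |= (level << BL_LEVEL_SHIFT) & BL_LEVEL_MASK;  /* insert the new value */
	scratch_reg = tmp;                                 /* write back */
}

int main(void)
{
	scratch_reg = 0xdeadbe00u;
	set_backlight_level(0x7f);
	printf("scratch = 0x%08x\n", scratch_reg);
	return 0;
}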
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 8cc0222dba19..27e74b1fc260 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -185,6 +185,8 @@ bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock);
void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
bool hung);
+void amdgpu_atombios_scratch_regs_set_backlight_level(struct amdgpu_device *adev,
+ u32 backlight_level);
bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev);
void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 97178b307ed6..4d4ddf026faf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -470,8 +470,8 @@ bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *ade
/**
* amdgpu_atomfirmware_ras_rom_addr -- Get the RAS EEPROM addr from VBIOS
- * adev: amdgpu_device pointer
- * i2c_address: pointer to u8; if not NULL, will contain
+ * @adev: amdgpu_device pointer
+ * @i2c_address: pointer to u8; if not NULL, will contain
* the RAS EEPROM address if the function returns true
*
* Return true if VBIOS supports RAS EEPROM address reporting,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 7abe9500c0c6..d6d986be906a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -11,6 +11,7 @@
#include <linux/pci.h>
#include <linux/delay.h>
+#include "amdgpu.h"
#include "amd_acpi.h"
#define AMDGPU_PX_QUIRK_FORCE_ATPX (1 << 0)
@@ -165,7 +166,7 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
}
/**
- * amdgpu_atpx_validate_functions - validate ATPX functions
+ * amdgpu_atpx_validate - validate ATPX functions
*
* @atpx: amdgpu atpx struct
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 0de66f59adb8..c16a2704ced6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -108,7 +108,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB:
if (amdgpu_connector->use_digital) {
- if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+ if (connector->display_info.is_hdmi) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
@@ -116,7 +116,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
break;
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
- if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+ if (connector->display_info.is_hdmi) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
@@ -125,7 +125,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
dig_connector = amdgpu_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) ||
- drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+ connector->display_info.is_hdmi) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
@@ -149,7 +149,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
break;
}
- if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+ if (connector->display_info.is_hdmi) {
/*
* Pre DCE-8 hw can't handle > 12 bpc, and more than 12 bpc doesn't make
* much sense without support for > 12 bpc framebuffers. RGB 4:4:4 at
@@ -315,8 +315,10 @@ static void amdgpu_connector_get_edid(struct drm_connector *connector)
if (!amdgpu_connector->edid) {
/* some laptops provide a hardcoded edid in rom for LCDs */
if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) ||
- (connector->connector_type == DRM_MODE_CONNECTOR_eDP)))
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP))) {
amdgpu_connector->edid = amdgpu_connector_get_hardcoded_edid(adev);
+ drm_connector_update_edid_property(connector, amdgpu_connector->edid);
+ }
}
}
@@ -326,6 +328,7 @@ static void amdgpu_connector_free_edid(struct drm_connector *connector)
kfree(amdgpu_connector->edid);
amdgpu_connector->edid = NULL;
+ drm_connector_update_edid_property(connector, NULL);
}
static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector)
@@ -387,6 +390,9 @@ amdgpu_connector_lcd_native_mode(struct drm_encoder *encoder)
native_mode->vdisplay != 0 &&
native_mode->clock != 0) {
mode = drm_mode_duplicate(dev, native_mode);
+ if (!mode)
+ return NULL;
+
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
drm_mode_set_name(mode);
@@ -401,6 +407,9 @@ amdgpu_connector_lcd_native_mode(struct drm_encoder *encoder)
* simpler.
*/
mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
+ if (!mode)
+ return NULL;
+
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name);
}
@@ -1171,7 +1180,7 @@ static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector
(amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
(amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B)) {
return MODE_OK;
- } else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+ } else if (connector->display_info.is_hdmi) {
/* HDMI 1.3+ supports max clock of 340 Mhz */
if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
@@ -1463,7 +1472,7 @@ static enum drm_mode_status amdgpu_connector_dp_mode_valid(struct drm_connector
(amdgpu_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
return amdgpu_atombios_dp_mode_valid_helper(connector, mode);
} else {
- if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+ if (connector->display_info.is_hdmi) {
/* HDMI 1.3+ supports max clock of 340 Mhz */
if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
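
The connector changes above replace repeated drm_detect_hdmi_monitor() EDID parsing with the display_info.is_hdmi flag that is cached when the EDID is parsed, and add NULL checks after mode duplication. A small standalone C sketch of the parse-once, cache-the-capability idea follows; the structures and the toy EDID heuristic are illustrative only:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct display_info {
	bool is_hdmi;
	int bpc;
};

/* Parse the (fake) EDID once and cache the derived capabilities. */
static void parse_edid(const uint8_t *edid, size_t len, struct display_info *info)
{
	/* toy heuristic: a real parser would walk the CEA extension blocks */
	info->is_hdmi = len > 128 && edid[128] == 0x02;
	info->bpc = 8;
}

static int monitor_bpc(const struct display_info *info)
{
	/* consumers just read the cached flag instead of re-parsing the EDID */
	return info->is_hdmi && info->bpc ? info->bpc : 8;
}

int main(void)
{
	uint8_t edid[256] = { [128] = 0x02 };
	struct display_info info;

	parse_edid(edid, sizeof(edid), &info);
	printf("hdmi=%d bpc=%d\n", info.is_hdmi, monitor_bpc(&info));
	return 0;
}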
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 164d6a9e9fbb..25e2e5bf90eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1618,6 +1618,9 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
if (!debugfs_initialized())
return 0;
+ debugfs_create_x32("amdgpu_smu_debug", 0600, root,
+ &adev->pm.smu_debug_mask);
+
ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
&fops_ib_preempt);
if (IS_ERR(ent)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 188accb71249..a8b08a72b71b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -30,6 +30,7 @@
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>
+#include <linux/iommu.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
@@ -331,7 +332,7 @@ void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
}
/**
- * amdgpu_device_vram_access - access vram by vram aperature
+ * amdgpu_device_aper_access - access vram by vram aperture
*
* @adev: amdgpu_device pointer
* @pos: offset of the buffer in vram
@@ -550,11 +551,11 @@ void amdgpu_device_wreg(struct amdgpu_device *adev,
trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}
-/*
+/**
* amdgpu_mm_wreg_mmio_rlc - write register either with mmio or with RLC path if in range
*
* this function is invoked only the debugfs register access
- * */
+ */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
uint32_t reg, uint32_t v)
{
@@ -1100,7 +1101,7 @@ static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
}
/**
- * amdgpu_device_wb_init- Init Writeback driver info and allocate memory
+ * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
*
* @adev: amdgpu_device pointer
*
@@ -2316,6 +2317,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
/* need to do gmc hw init early so we can allocate gpu mem */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+ /* Try to reserve bad pages early */
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_exchange_data(adev);
+
r = amdgpu_device_vram_scratch_init(adev);
if (r) {
DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
@@ -2347,7 +2352,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
}
if (amdgpu_sriov_vf(adev))
- amdgpu_virt_init_data_exchange(adev);
+ amdgpu_virt_exchange_data(adev);
r = amdgpu_ib_pool_init(adev);
if (r) {
@@ -2614,11 +2619,10 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
if (r)
DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
- /* For XGMI + passthrough configuration on arcturus, enable light SBR */
- if (adev->asic_type == CHIP_ARCTURUS &&
- amdgpu_passthrough(adev) &&
- adev->gmc.xgmi.num_physical_nodes > 1)
- smu_set_light_sbr(&adev->smu, true);
+ /* For passthrough configuration on arcturus and aldebaran, enable special SBR handling */
+ if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
+ adev->asic_type == CHIP_ALDEBARAN))
+ smu_handle_passthrough_sbr(&adev->smu, true);
if (adev->gmc.xgmi.num_physical_nodes > 1) {
mutex_lock(&mgpu_info.mutex);
@@ -2657,6 +2661,36 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
return 0;
}
+/**
+ * amdgpu_device_smu_fini_early - smu hw_fini wrapper
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * For ASICs that need to disable SMC first
+ */
+static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
+{
+ int i, r;
+
+ if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
+ return;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.hw)
+ continue;
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
+ r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
+ /* XXX handle errors */
+ if (r) {
+ DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ }
+ adev->ip_blocks[i].status.hw = false;
+ break;
+ }
+ }
+}
+
static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
{
int i, r;
@@ -2677,21 +2711,8 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
- /* need to disable SMC first */
- for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_blocks[i].status.hw)
- continue;
- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
- r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
- /* XXX handle errors */
- if (r) {
- DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- }
- adev->ip_blocks[i].status.hw = false;
- break;
- }
- }
+ /* Workaround for ASICs that need to disable SMC first */
+ amdgpu_device_smu_fini_early(adev);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.hw)
@@ -2733,8 +2754,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
amdgpu_virt_release_ras_err_handler_data(adev);
- amdgpu_ras_pre_fini(adev);
-
if (adev->gmc.xgmi.num_physical_nodes > 1)
amdgpu_xgmi_remove_device(adev);
@@ -3166,6 +3185,12 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
{
switch (asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_HAINAN:
+#endif
+ case CHIP_TOPAZ:
+ /* chips with no display hardware */
+ return false;
#if defined(CONFIG_DRM_AMD_DC)
case CHIP_TAHITI:
case CHIP_PITCAIRN:
@@ -3367,6 +3392,22 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
return ret;
}
+/**
+ * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * RAM is direct mapped to the GPU if the IOMMU is not enabled or is in passthrough mode
+ */
+static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
+{
+ struct iommu_domain *domain;
+
+ domain = iommu_get_domain_for_dev(adev->dev);
+ if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
+ adev->ram_is_direct_mapped = true;
+}
+
static const struct attribute *amdgpu_dev_attributes[] = {
&dev_attr_product_name.attr,
&dev_attr_product_number.attr,
@@ -3541,6 +3582,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
return r;
+ /* Need to get xgmi info early to decide the reset behavior */
+ if (adev->gmc.xgmi.supported) {
+ r = adev->gfxhub.funcs->get_xgmi_info(adev);
+ if (r)
+ return r;
+ }
+
/* enable PCIE atomic ops */
if (amdgpu_sriov_vf(adev))
adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
@@ -3687,8 +3735,6 @@ fence_driver_init:
/* Get a log2 for easy divisions. */
adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
- amdgpu_fbdev_init(adev);
-
r = amdgpu_pm_sysfs_init(adev);
if (r) {
adev->pm_sysfs_en = false;
@@ -3772,6 +3818,8 @@ fence_driver_init:
queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
msecs_to_jiffies(AMDGPU_RESUME_MS));
+ amdgpu_device_check_iommu_direct_map(adev);
+
return 0;
release_ras_con:
@@ -3805,7 +3853,7 @@ static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
}
/**
- * amdgpu_device_fini - tear down the driver
+ * amdgpu_device_fini_hw - tear down the driver
*
* @adev: amdgpu_device pointer
*
@@ -3833,7 +3881,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
/* disable all interrupts */
amdgpu_irq_disable_all(adev);
if (adev->mode_info.mode_config_initialized){
- if (!amdgpu_device_has_dc_support(adev))
+ if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
drm_helper_force_disable_all(adev_to_drm(adev));
else
drm_atomic_helper_shutdown(adev_to_drm(adev));
@@ -3846,17 +3894,21 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
amdgpu_ucode_sysfs_fini(adev);
sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
- amdgpu_fbdev_fini(adev);
+ /* disable ras feature; must be done before hw fini */
+ amdgpu_ras_pre_fini(adev);
amdgpu_device_ip_fini_early(adev);
amdgpu_irq_fini_hw(adev);
- ttm_device_clear_dma_mappings(&adev->mman.bdev);
+ if (adev->mman.initialized)
+ ttm_device_clear_dma_mappings(&adev->mman.bdev);
amdgpu_gart_dummy_page_fini(adev);
- amdgpu_device_unmap_mmio(adev);
+ if (drm_dev_is_unplugged(adev_to_drm(adev)))
+ amdgpu_device_unmap_mmio(adev);
+
}
void amdgpu_device_fini_sw(struct amdgpu_device *adev)
@@ -3942,7 +3994,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
drm_kms_helper_poll_disable(dev);
if (fbcon)
- amdgpu_fbdev_set_suspend(adev, 1);
+ drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
cancel_delayed_work_sync(&adev->delayed_init_work);
@@ -4019,7 +4071,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
flush_delayed_work(&adev->delayed_init_work);
if (fbcon)
- amdgpu_fbdev_set_suspend(adev, 0);
+ drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
drm_kms_helper_poll_enable(dev);
@@ -4288,6 +4340,11 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
bool from_hypervisor)
{
int r;
+ struct amdgpu_hive_info *hive = NULL;
+
+ amdgpu_amdkfd_pre_reset(adev);
if (from_hypervisor)
r = amdgpu_virt_request_full_gpu(adev, true);
@@ -4314,9 +4371,19 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
if (r)
goto error;
- amdgpu_irq_gpu_reset_resume_helper(adev);
- r = amdgpu_ib_ring_tests(adev);
- amdgpu_amdkfd_post_reset(adev);
+ hive = amdgpu_get_xgmi_hive(adev);
+ /* Update PSP FW topology after reset */
+ if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
+ r = amdgpu_xgmi_update_topology(hive, adev);
+
+ if (hive)
+ amdgpu_put_xgmi_hive(hive);
+
+ if (!r) {
+ amdgpu_irq_gpu_reset_resume_helper(adev);
+ r = amdgpu_ib_ring_tests(adev);
+ amdgpu_amdkfd_post_reset(adev);
+ }
error:
if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
@@ -4459,7 +4526,7 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
struct amdgpu_reset_context *reset_context)
{
- int i, j, r = 0;
+ int i, r = 0;
struct amdgpu_job *job = NULL;
bool need_full_reset =
test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
@@ -4481,15 +4548,8 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
/*clear job fence from fence drv to avoid force_completion
*leave NULL and vm flush fence in fence drv */
- for (j = 0; j <= ring->fence_drv.num_fences_mask; j++) {
- struct dma_fence *old, **ptr;
+ amdgpu_fence_driver_clear_job_fences(ring);
- ptr = &ring->fence_drv.fences[j];
- old = rcu_dereference_protected(*ptr, 1);
- if (old && test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &old->flags)) {
- RCU_INIT_POINTER(*ptr, NULL);
- }
- }
/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
amdgpu_fence_driver_force_completion(ring);
}
@@ -4649,7 +4709,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
if (r)
goto out;
- amdgpu_fbdev_set_suspend(tmp_adev, 0);
+ drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
/*
* The GPU enters bad state once faulty pages
@@ -4748,7 +4808,7 @@ static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgp
{
struct amdgpu_device *tmp_adev = NULL;
- if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
if (!hive) {
dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
return -ENODEV;
@@ -4960,7 +5020,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* We always reset all schedulers for device and all devices for XGMI
* hive so that should take care of them too.
*/
- hive = amdgpu_get_xgmi_hive(adev);
+ if (!amdgpu_sriov_vf(adev))
+ hive = amdgpu_get_xgmi_hive(adev);
if (hive) {
if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
@@ -5001,7 +5062,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* to put adev in the 1st position.
*/
INIT_LIST_HEAD(&device_list);
- if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
list_add_tail(&tmp_adev->reset_list, &device_list);
if (!list_is_first(&adev->reset_list, &device_list))
@@ -5031,7 +5092,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
- amdgpu_amdkfd_pre_reset(tmp_adev);
+ if (!amdgpu_sriov_vf(tmp_adev))
+ amdgpu_amdkfd_pre_reset(tmp_adev);
/*
* Mark these ASICs to be reset as untracked first
@@ -5039,7 +5101,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
*/
amdgpu_unregister_gpu_instance(tmp_adev);
- amdgpu_fbdev_set_suspend(tmp_adev, 1);
+ drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
/* disable ras on ALL IPs */
if (!need_emergency_restart &&
@@ -5089,7 +5151,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
/* Actual ASIC resets if needed.*/
- /* TODO Implement XGMI hive reset logic for SRIOV */
+ /* Host driver will handle XGMI hive reset for SRIOV */
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_device_reset_sriov(adev, job ? false : true);
if (r)
@@ -5130,7 +5192,7 @@ skip_hw_reset:
drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
}
- if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
+ if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
}
@@ -5151,7 +5213,7 @@ skip_sched_resume:
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
/* unlock kfd: SRIOV would do it separately */
if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
- amdgpu_amdkfd_post_reset(tmp_adev);
+ amdgpu_amdkfd_post_reset(tmp_adev);
/* kfd_post_reset will do nothing if kfd device is not initialized,
* need to bring up kfd here if it's not be initialized before
@@ -5634,3 +5696,42 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
amdgpu_asic_invalidate_hdp(adev, ring);
}
+
+/**
+ * amdgpu_device_halt() - bring hardware to some kind of halt state
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Bring hardware to some kind of halt state so that no one can touch it
+ * any more. It helps to preserve error context when an error occurs.
+ * Compared to a simple hang, the system will stay stable at least for SSH
+ * access. Then it should be trivial to inspect the hardware state and
+ * see what's going on. Implemented as follows:
+ *
+ * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs, etc),
+ * clears all CPU mappings to the device, disallows remappings through page faults
+ * 2. amdgpu_irq_disable_all() disables all interrupts
+ * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
+ * 4. set adev->no_hw_access to avoid potential crashes after step 5
+ * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
+ * 6. pci_disable_device() and pci_wait_for_pending_transaction()
+ * flush any in flight DMA operations
+ */
+void amdgpu_device_halt(struct amdgpu_device *adev)
+{
+ struct pci_dev *pdev = adev->pdev;
+ struct drm_device *ddev = adev_to_drm(adev);
+
+ drm_dev_unplug(ddev);
+
+ amdgpu_irq_disable_all(adev);
+
+ amdgpu_fence_driver_hw_fini(adev);
+
+ adev->no_hw_access = true;
+
+ amdgpu_device_unmap_mmio(adev);
+
+ pci_disable_device(pdev);
+ pci_wait_for_pending_transaction(pdev);
+}
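
amdgpu_device_ip_fini_early() above now delegates the "disable SMC first" special case to amdgpu_device_smu_fini_early(), which walks the IP blocks, tears down the single SMC block and stops. The standalone sketch below models that walk with invented block types and names:

#include <stdbool.h>
#include <stdio.h>

enum ip_type { IP_GMC, IP_SMC, IP_GFX };

struct ip_block {
	enum ip_type type;
	bool hw;                /* hardware is initialized */
	const char *name;
};

/* Tear down the SMC block first, mirroring the structure of the helper above. */
static void smu_fini_early(struct ip_block *blocks, int n)
{
	for (int i = 0; i < n; i++) {
		if (!blocks[i].hw)
			continue;
		if (blocks[i].type != IP_SMC)
			continue;
		printf("hw_fini of %s\n", blocks[i].name);
		blocks[i].hw = false;
		break;          /* there is only one SMC block */
	}
}

int main(void)
{
	struct ip_block blocks[] = {
		{ IP_GMC, true, "gmc" },
		{ IP_SMC, true, "smc" },
		{ IP_GFX, true, "gfx" },
	};

	smu_fini_early(blocks, 3);
	return 0;
}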
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 4e3669407518..028190d42bb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -67,7 +67,8 @@
#include "smuio_v11_0_6.h"
#include "smuio_v13_0.h"
-MODULE_FIRMWARE("amdgpu/ip_discovery.bin");
+#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
+MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
#define mmRCC_CONFIG_MEMSIZE 0xde3
#define mmMM_INDEX 0x0
@@ -157,6 +158,8 @@ static int hw_id_map[MAX_HWIP] = {
[HDP_HWIP] = HDP_HWID,
[SDMA0_HWIP] = SDMA0_HWID,
[SDMA1_HWIP] = SDMA1_HWID,
+ [SDMA2_HWIP] = SDMA2_HWID,
+ [SDMA3_HWIP] = SDMA3_HWID,
[MMHUB_HWIP] = MMHUB_HWID,
[ATHUB_HWIP] = ATHUB_HWID,
[NBIO_HWIP] = NBIF_HWID,
@@ -177,7 +180,7 @@ static int hw_id_map[MAX_HWIP] = {
[DCI_HWIP] = DCI_HWID,
};
-static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *binary)
+static int amdgpu_discovery_read_binary_from_vram(struct amdgpu_device *adev, uint8_t *binary)
{
uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
@@ -187,6 +190,34 @@ static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *bin
return 0;
}
+static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
+{
+ const struct firmware *fw;
+ const char *fw_name;
+ int r;
+
+ switch (amdgpu_discovery) {
+ case 2:
+ fw_name = FIRMWARE_IP_DISCOVERY;
+ break;
+ default:
+ dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
+ return -EINVAL;
+ }
+
+ r = request_firmware(&fw, fw_name, adev->dev);
+ if (r) {
+ dev_err(adev->dev, "can't load firmware \"%s\"\n",
+ fw_name);
+ return r;
+ }
+
+ memcpy((u8 *)binary, (u8 *)fw->data, adev->mman.discovery_tmr_size);
+ release_firmware(fw);
+
+ return 0;
+}
+
static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
{
uint16_t checksum = 0;
@@ -204,13 +235,20 @@ static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size
return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
}
+static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
+{
+ struct binary_header *bhdr;
+ bhdr = (struct binary_header *)binary;
+
+ return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
+}
+
static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
struct table_info *info;
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
struct gpu_info_header *ghdr;
- const struct firmware *fw;
uint16_t offset;
uint16_t size;
uint16_t checksum;
@@ -221,39 +259,40 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (!adev->mman.discovery_bin)
return -ENOMEM;
- if (amdgpu_discovery == 2) {
- r = request_firmware(&fw, "amdgpu/ip_discovery.bin", adev->dev);
- if (r)
- goto get_from_vram;
- dev_info(adev->dev, "Using IP discovery from file\n");
- memcpy((u8 *)adev->mman.discovery_bin, (u8 *)fw->data,
- adev->mman.discovery_tmr_size);
- release_firmware(fw);
- } else {
-get_from_vram:
- r = amdgpu_discovery_read_binary(adev, adev->mman.discovery_bin);
+ r = amdgpu_discovery_read_binary_from_vram(adev, adev->mman.discovery_bin);
+ if (r) {
+ dev_err(adev->dev, "failed to read ip discovery binary from vram\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
+ dev_warn(adev->dev, "invalid ip discovery binary signature read from vram\n");
+ /* retry reading the ip discovery binary from file */
+ r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
if (r) {
- DRM_ERROR("failed to read ip discovery binary\n");
+ dev_err(adev->dev, "failed to read ip discovery binary from file\n");
+ r = -EINVAL;
+ goto out;
+ }
+ /* check the ip discovery binary signature */
+ if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
+ dev_warn(adev->dev, "invalid ip discovery binary signature read from file\n");
+ r = -EINVAL;
goto out;
}
}
bhdr = (struct binary_header *)adev->mman.discovery_bin;
- if (le32_to_cpu(bhdr->binary_signature) != BINARY_SIGNATURE) {
- DRM_ERROR("invalid ip discovery binary signature\n");
- r = -EINVAL;
- goto out;
- }
-
offset = offsetof(struct binary_header, binary_checksum) +
sizeof(bhdr->binary_checksum);
- size = bhdr->binary_size - offset;
- checksum = bhdr->binary_checksum;
+ size = le16_to_cpu(bhdr->binary_size) - offset;
+ checksum = le16_to_cpu(bhdr->binary_checksum);
if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
size, checksum)) {
- DRM_ERROR("invalid ip discovery binary checksum\n");
+ dev_err(adev->dev, "invalid ip discovery binary checksum\n");
r = -EINVAL;
goto out;
}
@@ -264,14 +303,14 @@ get_from_vram:
ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
- DRM_ERROR("invalid ip discovery data table signature\n");
+ dev_err(adev->dev, "invalid ip discovery data table signature\n");
r = -EINVAL;
goto out;
}
if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- ihdr->size, checksum)) {
- DRM_ERROR("invalid ip discovery data table checksum\n");
+ le16_to_cpu(ihdr->size), checksum)) {
+ dev_err(adev->dev, "invalid ip discovery data table checksum\n");
r = -EINVAL;
goto out;
}
@@ -282,8 +321,8 @@ get_from_vram:
ghdr = (struct gpu_info_header *)(adev->mman.discovery_bin + offset);
if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- ghdr->size, checksum)) {
- DRM_ERROR("invalid gc data table checksum\n");
+ le32_to_cpu(ghdr->size), checksum)) {
+ dev_err(adev->dev, "invalid gc data table checksum\n");
r = -EINVAL;
goto out;
}
@@ -377,8 +416,18 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
ip->major, ip->minor,
ip->revision);
- if (le16_to_cpu(ip->hw_id) == VCN_HWID)
+ if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
+ /* Bit [5:0]: original revision value
+ * Bit [7:6]: en/decode capability:
+ * 0b00 : VCN functions normally
+ * 0b10 : encode is disabled
+ * 0b01 : decode is disabled
+ */
+ adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
+ ip->revision & 0xc0;
+ ip->revision &= ~0xc0;
adev->vcn.num_vcn_inst++;
+ }
if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
@@ -470,14 +519,6 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int n
return -EINVAL;
}
-
-int amdgpu_discovery_get_vcn_version(struct amdgpu_device *adev, int vcn_instance,
- int *major, int *minor, int *revision)
-{
- return amdgpu_discovery_get_ip_version(adev, VCN_HWID,
- vcn_instance, major, minor, revision);
-}
-
void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
struct binary_header *bhdr;
@@ -489,10 +530,10 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset));
for (i = 0; i < 32; i++) {
- if (le32_to_cpu(harvest_info->list[i].hw_id) == 0)
+ if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
break;
- switch (le32_to_cpu(harvest_info->list[i].hw_id)) {
+ switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
case VCN_HWID:
vcn_harvest_count++;
if (harvest_info->list[i].number_instance == 0)
@@ -524,10 +565,15 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
}
}
+union gc_info {
+ struct gc_info_v1_0 v1;
+ struct gc_info_v2_0 v2;
+};
+
int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
struct binary_header *bhdr;
- struct gc_info_v1_0 *gc_info;
+ union gc_info *gc_info;
if (!adev->mman.discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
@@ -535,28 +581,55 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
}
bhdr = (struct binary_header *)adev->mman.discovery_bin;
- gc_info = (struct gc_info_v1_0 *)(adev->mman.discovery_bin +
+ gc_info = (union gc_info *)(adev->mman.discovery_bin +
le16_to_cpu(bhdr->table_list[GC].offset));
-
- adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->gc_num_se);
- adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->gc_num_wgp0_per_sa) +
- le32_to_cpu(gc_info->gc_num_wgp1_per_sa));
- adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->gc_num_sa_per_se);
- adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->gc_num_rb_per_se);
- adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->gc_num_gl2c);
- adev->gfx.config.max_gprs = le32_to_cpu(gc_info->gc_num_gprs);
- adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->gc_num_max_gs_thds);
- adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->gc_gs_table_depth);
- adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->gc_gsprim_buff_depth);
- adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->gc_double_offchip_lds_buffer);
- adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->gc_wave_size);
- adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->gc_max_waves_per_simd);
- adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->gc_max_scratch_slots_per_cu);
- adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->gc_lds_size);
- adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->gc_num_sc_per_se) /
- le32_to_cpu(gc_info->gc_num_sa_per_se);
- adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->gc_num_packer_per_sc);
-
+ switch (gc_info->v1.header.version_major) {
+ case 1:
+ adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
+ adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
+ le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
+ adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
+ adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
+ adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
+ adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
+ adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
+ adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
+ adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
+ adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
+ adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
+ adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
+ adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
+ adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
+ adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
+ le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
+ adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
+ break;
+ case 2:
+ adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
+ adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
+ adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
+ adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
+ adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
+ adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
+ adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
+ adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
+ adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
+ adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
+ adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
+ adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
+ adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
+ adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
+ adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
+ le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
+ adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Unhandled GC info table %d.%d\n",
+ gc_info->v1.header.version_major,
+ gc_info->v1.header.version_minor);
+ return -EINVAL;
+ }
return 0;
}
@@ -915,9 +988,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
break;
case IP_VERSION(3, 0, 0):
case IP_VERSION(3, 0, 16):
- case IP_VERSION(3, 0, 64):
case IP_VERSION(3, 1, 1):
case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 192):
amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
if (!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
@@ -951,7 +1024,7 @@ static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
break;
default:
- break;;
+ break;
}
return 0;
}
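
amdgpu_discovery_get_gfx_info() now reads the table header version first and dispatches through a union so that the v1 and v2 GC info layouts share one entry point. A standalone sketch of that versioned-table dispatch is below; the struct layouts and field names are simplified placeholders:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct table_header {
	uint8_t version_major;
	uint8_t version_minor;
};

struct gc_info_v1 {
	struct table_header header;
	uint32_t num_se;
	uint32_t num_wgp0_per_sa;
	uint32_t num_wgp1_per_sa;
};

struct gc_info_v2 {
	struct table_header header;
	uint32_t num_se;
	uint32_t num_cu_per_sh;
};

union gc_info {
	struct gc_info_v1 v1;
	struct gc_info_v2 v2;
};

/* Returns the CU count per SH, or -1 for an unknown table version. */
static int cu_per_sh(const union gc_info *gc)
{
	switch (gc->v1.header.version_major) {   /* header layout is shared */
	case 1:
		return 2 * (gc->v1.num_wgp0_per_sa + gc->v1.num_wgp1_per_sa);
	case 2:
		return gc->v2.num_cu_per_sh;
	default:
		return -1;
	}
}

int main(void)
{
	union gc_info gc;

	memset(&gc, 0, sizeof(gc));
	gc.v2.header.version_major = 2;
	gc.v2.num_cu_per_sh = 10;
	printf("cu_per_sh = %d\n", cu_per_sh(&gc));
	return 0;
}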
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index 0ea029e3b850..14537cec19db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -33,8 +33,6 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev);
int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance,
int *major, int *minor, int *revision);
-int amdgpu_discovery_get_vcn_version(struct amdgpu_device *adev, int vcn_instance,
- int *major, int *minor, int *revision);
int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev);
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 68108f151dad..82011e75ed85 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -1360,7 +1360,7 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
- drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
+ connector->display_info.is_hdmi &&
amdgpu_display_is_hdtv_mode(mode)))) {
if (amdgpu_encoder->underscan_hborder != 0)
amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
@@ -1599,13 +1599,10 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
continue;
}
robj = gem_to_amdgpu_bo(fb->obj[0]);
- /* don't unpin kernel fb objects */
- if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
- r = amdgpu_bo_reserve(robj, true);
- if (r == 0) {
- amdgpu_bo_unpin(robj);
- amdgpu_bo_unreserve(robj);
- }
+ r = amdgpu_bo_reserve(robj, true);
+ if (r == 0) {
+ amdgpu_bo_unpin(robj);
+ amdgpu_bo_unreserve(robj);
}
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index ae6ab93c868b..579adfafe4d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -61,9 +61,6 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
if (pci_p2pdma_distance_many(adev->pdev, &attach->dev, 1, true) < 0)
attach->peer2peer = false;
- if (attach->dev->driver == adev->dev->driver)
- return 0;
-
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0)
goto out;
@@ -384,7 +381,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
struct amdgpu_vm_bo_base *bo_base;
int r;
- if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
+ if (!bo->tbo.resource || bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
return;
r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 438468b82eb6..04f6da98ee59 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -313,9 +313,12 @@ module_param_named(dpm, amdgpu_dpm, int, 0444);
/**
* DOC: fw_load_type (int)
- * Set different firmware loading type for debugging (0 = direct, 1 = SMU, 2 = PSP). The default is -1 (auto).
+ * Set different firmware loading type for debugging, if supported.
+ * Set to 0 to force direct loading if supported by the ASIC. Set
+ * to -1 to select the default loading mode for the ASIC, as defined
+ * by the driver. The default is -1 (auto).
*/
-MODULE_PARM_DESC(fw_load_type, "firmware loading type (0 = direct, 1 = SMU, 2 = PSP, -1 = auto)");
+MODULE_PARM_DESC(fw_load_type, "firmware loading type (0 = force direct if supported, -1 = auto)");
module_param_named(fw_load_type, amdgpu_fw_load_type, int, 0444);
/**
@@ -327,10 +330,11 @@ module_param_named(aspm, amdgpu_aspm, int, 0444);
/**
* DOC: runpm (int)
- * Override for runtime power management control for dGPUs in PX/HG laptops. The amdgpu driver can dynamically power down
- * the dGPU on PX/HG laptops when it is idle. The default is -1 (auto enable). Setting the value to 0 disables this functionality.
+ * Override for runtime power management control for dGPUs. The amdgpu driver can dynamically power down
+ * the dGPUs when they are idle if supported. The default is -1 (auto enable).
+ * Setting the value to 0 disables this functionality.
*/
-MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = PX only default)");
+MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = auto)");
module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
/**
@@ -2001,6 +2005,19 @@ retry_init:
goto err_pci;
}
+ /*
+ * 1. don't init fbdev on hw without DCE
+ * 2. don't init fbdev if there are no connectors
+ */
+ if (adev->mode_info.mode_config_initialized &&
+ !list_empty(&adev_to_drm(adev)->mode_config.connector_list)) {
+ /* select 8 bpp console on low vram cards */
+ if (adev->gmc.real_vram_size <= (32*1024*1024))
+ drm_fbdev_generic_setup(adev_to_drm(adev), 8);
+ else
+ drm_fbdev_generic_setup(adev_to_drm(adev), 32);
+ }
+
ret = amdgpu_debugfs_init(adev);
if (ret)
DRM_ERROR("Creating debugfs files failed (%d).\n", ret);
@@ -2152,7 +2169,10 @@ static int amdgpu_pmops_suspend(struct device *dev)
adev->in_s3 = true;
r = amdgpu_device_suspend(drm_dev, true);
adev->in_s3 = false;
-
+ if (r)
+ return r;
+ if (!adev->in_s0ix)
+ r = amdgpu_asic_reset(adev);
return r;
}
@@ -2233,12 +2253,27 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
if (amdgpu_device_supports_px(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ /*
+ * By setting mp1_state as PP_MP1_STATE_UNLOAD, MP1 will do some
+ * proper cleanups and put itself into a state ready for PNP. That
+ * can address some random resuming failure observed on BOCO capable
+ * platforms.
+ * TODO: this may be also needed for PX capable platform.
+ */
+ if (amdgpu_device_supports_boco(drm_dev))
+ adev->mp1_state = PP_MP1_STATE_UNLOAD;
+
ret = amdgpu_device_suspend(drm_dev, false);
if (ret) {
adev->in_runpm = false;
+ if (amdgpu_device_supports_boco(drm_dev))
+ adev->mp1_state = PP_MP1_STATE_NONE;
return ret;
}
+ if (amdgpu_device_supports_boco(drm_dev))
+ adev->mp1_state = PP_MP1_STATE_NONE;
+
if (amdgpu_device_supports_px(drm_dev)) {
/* Only need to handle PCI state in the driver for ATPX
* PCI core handles it for _PR3.
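
The runtime-suspend hunk above sets mp1_state to an unload value on BOCO-capable platforms before suspending and restores it on both the failure and the success path. The standalone sketch below models that set, attempt, restore-on-exit error-handling shape with invented names:

#include <stdio.h>

enum mp1_state { MP1_STATE_NONE, MP1_STATE_UNLOAD };

struct dev_ctx {
	enum mp1_state mp1_state;
	int in_runpm;
};

static int device_suspend(struct dev_ctx *d, int fail)
{
	return fail ? -1 : 0;
}

static int runtime_suspend(struct dev_ctx *d, int supports_boco, int fail)
{
	int ret;

	d->in_runpm = 1;
	if (supports_boco)
		d->mp1_state = MP1_STATE_UNLOAD;       /* tell firmware to prepare for power-off */

	ret = device_suspend(d, fail);
	if (ret) {
		d->in_runpm = 0;
		if (supports_boco)
			d->mp1_state = MP1_STATE_NONE; /* undo on failure */
		return ret;
	}

	if (supports_boco)
		d->mp1_state = MP1_STATE_NONE;         /* state only matters across the suspend call */
	return 0;
}

int main(void)
{
	struct dev_ctx d = { MP1_STATE_NONE, 0 };

	printf("suspend: %d, mp1_state: %d\n", runtime_suspend(&d, 1, 0), d.mp1_state);
	return 0;
}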
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
index e3a4f7048042..8178323e4bef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
@@ -45,4 +45,7 @@
long amdgpu_drm_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
+long amdgpu_kms_compat_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
index af4ef84e27a7..c96e458ed088 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
@@ -222,7 +222,7 @@ bool amdgpu_dig_monitor_is_duallink(struct drm_encoder *encoder,
case DRM_MODE_CONNECTOR_HDMIB:
if (amdgpu_connector->use_digital) {
/* HDMI 1.3 supports up to 340 Mhz over single link */
- if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+ if (connector->display_info.is_hdmi) {
if (pixel_clock > 340000)
return true;
else
@@ -244,7 +244,7 @@ bool amdgpu_dig_monitor_is_duallink(struct drm_encoder *encoder,
return false;
else {
/* HDMI 1.3 supports up to 340 Mhz over single link */
- if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+ if (connector->display_info.is_hdmi) {
if (pixel_clock > 340000)
return true;
else
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
deleted file mode 100644
index cd0acbea75da..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ /dev/null
@@ -1,388 +0,0 @@
-/*
- * Copyright © 2007 David Airlie
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * David Airlie
- */
-
-#include <linux/module.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
-#include <linux/vga_switcheroo.h>
-
-#include <drm/amdgpu_drm.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fourcc.h>
-
-#include "amdgpu.h"
-#include "cikd.h"
-#include "amdgpu_gem.h"
-
-#include "amdgpu_display.h"
-
-/* object hierarchy -
- this contains a helper + a amdgpu fb
- the helper contains a pointer to amdgpu framebuffer baseclass.
-*/
-
-static int
-amdgpufb_open(struct fb_info *info, int user)
-{
- struct drm_fb_helper *fb_helper = info->par;
- int ret = pm_runtime_get_sync(fb_helper->dev->dev);
- if (ret < 0 && ret != -EACCES) {
- pm_runtime_mark_last_busy(fb_helper->dev->dev);
- pm_runtime_put_autosuspend(fb_helper->dev->dev);
- return ret;
- }
- return 0;
-}
-
-static int
-amdgpufb_release(struct fb_info *info, int user)
-{
- struct drm_fb_helper *fb_helper = info->par;
-
- pm_runtime_mark_last_busy(fb_helper->dev->dev);
- pm_runtime_put_autosuspend(fb_helper->dev->dev);
- return 0;
-}
-
-static const struct fb_ops amdgpufb_ops = {
- .owner = THIS_MODULE,
- DRM_FB_HELPER_DEFAULT_OPS,
- .fb_open = amdgpufb_open,
- .fb_release = amdgpufb_release,
- .fb_fillrect = drm_fb_helper_cfb_fillrect,
- .fb_copyarea = drm_fb_helper_cfb_copyarea,
- .fb_imageblit = drm_fb_helper_cfb_imageblit,
-};
-
-
-int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int cpp, bool tiled)
-{
- int aligned = width;
- int pitch_mask = 0;
-
- switch (cpp) {
- case 1:
- pitch_mask = 255;
- break;
- case 2:
- pitch_mask = 127;
- break;
- case 3:
- case 4:
- pitch_mask = 63;
- break;
- }
-
- aligned += pitch_mask;
- aligned &= ~pitch_mask;
- return aligned * cpp;
-}
-
-static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
-{
- struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
- int ret;
-
- ret = amdgpu_bo_reserve(abo, true);
- if (likely(ret == 0)) {
- amdgpu_bo_kunmap(abo);
- amdgpu_bo_unpin(abo);
- amdgpu_bo_unreserve(abo);
- }
- drm_gem_object_put(gobj);
-}
-
-static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
- struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object **gobj_p)
-{
- const struct drm_format_info *info;
- struct amdgpu_device *adev = rfbdev->adev;
- struct drm_gem_object *gobj = NULL;
- struct amdgpu_bo *abo = NULL;
- bool fb_tiled = false; /* useful for testing */
- u32 tiling_flags = 0, domain;
- int ret;
- int aligned_size, size;
- int height = mode_cmd->height;
- u32 cpp;
- u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
- AMDGPU_GEM_CREATE_VRAM_CLEARED;
-
- info = drm_get_format_info(adev_to_drm(adev), mode_cmd);
- cpp = info->cpp[0];
-
- /* need to align pitch with crtc limits */
- mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
- fb_tiled);
- domain = amdgpu_display_supported_domains(adev, flags);
- height = ALIGN(mode_cmd->height, 8);
- size = mode_cmd->pitches[0] * height;
- aligned_size = ALIGN(size, PAGE_SIZE);
- ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain, flags,
- ttm_bo_type_device, NULL, &gobj);
- if (ret) {
- pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
- return -ENOMEM;
- }
- abo = gem_to_amdgpu_bo(gobj);
-
- if (fb_tiled)
- tiling_flags = AMDGPU_TILING_SET(ARRAY_MODE, GRPH_ARRAY_2D_TILED_THIN1);
-
- ret = amdgpu_bo_reserve(abo, false);
- if (unlikely(ret != 0))
- goto out_unref;
-
- if (tiling_flags) {
- ret = amdgpu_bo_set_tiling_flags(abo,
- tiling_flags);
- if (ret)
- dev_err(adev->dev, "FB failed to set tiling flags\n");
- }
-
- ret = amdgpu_bo_pin(abo, domain);
- if (ret) {
- amdgpu_bo_unreserve(abo);
- goto out_unref;
- }
-
- ret = amdgpu_ttm_alloc_gart(&abo->tbo);
- if (ret) {
- amdgpu_bo_unreserve(abo);
- dev_err(adev->dev, "%p bind failed\n", abo);
- goto out_unref;
- }
-
- ret = amdgpu_bo_kmap(abo, NULL);
- amdgpu_bo_unreserve(abo);
- if (ret) {
- goto out_unref;
- }
-
- *gobj_p = gobj;
- return 0;
-out_unref:
- amdgpufb_destroy_pinned_object(gobj);
- *gobj_p = NULL;
- return ret;
-}
-
-static int amdgpufb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct amdgpu_fbdev *rfbdev = (struct amdgpu_fbdev *)helper;
- struct amdgpu_device *adev = rfbdev->adev;
- struct fb_info *info;
- struct drm_framebuffer *fb = NULL;
- struct drm_mode_fb_cmd2 mode_cmd;
- struct drm_gem_object *gobj = NULL;
- struct amdgpu_bo *abo = NULL;
- int ret;
-
- memset(&mode_cmd, 0, sizeof(mode_cmd));
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
-
- if (sizes->surface_bpp == 24)
- sizes->surface_bpp = 32;
-
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
-
- ret = amdgpufb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
- if (ret) {
- DRM_ERROR("failed to create fbcon object %d\n", ret);
- return ret;
- }
-
- abo = gem_to_amdgpu_bo(gobj);
-
- /* okay we have an object now allocate the framebuffer */
- info = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto out;
- }
-
- ret = amdgpu_display_gem_fb_init(adev_to_drm(adev), &rfbdev->rfb,
- &mode_cmd, gobj);
- if (ret) {
- DRM_ERROR("failed to initialize framebuffer %d\n", ret);
- goto out;
- }
-
- fb = &rfbdev->rfb.base;
-
- /* setup helper */
- rfbdev->helper.fb = fb;
-
- info->fbops = &amdgpufb_ops;
-
- info->fix.smem_start = amdgpu_gmc_vram_cpu_pa(adev, abo);
- info->fix.smem_len = amdgpu_bo_size(abo);
- info->screen_base = amdgpu_bo_kptr(abo);
- info->screen_size = amdgpu_bo_size(abo);
-
- drm_fb_helper_fill_info(info, &rfbdev->helper, sizes);
-
- /* setup aperture base/size for vesafb takeover */
- info->apertures->ranges[0].base = adev_to_drm(adev)->mode_config.fb_base;
- info->apertures->ranges[0].size = adev->gmc.aper_size;
-
- /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
-
- if (info->screen_base == NULL) {
- ret = -ENOSPC;
- goto out;
- }
-
- DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
- DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->gmc.aper_base);
- DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo));
- DRM_INFO("fb depth is %d\n", fb->format->depth);
- DRM_INFO(" pitch is %d\n", fb->pitches[0]);
-
- vga_switcheroo_client_fb_set(adev->pdev, info);
- return 0;
-
-out:
- if (fb && ret) {
- drm_gem_object_put(gobj);
- drm_framebuffer_unregister_private(fb);
- drm_framebuffer_cleanup(fb);
- kfree(fb);
- }
- return ret;
-}
-
-static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
-{
- struct amdgpu_framebuffer *rfb = &rfbdev->rfb;
- int i;
-
- drm_fb_helper_unregister_fbi(&rfbdev->helper);
-
- if (rfb->base.obj[0]) {
- for (i = 0; i < rfb->base.format->num_planes; i++)
- drm_gem_object_put(rfb->base.obj[0]);
- amdgpufb_destroy_pinned_object(rfb->base.obj[0]);
- rfb->base.obj[0] = NULL;
- drm_framebuffer_unregister_private(&rfb->base);
- drm_framebuffer_cleanup(&rfb->base);
- }
- drm_fb_helper_fini(&rfbdev->helper);
-
- return 0;
-}
-
-static const struct drm_fb_helper_funcs amdgpu_fb_helper_funcs = {
- .fb_probe = amdgpufb_create,
-};
-
-int amdgpu_fbdev_init(struct amdgpu_device *adev)
-{
- struct amdgpu_fbdev *rfbdev;
- int bpp_sel = 32;
- int ret;
-
- /* don't init fbdev on hw without DCE */
- if (!adev->mode_info.mode_config_initialized)
- return 0;
-
- /* don't init fbdev if there are no connectors */
- if (list_empty(&adev_to_drm(adev)->mode_config.connector_list))
- return 0;
-
- /* select 8 bpp console on low vram cards */
- if (adev->gmc.real_vram_size <= (32*1024*1024))
- bpp_sel = 8;
-
- rfbdev = kzalloc(sizeof(struct amdgpu_fbdev), GFP_KERNEL);
- if (!rfbdev)
- return -ENOMEM;
-
- rfbdev->adev = adev;
- adev->mode_info.rfbdev = rfbdev;
-
- drm_fb_helper_prepare(adev_to_drm(adev), &rfbdev->helper,
- &amdgpu_fb_helper_funcs);
-
- ret = drm_fb_helper_init(adev_to_drm(adev), &rfbdev->helper);
- if (ret) {
- kfree(rfbdev);
- return ret;
- }
-
- /* disable all the possible outputs/crtcs before entering KMS mode */
- if (!amdgpu_device_has_dc_support(adev) && !amdgpu_virtual_display)
- drm_helper_disable_unused_functions(adev_to_drm(adev));
-
- drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
- return 0;
-}
-
-void amdgpu_fbdev_fini(struct amdgpu_device *adev)
-{
- if (!adev->mode_info.rfbdev)
- return;
-
- amdgpu_fbdev_destroy(adev_to_drm(adev), adev->mode_info.rfbdev);
- kfree(adev->mode_info.rfbdev);
- adev->mode_info.rfbdev = NULL;
-}
-
-void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
-{
- if (adev->mode_info.rfbdev)
- drm_fb_helper_set_suspend_unlocked(&adev->mode_info.rfbdev->helper,
- state);
-}
-
-int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
-{
- struct amdgpu_bo *robj;
- int size = 0;
-
- if (!adev->mode_info.rfbdev)
- return 0;
-
- robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0]);
- size += amdgpu_bo_size(robj);
- return size;
-}
-
-bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
-{
- if (!adev->mode_info.rfbdev)
- return false;
- if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0]))
- return true;
- return false;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 3b7e86ea7167..9afd11ca2709 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -77,11 +77,13 @@ void amdgpu_fence_slab_fini(void)
* Cast helper
*/
static const struct dma_fence_ops amdgpu_fence_ops;
+static const struct dma_fence_ops amdgpu_job_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
- if (__f->base.ops == &amdgpu_fence_ops)
+ if (__f->base.ops == &amdgpu_fence_ops ||
+ __f->base.ops == &amdgpu_job_fence_ops)
return __f;
return NULL;
@@ -158,19 +160,18 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
}
seq = ++ring->fence_drv.sync_seq;
- if (job != NULL && job->job_run_counter) {
+ if (job && job->job_run_counter) {
/* reinit seq for resubmitted jobs */
fence->seqno = seq;
} else {
- dma_fence_init(fence, &amdgpu_fence_ops,
- &ring->fence_drv.lock,
- adev->fence_context + ring->idx,
- seq);
- }
-
- if (job != NULL) {
- /* mark this fence has a parent job */
- set_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &fence->flags);
+ if (job)
+ dma_fence_init(fence, &amdgpu_job_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx, seq);
+ else
+ dma_fence_init(fence, &amdgpu_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx, seq);
}
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
@@ -621,6 +622,25 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
}
/**
+ * amdgpu_fence_driver_clear_job_fences - clear the job-embedded fences of a ring
+ *
+ * @ring: ring whose job-embedded fences will be cleared
+ *
+ */
+void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
+{
+ int i;
+ struct dma_fence *old, **ptr;
+
+ for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
+ ptr = &ring->fence_drv.fences[i];
+ old = rcu_dereference_protected(*ptr, 1);
+ if (old && old->ops == &amdgpu_job_fence_ops)
+ RCU_INIT_POINTER(*ptr, NULL);
+ }
+}
+
+/**
* amdgpu_fence_driver_force_completion - force signal latest fence of ring
*
* @ring: fence of the ring to signal
@@ -643,16 +663,14 @@ static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
- struct amdgpu_ring *ring;
+ return (const char *)to_amdgpu_fence(f)->ring->name;
+}
- if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
- struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
+{
+ struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
- ring = to_amdgpu_ring(job->base.sched);
- } else {
- ring = to_amdgpu_fence(f)->ring;
- }
- return (const char *)ring->name;
+ return (const char *)to_amdgpu_ring(job->base.sched)->name;
}
/**
@@ -665,18 +683,25 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
*/
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
- struct amdgpu_ring *ring;
+ if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer))
+ amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring);
- if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
- struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+ return true;
+}
- ring = to_amdgpu_ring(job->base.sched);
- } else {
- ring = to_amdgpu_fence(f)->ring;
- }
+/**
+ * amdgpu_job_fence_enable_signaling - enable signalling on job fence
+ * @f: fence
+ *
+ * This is similar to amdgpu_fence_enable_signaling above, but it
+ * only handles the job-embedded fence.
+ */
+static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
+{
+ struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
- if (!timer_pending(&ring->fence_drv.fallback_timer))
- amdgpu_fence_schedule_fallback(ring);
+ if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
+ amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
return true;
}
@@ -692,19 +717,23 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
{
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
- if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
- /* free job if fence has a parent job */
- struct amdgpu_job *job;
-
- job = container_of(f, struct amdgpu_job, hw_fence);
- kfree(job);
- } else {
/* free fence_slab if it's separated fence*/
- struct amdgpu_fence *fence;
+ kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
+}
- fence = to_amdgpu_fence(f);
- kmem_cache_free(amdgpu_fence_slab, fence);
- }
+/**
+ * amdgpu_job_fence_free - free up the job with embedded fence
+ *
+ * @rcu: RCU callback head
+ *
+ * Free up the job with embedded fence after the RCU grace period.
+ */
+static void amdgpu_job_fence_free(struct rcu_head *rcu)
+{
+ struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
+
+ /* free job if fence has a parent job */
+ kfree(container_of(f, struct amdgpu_job, hw_fence));
}
/**
@@ -720,6 +749,19 @@ static void amdgpu_fence_release(struct dma_fence *f)
call_rcu(&f->rcu, amdgpu_fence_free);
}
+/**
+ * amdgpu_job_fence_release - callback that job embedded fence can be freed
+ *
+ * @f: fence
+ *
+ * This is similar to amdgpu_fence_release above, but it
+ * only handles the job-embedded fence.
+ */
+static void amdgpu_job_fence_release(struct dma_fence *f)
+{
+ call_rcu(&f->rcu, amdgpu_job_fence_free);
+}
+
static const struct dma_fence_ops amdgpu_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_fence_get_timeline_name,
@@ -727,6 +769,12 @@ static const struct dma_fence_ops amdgpu_fence_ops = {
.release = amdgpu_fence_release,
};
+static const struct dma_fence_ops amdgpu_job_fence_ops = {
+ .get_driver_name = amdgpu_fence_get_driver_name,
+ .get_timeline_name = amdgpu_job_fence_get_timeline_name,
+ .enable_signaling = amdgpu_job_fence_enable_signaling,
+ .release = amdgpu_job_fence_release,
+};
/*
* Fence debugfs
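The hunk above splits amdgpu_fence_ops from amdgpu_job_fence_ops so that each callback can recover its containing structure directly from the fence's ops pointer instead of testing a per-fence flag bit. A minimal user-space sketch of that container_of dispatch follows; the names and types are made up for illustration and are not the driver's real ones.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fence_ops { const char *name; };

static const struct fence_ops plain_fence_ops = { "plain" };
static const struct fence_ops job_fence_ops   = { "job" };

struct fence { const struct fence_ops *ops; };

/* a fence allocated on its own, e.g. from a slab */
struct plain_fence { struct fence base; int slab_slot; };

/* a fence embedded inside a larger job structure */
struct job { int job_id; struct fence hw_fence; };

static void release(struct fence *f)
{
	if (f->ops == &job_fence_ops) {
		struct job *job = container_of(f, struct job, hw_fence);
		printf("free job %d (fence embedded in it)\n", job->job_id);
	} else {
		struct plain_fence *pf = container_of(f, struct plain_fence, base);
		printf("free plain fence from slab slot %d\n", pf->slab_slot);
	}
}

int main(void)
{
	struct plain_fence pf = { { &plain_fence_ops }, 7 };
	struct job job = { 42, { &job_fence_ops } };

	release(&pf.base);
	release(&job.hw_fence);
	return 0;
}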
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index 7709caeb233d..2a786e788627 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -56,6 +56,9 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
return true;
else
return false;
+ case CHIP_ALDEBARAN:
+ /* All Aldebaran SKUs have the FRU */
+ return true;
default:
return false;
}
@@ -88,13 +91,17 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
{
- unsigned char buff[34];
+ unsigned char buff[AMDGPU_PRODUCT_NAME_LEN+2];
u32 addrptr;
int size, len;
+ int offset = 2;
if (!is_fru_eeprom_supported(adev))
return 0;
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ offset = 0;
+
/* If algo exists, it means that the i2c_adapter's initialized */
if (!adev->pm.smu_i2c.algo) {
DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
@@ -131,15 +138,13 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
}
len = size;
- /* Product name should only be 32 characters. Any more,
- * and something could be wrong. Cap it at 32 to be safe
- */
- if (len >= sizeof(adev->product_name)) {
- DRM_WARN("FRU Product Number is larger than 32 characters. This is likely a mistake");
- len = sizeof(adev->product_name) - 1;
+ if (len >= AMDGPU_PRODUCT_NAME_LEN) {
+ DRM_WARN("FRU Product Name is larger than %d characters. This is likely a mistake",
+ AMDGPU_PRODUCT_NAME_LEN);
+ len = AMDGPU_PRODUCT_NAME_LEN - 1;
}
/* Start at 2 due to buff using fields 0 and 1 for the address */
- memcpy(adev->product_name, &buff[2], len);
+ memcpy(adev->product_name, &buff[offset], len);
adev->product_name[len] = '\0';
addrptr += size + 1;
@@ -157,7 +162,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake");
len = sizeof(adev->product_number) - 1;
}
- memcpy(adev->product_number, &buff[2], len);
+ memcpy(adev->product_number, &buff[offset], len);
adev->product_number[len] = '\0';
addrptr += size + 1;
@@ -184,7 +189,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
DRM_WARN("FRU Serial Number is larger than 16 characters. This is likely a mistake");
len = sizeof(adev->serial) - 1;
}
- memcpy(adev->serial, &buff[2], len);
+ memcpy(adev->serial, &buff[offset], len);
adev->serial[len] = '\0';
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index a1e63ba4c54a..9a6507af1670 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -264,6 +264,9 @@ static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_str
!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
vma->vm_flags &= ~VM_MAYWRITE;
+ if (bo->kfd_bo)
+ vma->vm_flags |= VM_DONTCOPY;
+
return drm_gem_ttm_mmap(obj, vma);
}
@@ -877,6 +880,32 @@ out:
return r;
}
+static int amdgpu_gem_align_pitch(struct amdgpu_device *adev,
+ int width,
+ int cpp,
+ bool tiled)
+{
+ int aligned = width;
+ int pitch_mask = 0;
+
+ switch (cpp) {
+ case 1:
+ pitch_mask = 255;
+ break;
+ case 2:
+ pitch_mask = 127;
+ break;
+ case 3:
+ case 4:
+ pitch_mask = 63;
+ break;
+ }
+
+ aligned += pitch_mask;
+ aligned &= ~pitch_mask;
+ return aligned * cpp;
+}
+
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
@@ -885,7 +914,8 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_gem_object *gobj;
uint32_t handle;
u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
u32 domain;
int r;
@@ -897,8 +927,8 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
if (adev->mman.buffer_funcs_enabled)
flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
- args->pitch = amdgpu_align_pitch(adev, args->width,
- DIV_ROUND_UP(args->bpp, 8), 0);
+ args->pitch = amdgpu_gem_align_pitch(adev, args->width,
+ DIV_ROUND_UP(args->bpp, 8), 0);
args->size = (u64)args->pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);
domain = amdgpu_bo_get_preferred_domain(adev,
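amdgpu_gem_align_pitch() (added above) rounds the scanout width up to a cpp-dependent pixel boundary (256, 128, or 64 pixels) before multiplying by the bytes per pixel. A small stand-alone sketch of the same arithmetic; the example widths are assumptions, not values taken from the patch.

#include <stdio.h>

/* round the width up to a cpp-dependent pixel boundary */
static int align_pitch(int width, int cpp)
{
	int mask;

	switch (cpp) {
	case 1:  mask = 255; break;	/* align to 256 pixels */
	case 2:  mask = 127; break;	/* align to 128 pixels */
	default: mask = 63;  break;	/* cpp 3 or 4: align to 64 pixels */
	}
	return ((width + mask) & ~mask) * cpp;
}

int main(void)
{
	printf("%d\n", align_pitch(1366, 4));	/* 5632 == 1408 * 4 */
	printf("%d\n", align_pitch(1366, 1));	/* 1536 */
	return 0;
}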
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 08478fce00f2..2430d6223c2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -350,6 +350,7 @@ static inline uint64_t amdgpu_gmc_fault_key(uint64_t addr, uint16_t pasid)
* amdgpu_gmc_filter_faults - filter VM faults
*
* @adev: amdgpu device structure
+ * @ih: interrupt ring the fault was received from
* @addr: address of the VM fault
* @pasid: PASID of the process causing the fault
* @timestamp: timestamp of the fault
@@ -358,7 +359,8 @@ static inline uint64_t amdgpu_gmc_fault_key(uint64_t addr, uint16_t pasid)
* True if the fault was filtered and should not be processed further.
* False if the fault is a new one and needs to be handled.
*/
-bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
+bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih, uint64_t addr,
uint16_t pasid, uint64_t timestamp)
{
struct amdgpu_gmc *gmc = &adev->gmc;
@@ -366,6 +368,10 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
struct amdgpu_gmc_fault *fault;
uint32_t hash;
+ /* Stale retry fault if timestamp goes backward */
+ if (amdgpu_ih_ts_after(timestamp, ih->processed_timestamp))
+ return true;
+
/* If we don't have space left in the ring buffer return immediately */
stamp = max(timestamp, AMDGPU_GMC_FAULT_TIMEOUT + 1) -
AMDGPU_GMC_FAULT_TIMEOUT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index e55201134a01..8458cebc6d5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -316,7 +316,8 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc);
void amdgpu_gmc_agp_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc);
-bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
+bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih, uint64_t addr,
uint16_t pasid, uint64_t timestamp);
void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
uint16_t pasid);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index f3d62e196901..3df146579ad9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -164,52 +164,32 @@ void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv,
}
}
-/* Waiter helper that checks current rptr matches or passes checkpoint wptr */
-static bool amdgpu_ih_has_checkpoint_processed(struct amdgpu_device *adev,
- struct amdgpu_ih_ring *ih,
- uint32_t checkpoint_wptr,
- uint32_t *prev_rptr)
-{
- uint32_t cur_rptr = ih->rptr | (*prev_rptr & ~ih->ptr_mask);
-
- /* rptr has wrapped. */
- if (cur_rptr < *prev_rptr)
- cur_rptr += ih->ptr_mask + 1;
- *prev_rptr = cur_rptr;
-
- /* check ring is empty to workaround missing wptr overflow flag */
- return cur_rptr >= checkpoint_wptr ||
- (cur_rptr & ih->ptr_mask) == amdgpu_ih_get_wptr(adev, ih);
-}
-
/**
- * amdgpu_ih_wait_on_checkpoint_process - wait to process IVs up to checkpoint
+ * amdgpu_ih_wait_on_checkpoint_process_ts - wait to process IVs up to checkpoint
*
* @adev: amdgpu_device pointer
* @ih: ih ring to process
*
* Used to ensure ring has processed IVs up to the checkpoint write pointer.
*/
-int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev,
+int amdgpu_ih_wait_on_checkpoint_process_ts(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
- uint32_t checkpoint_wptr, rptr;
+ uint32_t checkpoint_wptr;
+ uint64_t checkpoint_ts;
+ long timeout = HZ;
if (!ih->enabled || adev->shutdown)
return -ENODEV;
checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih);
- /* Order wptr with rptr. */
+ /* Order wptr with ring data. */
rmb();
- rptr = READ_ONCE(ih->rptr);
-
- /* wptr has wrapped. */
- if (rptr > checkpoint_wptr)
- checkpoint_wptr += ih->ptr_mask + 1;
+ checkpoint_ts = amdgpu_ih_decode_iv_ts(adev, ih, checkpoint_wptr, -1);
- return wait_event_interruptible(ih->wait_process,
- amdgpu_ih_has_checkpoint_processed(adev, ih,
- checkpoint_wptr, &rptr));
+ return wait_event_interruptible_timeout(ih->wait_process,
+ amdgpu_ih_ts_after(checkpoint_ts, ih->processed_timestamp) ||
+ ih->rptr == amdgpu_ih_get_wptr(adev, ih), timeout);
}
/**
@@ -223,7 +203,7 @@ int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev,
*/
int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
{
- unsigned int count = AMDGPU_IH_MAX_NUM_IVS;
+ unsigned int count;
u32 wptr;
if (!ih->enabled || adev->shutdown)
@@ -232,6 +212,7 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
wptr = amdgpu_ih_get_wptr(adev, ih);
restart_ih:
+ count = AMDGPU_IH_MAX_NUM_IVS;
DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);
/* Order reading of wptr vs. reading of IH ring data */
@@ -298,3 +279,18 @@ void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev,
/* wptr/rptr are in bytes! */
ih->rptr += 32;
}
+
+uint64_t amdgpu_ih_decode_iv_ts_helper(struct amdgpu_ih_ring *ih, u32 rptr,
+ signed int offset)
+{
+ uint32_t iv_size = 32;
+ uint32_t ring_index;
+ uint32_t dw1, dw2;
+
+ rptr += iv_size * offset;
+ ring_index = (rptr & ih->ptr_mask) >> 2;
+
+ dw1 = le32_to_cpu(ih->ring[ring_index + 1]);
+ dw2 = le32_to_cpu(ih->ring[ring_index + 2]);
+ return dw1 | ((u64)(dw2 & 0xffff) << 32);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 0649b59830a5..dd1c2eded6b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -68,20 +68,30 @@ struct amdgpu_ih_ring {
/* For waiting on IH processing at checkpoint. */
wait_queue_head_t wait_process;
+ uint64_t processed_timestamp;
};
+/* return true if timestamp t2 is after t1, with 48-bit wrap around */
+#define amdgpu_ih_ts_after(t1, t2) \
+ (((int64_t)((t2) << 16) - (int64_t)((t1) << 16)) > 0LL)
+
/* provided by the ih block */
struct amdgpu_ih_funcs {
/* ring read/write ptr handling, called from interrupt context */
u32 (*get_wptr)(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
void (*decode_iv)(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
struct amdgpu_iv_entry *entry);
+ uint64_t (*decode_iv_ts)(struct amdgpu_ih_ring *ih, u32 rptr,
+ signed int offset);
void (*set_rptr)(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
};
#define amdgpu_ih_get_wptr(adev, ih) (adev)->irq.ih_funcs->get_wptr((adev), (ih))
#define amdgpu_ih_decode_iv(adev, iv) \
(adev)->irq.ih_funcs->decode_iv((adev), (ih), (iv))
+#define amdgpu_ih_decode_iv_ts(adev, ih, rptr, offset) \
+ (WARN_ON_ONCE(!(adev)->irq.ih_funcs->decode_iv_ts) ? 0 : \
+ (adev)->irq.ih_funcs->decode_iv_ts((ih), (rptr), (offset)))
#define amdgpu_ih_set_rptr(adev, ih) (adev)->irq.ih_funcs->set_rptr((adev), (ih))
int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
@@ -89,10 +99,12 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv,
unsigned int num_dw);
-int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev,
- struct amdgpu_ih_ring *ih);
+int amdgpu_ih_wait_on_checkpoint_process_ts(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih);
int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih,
struct amdgpu_iv_entry *entry);
+uint64_t amdgpu_ih_decode_iv_ts_helper(struct amdgpu_ih_ring *ih, u32 rptr,
+ signed int offset);
#endif
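The amdgpu_ih_ts_after() macro above compares 48-bit IH timestamps by shifting both values into the upper 48 bits of a 64-bit word, so an ordinary signed subtraction still gives the right ordering across a counter wrap. A minimal user-space sketch of that comparison; the sample values are illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* true if t2 is after t1, tolerating a 48-bit wrap around */
static bool ts_after(uint64_t t1, uint64_t t2)
{
	return ((int64_t)(t2 << 16) - (int64_t)(t1 << 16)) > 0;
}

int main(void)
{
	uint64_t near_wrap = 0xFFFFFFFFFFF0ULL;	/* close to the 48-bit limit */
	uint64_t wrapped   = 0x000000000010ULL;	/* counter has wrapped around */

	printf("%d\n", ts_after(near_wrap, wrapped));	/* 1: wrapped counts as later */
	printf("%d\n", ts_after(wrapped, near_wrap));	/* 0 */
	return 0;
}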
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c
index 5cf142e849bb..a1cbd7c3deb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c
@@ -1,4 +1,4 @@
-/**
+/*
* \file amdgpu_ioc32.c
*
* 32-bit ioctl compatibility routines for the AMDGPU DRM.
@@ -37,12 +37,9 @@
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
unsigned int nr = DRM_IOCTL_NR(cmd);
- int ret;
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
- ret = amdgpu_drm_ioctl(filp, cmd, arg);
-
- return ret;
+ return amdgpu_drm_ioctl(filp, cmd, arg);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index cc2e0c9cfe0a..f5cbc2747ac6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -333,7 +333,6 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
if (!amdgpu_device_has_dc_support(adev)) {
if (!adev->enable_virtual_display)
/* Disable vblank IRQs aggressively for power-saving */
- /* XXX: can this be enabled for DC? */
adev_to_drm(adev)->vblank_disable_immediate = true;
r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
@@ -391,7 +390,7 @@ void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
}
/**
- * amdgpu_irq_fini - shut down interrupt handling
+ * amdgpu_irq_fini_sw - shut down interrupt handling
*
* @adev: amdgpu device pointer
*
@@ -529,6 +528,9 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
/* Send it to amdkfd as well if it isn't already handled */
if (!handled)
amdgpu_amdkfd_interrupt(adev, entry.iv_entry);
+
+ if (amdgpu_ih_ts_after(ih->processed_timestamp, entry.timestamp))
+ ih->processed_timestamp = entry.timestamp;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 89fb372ed49c..6043bf6fd414 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -232,8 +232,6 @@ struct amdgpu_i2c_chan {
struct mutex mutex;
};
-struct amdgpu_fbdev;
-
struct amdgpu_afmt {
bool enabled;
int offset;
@@ -309,13 +307,6 @@ struct amdgpu_framebuffer {
uint64_t address;
};
-struct amdgpu_fbdev {
- struct drm_fb_helper helper;
- struct amdgpu_framebuffer rfb;
- struct list_head fbdev_list;
- struct amdgpu_device *adev;
-};
-
struct amdgpu_mode_info {
struct atom_context *atom_context;
struct card_info *atom_card_info;
@@ -341,8 +332,6 @@ struct amdgpu_mode_info {
struct edid *bios_hardcoded_edid;
int bios_hardcoded_edid_size;
- /* pointer to fbdev info structure */
- struct amdgpu_fbdev *rfbdev;
/* firmware flags */
u32 firmware_flags;
/* pointer to backlight encoder */
@@ -631,15 +620,6 @@ bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
int *hpos, ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode);
-/* fbdev layer */
-int amdgpu_fbdev_init(struct amdgpu_device *adev);
-void amdgpu_fbdev_fini(struct amdgpu_device *adev);
-void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state);
-int amdgpu_fbdev_total_size(struct amdgpu_device *adev);
-bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj);
-
-int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled);
-
/* amdgpu_display.c */
void amdgpu_display_print_display_setup(struct drm_device *dev);
int amdgpu_display_modeset_create_props(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 4fcfc2313b8c..3a7b56e57cec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1032,9 +1032,14 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
/* On A+A platform, VRAM can be mapped as WB */
if (!adev->gmc.xgmi.connected_to_cpu) {
/* reserve PAT memory space to WC for VRAM */
- arch_io_reserve_memtype_wc(adev->gmc.aper_base,
+ int r = arch_io_reserve_memtype_wc(adev->gmc.aper_base,
adev->gmc.aper_size);
+ if (r) {
+ DRM_ERROR("Unable to set WC memtype for the aperture base\n");
+ return r;
+ }
+
/* Add an MTRR for the VRAM */
adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
adev->gmc.aper_size);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
index 4eaec446b49d..0bb2466d539a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
@@ -69,6 +69,7 @@ static void amdgpu_pll_reduce_ratio(unsigned *nom, unsigned *den,
/**
* amdgpu_pll_get_fb_ref_div - feedback and ref divider calculation
*
+ * @adev: amdgpu_device pointer
* @nom: nominator
* @den: denominator
* @post_div: post divider
@@ -106,6 +107,7 @@ static void amdgpu_pll_get_fb_ref_div(struct amdgpu_device *adev, unsigned int n
/**
* amdgpu_pll_compute - compute PLL paramaters
*
+ * @adev: amdgpu_device pointer
* @pll: information about the PLL
* @freq: requested frequency
* @dot_clock_p: resulting pixel clock
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
index 82e9ecf84352..71ee361d0972 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
@@ -233,6 +233,10 @@ static void amdgpu_perf_start(struct perf_event *event, int flags)
if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
return;
+ if ((!pe->adev->df.funcs) ||
+ (!pe->adev->df.funcs->pmc_start))
+ return;
+
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
hwc->state = 0;
@@ -268,6 +272,10 @@ static void amdgpu_perf_read(struct perf_event *event)
pmu);
u64 count, prev;
+ if ((!pe->adev->df.funcs) ||
+ (!pe->adev->df.funcs->pmc_get_count))
+ return;
+
do {
prev = local64_read(&hwc->prev_count);
@@ -297,6 +305,10 @@ static void amdgpu_perf_stop(struct perf_event *event, int flags)
if (hwc->state & PERF_HES_UPTODATE)
return;
+ if ((!pe->adev->df.funcs) ||
+ (!pe->adev->df.funcs->pmc_stop))
+ return;
+
switch (hwc->config_base) {
case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF:
case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI:
@@ -326,6 +338,10 @@ static int amdgpu_perf_add(struct perf_event *event, int flags)
struct amdgpu_pmu_entry,
pmu);
+ if ((!pe->adev->df.funcs) ||
+ (!pe->adev->df.funcs->pmc_start))
+ return -EINVAL;
+
switch (pe->pmu_perf_type) {
case AMDGPU_PMU_PERF_TYPE_DF:
hwc->config_base = AMDGPU_PMU_EVENT_CONFIG_TYPE_DF;
@@ -371,6 +387,9 @@ static void amdgpu_perf_del(struct perf_event *event, int flags)
struct amdgpu_pmu_entry *pe = container_of(event->pmu,
struct amdgpu_pmu_entry,
pmu);
+ if ((!pe->adev->df.funcs) ||
+ (!pe->adev->df.funcs->pmc_stop))
+ return;
amdgpu_perf_stop(event, PERF_EF_UPDATE);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
index d02c8637f909..786afe4f58f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
@@ -59,7 +59,7 @@ static DEVICE_ATTR_RO(mem_info_preempt_used);
* @man: TTM memory type manager
* @tbo: TTM BO we need this range for
* @place: placement flags and restrictions
- * @mem: the resulting mem object
+ * @res: TTM memory object
*
* Dummy, just count the space used without allocating resources or any limit.
*/
@@ -85,7 +85,7 @@ static int amdgpu_preempt_mgr_new(struct ttm_resource_manager *man,
* amdgpu_preempt_mgr_del - free ranges
*
* @man: TTM memory type manager
- * @mem: TTM memory object
+ * @res: TTM memory object
*
* Free the allocated GTT again.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index c641f84649d6..dee17a0e1187 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -518,7 +518,7 @@ static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
return cmd;
}
-void release_psp_cmd_buf(struct psp_context *psp)
+static void release_psp_cmd_buf(struct psp_context *psp)
{
mutex_unlock(&psp->mutex);
}
@@ -2017,12 +2017,16 @@ static int psp_hw_start(struct psp_context *psp)
return ret;
}
+ if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
+ goto skip_pin_bo;
+
ret = psp_tmr_init(psp);
if (ret) {
DRM_ERROR("PSP tmr init failed!\n");
return ret;
}
+skip_pin_bo:
/*
* For ASICs with DF Cstate management centralized
* to PMFW, TMR setup should be performed after PMFW
@@ -2452,6 +2456,18 @@ skip_memalloc:
return ret;
}
+ if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ ret = psp_xgmi_initialize(psp, false, true);
+ /* Warn on XGMI session initialization failure
+ * instead of stopping driver initialization
+ */
+ if (ret)
+ dev_err(psp->adev->dev,
+ "XGMI: Failed to initialize XGMI session\n");
+ }
+ }
+
if (psp->ta_fw) {
ret = psp_ras_initialize(psp);
if (ret)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 08133de21fdd..cd9e5914944b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -867,9 +867,9 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
/* feature ctl end */
-void amdgpu_ras_mca_query_error_status(struct amdgpu_device *adev,
- struct ras_common_if *ras_block,
- struct ras_err_data *err_data)
+static void amdgpu_ras_mca_query_error_status(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block,
+ struct ras_err_data *err_data)
{
switch (ras_block->sub_block_index) {
case AMDGPU_RAS_MCA_BLOCK__MP0:
@@ -892,6 +892,38 @@ void amdgpu_ras_mca_query_error_status(struct amdgpu_device *adev,
}
}
+static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
+{
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ int ret = 0;
+
+ /*
+ * choose the right query method according to
+ * whether the SMU supports querying error information
+ */
+ ret = smu_get_ecc_info(&adev->smu, (void *)&(ras->umc_ecc));
+ if (ret == -EOPNOTSUPP) {
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->query_ras_error_count)
+ adev->umc.ras_funcs->query_ras_error_count(adev, err_data);
+
+ /* umc query_ras_error_address is also responsible for clearing
+ * error status
+ */
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->query_ras_error_address)
+ adev->umc.ras_funcs->query_ras_error_address(adev, err_data);
+ } else if (!ret) {
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->ecc_info_query_ras_error_count)
+ adev->umc.ras_funcs->ecc_info_query_ras_error_count(adev, err_data);
+
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->ecc_info_query_ras_error_address)
+ adev->umc.ras_funcs->ecc_info_query_ras_error_address(adev, err_data);
+ }
+}
+
/* query/inject/cure begin */
int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
struct ras_query_if *info)
@@ -905,15 +937,7 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
switch (info->head.block) {
case AMDGPU_RAS_BLOCK__UMC:
- if (adev->umc.ras_funcs &&
- adev->umc.ras_funcs->query_ras_error_count)
- adev->umc.ras_funcs->query_ras_error_count(adev, &err_data);
- /* umc query_ras_error_address is also responsible for clearing
- * error status
- */
- if (adev->umc.ras_funcs &&
- adev->umc.ras_funcs->query_ras_error_address)
- adev->umc.ras_funcs->query_ras_error_address(adev, &err_data);
+ amdgpu_ras_get_ecc_info(adev, &err_data);
break;
case AMDGPU_RAS_BLOCK__SDMA:
if (adev->sdma.funcs->query_ras_error_count) {
@@ -1137,9 +1161,9 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
/**
* amdgpu_ras_query_error_count -- Get error counts of all IPs
- * adev: pointer to AMD GPU device
- * ce_count: pointer to an integer to be set to the count of correctible errors.
- * ue_count: pointer to an integer to be set to the count of uncorrectible
+ * @adev: pointer to AMD GPU device
+ * @ce_count: pointer to an integer to be set to the count of correctable errors.
+ * @ue_count: pointer to an integer to be set to the count of uncorrectable
* errors.
*
* If set, @ce_count or @ue_count, count and return the corresponding
@@ -1723,6 +1747,16 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
continue;
+ /*
+ * this is a workaround for aldebaran: skip sending the message to
+ * the SMU to get the ecc_info table, because the SMU currently
+ * fails to handle it.
+ * Remove this once the SMU handles the ecc_info table correctly.
+ */
+ if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
+ (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)))
+ continue;
+
amdgpu_ras_query_error_status(adev, &info);
}
}
@@ -1935,9 +1969,11 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
if (!con || !con->eh_data)
return 0;
+ mutex_lock(&con->recovery_lock);
control = &con->eeprom_control;
data = con->eh_data;
save_count = data->count - control->ras_num_recs;
+ mutex_unlock(&con->recovery_lock);
/* only new entries are saved */
if (save_count > 0) {
if (amdgpu_ras_eeprom_append(control,
@@ -2336,7 +2372,11 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
}
/* Init poison supported flag, the default value is false */
- if (adev->df.funcs &&
+ if (adev->gmc.xgmi.connected_to_cpu) {
+ /* enabled by default when GPU is connected to CPU */
+ con->poison_supported = true;
+ }
+ else if (adev->df.funcs &&
adev->df.funcs->query_ras_poison_mode &&
adev->umc.ras_funcs &&
adev->umc.ras_funcs->query_ras_poison_mode) {
@@ -2477,7 +2517,6 @@ void amdgpu_ras_late_fini(struct amdgpu_device *adev,
amdgpu_ras_sysfs_remove(adev, ras_block);
if (ih_info->cb)
amdgpu_ras_interrupt_remove_handler(adev, ih_info);
- amdgpu_ras_feature_enable(adev, ras_block, 0);
}
/* do some init work after IP late init as dependence.
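amdgpu_ras_get_ecc_info() above prefers the ECC information cached from the SMU and only falls back to querying the UMC block directly when the SMU query returns -EOPNOTSUPP. A simplified sketch of that fallback shape, with stub functions standing in for the real SMU/UMC calls (names are assumptions, not the driver API).

#include <errno.h>
#include <stdio.h>

/* stub standing in for the firmware query */
static int query_smu_ecc_table(int smu_supports_it)
{
	return smu_supports_it ? 0 : -EOPNOTSUPP;
}

static void query_umc_errors(int smu_supports_it)
{
	int ret = query_smu_ecc_table(smu_supports_it);

	if (ret == -EOPNOTSUPP)
		printf("SMU table unsupported: query UMC registers directly\n");
	else if (!ret)
		printf("use the ECC info table cached from the SMU\n");
	else
		printf("query failed: %d\n", ret);
}

int main(void)
{
	query_umc_errors(1);
	query_umc_errors(0);
	return 0;
}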
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index e36f4de9fa55..1c708122d492 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -319,6 +319,19 @@ struct ras_common_if {
char name[32];
};
+#define MAX_UMC_CHANNEL_NUM 32
+
+struct ecc_info_per_ch {
+ uint16_t ce_count_lo_chip;
+ uint16_t ce_count_hi_chip;
+ uint64_t mca_umc_status;
+ uint64_t mca_umc_addr;
+};
+
+struct umc_ecc_info {
+ struct ecc_info_per_ch ecc[MAX_UMC_CHANNEL_NUM];
+};
+
struct amdgpu_ras {
/* ras infrastructure */
/* for ras itself. */
@@ -358,6 +371,9 @@ struct amdgpu_ras {
struct delayed_work ras_counte_delay_work;
atomic_t ras_ue_count;
atomic_t ras_ce_count;
+
+ /* record umc error info queried from smu */
+ struct umc_ecc_info umc_ecc;
};
struct ras_fs_data {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 4d380e79752c..fae7d185ad0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -53,9 +53,6 @@ enum amdgpu_ring_priority_level {
#define AMDGPU_FENCE_FLAG_INT (1 << 1)
#define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2)
-/* fence flag bit to indicate the face is embedded in job*/
-#define AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT (DMA_FENCE_FLAG_USER_BITS + 1)
-
#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
#define AMDGPU_IB_POOL_SIZE (1024 * 1024)
@@ -114,6 +111,7 @@ struct amdgpu_fence_driver {
struct dma_fence **fences;
};
+void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index c15687ce67c4..fb0d8bffdce2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -913,11 +913,6 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
ttm->num_pages, bo_mem, ttm);
}
- if (bo_mem->mem_type == AMDGPU_PL_GDS ||
- bo_mem->mem_type == AMDGPU_PL_GWS ||
- bo_mem->mem_type == AMDGPU_PL_OA)
- return -EINVAL;
-
if (bo_mem->mem_type != TTM_PL_TT ||
!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
gtt->offset = AMDGPU_BO_INVALID_OFFSET;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index a90029ee9733..46264a4002f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -23,6 +23,120 @@
#include "amdgpu_ras.h"
+static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
+ void *ras_error_status,
+ struct amdgpu_iv_entry *entry,
+ bool reset)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int ret = 0;
+
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ ret = smu_get_ecc_info(&adev->smu, (void *)&(con->umc_ecc));
+ if (ret == -EOPNOTSUPP) {
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->query_ras_error_count)
+ adev->umc.ras_funcs->query_ras_error_count(adev, ras_error_status);
+
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->query_ras_error_address &&
+ adev->umc.max_ras_err_cnt_per_query) {
+ err_data->err_addr =
+ kcalloc(adev->umc.max_ras_err_cnt_per_query,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+
+ /* still call query_ras_error_address to clear error status
+ * even if a NOMEM error is encountered
+ */
+ if(!err_data->err_addr)
+ dev_warn(adev->dev, "Failed to alloc memory for "
+ "umc error address record!\n");
+
+ /* umc query_ras_error_address is also responsible for clearing
+ * error status
+ */
+ adev->umc.ras_funcs->query_ras_error_address(adev, ras_error_status);
+ }
+ } else if (!ret) {
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->ecc_info_query_ras_error_count)
+ adev->umc.ras_funcs->ecc_info_query_ras_error_count(adev, ras_error_status);
+
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->ecc_info_query_ras_error_address &&
+ adev->umc.max_ras_err_cnt_per_query) {
+ err_data->err_addr =
+ kcalloc(adev->umc.max_ras_err_cnt_per_query,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+
+ /* still call query_ras_error_address to clear error status
+ * even if a NOMEM error is encountered
+ */
+ if(!err_data->err_addr)
+ dev_warn(adev->dev, "Failed to alloc memory for "
+ "umc error address record!\n");
+
+ /* umc query_ras_error_address is also responsible for clearing
+ * error status
+ */
+ adev->umc.ras_funcs->ecc_info_query_ras_error_address(adev, ras_error_status);
+ }
+ }
+
+ /* only uncorrectable error needs gpu reset */
+ if (err_data->ue_count) {
+ dev_info(adev->dev, "%ld uncorrectable hardware errors "
+ "detected in UMC block\n",
+ err_data->ue_count);
+
+ if ((amdgpu_bad_page_threshold != 0) &&
+ err_data->err_addr_cnt) {
+ amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
+ err_data->err_addr_cnt);
+ amdgpu_ras_save_bad_pages(adev);
+
+ if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->send_hbm_bad_pages_num)
+ adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.ras_num_recs);
+ }
+
+ if (reset)
+ amdgpu_ras_reset_gpu(adev);
+ }
+
+ kfree(err_data->err_addr);
+ return AMDGPU_RAS_SUCCESS;
+}
+
+int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
+ void *ras_error_status,
+ bool reset)
+{
+ int ret;
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ struct ras_common_if head = {
+ .block = AMDGPU_RAS_BLOCK__UMC,
+ };
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
+
+ ret =
+ amdgpu_umc_do_page_retirement(adev, ras_error_status, NULL, reset);
+
+ if (ret == AMDGPU_RAS_SUCCESS && obj) {
+ obj->err_data.ue_count += err_data->ue_count;
+ obj->err_data.ce_count += err_data->ce_count;
+ }
+
+ return ret;
+}
+
+static int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
+ void *ras_error_status,
+ struct amdgpu_iv_entry *entry)
+{
+ return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry, true);
+}
+
int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
{
int r;
@@ -88,61 +202,6 @@ void amdgpu_umc_ras_fini(struct amdgpu_device *adev)
}
}
-int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
- void *ras_error_status,
- struct amdgpu_iv_entry *entry)
-{
- struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
- struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
-
- kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
- if (adev->umc.ras_funcs &&
- adev->umc.ras_funcs->query_ras_error_count)
- adev->umc.ras_funcs->query_ras_error_count(adev, ras_error_status);
-
- if (adev->umc.ras_funcs &&
- adev->umc.ras_funcs->query_ras_error_address &&
- adev->umc.max_ras_err_cnt_per_query) {
- err_data->err_addr =
- kcalloc(adev->umc.max_ras_err_cnt_per_query,
- sizeof(struct eeprom_table_record), GFP_KERNEL);
-
- /* still call query_ras_error_address to clear error status
- * even NOMEM error is encountered
- */
- if(!err_data->err_addr)
- dev_warn(adev->dev, "Failed to alloc memory for "
- "umc error address record!\n");
-
- /* umc query_ras_error_address is also responsible for clearing
- * error status
- */
- adev->umc.ras_funcs->query_ras_error_address(adev, ras_error_status);
- }
-
- /* only uncorrectable error needs gpu reset */
- if (err_data->ue_count) {
- dev_info(adev->dev, "%ld uncorrectable hardware errors "
- "detected in UMC block\n",
- err_data->ue_count);
-
- if ((amdgpu_bad_page_threshold != 0) &&
- err_data->err_addr_cnt) {
- amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
- err_data->err_addr_cnt);
- amdgpu_ras_save_bad_pages(adev);
-
- if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->send_hbm_bad_pages_num)
- adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.ras_num_recs);
- }
-
- amdgpu_ras_reset_gpu(adev);
- }
-
- kfree(err_data->err_addr);
- return AMDGPU_RAS_SUCCESS;
-}
-
int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 1f5fe2315236..b72194e8bfe5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -49,6 +49,10 @@ struct amdgpu_umc_ras_funcs {
void (*query_ras_error_address)(struct amdgpu_device *adev,
void *ras_error_status);
bool (*query_ras_poison_mode)(struct amdgpu_device *adev);
+ void (*ecc_info_query_ras_error_count)(struct amdgpu_device *adev,
+ void *ras_error_status);
+ void (*ecc_info_query_ras_error_address)(struct amdgpu_device *adev,
+ void *ras_error_status);
};
struct amdgpu_umc_funcs {
@@ -74,9 +78,9 @@ struct amdgpu_umc {
int amdgpu_umc_ras_late_init(struct amdgpu_device *adev);
void amdgpu_umc_ras_fini(struct amdgpu_device *adev);
-int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
+int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
void *ras_error_status,
- struct amdgpu_iv_entry *entry);
+ bool reset);
int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 688bef1649b5..344f711ad144 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -434,7 +434,6 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
*
* @ring: ring we should submit the msg to
* @handle: VCE session handle to use
- * @bo: amdgpu object for which we query the offset
* @fence: optional fence to return
*
* Open up a stream for HW test
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 4f7c70845785..9a19a6a57b23 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -135,6 +135,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
break;
case IP_VERSION(3, 0, 0):
case IP_VERSION(3, 0, 64):
+ case IP_VERSION(3, 0, 192):
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
fw_name = FIRMWARE_SIENNA_CICHLID;
else
@@ -285,20 +286,13 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
{
bool ret = false;
+ int vcn_config = adev->vcn.vcn_config[vcn_instance];
- int major;
- int minor;
- int revision;
-
- /* if cannot find IP data, then this VCN does not exist */
- if (amdgpu_discovery_get_vcn_version(adev, vcn_instance, &major, &minor, &revision) != 0)
- return true;
-
- if ((type == VCN_ENCODE_RING) && (revision & VCN_BLOCK_ENCODE_DISABLE_MASK)) {
+ if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK)) {
ret = true;
- } else if ((type == VCN_DECODE_RING) && (revision & VCN_BLOCK_DECODE_DISABLE_MASK)) {
+ } else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK)) {
ret = true;
- } else if ((type == VCN_UNIFIED_RING) && (revision & VCN_BLOCK_QUEUE_DISABLE_MASK)) {
+ } else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK)) {
ret = true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index bfa27ea94804..5d3728b027d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -235,6 +235,7 @@ struct amdgpu_vcn {
uint8_t num_vcn_inst;
struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES];
+ uint8_t vcn_config[AMDGPU_MAX_VCN_INSTANCES];
struct amdgpu_vcn_reg internal;
struct mutex vcn_pg_lock;
struct mutex vcn1_jpeg1_workaround;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 04cf9b207e62..f8e574cc0e22 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -283,17 +283,15 @@ static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev)
*data = kmalloc(sizeof(struct amdgpu_virt_ras_err_handler_data), GFP_KERNEL);
if (!*data)
- return -ENOMEM;
+ goto data_failure;
bps = kmalloc_array(align_space, sizeof((*data)->bps), GFP_KERNEL);
- bps_bo = kmalloc_array(align_space, sizeof((*data)->bps_bo), GFP_KERNEL);
+ if (!bps)
+ goto bps_failure;
- if (!bps || !bps_bo) {
- kfree(bps);
- kfree(bps_bo);
- kfree(*data);
- return -ENOMEM;
- }
+ bps_bo = kmalloc_array(align_space, sizeof((*data)->bps_bo), GFP_KERNEL);
+ if (!bps_bo)
+ goto bps_bo_failure;
(*data)->bps = bps;
(*data)->bps_bo = bps_bo;
@@ -303,6 +301,13 @@ static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev)
virt->ras_init_done = true;
return 0;
+
+bps_bo_failure:
+ kfree(bps);
+bps_failure:
+ kfree(*data);
+data_failure:
+ return -ENOMEM;
}
static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev)
@@ -617,19 +622,37 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
- uint64_t bp_block_offset = 0;
- uint32_t bp_block_size = 0;
- struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
-
adev->virt.fw_reserve.p_pf2vf = NULL;
adev->virt.fw_reserve.p_vf2pf = NULL;
adev->virt.vf2pf_update_interval_ms = 0;
- if (adev->mman.fw_vram_usage_va != NULL) {
+ if (adev->bios != NULL) {
adev->virt.vf2pf_update_interval_ms = 2000;
adev->virt.fw_reserve.p_pf2vf =
(struct amd_sriov_msg_pf2vf_info_header *)
+ (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+
+ amdgpu_virt_read_pf2vf_data(adev);
+ }
+
+ if (adev->virt.vf2pf_update_interval_ms != 0) {
+ INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
+ schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
+ }
+}
+
+
+void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
+{
+ uint64_t bp_block_offset = 0;
+ uint32_t bp_block_size = 0;
+ struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
+
+ if (adev->mman.fw_vram_usage_va != NULL) {
+
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)
(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
adev->virt.fw_reserve.p_vf2pf =
(struct amd_sriov_msg_vf2pf_info_header *)
@@ -658,16 +681,10 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
(adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
amdgpu_virt_read_pf2vf_data(adev);
-
- return;
- }
-
- if (adev->virt.vf2pf_update_interval_ms != 0) {
- INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
- schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
}
}
+
void amdgpu_detect_virtualization(struct amdgpu_device *adev)
{
uint32_t reg;
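The amdgpu_virt_init_ras_err_handler_data() change above replaces the combined allocation check with the usual goto-unwind pattern: each failing allocation jumps to a label that frees only what was allocated before it. A generic user-space sketch of the pattern; names and sizes are made up.

#include <stdlib.h>

struct err_data { void *bps; void *bps_bo; };

static int init_err_data(struct err_data **data, size_t n)
{
	*data = malloc(sizeof(**data));
	if (!*data)
		goto data_failure;

	(*data)->bps = calloc(n, 64);
	if (!(*data)->bps)
		goto bps_failure;

	(*data)->bps_bo = calloc(n, 64);
	if (!(*data)->bps_bo)
		goto bps_bo_failure;

	return 0;

bps_bo_failure:
	free((*data)->bps);
bps_failure:
	free(*data);
data_failure:
	return -1;
}

int main(void)
{
	struct err_data *d;

	if (init_err_data(&d, 16))
		return 1;

	free(d->bps_bo);
	free(d->bps);
	free(d);
	return 0;
}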
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 8d4c20bb71c5..9adfb8d63280 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -308,6 +308,7 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+void amdgpu_virt_exchange_data(struct amdgpu_device *adev);
void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev);
void amdgpu_detect_virtualization(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index ce982afeff91..2dcc68e04e84 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -16,6 +16,8 @@
#include "ivsrcid/ivsrcid_vislands30.h"
#include "amdgpu_vkms.h"
#include "amdgpu_display.h"
+#include "atom.h"
+#include "amdgpu_irq.h"
/**
* DOC: amdgpu_vkms
@@ -41,16 +43,16 @@ static const u32 amdgpu_vkms_formats[] = {
static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer)
{
- struct amdgpu_vkms_output *output = container_of(timer,
- struct amdgpu_vkms_output,
- vblank_hrtimer);
- struct drm_crtc *crtc = &output->crtc;
+ struct amdgpu_crtc *amdgpu_crtc = container_of(timer, struct amdgpu_crtc, vblank_timer);
+ struct drm_crtc *crtc = &amdgpu_crtc->base;
+ struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc);
u64 ret_overrun;
bool ret;
- ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
+ ret_overrun = hrtimer_forward_now(&amdgpu_crtc->vblank_timer,
output->period_ns);
- WARN_ON(ret_overrun != 1);
+ if (ret_overrun != 1)
+ DRM_WARN("%s: vblank timer overrun\n", __func__);
ret = drm_crtc_handle_vblank(crtc);
if (!ret)
@@ -65,22 +67,21 @@ static int amdgpu_vkms_enable_vblank(struct drm_crtc *crtc)
unsigned int pipe = drm_crtc_index(crtc);
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc);
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
drm_calc_timestamping_constants(crtc, &crtc->mode);
- hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- out->vblank_hrtimer.function = &amdgpu_vkms_vblank_simulate;
out->period_ns = ktime_set(0, vblank->framedur_ns);
- hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL);
+ hrtimer_start(&amdgpu_crtc->vblank_timer, out->period_ns, HRTIMER_MODE_REL);
return 0;
}
static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc)
{
- struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc);
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- hrtimer_cancel(&out->vblank_hrtimer);
+ hrtimer_cancel(&amdgpu_crtc->vblank_timer);
}
static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
@@ -92,13 +93,14 @@ static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
unsigned int pipe = crtc->index;
struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc);
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
if (!READ_ONCE(vblank->enabled)) {
*vblank_time = ktime_get();
return true;
}
- *vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires);
+ *vblank_time = READ_ONCE(amdgpu_crtc->vblank_timer.node.expires);
if (WARN_ON(*vblank_time == vblank->time))
return true;
@@ -165,6 +167,8 @@ static const struct drm_crtc_helper_funcs amdgpu_vkms_crtc_helper_funcs = {
static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_plane *primary, struct drm_plane *cursor)
{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
int ret;
ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
@@ -176,6 +180,17 @@ static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
drm_crtc_helper_add(crtc, &amdgpu_vkms_crtc_helper_funcs);
+ amdgpu_crtc->crtc_id = drm_crtc_index(crtc);
+ adev->mode_info.crtcs[drm_crtc_index(crtc)] = amdgpu_crtc;
+
+ amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+ amdgpu_crtc->encoder = NULL;
+ amdgpu_crtc->connector = NULL;
+ amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
+
+ hrtimer_init(&amdgpu_crtc->vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ amdgpu_crtc->vblank_timer.function = &amdgpu_vkms_vblank_simulate;
+
return ret;
}
@@ -375,6 +390,7 @@ static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev,
int index)
{
struct drm_plane *plane;
+ uint64_t modifiers[] = {DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_INVALID};
int ret;
plane = kzalloc(sizeof(*plane), GFP_KERNEL);
@@ -385,7 +401,7 @@ static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev,
&amdgpu_vkms_plane_funcs,
amdgpu_vkms_formats,
ARRAY_SIZE(amdgpu_vkms_formats),
- NULL, type, NULL);
+ modifiers, type, NULL);
if (ret) {
kfree(plane);
return ERR_PTR(ret);
@@ -396,12 +412,12 @@ static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev,
return plane;
}
-int amdgpu_vkms_output_init(struct drm_device *dev,
- struct amdgpu_vkms_output *output, int index)
+static int amdgpu_vkms_output_init(struct drm_device *dev, struct
+ amdgpu_vkms_output *output, int index)
{
struct drm_connector *connector = &output->connector;
struct drm_encoder *encoder = &output->encoder;
- struct drm_crtc *crtc = &output->crtc;
+ struct drm_crtc *crtc = &output->crtc.base;
struct drm_plane *primary, *cursor = NULL;
int ret;
@@ -465,6 +481,11 @@ static int amdgpu_vkms_sw_init(void *handle)
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ adev->amdgpu_vkms_output = kcalloc(adev->mode_info.num_crtc,
+ sizeof(struct amdgpu_vkms_output), GFP_KERNEL);
+ if (!adev->amdgpu_vkms_output)
+ return -ENOMEM;
+
adev_to_drm(adev)->max_vblank_count = 0;
adev_to_drm(adev)->mode_config.funcs = &amdgpu_vkms_mode_funcs;
@@ -481,10 +502,6 @@ static int amdgpu_vkms_sw_init(void *handle)
if (r)
return r;
- adev->amdgpu_vkms_output = kcalloc(adev->mode_info.num_crtc, sizeof(struct amdgpu_vkms_output), GFP_KERNEL);
- if (!adev->amdgpu_vkms_output)
- return -ENOMEM;
-
/* allocate crtcs, encoders, connectors */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = amdgpu_vkms_output_init(adev_to_drm(adev), &adev->amdgpu_vkms_output[i], i);
@@ -507,12 +524,13 @@ static int amdgpu_vkms_sw_fini(void *handle)
if (adev->mode_info.crtcs[i])
hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer);
- kfree(adev->mode_info.bios_hardcoded_edid);
- kfree(adev->amdgpu_vkms_output);
-
drm_kms_helper_poll_fini(adev_to_drm(adev));
+ drm_mode_config_cleanup(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = false;
+
+ kfree(adev->mode_info.bios_hardcoded_edid);
+ kfree(adev->amdgpu_vkms_output);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h
index 97f1b79c0724..4f8722ff37c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h
@@ -10,15 +10,14 @@
#define YRES_MAX 16384
#define drm_crtc_to_amdgpu_vkms_output(target) \
- container_of(target, struct amdgpu_vkms_output, crtc)
+ container_of(target, struct amdgpu_vkms_output, crtc.base)
extern const struct amdgpu_ip_block_version amdgpu_vkms_ip_block;
struct amdgpu_vkms_output {
- struct drm_crtc crtc;
+ struct amdgpu_crtc crtc;
struct drm_encoder encoder;
struct drm_connector connector;
- struct hrtimer vblank_hrtimer;
ktime_t period_ns;
struct drm_pending_vblank_event *event;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index a96ae4c0e040..b37fc7d7d2c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -53,7 +53,7 @@
* can be mapped as snooped (cached system pages) or unsnooped
* (uncached system pages).
* Each VM has an ID associated with it and there is a page table
- * associated with each VMID. When execting a command buffer,
+ * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
* buffer. VMIDs are allocated dynamically as commands are submitted.
* The userspace drivers maintain their own address space and the kernel
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 567df2db23ac..a38c6a747fa4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -265,6 +265,11 @@ static ssize_t amdgpu_xgmi_show_error(struct device *dev,
ficaa_pie_ctl_in = AMDGPU_XGMI_SET_FICAA(0x200);
ficaa_pie_status_in = AMDGPU_XGMI_SET_FICAA(0x208);
+ if ((!adev->df.funcs) ||
+ (!adev->df.funcs->get_fica) ||
+ (!adev->df.funcs->set_fica))
+ return -EINVAL;
+
fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_ctl_in);
if (fica_out != 0x1f)
pr_err("xGMI error counters not enabled!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 6134ed964027..a92d86e12718 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -469,7 +469,7 @@ int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder)
if (amdgpu_connector->use_digital &&
(amdgpu_connector->audio == AMDGPU_AUDIO_ENABLE))
return ATOM_ENCODER_MODE_HDMI;
- else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
+ else if (connector->display_info.is_hdmi &&
(amdgpu_connector->audio == AMDGPU_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else if (amdgpu_connector->use_digital)
@@ -488,7 +488,7 @@ int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder)
if (amdgpu_audio != 0) {
if (amdgpu_connector->audio == AMDGPU_AUDIO_ENABLE)
return ATOM_ENCODER_MODE_HDMI;
- else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
+ else if (connector->display_info.is_hdmi &&
(amdgpu_connector->audio == AMDGPU_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else
@@ -506,7 +506,7 @@ int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder)
} else if (amdgpu_audio != 0) {
if (amdgpu_connector->audio == AMDGPU_AUDIO_ENABLE)
return ATOM_ENCODER_MODE_HDMI;
- else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
+ else if (connector->display_info.is_hdmi &&
(amdgpu_connector->audio == AMDGPU_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index b200b9e722d9..8318ee8339f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2092,22 +2092,18 @@ static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder)
return 1;
else
return 0;
- break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
if (dig->linkb)
return 3;
else
return 2;
- break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
if (dig->linkb)
return 5;
else
return 4;
- break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
return 6;
- break;
default:
DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index e7dfeb466a0e..dbe7442fb25c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -7707,8 +7707,19 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 3):
- clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) |
- ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL);
+ preempt_disable();
+ clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh);
+ clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh);
+ hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh);
+ /* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over
+ * roughly every 42 seconds.
+ */
+ if (hi_check != clock_hi) {
+ clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh);
+ clock_hi = hi_check;
+ }
+ preempt_enable();
+ clock = clock_lo | (clock_hi << 32ULL);
break;
default:
preempt_disable();
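
For reference, a minimal generic sketch of the high/low/high re-read pattern the hunk above switches to, assuming hypothetical rd_hi()/rd_lo() 32-bit accessors in place of the SOC15 register macros. At 100 MHz the lower word wraps every 2^32 / 100e6 ≈ 42.9 s, so the upper word is sampled twice to detect a carry between the two reads:

#include <stdint.h>

extern uint32_t rd_hi(void);	/* hypothetical upper-word read */
extern uint32_t rd_lo(void);	/* hypothetical lower-word read */

static uint64_t read_counter64(void)
{
	uint32_t hi = rd_hi();
	uint32_t lo = rd_lo();
	uint32_t hi_check = rd_hi();

	if (hi_check != hi) {	/* lower word carried between the reads */
		lo = rd_lo();	/* re-read the lower word after the carry */
		hi = hi_check;
	}
	return ((uint64_t)hi << 32) | lo;
}
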
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index b4b80f27b894..9189fb85a4dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -63,6 +63,13 @@
#define mmGCEA_PROBE_MAP 0x070c
#define mmGCEA_PROBE_MAP_BASE_IDX 0
+#define GFX9_RLCG_GC_WRITE_OLD (0x8 << 28)
+#define GFX9_RLCG_GC_WRITE (0x0 << 28)
+#define GFX9_RLCG_GC_READ (0x1 << 28)
+#define GFX9_RLCG_VFGATE_DISABLED 0x4000000
+#define GFX9_RLCG_WRONG_OPERATION_TYPE 0x2000000
+#define GFX9_RLCG_NOT_IN_RANGE 0x1000000
+
MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
@@ -140,6 +147,11 @@ MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin");
#define mmTCP_CHAN_STEER_5_ARCT 0x0b0c
#define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX 0
+#define mmGOLDEN_TSC_COUNT_UPPER_Renoir 0x0025
+#define mmGOLDEN_TSC_COUNT_UPPER_Renoir_BASE_IDX 1
+#define mmGOLDEN_TSC_COUNT_LOWER_Renoir 0x0026
+#define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX 1
+
enum ta_ras_gfx_subblock {
/*CPC*/
TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
@@ -734,7 +746,7 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};
-static void gfx_v9_0_rlcg_w(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
+static u32 gfx_v9_0_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32_t flag)
{
static void *scratch_reg0;
static void *scratch_reg1;
@@ -743,21 +755,20 @@ static void gfx_v9_0_rlcg_w(struct amdgpu_device *adev, u32 offset, u32 v, u32 f
static void *spare_int;
static uint32_t grbm_cntl;
static uint32_t grbm_idx;
+ uint32_t i = 0;
+ uint32_t retries = 50000;
+ u32 ret = 0;
+ u32 tmp;
scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
- scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4;
- scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4;
+ scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG2_BASE_IDX] + mmSCRATCH_REG2)*4;
+ scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG3_BASE_IDX] + mmSCRATCH_REG3)*4;
spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;
grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
- if (amdgpu_sriov_runtime(adev)) {
- pr_err("shouldn't call rlcg write register during runtime\n");
- return;
- }
-
if (offset == grbm_cntl || offset == grbm_idx) {
if (offset == grbm_cntl)
writel(v, scratch_reg2);
@@ -766,41 +777,95 @@ static void gfx_v9_0_rlcg_w(struct amdgpu_device *adev, u32 offset, u32 v, u32 f
writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
} else {
- uint32_t i = 0;
- uint32_t retries = 50000;
-
+ /*
+ * SCRATCH_REG0 = read/write value
+ * SCRATCH_REG1[30:28] = command
+ * SCRATCH_REG1[19:0] = address in dword
+ * SCRATCH_REG1[26:24] = Error reporting
+ */
writel(v, scratch_reg0);
- writel(offset | 0x80000000, scratch_reg1);
+ writel(offset | flag, scratch_reg1);
writel(1, spare_int);
- for (i = 0; i < retries; i++) {
- u32 tmp;
+ for (i = 0; i < retries; i++) {
tmp = readl(scratch_reg1);
- if (!(tmp & 0x80000000))
+ if (!(tmp & flag))
break;
udelay(10);
}
- if (i >= retries)
- pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
+
+ if (i >= retries) {
+ if (amdgpu_sriov_reg_indirect_gc(adev)) {
+ if (tmp & GFX9_RLCG_VFGATE_DISABLED)
+ pr_err("The vfgate is disabled, program reg:0x%05x failed!\n", offset);
+ else if (tmp & GFX9_RLCG_WRONG_OPERATION_TYPE)
+ pr_err("Wrong operation type, program reg:0x%05x failed!\n", offset);
+ else if (tmp & GFX9_RLCG_NOT_IN_RANGE)
+ pr_err("The register is not in range, program reg:0x%05x failed!\n", offset);
+ else
+ pr_err("Unknown error type, program reg:0x%05x failed!\n", offset);
+ } else
+ pr_err("timeout: rlcg program reg:0x%05x failed!\n", offset);
+ }
+ }
+
+ ret = readl(scratch_reg0);
+
+ return ret;
+}
+
+static bool gfx_v9_0_get_rlcg_flag(struct amdgpu_device *adev, u32 acc_flags, u32 hwip,
+ int write, u32 *rlcg_flag)
+{
+
+ switch (hwip) {
+ case GC_HWIP:
+ if (amdgpu_sriov_reg_indirect_gc(adev)) {
+ *rlcg_flag = write ? GFX9_RLCG_GC_WRITE : GFX9_RLCG_GC_READ;
+
+ return true;
+ /* only in newer versions are AMDGPU_REGS_NO_KIQ and AMDGPU_REGS_RLC enabled simultaneously */
+ } else if ((acc_flags & AMDGPU_REGS_RLC) && !(acc_flags & AMDGPU_REGS_NO_KIQ) && write) {
+ *rlcg_flag = GFX9_RLCG_GC_WRITE_OLD;
+ return true;
+ }
+
+ break;
+ default:
+ return false;
}
+ return false;
+}
+
+static u32 gfx_v9_0_sriov_rreg(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip)
+{
+ u32 rlcg_flag;
+
+ if (!amdgpu_sriov_runtime(adev) && gfx_v9_0_get_rlcg_flag(adev, acc_flags, hwip, 0, &rlcg_flag))
+ return gfx_v9_0_rlcg_rw(adev, offset, 0, rlcg_flag);
+
+ if (acc_flags & AMDGPU_REGS_NO_KIQ)
+ return RREG32_NO_KIQ(offset);
+ else
+ return RREG32(offset);
}
static void gfx_v9_0_sriov_wreg(struct amdgpu_device *adev, u32 offset,
- u32 v, u32 acc_flags, u32 hwip)
+ u32 value, u32 acc_flags, u32 hwip)
{
- if ((acc_flags & AMDGPU_REGS_RLC) &&
- amdgpu_sriov_fullaccess(adev)) {
- gfx_v9_0_rlcg_w(adev, offset, v, acc_flags);
+ u32 rlcg_flag;
+ if (!amdgpu_sriov_runtime(adev) && gfx_v9_0_get_rlcg_flag(adev, acc_flags, hwip, 1, &rlcg_flag)) {
+ gfx_v9_0_rlcg_rw(adev, offset, value, rlcg_flag);
return;
}
if (acc_flags & AMDGPU_REGS_NO_KIQ)
- WREG32_NO_KIQ(offset, v);
+ WREG32_NO_KIQ(offset, value);
else
- WREG32(offset, v);
+ WREG32(offset, value);
}
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
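
A rough illustration of the request word the new gfx_v9_0_rlcg_rw() writes to SCRATCH_REG1, per the comment in the hunk above: bits [19:0] carry the register offset in dwords, bits [30:28] the command, and bits [26:24] report errors back; completion is detected by polling SCRATCH_REG1 until the command bits clear. The mask names and helpers below are illustrative assumptions, not driver symbols:

#include <stdint.h>

#define RLCG_ADDR_MASK	0x000fffffu	/* [19:0]  register offset in dwords */
#define RLCG_ERR_MASK	0x07000000u	/* [26:24] error status from the RLC */

/* 'cmd' is already shifted into [30:28], e.g. read = 0x1 << 28. */
static inline uint32_t rlcg_request(uint32_t dword_offset, uint32_t cmd)
{
	return (dword_offset & RLCG_ADDR_MASK) | cmd;
}

/* The request is complete once the RLC has cleared the command bits. */
static inline int rlcg_done(uint32_t scratch_reg1, uint32_t cmd)
{
	return (scratch_reg1 & cmd) == 0;
}

/* Any error bit set means the access was rejected (vfgate, range, op type). */
static inline int rlcg_failed(uint32_t scratch_reg1)
{
	return (scratch_reg1 & RLCG_ERR_MASK) != 0;
}
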
@@ -3065,8 +3130,8 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_GDS |
AMD_PG_SUPPORT_RLC_SMU_HS)) {
- WREG32(mmRLC_JUMP_TABLE_RESTORE,
- adev->gfx.rlc.cp_table_gpu_addr >> 8);
+ WREG32_SOC15(GC, 0, mmRLC_JUMP_TABLE_RESTORE,
+ adev->gfx.rlc.cp_table_gpu_addr >> 8);
gfx_v9_0_init_gfx_power_gating(adev);
}
}
@@ -4055,9 +4120,10 @@ static int gfx_v9_0_hw_fini(void *handle)
gfx_v9_0_cp_enable(adev, false);
- /* Skip suspend with A+A reset */
- if (adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) {
- dev_dbg(adev->dev, "Device in reset. Skipping RLC halt\n");
+ /* Skip stopping RLC with A+A reset or when RLC controls GFX clock */
+ if ((adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) ||
+ (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2))) {
+ dev_dbg(adev->dev, "Skipping RLC halt\n");
return 0;
}
@@ -4238,19 +4304,38 @@ failed_kiq_read:
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
- uint64_t clock;
+ uint64_t clock, clock_lo, clock_hi, hi_check;
- amdgpu_gfx_off_ctrl(adev, false);
- mutex_lock(&adev->gfx.gpu_clock_mutex);
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) {
- clock = gfx_v9_0_kiq_read_clock(adev);
- } else {
- WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
- clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
- ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 3, 0):
+ preempt_disable();
+ clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
+ clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
+ hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
+ /* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over
+ * roughly every 42 seconds.
+ */
+ if (hi_check != clock_hi) {
+ clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
+ clock_hi = hi_check;
+ }
+ preempt_enable();
+ clock = clock_lo | (clock_hi << 32ULL);
+ break;
+ default:
+ amdgpu_gfx_off_ctrl(adev, false);
+ mutex_lock(&adev->gfx.gpu_clock_mutex);
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) {
+ clock = gfx_v9_0_kiq_read_clock(adev);
+ } else {
+ WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+ clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
+ ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ }
+ mutex_unlock(&adev->gfx.gpu_clock_mutex);
+ amdgpu_gfx_off_ctrl(adev, true);
+ break;
}
- mutex_unlock(&adev->gfx.gpu_clock_mutex);
- amdgpu_gfx_off_ctrl(adev, true);
return clock;
}
@@ -5110,7 +5195,7 @@ static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
if (amdgpu_sriov_is_pp_one_vf(adev))
data = RREG32_NO_KIQ(reg);
else
- data = RREG32(reg);
+ data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
@@ -5166,6 +5251,7 @@ static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
.start = gfx_v9_0_rlc_start,
.update_spm_vmid = gfx_v9_0_update_spm_vmid,
.sriov_wreg = gfx_v9_0_sriov_wreg,
+ .sriov_rreg = gfx_v9_0_sriov_rreg,
.is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
};
@@ -5771,16 +5857,16 @@ static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- mec_int_cntl = RREG32(mec_int_cntl_reg);
+ mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
TIME_STAMP_INT_ENABLE, 0);
- WREG32(mec_int_cntl_reg, mec_int_cntl);
+ WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- mec_int_cntl = RREG32(mec_int_cntl_reg);
+ mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
TIME_STAMP_INT_ENABLE, 1);
- WREG32(mec_int_cntl_reg, mec_int_cntl);
+ WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 480e41847d7c..ec4d5e15b766 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -162,7 +162,6 @@ static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
ENABLE_ADVANCED_DRIVER_MODEL, 1);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
MTYPE, MTYPE_UC);/* XXX for emulation. */
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
index 14c1c1a297dd..6e0ace2fbfab 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
@@ -196,7 +196,6 @@ static void gfxhub_v2_0_init_tlb_regs(struct amdgpu_device *adev)
ENABLE_ADVANCED_DRIVER_MODEL, 1);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
- tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
MTYPE, MTYPE_UC); /* UC, uncached */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index e80d1dc43079..b4eddf6e98a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
@@ -197,7 +197,6 @@ static void gfxhub_v2_1_init_tlb_regs(struct amdgpu_device *adev)
ENABLE_ADVANCED_DRIVER_MODEL, 1);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
- tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
MTYPE, MTYPE_UC); /* UC, uncached */
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 3ec5ff5a6dbe..3d5d47a799e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -107,7 +107,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
/* Process it only if it's the first fault for this address */
if (entry->ih != &adev->irq.ih_soft &&
- amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
+ amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
entry->timestamp))
return 1;
@@ -914,12 +914,6 @@ static int gmc_v10_0_sw_init(void *handle)
return r;
}
- if (adev->gmc.xgmi.supported) {
- r = adev->gfxhub.funcs->get_xgmi_info(adev);
- if (r)
- return r;
- }
-
r = gmc_v10_0_mc_init(adev);
if (r)
return r;
@@ -992,10 +986,14 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
return -EINVAL;
}
+ if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
+ goto skip_pin_bo;
+
r = amdgpu_gart_table_vram_pin(adev);
if (r)
return r;
+skip_pin_bo:
r = adev->gfxhub.funcs->gart_enable(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 492ebed2915b..63b890f1e8af 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -515,10 +515,10 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
int r;
+ u32 tmp;
adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
if (!adev->gmc.vram_width) {
- u32 tmp;
int chansize, numchan;
/* Get VRAM information */
@@ -562,8 +562,15 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
adev->gmc.vram_width = numchan * chansize;
}
/* size in MB on si */
- adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
- adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ tmp = RREG32(mmCONFIG_MEMSIZE);
+ /* some boards may have garbage in the upper 16 bits */
+ if (tmp & 0xffff0000) {
+ DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
+ if (tmp & 0xffff)
+ tmp &= 0xffff;
+ }
+ adev->gmc.mc_vram_size = tmp * 1024ULL * 1024ULL;
+ adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
if (!(adev->flags & AMD_IS_APU)) {
r = amdgpu_device_resize_fb_bar(adev);
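
A standalone sketch of the VRAM size sanity check added above, assuming the raw CONFIG_MEMSIZE value reports the size in MB in its lower 16 bits; when the upper half contains garbage but the lower half still holds a plausible size, only the lower half is trusted:

#include <stdint.h>

static uint64_t vram_size_bytes(uint32_t raw_memsize)
{
	/* Some boards report garbage in the upper 16 bits of CONFIG_MEMSIZE. */
	if ((raw_memsize & 0xffff0000u) && (raw_memsize & 0x0000ffffu))
		raw_memsize &= 0x0000ffffu;

	return (uint64_t)raw_memsize * 1024ULL * 1024ULL;	/* MB -> bytes */
}
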
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index cb82404df534..57f2729a7bd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -478,9 +478,18 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
hub = &adev->vmhub[j];
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
- tmp = RREG32(reg);
+
+ if (j == AMDGPU_GFXHUB_0)
+ tmp = RREG32_SOC15_IP(GC, reg);
+ else
+ tmp = RREG32_SOC15_IP(MMHUB, reg);
+
tmp &= ~bits;
- WREG32(reg, tmp);
+
+ if (j == AMDGPU_GFXHUB_0)
+ WREG32_SOC15_IP(GC, reg, tmp);
+ else
+ WREG32_SOC15_IP(MMHUB, reg, tmp);
}
}
break;
@@ -489,9 +498,18 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
hub = &adev->vmhub[j];
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
- tmp = RREG32(reg);
+
+ if (j == AMDGPU_GFXHUB_0)
+ tmp = RREG32_SOC15_IP(GC, reg);
+ else
+ tmp = RREG32_SOC15_IP(MMHUB, reg);
+
tmp |= bits;
- WREG32(reg, tmp);
+
+ if (j == AMDGPU_GFXHUB_0)
+ WREG32_SOC15_IP(GC, reg, tmp);
+ else
+ WREG32_SOC15_IP(MMHUB, reg, tmp);
}
}
break;
@@ -523,7 +541,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
/* Process it only if it's the first fault for this address */
if (entry->ih != &adev->irq.ih_soft &&
- amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
+ amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
entry->timestamp))
return 1;
@@ -788,9 +806,12 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
if (use_semaphore) {
for (j = 0; j < adev->usec_timeout; j++) {
- /* a read return value of 1 means semaphore acuqire */
- tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
- hub->eng_distance * eng);
+ /* a read return value of 1 means semaphore acquire */
+ if (vmhub == AMDGPU_GFXHUB_0)
+ tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng);
+ else
+ tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng);
+
if (tmp & 0x1)
break;
udelay(1);
@@ -801,8 +822,10 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
}
do {
- WREG32_NO_KIQ(hub->vm_inv_eng0_req +
- hub->eng_distance * eng, inv_req);
+ if (vmhub == AMDGPU_GFXHUB_0)
+ WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
+ else
+ WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
/*
* Issue a dummy read to wait for the ACK register to
@@ -815,8 +838,11 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
hub->eng_distance * eng);
for (j = 0; j < adev->usec_timeout; j++) {
- tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
- hub->eng_distance * eng);
+ if (vmhub == AMDGPU_GFXHUB_0)
+ tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_ack + hub->eng_distance * eng);
+ else
+ tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_ack + hub->eng_distance * eng);
+
if (tmp & (1 << vmid))
break;
udelay(1);
@@ -827,13 +853,16 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
} while (inv_req);
/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
- if (use_semaphore)
+ if (use_semaphore) {
/*
* add semaphore release after invalidation,
* write with 0 means semaphore release
*/
- WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
- hub->eng_distance * eng, 0);
+ if (vmhub == AMDGPU_GFXHUB_0)
+ WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0);
+ else
+ WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0);
+ }
spin_unlock(&adev->gmc.invalidate_lock);
@@ -1294,7 +1323,8 @@ static int gmc_v9_0_late_init(void *handle)
if (!amdgpu_sriov_vf(adev) &&
(adev->ip_versions[UMC_HWIP][0] == IP_VERSION(6, 0, 0))) {
if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) {
- if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
+ if (adev->df.funcs &&
+ adev->df.funcs->enable_ecc_force_par_wr_rmw)
adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
}
}
@@ -1505,9 +1535,11 @@ static int gmc_v9_0_sw_init(void *handle)
chansize = 64;
else
chansize = 128;
-
- numchan = adev->df.funcs->get_hbm_channel_number(adev);
- adev->gmc.vram_width = numchan * chansize;
+ if (adev->df.funcs &&
+ adev->df.funcs->get_hbm_channel_number) {
+ numchan = adev->df.funcs->get_hbm_channel_number(adev);
+ adev->gmc.vram_width = numchan * chansize;
+ }
}
adev->gmc.vram_type = vram_type;
@@ -1596,12 +1628,6 @@ static int gmc_v9_0_sw_init(void *handle)
}
adev->need_swiotlb = drm_need_swiotlb(44);
- if (adev->gmc.xgmi.supported) {
- r = adev->gfxhub.funcs->get_xgmi_info(adev);
- if (r)
- return r;
- }
-
r = gmc_v9_0_mc_init(adev);
if (r)
return r;
@@ -1714,10 +1740,14 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
return -EINVAL;
}
+ if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
+ goto skip_pin_bo;
+
r = amdgpu_gart_table_vram_pin(adev);
if (r)
return r;
+skip_pin_bo:
r = adev->gfxhub.funcs->gart_enable(adev);
if (r)
return r;
@@ -1742,7 +1772,7 @@ static int gmc_v9_0_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool value;
- int r, i;
+ int i;
/* The sequence of these two function calls matters.*/
gmc_v9_0_init_golden_registers(adev);
@@ -1777,9 +1807,7 @@ static int gmc_v9_0_hw_init(void *handle)
if (adev->umc.funcs && adev->umc.funcs->init_registers)
adev->umc.funcs->init_registers(adev);
- r = gmc_v9_0_gart_enable(adev);
-
- return r;
+ return gmc_v9_0_gart_enable(adev);
}
/**
@@ -1808,6 +1836,14 @@ static int gmc_v9_0_hw_fini(void *handle)
return 0;
}
+ /*
+ * Pair the operations done in gmc_v9_0_hw_init and thus maintain
+ * a correct cached state for GMC. Otherwise, the "gate again"
+ * operation on S3 resume will fail due to a wrong cached state.
+ */
+ if (adev->mmhub.funcs->update_power_gating)
+ adev->mmhub.funcs->update_power_gating(adev, false);
+
amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index a99953833820..1da2ec692057 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -145,7 +145,6 @@ static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
ENABLE_ADVANCED_DRIVER_MODEL, 1);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
MTYPE, MTYPE_UC);/* XXX for emulation. */
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
@@ -302,10 +301,10 @@ static void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
if (amdgpu_sriov_vf(adev))
return;
- if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB) {
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true);
-
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
+ amdgpu_dpm_set_powergating_by_smu(adev,
+ AMD_IP_BLOCK_TYPE_GMC,
+ enable);
}
static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
index f80a14a1b82d..f5f7181f9af5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
@@ -165,7 +165,6 @@ static void mmhub_v1_7_init_tlb_regs(struct amdgpu_device *adev)
ENABLE_ADVANCED_DRIVER_MODEL, 1);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
MTYPE, MTYPE_UC);/* XXX for emulation. */
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index 25f8e93e5ec3..3718ff610ab2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -267,7 +267,6 @@ static void mmhub_v2_0_init_tlb_regs(struct amdgpu_device *adev)
ENABLE_ADVANCED_DRIVER_MODEL, 1);
tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
- tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
MTYPE, MTYPE_UC); /* UC, uncached */
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
index a11d60ec6321..9e16da28505a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
@@ -194,7 +194,6 @@ static void mmhub_v2_3_init_tlb_regs(struct amdgpu_device *adev)
ENABLE_ADVANCED_DRIVER_MODEL, 1);
tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
- tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
MTYPE, MTYPE_UC); /* UC, uncached */
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index c4ef822bbe8c..ff49eeaf7882 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -190,8 +190,6 @@ static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid)
tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
- ECO_BITS, 0);
- tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
MTYPE, MTYPE_UC);/* XXX for emulation. */
tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
ATC_EN, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 23b066bcffb2..0077e738db31 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -252,11 +252,12 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
* otherwise the mailbox msg will be ruined/reset by
* the VF FLR.
*/
- if (!down_write_trylock(&adev->reset_sem))
+ if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
return;
+ down_write(&adev->reset_sem);
+
amdgpu_virt_fini_data_exchange(adev);
- atomic_set(&adev->in_gpu_reset, 1);
xgpu_ai_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index bd3b23171579..f9aa4d0bb638 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -26,7 +26,7 @@
#define AI_MAILBOX_POLL_ACK_TIMEDOUT 500
#define AI_MAILBOX_POLL_MSG_TIMEDOUT 6000
-#define AI_MAILBOX_POLL_FLR_TIMEDOUT 5000
+#define AI_MAILBOX_POLL_FLR_TIMEDOUT 10000
#define AI_MAILBOX_POLL_MSG_REP_MAX 11
enum idh_request {
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index a35e6d87e537..477d0dde19c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -281,11 +281,12 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
* otherwise the mailbox msg will be ruined/reset by
* the VF FLR.
*/
- if (!down_write_trylock(&adev->reset_sem))
+ if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
return;
+ down_write(&adev->reset_sem);
+
amdgpu_virt_fini_data_exchange(adev);
- atomic_set(&adev->in_gpu_reset, 1);
xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
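
Both FLR work handlers above move from down_write_trylock() to first claiming the in_gpu_reset flag with atomic_cmpxchg() and only then blocking on the reset semaphore. A generic sketch of that ordering, not the driver code itself:

#include <linux/atomic.h>
#include <linux/rwsem.h>

static atomic_t in_reset = ATOMIC_INIT(0);
static DECLARE_RWSEM(reset_sem);

static void flr_work_sketch(void)
{
	/* Claim the reset first; a concurrent reset path bails out early. */
	if (atomic_cmpxchg(&in_reset, 0, 1) != 0)
		return;

	/* Now it is safe to block until the semaphore becomes available. */
	down_write(&reset_sem);

	/* ... halt data exchange, notify the host, wait for FLR completion ... */

	up_write(&reset_sem);
	atomic_set(&in_reset, 0);
}
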
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 1d8414c3fadb..8ce5b8ca1fd7 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -160,6 +160,7 @@ static int navi10_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
tmp = RREG32(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1);
/* enable_intr field is only valid in ring0 */
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
@@ -275,10 +276,8 @@ static int navi10_ih_enable_ring(struct amdgpu_device *adev,
tmp = navi10_ih_rb_cntl(ih, tmp);
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
- if (ih == &adev->irq.ih1) {
- tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
+ if (ih == &adev->irq.ih1)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
- }
if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
@@ -319,7 +318,6 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
{
struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
u32 ih_chicken;
- u32 tmp;
int ret;
int i;
@@ -363,15 +361,6 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell,
ih[0]->doorbell_index);
- tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
- tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
- CLIENT18_IS_STORM_CLIENT, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
-
- tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
- tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
-
pci_set_master(adev->pdev);
/* enable interrupts */
@@ -420,12 +409,19 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;
- wptr = le32_to_cpu(*ih->wptr_cpu);
- ih_regs = &ih->ih_regs;
+ if (ih == &adev->irq.ih) {
+ /* Only ring0 supports writeback. On other rings fall back
+ * to register-based code with overflow checking below.
+ */
+ wptr = le32_to_cpu(*ih->wptr_cpu);
- if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
- goto out;
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+ }
+ ih_regs = &ih->ih_regs;
+
+ /* Double check that the overflow wasn't already cleared. */
wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
@@ -513,15 +509,11 @@ static int navi10_ih_self_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- uint32_t wptr = cpu_to_le32(entry->src_data[0]);
-
switch (entry->ring_id) {
case 1:
- *adev->irq.ih1.wptr_cpu = wptr;
schedule_work(&adev->irq.ih1_work);
break;
case 2:
- *adev->irq.ih2.wptr_cpu = wptr;
schedule_work(&adev->irq.ih2_work);
break;
default: break;
@@ -724,6 +716,7 @@ static const struct amd_ip_funcs navi10_ih_ip_funcs = {
static const struct amdgpu_ih_funcs navi10_ih_funcs = {
.get_wptr = navi10_ih_get_wptr,
.decode_iv = amdgpu_ih_decode_iv_helper,
+ .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
.set_rptr = navi10_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index 4ecd2b5808ce..ee7cab37dfd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -359,6 +359,10 @@ static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnPCIE_CONFIG_CNTL, data);
+
+ if (amdgpu_sriov_vf(adev))
+ adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}
#define NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT 0x00000000 // off by default, no gains over L1
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 0d2d629e2d6a..4bbacf1be25a 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -276,6 +276,10 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnPCIE_CI_CNTL, data);
+
+ if (amdgpu_sriov_vf(adev))
+ adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}
static void nbio_v6_1_program_ltr(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index 3c00666a13e1..37a4039fdfc5 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -273,7 +273,9 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
{
-
+ if (amdgpu_sriov_vf(adev))
+ adev->rmmio_remap.reg_offset =
+ SOC15_REG_OFFSET(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}
const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
index 8f2a315e7c73..3444332ea110 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
@@ -371,6 +371,10 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL), data);
}
+
+ if (amdgpu_sriov_vf(adev))
+ adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
+ regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}
const struct amdgpu_nbio_funcs nbio_v7_2_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index b8bd03d16dba..dc5e93756fea 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -362,7 +362,9 @@ const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg_ald = {
static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{
-
+ if (amdgpu_sriov_vf(adev))
+ adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}
static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev)
@@ -692,6 +694,9 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
{
uint32_t def, data;
+ if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4))
+ return;
+
def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 59eafa31c626..2ec1ffb36b1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -183,6 +183,7 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
switch (adev->ip_versions[UVD_HWIP][0]) {
case IP_VERSION(3, 0, 0):
case IP_VERSION(3, 0, 64):
+ case IP_VERSION(3, 0, 192):
if (amdgpu_sriov_vf(adev)) {
if (encode)
*codecs = &sriov_sc_video_codecs_encode;
@@ -731,8 +732,10 @@ static int nv_common_early_init(void *handle)
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
- adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
+ if (!amdgpu_sriov_vf(adev)) {
+ adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
+ adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
+ }
adev->smc_rreg = NULL;
adev->smc_wreg = NULL;
adev->pcie_rreg = &nv_pcie_rreg;
@@ -1032,7 +1035,7 @@ static int nv_common_hw_init(void *handle)
* for the purpose of expose those registers
* to process space
*/
- if (adev->nbio.funcs->remap_hdp_registers)
+ if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
nv_enable_doorbell_aperture(adev, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 2176ef85f137..d0e76b36d4ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -277,13 +277,15 @@ static bool psp_v11_0_is_sos_alive(struct psp_context *psp)
return sol_reg != 0x0;
}
-static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
+static int psp_v11_0_bootloader_load_component(struct psp_context *psp,
+ struct psp_bin_desc *bin_desc,
+ enum psp_bootloader_cmd bl_cmd)
{
int ret;
uint32_t psp_gfxdrv_command_reg = 0;
struct amdgpu_device *adev = psp->adev;
- /* Check tOS sign of life register to confirm sys driver and sOS
+ /* Check sOS sign of life register to confirm sys driver and sOS
* have already been loaded.
*/
if (psp_v11_0_is_sos_alive(psp))
@@ -293,13 +295,13 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
if (ret)
return ret;
- /* Copy PSP KDB binary to memory */
- psp_copy_fw(psp, psp->kdb.start_addr, psp->kdb.size_bytes);
+ /* Copy PSP System Driver binary to memory */
+ psp_copy_fw(psp, bin_desc->start_addr, bin_desc->size_bytes);
- /* Provide the PSP KDB to bootloader */
+ /* Provide the sys driver to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
(uint32_t)(psp->fw_pri_mc_addr >> 20));
- psp_gfxdrv_command_reg = PSP_BL__LOAD_KEY_DATABASE;
+ psp_gfxdrv_command_reg = bl_cmd;
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
psp_gfxdrv_command_reg);
@@ -308,69 +310,19 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
return ret;
}
-static int psp_v11_0_bootloader_load_spl(struct psp_context *psp)
+static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
{
- int ret;
- uint32_t psp_gfxdrv_command_reg = 0;
- struct amdgpu_device *adev = psp->adev;
-
- /* Check tOS sign of life register to confirm sys driver and sOS
- * are already been loaded.
- */
- if (psp_v11_0_is_sos_alive(psp))
- return 0;
-
- ret = psp_v11_0_wait_for_bootloader(psp);
- if (ret)
- return ret;
-
- /* Copy PSP SPL binary to memory */
- psp_copy_fw(psp, psp->spl.start_addr, psp->spl.size_bytes);
-
- /* Provide the PSP SPL to bootloader */
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
- (uint32_t)(psp->fw_pri_mc_addr >> 20));
- psp_gfxdrv_command_reg = PSP_BL__LOAD_TOS_SPL_TABLE;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
- psp_gfxdrv_command_reg);
-
- ret = psp_v11_0_wait_for_bootloader(psp);
+ return psp_v11_0_bootloader_load_component(psp, &psp->kdb, PSP_BL__LOAD_KEY_DATABASE);
+}
- return ret;
+static int psp_v11_0_bootloader_load_spl(struct psp_context *psp)
+{
+ return psp_v11_0_bootloader_load_component(psp, &psp->spl, PSP_BL__LOAD_TOS_SPL_TABLE);
}
static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
{
- int ret;
- uint32_t psp_gfxdrv_command_reg = 0;
- struct amdgpu_device *adev = psp->adev;
-
- /* Check sOS sign of life register to confirm sys driver and sOS
- * are already been loaded.
- */
- if (psp_v11_0_is_sos_alive(psp))
- return 0;
-
- ret = psp_v11_0_wait_for_bootloader(psp);
- if (ret)
- return ret;
-
- /* Copy PSP System Driver binary to memory */
- psp_copy_fw(psp, psp->sys.start_addr, psp->sys.size_bytes);
-
- /* Provide the sys driver to bootloader */
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
- (uint32_t)(psp->fw_pri_mc_addr >> 20));
- psp_gfxdrv_command_reg = PSP_BL__LOAD_SYSDRV;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
- psp_gfxdrv_command_reg);
-
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
- ret = psp_v11_0_wait_for_bootloader(psp);
-
- return ret;
+ return psp_v11_0_bootloader_load_component(psp, &psp->sys, PSP_BL__LOAD_SYSDRV);
}
static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 853d1511b889..81e033549dda 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -481,8 +481,6 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
* sdma_v5_0_ring_emit_mem_sync - flush the IB by graphics cache rinse
*
* @ring: amdgpu ring pointer
- * @job: job to retrieve vmid from
- * @ib: IB object to schedule
*
* flush the IB by graphics cache rinse.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 4d4d1aa51b8a..d3d6d5b045b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -368,8 +368,6 @@ static void sdma_v5_2_ring_emit_ib(struct amdgpu_ring *ring,
* sdma_v5_2_ring_emit_mem_sync - flush the IB by graphics cache rinse
*
* @ring: amdgpu ring pointer
- * @job: job to retrieve vmid from
- * @ib: IB object to schedule
*
* flush the IB by graphics cache rinse.
*/
@@ -544,9 +542,6 @@ static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
}
for (i = 0; i < adev->sdma.num_instances; i++) {
- f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
- f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
- AUTO_CTXSW_ENABLE, enable ? 1 : 0);
if (enable && amdgpu_sdma_phase_quantum) {
WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
phase_quantum);
@@ -555,7 +550,13 @@ static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
phase_quantum);
}
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
+
+ if (!amdgpu_sriov_vf(adev)) {
+ f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+ AUTO_CTXSW_ENABLE, enable ? 1 : 0);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
+ }
}
}
@@ -578,10 +579,12 @@ static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
sdma_v5_2_rlc_stop(adev);
}
- for (i = 0; i < adev->sdma.num_instances; i++) {
- f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
- f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
+ if (!amdgpu_sriov_vf(adev)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
+ }
}
}
@@ -610,7 +613,8 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
ring = &adev->sdma.instance[i].ring;
wb_offset = (ring->rptr_offs * 4);
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+ if (!amdgpu_sriov_vf(adev))
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
@@ -685,32 +689,34 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
sdma_v5_2_ring_set_wptr(ring);
/* set minor_ptr_update to 0 after wptr programed */
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
- /* set utc l1 enable flag always to 1 */
- temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
-
- /* enable MCBP */
- temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
-
- /* Set up RESP_MODE to non-copy addresses */
- temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
- temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
-
- /* program default cache read and write policy */
- temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
- /* clean read policy and write policy bits */
- temp &= 0xFF0FFF;
- temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
- (CACHE_WRITE_POLICY_L2__DEFAULT << 14) |
- SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK);
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
+ /* SRIOV VF has no control of any of registers below */
if (!amdgpu_sriov_vf(adev)) {
+ /* set utc l1 enable flag always to 1 */
+ temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
+
+ /* enable MCBP */
+ temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
+
+ /* Set up RESP_MODE to non-copy addresses */
+ temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
+
+ /* program default cache read and write policy */
+ temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
+ /* clean read policy and write policy bits */
+ temp &= 0xFF0FFF;
+ temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
+ (CACHE_WRITE_POLICY_L2__DEFAULT << 14) |
+ SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK);
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
+
/* unhalt engine */
temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
@@ -1438,13 +1444,14 @@ static int sdma_v5_2_set_trap_irq_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{
u32 sdma_cntl;
-
u32 reg_offset = sdma_v5_2_get_reg_offset(adev, type, mmSDMA0_CNTL);
- sdma_cntl = RREG32(reg_offset);
- sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
- state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
- WREG32(reg_offset, sdma_cntl);
+ if (!amdgpu_sriov_vf(adev)) {
+ sdma_cntl = RREG32(reg_offset);
+ sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32(reg_offset, sdma_cntl);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 0c316a2d42ed..0fc1747e4a70 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -744,7 +744,7 @@ static void soc15_reg_base_init(struct amdgpu_device *adev)
vega10_reg_base_init(adev);
break;
case CHIP_RENOIR:
- /* It's safe to do ip discovery here for Renior,
+ /* It's safe to do ip discovery here for Renoir,
* it doesn't support SRIOV. */
if (amdgpu_discovery) {
r = amdgpu_discovery_reg_base_init(adev);
@@ -971,8 +971,10 @@ static int soc15_common_early_init(void *handle)
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
- adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
+ if (!amdgpu_sriov_vf(adev)) {
+ adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
+ adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
+ }
adev->smc_rreg = NULL;
adev->smc_wreg = NULL;
adev->pcie_rreg = &soc15_pcie_rreg;
@@ -1236,7 +1238,9 @@ static int soc15_common_sw_init(void *handle)
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_add_irq_id(adev);
- adev->df.funcs->sw_init(adev);
+ if (adev->df.funcs &&
+ adev->df.funcs->sw_init)
+ adev->df.funcs->sw_init(adev);
return 0;
}
@@ -1248,7 +1252,10 @@ static int soc15_common_sw_fini(void *handle)
if (adev->nbio.ras_funcs &&
adev->nbio.ras_funcs->ras_fini)
adev->nbio.ras_funcs->ras_fini(adev);
- adev->df.funcs->sw_fini(adev);
+
+ if (adev->df.funcs &&
+ adev->df.funcs->sw_fini)
+ adev->df.funcs->sw_fini(adev);
return 0;
}
@@ -1285,7 +1292,7 @@ static int soc15_common_hw_init(void *handle)
* for the purpose of expose those registers
* to process space
*/
- if (adev->nbio.funcs->remap_hdp_registers)
+ if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index 8a9ca87d8663..473767e03676 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -51,6 +51,8 @@
#define RREG32_SOC15_IP(ip, reg) __RREG32_SOC15_RLC__(reg, 0, ip##_HWIP)
+#define RREG32_SOC15_IP_NO_KIQ(ip, reg) __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ, ip##_HWIP)
+
#define RREG32_SOC15_NO_KIQ(ip, inst, reg) \
__RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
AMDGPU_REGS_NO_KIQ, ip##_HWIP)
@@ -65,6 +67,9 @@
#define WREG32_SOC15_IP(ip, reg, value) \
__WREG32_SOC15_RLC__(reg, value, 0, ip##_HWIP)
+#define WREG32_SOC15_IP_NO_KIQ(ip, reg, value) \
+ __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ, ip##_HWIP)
+
#define WREG32_SOC15_NO_KIQ(ip, inst, reg, value) \
__WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
value, AMDGPU_REGS_NO_KIQ, ip##_HWIP)
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
index f7ec3fe134e5..6dd1e19e8d43 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
@@ -50,6 +50,165 @@ static inline uint32_t get_umc_v6_7_reg_offset(struct amdgpu_device *adev,
return adev->umc.channel_offs * ch_inst + UMC_V6_7_INST_DIST * umc_inst;
}
+static inline uint32_t get_umc_v6_7_channel_index(struct amdgpu_device *adev,
+ uint32_t umc_inst,
+ uint32_t ch_inst)
+{
+ return adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+}
+
+static void umc_v6_7_ecc_info_query_correctable_error_count(struct amdgpu_device *adev,
+ uint32_t channel_index,
+ unsigned long *error_count)
+{
+ uint32_t ecc_err_cnt;
+ uint64_t mc_umc_status;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ /*
+ * select the lower chip and check the error count
+ * skip add error count, calc error counter only from mca_umc_status
+ */
+ ecc_err_cnt = ras->umc_ecc.ecc[channel_index].ce_count_lo_chip;
+
+ /*
+ * select the higher chip and check the err counter
+ * skip add error count, calc error counter only from mca_umc_status
+ */
+ ecc_err_cnt = ras->umc_ecc.ecc[channel_index].ce_count_hi_chip;
+
+ /* check for SRAM correctable error
+ MCUMC_STATUS is a 64 bit register */
+ mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status;
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
+ *error_count += 1;
+}
+
+static void umc_v6_7_ecc_info_querry_uncorrectable_error_count(struct amdgpu_device *adev,
+ uint32_t channel_index,
+ unsigned long *error_count)
+{
+ uint64_t mc_umc_status;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ /* check the MCUMC_STATUS */
+ mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status;
+ if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
+ *error_count += 1;
+}
+
+static void umc_v6_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+ uint32_t channel_index = 0;
+
+ /*TODO: driver needs to toggle DF Cstate to ensure
+ * safe access of UMC registers. Will add the protection */
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_v6_7_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+ channel_index = get_umc_v6_7_channel_index(adev,
+ umc_inst,
+ ch_inst);
+ umc_v6_7_ecc_info_query_correctable_error_count(adev,
+ channel_index,
+ &(err_data->ce_count));
+ umc_v6_7_ecc_info_querry_uncorrectable_error_count(adev,
+ channel_index,
+ &(err_data->ue_count));
+ }
+}
+
+static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
+ struct ras_err_data *err_data,
+ uint32_t umc_reg_offset,
+ uint32_t ch_inst,
+ uint32_t umc_inst)
+{
+ uint64_t mc_umc_status, err_addr, retired_page;
+ struct eeprom_table_record *err_rec;
+ uint32_t channel_index;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ channel_index =
+ adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+
+ mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status;
+
+ if (mc_umc_status == 0)
+ return;
+
+ if (!err_data->err_addr)
+ return;
+
+ err_rec = &err_data->err_addr[err_data->err_addr_cnt];
+
+ /* calculate error address if ue/ce error is detected */
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+
+ err_addr = ras->umc_ecc.ecc[channel_index].mca_umc_addr;
+ err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+
+ /* translate umc channel address to soc pa, 3 parts are included */
+ retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
+ ADDR_OF_256B_BLOCK(channel_index) |
+ OFFSET_IN_256B_BLOCK(err_addr);
+
+ /* we only save ue error information currently, ce is skipped */
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
+ == 1) {
+ err_rec->address = err_addr;
+ /* page frame address is saved */
+ err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+ err_rec->ts = (uint64_t)ktime_get_real_seconds();
+ err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+ err_rec->cu = 0;
+ err_rec->mem_channel = channel_index;
+ err_rec->mcumc_id = umc_inst;
+
+ err_data->err_addr_cnt++;
+ }
+ }
+}
+
+static void umc_v6_7_ecc_info_query_ras_error_address(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+
+	/*TODO: driver needs to toggle DF Cstate to ensure
+	 * safe access of UMC registers. Will add the protection
+	 * when firmware interface is ready */
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_v6_7_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+ umc_v6_7_ecc_info_query_error_address(adev,
+ err_data,
+ umc_reg_offset,
+ ch_inst,
+ umc_inst);
+ }
+}
+
static void umc_v6_7_query_correctable_error_count(struct amdgpu_device *adev,
uint32_t umc_reg_offset,
unsigned long *error_count)
@@ -327,4 +486,6 @@ const struct amdgpu_umc_ras_funcs umc_v6_7_ras_funcs = {
.query_ras_error_count = umc_v6_7_query_ras_error_count,
.query_ras_error_address = umc_v6_7_query_ras_error_address,
.query_ras_poison_mode = umc_v6_7_query_ras_poison_mode,
+ .ecc_info_query_ras_error_count = umc_v6_7_ecc_info_query_ras_error_count,
+ .ecc_info_query_ras_error_address = umc_v6_7_ecc_info_query_ras_error_address,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index d54d720b3cf6..3799226defc0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -246,6 +246,13 @@ static int vcn_v1_0_suspend(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool idle_work_unexecuted;
+
+ idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work);
+ if (idle_work_unexecuted) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, false);
+ }
r = vcn_v1_0_hw_fini(adev);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index a9ca6988009e..3070466f54e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -640,6 +640,7 @@ const struct amd_ip_funcs vega10_ih_ip_funcs = {
static const struct amdgpu_ih_funcs vega10_ih_funcs = {
.get_wptr = vega10_ih_get_wptr,
.decode_iv = amdgpu_ih_decode_iv_helper,
+ .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
.set_rptr = vega10_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index f51dfc38ac65..3b4eb8285943 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -688,6 +688,7 @@ const struct amd_ip_funcs vega20_ih_ip_funcs = {
static const struct amdgpu_ih_funcs vega20_ih_funcs = {
.get_wptr = vega20_ih_get_wptr,
.decode_iv = amdgpu_ih_decode_iv_helper,
+ .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
.set_rptr = vega20_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index f6233019f042..d60576ce10cd 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -43,15 +43,15 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
*/
if ((ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) &&
- dev->device_info->asic_family == CHIP_HAWAII) {
+ dev->adev->asic_type == CHIP_HAWAII) {
struct cik_ih_ring_entry *tmp_ihre =
(struct cik_ih_ring_entry *)patched_ihre;
*patched_flag = true;
*tmp_ihre = *ihre;
- vmid = f2g->read_vmid_from_vmfault_reg(dev->kgd);
- ret = f2g->get_atc_vmid_pasid_mapping_info(dev->kgd, vmid, &pasid);
+ vmid = f2g->read_vmid_from_vmfault_reg(dev->adev);
+ ret = f2g->get_atc_vmid_pasid_mapping_info(dev->adev, vmid, &pasid);
tmp_ihre->ring_id &= 0x000000ff;
tmp_ihre->ring_id |= vmid << 8;
@@ -113,7 +113,7 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev,
kfd_process_vm_fault(dev->dqm, pasid);
memset(&info, 0, sizeof(info));
- amdgpu_amdkfd_gpuvm_get_vm_fault_info(dev->kgd, &info);
+ amdgpu_amdkfd_gpuvm_get_vm_fault_info(dev->adev, &info);
if (!info.page_addr && !info.status)
return;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 24ebd61395d8..4bfc0c8ab764 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -321,7 +321,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
/* Return gpu_id as doorbell offset for mmap usage */
args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL;
args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
- if (KFD_IS_SOC15(dev->device_info->asic_family))
+ if (KFD_IS_SOC15(dev))
/* On SOC15 ASICs, include the doorbell offset within the
* process doorbell frame, which is 2 pages.
*/
@@ -580,7 +580,7 @@ static int kfd_ioctl_dbg_register(struct file *filep,
if (!dev)
return -EINVAL;
- if (dev->device_info->asic_family == CHIP_CARRIZO) {
+ if (dev->adev->asic_type == CHIP_CARRIZO) {
pr_debug("kfd_ioctl_dbg_register not supported on CZ\n");
return -EINVAL;
}
@@ -631,7 +631,7 @@ static int kfd_ioctl_dbg_unregister(struct file *filep,
if (!dev || !dev->dbgmgr)
return -EINVAL;
- if (dev->device_info->asic_family == CHIP_CARRIZO) {
+ if (dev->adev->asic_type == CHIP_CARRIZO) {
pr_debug("kfd_ioctl_dbg_unregister not supported on CZ\n");
return -EINVAL;
}
@@ -676,7 +676,7 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep,
if (!dev)
return -EINVAL;
- if (dev->device_info->asic_family == CHIP_CARRIZO) {
+ if (dev->adev->asic_type == CHIP_CARRIZO) {
pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n");
return -EINVAL;
}
@@ -784,7 +784,7 @@ static int kfd_ioctl_dbg_wave_control(struct file *filep,
if (!dev)
return -EINVAL;
- if (dev->device_info->asic_family == CHIP_CARRIZO) {
+ if (dev->adev->asic_type == CHIP_CARRIZO) {
pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n");
return -EINVAL;
}
@@ -851,7 +851,7 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
dev = kfd_device_by_id(args->gpu_id);
if (dev)
/* Reading GPU clock counter from KGD */
- args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(dev->kgd);
+ args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(dev->adev);
else
/* Node without GPU resource */
args->gpu_clock_counter = 0;
@@ -1041,7 +1041,7 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
goto out_unlock;
}
- err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->kgd,
+ err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->adev,
mem, &kern_addr, &size);
if (err) {
pr_err("Failed to map event page to kernel\n");
@@ -1051,7 +1051,7 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
err = kfd_event_page_set(p, kern_addr, size);
if (err) {
pr_err("Failed to set event page\n");
- amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kfd->kgd, mem);
+ amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kfd->adev, mem);
goto out_unlock;
}
@@ -1137,7 +1137,7 @@ static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&
pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va)
dev->kfd2kgd->set_scratch_backing_va(
- dev->kgd, args->va_addr, pdd->qpd.vmid);
+ dev->adev, args->va_addr, pdd->qpd.vmid);
return 0;
@@ -1158,7 +1158,7 @@ static int kfd_ioctl_get_tile_config(struct file *filep,
if (!dev)
return -EINVAL;
- amdgpu_amdkfd_get_tile_config(dev->kgd, &config);
+ amdgpu_amdkfd_get_tile_config(dev->adev, &config);
args->gb_addr_config = config.gb_addr_config;
args->num_banks = config.num_banks;
@@ -1244,7 +1244,7 @@ bool kfd_dev_is_large_bar(struct kfd_dev *dev)
if (dev->use_iommu_v2)
return false;
- amdgpu_amdkfd_get_local_mem_info(dev->kgd, &mem_info);
+ amdgpu_amdkfd_get_local_mem_info(dev->adev, &mem_info);
if (mem_info.local_mem_size_private == 0 &&
mem_info.local_mem_size_public > 0)
return true;
@@ -1313,7 +1313,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
err = -EINVAL;
goto err_unlock;
}
- offset = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd);
+ offset = dev->adev->rmmio_remap.bus_addr;
if (!offset) {
err = -ENOMEM;
goto err_unlock;
@@ -1321,7 +1321,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
}
err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
- dev->kgd, args->va_addr, args->size,
+ dev->adev, args->va_addr, args->size,
pdd->drm_priv, (struct kgd_mem **) &mem, &offset,
flags);
@@ -1353,7 +1353,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
return 0;
err_free:
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem,
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem,
pdd->drm_priv, NULL);
err_unlock:
mutex_unlock(&p->mutex);
@@ -1399,7 +1399,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
goto err_unlock;
}
- ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd,
+ ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev,
(struct kgd_mem *)mem, pdd->drm_priv, &size);
/* If freeing the buffer failed, leave the handle in place for
@@ -1484,7 +1484,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
goto get_mem_obj_from_handle_failed;
}
err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
- peer->kgd, (struct kgd_mem *)mem,
+ peer->adev, (struct kgd_mem *)mem,
peer_pdd->drm_priv, &table_freed);
if (err) {
pr_err("Failed to map to gpu %d/%d\n",
@@ -1496,7 +1496,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
mutex_unlock(&p->mutex);
- err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, (struct kgd_mem *) mem, true);
+ err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true);
if (err) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
goto sync_memory_failed;
@@ -1593,7 +1593,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
goto get_mem_obj_from_handle_failed;
}
err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
- peer->kgd, (struct kgd_mem *)mem, peer_pdd->drm_priv);
+ peer->adev, (struct kgd_mem *)mem, peer_pdd->drm_priv);
if (err) {
pr_err("Failed to unmap from gpu %d/%d\n",
i, args->n_devices);
@@ -1603,8 +1603,8 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
}
mutex_unlock(&p->mutex);
- if (dev->device_info->asic_family == CHIP_ALDEBARAN) {
- err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd,
+ if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) {
+ err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev,
(struct kgd_mem *) mem, true);
if (err) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
@@ -1680,7 +1680,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep,
{
struct kfd_ioctl_get_dmabuf_info_args *args = data;
struct kfd_dev *dev = NULL;
- struct kgd_dev *dma_buf_kgd;
+ struct amdgpu_device *dmabuf_adev;
void *metadata_buffer = NULL;
uint32_t flags;
unsigned int i;
@@ -1700,15 +1700,15 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep,
}
/* Get dmabuf info from KGD */
- r = amdgpu_amdkfd_get_dmabuf_info(dev->kgd, args->dmabuf_fd,
- &dma_buf_kgd, &args->size,
+ r = amdgpu_amdkfd_get_dmabuf_info(dev->adev, args->dmabuf_fd,
+ &dmabuf_adev, &args->size,
metadata_buffer, args->metadata_size,
&args->metadata_size, &flags);
if (r)
goto exit;
/* Reverse-lookup gpu_id from kgd pointer */
- dev = kfd_device_by_kgd(dma_buf_kgd);
+ dev = kfd_device_by_adev(dmabuf_adev);
if (!dev) {
r = -EINVAL;
goto exit;
@@ -1758,7 +1758,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
goto err_unlock;
}
- r = amdgpu_amdkfd_gpuvm_import_dmabuf(dev->kgd, dmabuf,
+ r = amdgpu_amdkfd_gpuvm_import_dmabuf(dev->adev, dmabuf,
args->va_addr, pdd->drm_priv,
(struct kgd_mem **)&mem, &size,
NULL);
@@ -1779,7 +1779,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
return 0;
err_free:
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem,
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem,
pdd->drm_priv, NULL);
err_unlock:
mutex_unlock(&p->mutex);
@@ -2066,7 +2066,7 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
- address = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd);
+ address = dev->adev->rmmio_remap.bus_addr;
vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
VM_DONTDUMP | VM_PFNMAP;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index cfedfb1e8596..f187596faf66 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -1340,7 +1340,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
int ret;
unsigned int num_cu_shared;
- switch (kdev->device_info->asic_family) {
+ switch (kdev->adev->asic_type) {
case CHIP_KAVERI:
pcache_info = kaveri_cache_info;
num_of_cache_types = ARRAY_SIZE(kaveri_cache_info);
@@ -1377,67 +1377,71 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
pcache_info = vegam_cache_info;
num_of_cache_types = ARRAY_SIZE(vegam_cache_info);
break;
- case CHIP_VEGA10:
- pcache_info = vega10_cache_info;
- num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
- break;
- case CHIP_VEGA12:
- pcache_info = vega12_cache_info;
- num_of_cache_types = ARRAY_SIZE(vega12_cache_info);
- break;
- case CHIP_VEGA20:
- case CHIP_ARCTURUS:
- pcache_info = vega20_cache_info;
- num_of_cache_types = ARRAY_SIZE(vega20_cache_info);
- break;
- case CHIP_ALDEBARAN:
- pcache_info = aldebaran_cache_info;
- num_of_cache_types = ARRAY_SIZE(aldebaran_cache_info);
- break;
- case CHIP_RAVEN:
- pcache_info = raven_cache_info;
- num_of_cache_types = ARRAY_SIZE(raven_cache_info);
- break;
- case CHIP_RENOIR:
- pcache_info = renoir_cache_info;
- num_of_cache_types = ARRAY_SIZE(renoir_cache_info);
- break;
- case CHIP_NAVI10:
- case CHIP_NAVI12:
- case CHIP_CYAN_SKILLFISH:
- pcache_info = navi10_cache_info;
- num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
- break;
- case CHIP_NAVI14:
- pcache_info = navi14_cache_info;
- num_of_cache_types = ARRAY_SIZE(navi14_cache_info);
- break;
- case CHIP_SIENNA_CICHLID:
- pcache_info = sienna_cichlid_cache_info;
- num_of_cache_types = ARRAY_SIZE(sienna_cichlid_cache_info);
- break;
- case CHIP_NAVY_FLOUNDER:
- pcache_info = navy_flounder_cache_info;
- num_of_cache_types = ARRAY_SIZE(navy_flounder_cache_info);
- break;
- case CHIP_DIMGREY_CAVEFISH:
- pcache_info = dimgrey_cavefish_cache_info;
- num_of_cache_types = ARRAY_SIZE(dimgrey_cavefish_cache_info);
- break;
- case CHIP_VANGOGH:
- pcache_info = vangogh_cache_info;
- num_of_cache_types = ARRAY_SIZE(vangogh_cache_info);
- break;
- case CHIP_BEIGE_GOBY:
- pcache_info = beige_goby_cache_info;
- num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info);
- break;
- case CHIP_YELLOW_CARP:
- pcache_info = yellow_carp_cache_info;
- num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info);
- break;
default:
- return -EINVAL;
+ switch(KFD_GC_VERSION(kdev)) {
+ case IP_VERSION(9, 0, 1):
+ pcache_info = vega10_cache_info;
+ num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
+ break;
+ case IP_VERSION(9, 2, 1):
+ pcache_info = vega12_cache_info;
+ num_of_cache_types = ARRAY_SIZE(vega12_cache_info);
+ break;
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ pcache_info = vega20_cache_info;
+ num_of_cache_types = ARRAY_SIZE(vega20_cache_info);
+ break;
+ case IP_VERSION(9, 4, 2):
+ pcache_info = aldebaran_cache_info;
+ num_of_cache_types = ARRAY_SIZE(aldebaran_cache_info);
+ break;
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 2):
+ pcache_info = raven_cache_info;
+ num_of_cache_types = ARRAY_SIZE(raven_cache_info);
+ break;
+ case IP_VERSION(9, 3, 0):
+ pcache_info = renoir_cache_info;
+ num_of_cache_types = ARRAY_SIZE(renoir_cache_info);
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ pcache_info = navi10_cache_info;
+ num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
+ break;
+ case IP_VERSION(10, 1, 1):
+ pcache_info = navi14_cache_info;
+ num_of_cache_types = ARRAY_SIZE(navi14_cache_info);
+ break;
+ case IP_VERSION(10, 3, 0):
+ pcache_info = sienna_cichlid_cache_info;
+ num_of_cache_types = ARRAY_SIZE(sienna_cichlid_cache_info);
+ break;
+ case IP_VERSION(10, 3, 2):
+ pcache_info = navy_flounder_cache_info;
+ num_of_cache_types = ARRAY_SIZE(navy_flounder_cache_info);
+ break;
+ case IP_VERSION(10, 3, 4):
+ pcache_info = dimgrey_cavefish_cache_info;
+ num_of_cache_types = ARRAY_SIZE(dimgrey_cavefish_cache_info);
+ break;
+ case IP_VERSION(10, 3, 1):
+ pcache_info = vangogh_cache_info;
+ num_of_cache_types = ARRAY_SIZE(vangogh_cache_info);
+ break;
+ case IP_VERSION(10, 3, 5):
+ pcache_info = beige_goby_cache_info;
+ num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info);
+ break;
+ case IP_VERSION(10, 3, 3):
+ pcache_info = yellow_carp_cache_info;
+ num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info);
+ break;
+ default:
+ return -EINVAL;
+ }
}
*size_filled = 0;
@@ -1963,8 +1967,6 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
struct crat_subtype_iolink *sub_type_hdr,
uint32_t proximity_domain)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kdev->kgd;
-
*avail_size -= sizeof(struct crat_subtype_iolink);
if (*avail_size < 0)
return -ENOMEM;
@@ -1981,7 +1983,7 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
/* Fill in IOLINK subtype.
* TODO: Fill-in other fields of iolink subtype
*/
- if (adev->gmc.xgmi.connected_to_cpu) {
+ if (kdev->adev->gmc.xgmi.connected_to_cpu) {
/*
* with host gpu xgmi link, host can access gpu memory whether
* or not pcie bar type is large, so always create bidirectional
@@ -1990,19 +1992,19 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
sub_type_hdr->num_hops_xgmi = 1;
- if (adev->asic_type == CHIP_ALDEBARAN) {
+ if (KFD_GC_VERSION(kdev) == IP_VERSION(9, 4, 2)) {
sub_type_hdr->minimum_bandwidth_mbs =
amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(
- kdev->kgd, NULL, true);
+ kdev->adev, NULL, true);
sub_type_hdr->maximum_bandwidth_mbs =
sub_type_hdr->minimum_bandwidth_mbs;
}
} else {
sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS;
sub_type_hdr->minimum_bandwidth_mbs =
- amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->kgd, true);
+ amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->adev, true);
sub_type_hdr->maximum_bandwidth_mbs =
- amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->kgd, false);
+ amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->adev, false);
}
sub_type_hdr->proximity_domain_from = proximity_domain;
@@ -2044,11 +2046,11 @@ static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
sub_type_hdr->proximity_domain_from = proximity_domain_from;
sub_type_hdr->proximity_domain_to = proximity_domain_to;
sub_type_hdr->num_hops_xgmi =
- amdgpu_amdkfd_get_xgmi_hops_count(kdev->kgd, peer_kdev->kgd);
+ amdgpu_amdkfd_get_xgmi_hops_count(kdev->adev, peer_kdev->adev);
sub_type_hdr->maximum_bandwidth_mbs =
- amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->kgd, peer_kdev->kgd, false);
+ amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, peer_kdev->adev, false);
sub_type_hdr->minimum_bandwidth_mbs = sub_type_hdr->maximum_bandwidth_mbs ?
- amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->kgd, NULL, true) : 0;
+ amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, NULL, true) : 0;
return 0;
}
@@ -2114,7 +2116,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT;
cu->proximity_domain = proximity_domain;
- amdgpu_amdkfd_get_cu_info(kdev->kgd, &cu_info);
+ amdgpu_amdkfd_get_cu_info(kdev->adev, &cu_info);
cu->num_simd_per_cu = cu_info.simd_per_cu;
cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
cu->max_waves_simd = cu_info.max_waves_per_simd;
@@ -2145,7 +2147,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
* report the total FB size (public+private) as a single
* private heap.
*/
- amdgpu_amdkfd_get_local_mem_info(kdev->kgd, &local_mem_info);
+ amdgpu_amdkfd_get_local_mem_info(kdev->adev, &local_mem_info);
sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
sub_type_hdr->length);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index 159add0f5aaa..1e30717b5253 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -41,7 +41,7 @@
static void dbgdev_address_watch_disable_nodiq(struct kfd_dev *dev)
{
- dev->kfd2kgd->address_watch_disable(dev->kgd);
+ dev->kfd2kgd->address_watch_disable(dev->adev);
}
static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
@@ -322,7 +322,7 @@ static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev,
pr_debug("\t\t%30s\n", "* * * * * * * * * * * * * * * * * *");
pdd->dev->kfd2kgd->address_watch_execute(
- dbgdev->dev->kgd,
+ dbgdev->dev->adev,
i,
cntl.u32All,
addrHi.u32All,
@@ -420,7 +420,7 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
aw_reg_add_dword =
dbgdev->dev->kfd2kgd->address_watch_get_offset(
- dbgdev->dev->kgd,
+ dbgdev->dev->adev,
i,
ADDRESS_WATCH_REG_CNTL);
@@ -431,7 +431,7 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
aw_reg_add_dword =
dbgdev->dev->kfd2kgd->address_watch_get_offset(
- dbgdev->dev->kgd,
+ dbgdev->dev->adev,
i,
ADDRESS_WATCH_REG_ADDR_HI);
@@ -441,7 +441,7 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
aw_reg_add_dword =
dbgdev->dev->kfd2kgd->address_watch_get_offset(
- dbgdev->dev->kgd,
+ dbgdev->dev->adev,
i,
ADDRESS_WATCH_REG_ADDR_LO);
@@ -457,7 +457,7 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
aw_reg_add_dword =
dbgdev->dev->kfd2kgd->address_watch_get_offset(
- dbgdev->dev->kgd,
+ dbgdev->dev->adev,
i,
ADDRESS_WATCH_REG_CNTL);
@@ -752,7 +752,7 @@ static int dbgdev_wave_control_nodiq(struct kfd_dbgdev *dbgdev,
pr_debug("\t\t %30s\n", "* * * * * * * * * * * * * * * * * *");
- return dbgdev->dev->kfd2kgd->wave_control_execute(dbgdev->dev->kgd,
+ return dbgdev->dev->kfd2kgd->wave_control_execute(dbgdev->dev->adev,
reg_gfx_index.u32All,
reg_sq_cmd.u32All);
}
@@ -784,7 +784,7 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) {
status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info
- (dev->kgd, vmid, &queried_pasid);
+ (dev->adev, vmid, &queried_pasid);
if (status && queried_pasid == p->pasid) {
pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n",
@@ -811,7 +811,7 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
/* for non DIQ we need to patch the VMID: */
reg_sq_cmd.bits.vm_id = vmid;
- dev->kfd2kgd->wave_control_execute(dev->kgd,
+ dev->kfd2kgd->wave_control_execute(dev->adev,
reg_gfx_index.u32All,
reg_sq_cmd.u32All);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 3b119db16003..127d41d0e4f0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -53,770 +53,310 @@ extern const struct kfd2kgd_calls aldebaran_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd;
-#ifdef KFD_SUPPORT_IOMMU_V2
-static const struct kfd_device_info kaveri_device_info = {
- .asic_family = CHIP_KAVERI,
- .asic_name = "kaveri",
- .gfx_target_version = 70000,
- .max_pasid_bits = 16,
- /* max num of queues for KV.TODO should be a dynamic value */
- .max_no_of_hqd = 24,
- .doorbell_size = 4,
- .ih_ring_entry_size = 4 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_cik,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = false,
- .needs_iommu_device = true,
- .needs_pci_atomics = false,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info carrizo_device_info = {
- .asic_family = CHIP_CARRIZO,
- .asic_name = "carrizo",
- .gfx_target_version = 80001,
- .max_pasid_bits = 16,
- /* max num of queues for CZ.TODO should be a dynamic value */
- .max_no_of_hqd = 24,
- .doorbell_size = 4,
- .ih_ring_entry_size = 4 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_cik,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = true,
- .needs_pci_atomics = false,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info raven_device_info = {
- .asic_family = CHIP_RAVEN,
- .asic_name = "raven",
- .gfx_target_version = 90002,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = true,
- .needs_pci_atomics = true,
- .num_sdma_engines = 1,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-#endif
-
-#ifdef CONFIG_DRM_AMDGPU_CIK
-static const struct kfd_device_info hawaii_device_info = {
- .asic_family = CHIP_HAWAII,
- .asic_name = "hawaii",
- .gfx_target_version = 70001,
- .max_pasid_bits = 16,
- /* max num of queues for KV.TODO should be a dynamic value */
- .max_no_of_hqd = 24,
- .doorbell_size = 4,
- .ih_ring_entry_size = 4 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_cik,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = false,
- .needs_iommu_device = false,
- .needs_pci_atomics = false,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-#endif
-
-static const struct kfd_device_info tonga_device_info = {
- .asic_family = CHIP_TONGA,
- .asic_name = "tonga",
- .gfx_target_version = 80002,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 4,
- .ih_ring_entry_size = 4 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_cik,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = false,
- .needs_iommu_device = false,
- .needs_pci_atomics = true,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info fiji_device_info = {
- .asic_family = CHIP_FIJI,
- .asic_name = "fiji",
- .gfx_target_version = 80003,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 4,
- .ih_ring_entry_size = 4 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_cik,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = false,
- .needs_pci_atomics = true,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info fiji_vf_device_info = {
- .asic_family = CHIP_FIJI,
- .asic_name = "fiji",
- .gfx_target_version = 80003,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 4,
- .ih_ring_entry_size = 4 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_cik,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = false,
- .needs_pci_atomics = false,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-
-static const struct kfd_device_info polaris10_device_info = {
- .asic_family = CHIP_POLARIS10,
- .asic_name = "polaris10",
- .gfx_target_version = 80003,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 4,
- .ih_ring_entry_size = 4 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_cik,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = false,
- .needs_pci_atomics = true,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info polaris10_vf_device_info = {
- .asic_family = CHIP_POLARIS10,
- .asic_name = "polaris10",
- .gfx_target_version = 80003,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 4,
- .ih_ring_entry_size = 4 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_cik,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = false,
- .needs_pci_atomics = false,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info polaris11_device_info = {
- .asic_family = CHIP_POLARIS11,
- .asic_name = "polaris11",
- .gfx_target_version = 80003,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 4,
- .ih_ring_entry_size = 4 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_cik,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = false,
- .needs_pci_atomics = true,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info polaris12_device_info = {
- .asic_family = CHIP_POLARIS12,
- .asic_name = "polaris12",
- .gfx_target_version = 80003,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 4,
- .ih_ring_entry_size = 4 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_cik,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = false,
- .needs_pci_atomics = true,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info vegam_device_info = {
- .asic_family = CHIP_VEGAM,
- .asic_name = "vegam",
- .gfx_target_version = 80003,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 4,
- .ih_ring_entry_size = 4 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_cik,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = false,
- .needs_pci_atomics = true,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info vega10_device_info = {
- .asic_family = CHIP_VEGA10,
- .asic_name = "vega10",
- .gfx_target_version = 90000,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = false,
- .needs_pci_atomics = false,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info vega10_vf_device_info = {
- .asic_family = CHIP_VEGA10,
- .asic_name = "vega10",
- .gfx_target_version = 90000,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = false,
- .needs_pci_atomics = false,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info vega12_device_info = {
- .asic_family = CHIP_VEGA12,
- .asic_name = "vega12",
- .gfx_target_version = 90004,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = false,
- .needs_pci_atomics = false,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info vega20_device_info = {
- .asic_family = CHIP_VEGA20,
- .asic_name = "vega20",
- .gfx_target_version = 90006,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = false,
- .needs_pci_atomics = false,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 8,
-};
-
-static const struct kfd_device_info arcturus_device_info = {
- .asic_family = CHIP_ARCTURUS,
- .asic_name = "arcturus",
- .gfx_target_version = 90008,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = false,
- .needs_pci_atomics = false,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 6,
- .num_sdma_queues_per_engine = 8,
-};
-
-static const struct kfd_device_info aldebaran_device_info = {
- .asic_family = CHIP_ALDEBARAN,
- .asic_name = "aldebaran",
- .gfx_target_version = 90010,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = false,
- .needs_pci_atomics = false,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 3,
- .num_sdma_queues_per_engine = 8,
-};
-
-static const struct kfd_device_info renoir_device_info = {
- .asic_family = CHIP_RENOIR,
- .asic_name = "renoir",
- .gfx_target_version = 90012,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = false,
- .needs_pci_atomics = false,
- .num_sdma_engines = 1,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info navi10_device_info = {
- .asic_family = CHIP_NAVI10,
- .asic_name = "navi10",
- .gfx_target_version = 100100,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .needs_iommu_device = false,
- .supports_cwsr = true,
- .needs_pci_atomics = true,
- .no_atomic_fw_version = 145,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 8,
-};
-
-static const struct kfd_device_info navi12_device_info = {
- .asic_family = CHIP_NAVI12,
- .asic_name = "navi12",
- .gfx_target_version = 100101,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .needs_iommu_device = false,
- .supports_cwsr = true,
- .needs_pci_atomics = true,
- .no_atomic_fw_version = 145,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 8,
-};
-
-static const struct kfd_device_info navi14_device_info = {
- .asic_family = CHIP_NAVI14,
- .asic_name = "navi14",
- .gfx_target_version = 100102,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .needs_iommu_device = false,
- .supports_cwsr = true,
- .needs_pci_atomics = true,
- .no_atomic_fw_version = 145,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 8,
-};
-
-static const struct kfd_device_info sienna_cichlid_device_info = {
- .asic_family = CHIP_SIENNA_CICHLID,
- .asic_name = "sienna_cichlid",
- .gfx_target_version = 100300,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .needs_iommu_device = false,
- .supports_cwsr = true,
- .needs_pci_atomics = true,
- .no_atomic_fw_version = 92,
- .num_sdma_engines = 4,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 8,
-};
-
-static const struct kfd_device_info navy_flounder_device_info = {
- .asic_family = CHIP_NAVY_FLOUNDER,
- .asic_name = "navy_flounder",
- .gfx_target_version = 100301,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .needs_iommu_device = false,
- .supports_cwsr = true,
- .needs_pci_atomics = true,
- .no_atomic_fw_version = 92,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 8,
-};
-
-static const struct kfd_device_info vangogh_device_info = {
- .asic_family = CHIP_VANGOGH,
- .asic_name = "vangogh",
- .gfx_target_version = 100303,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .needs_iommu_device = false,
- .supports_cwsr = true,
- .needs_pci_atomics = true,
- .no_atomic_fw_version = 92,
- .num_sdma_engines = 1,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info dimgrey_cavefish_device_info = {
- .asic_family = CHIP_DIMGREY_CAVEFISH,
- .asic_name = "dimgrey_cavefish",
- .gfx_target_version = 100302,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .needs_iommu_device = false,
- .supports_cwsr = true,
- .needs_pci_atomics = true,
- .no_atomic_fw_version = 92,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 8,
-};
-
-static const struct kfd_device_info beige_goby_device_info = {
- .asic_family = CHIP_BEIGE_GOBY,
- .asic_name = "beige_goby",
- .gfx_target_version = 100304,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .needs_iommu_device = false,
- .supports_cwsr = true,
- .needs_pci_atomics = true,
- .no_atomic_fw_version = 92,
- .num_sdma_engines = 1,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 8,
-};
-
-static const struct kfd_device_info yellow_carp_device_info = {
- .asic_family = CHIP_YELLOW_CARP,
- .asic_name = "yellow_carp",
- .gfx_target_version = 100305,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .needs_iommu_device = false,
- .supports_cwsr = true,
- .needs_pci_atomics = true,
- .no_atomic_fw_version = 92,
- .num_sdma_engines = 1,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info cyan_skillfish_device_info = {
- .asic_family = CHIP_CYAN_SKILLFISH,
- .asic_name = "cyan_skillfish",
- .gfx_target_version = 100103,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .needs_iommu_device = false,
- .supports_cwsr = true,
- .needs_pci_atomics = true,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 8,
-};
-
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
static int kfd_resume(struct kfd_dev *kfd);
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf)
+static void kfd_device_info_set_sdma_queue_num(struct kfd_dev *kfd)
{
- struct kfd_dev *kfd;
- const struct kfd_device_info *device_info;
- const struct kfd2kgd_calls *f2g;
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ uint32_t sdma_version = kfd->adev->ip_versions[SDMA0_HWIP][0];
+
+ switch (sdma_version) {
+ case IP_VERSION(4, 0, 0):/* VEGA10 */
+ case IP_VERSION(4, 0, 1):/* VEGA12 */
+ case IP_VERSION(4, 1, 0):/* RAVEN */
+ case IP_VERSION(4, 1, 1):/* RAVEN */
+	case IP_VERSION(4, 1, 2):/* RENOIR */
+ case IP_VERSION(5, 2, 1):/* VANGOGH */
+ case IP_VERSION(5, 2, 3):/* YELLOW_CARP */
+ kfd->device_info.num_sdma_queues_per_engine = 2;
+ break;
+ case IP_VERSION(4, 2, 0):/* VEGA20 */
+	case IP_VERSION(4, 2, 2):/* ARCTURUS */
+ case IP_VERSION(4, 4, 0):/* ALDEBARAN */
+ case IP_VERSION(5, 0, 0):/* NAVI10 */
+ case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */
+ case IP_VERSION(5, 0, 2):/* NAVI14 */
+ case IP_VERSION(5, 0, 5):/* NAVI12 */
+ case IP_VERSION(5, 2, 0):/* SIENNA_CICHLID */
+	case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */
+ case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */
+ case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */
+ kfd->device_info.num_sdma_queues_per_engine = 8;
+ break;
+ default:
+ dev_warn(kfd_device,
+ "Default sdma queue per engine(8) is set due to "
+ "mismatch of sdma ip block(SDMA_HWIP:0x%x).\n",
+ sdma_version);
+ kfd->device_info.num_sdma_queues_per_engine = 8;
+ }
+}
+
+static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
+{
+ uint32_t gc_version = KFD_GC_VERSION(kfd);
+
+ switch (gc_version) {
+ case IP_VERSION(9, 0, 1): /* VEGA10 */
+ case IP_VERSION(9, 1, 0): /* RAVEN */
+ case IP_VERSION(9, 2, 1): /* VEGA12 */
+ case IP_VERSION(9, 2, 2): /* RAVEN */
+ case IP_VERSION(9, 3, 0): /* RENOIR */
+ case IP_VERSION(9, 4, 0): /* VEGA20 */
+ case IP_VERSION(9, 4, 1): /* ARCTURUS */
+ case IP_VERSION(9, 4, 2): /* ALDEBARAN */
+ case IP_VERSION(10, 3, 1): /* VANGOGH */
+ case IP_VERSION(10, 3, 3): /* YELLOW_CARP */
+ case IP_VERSION(10, 1, 3): /* CYAN_SKILLFISH */
+ case IP_VERSION(10, 1, 10): /* NAVI10 */
+ case IP_VERSION(10, 1, 2): /* NAVI12 */
+ case IP_VERSION(10, 1, 1): /* NAVI14 */
+ case IP_VERSION(10, 3, 0): /* SIENNA_CICHLID */
+ case IP_VERSION(10, 3, 2): /* NAVY_FLOUNDER */
+ case IP_VERSION(10, 3, 4): /* DIMGREY_CAVEFISH */
+ case IP_VERSION(10, 3, 5): /* BEIGE_GOBY */
+ kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
+ break;
+ default:
+ dev_warn(kfd_device, "v9 event interrupt handler is set due to "
+ "mismatch of gc ip block(GC_HWIP:0x%x).\n", gc_version);
+ kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
+ }
+}
+
+static void kfd_device_info_init(struct kfd_dev *kfd,
+ bool vf, uint32_t gfx_target_version)
+{
+ uint32_t gc_version = KFD_GC_VERSION(kfd);
+ uint32_t asic_type = kfd->adev->asic_type;
+
+ kfd->device_info.max_pasid_bits = 16;
+ kfd->device_info.max_no_of_hqd = 24;
+ kfd->device_info.num_of_watch_points = 4;
+ kfd->device_info.mqd_size_aligned = MQD_SIZE_ALIGNED;
+ kfd->device_info.gfx_target_version = gfx_target_version;
+
+ if (KFD_IS_SOC15(kfd)) {
+ kfd->device_info.doorbell_size = 8;
+ kfd->device_info.ih_ring_entry_size = 8 * sizeof(uint32_t);
+ kfd->device_info.supports_cwsr = true;
+
+ kfd_device_info_set_sdma_queue_num(kfd);
+
+ kfd_device_info_set_event_interrupt_class(kfd);
+
+ /* Raven */
+ if (gc_version == IP_VERSION(9, 1, 0) ||
+ gc_version == IP_VERSION(9, 2, 2))
+ kfd->device_info.needs_iommu_device = true;
+
+ if (gc_version < IP_VERSION(11, 0, 0)) {
+ /* Navi2x+, Navi1x+ */
+ if (gc_version >= IP_VERSION(10, 3, 0))
+ kfd->device_info.no_atomic_fw_version = 92;
+ else if (gc_version >= IP_VERSION(10, 1, 1))
+ kfd->device_info.no_atomic_fw_version = 145;
+
+ /* Navi1x+ */
+ if (gc_version >= IP_VERSION(10, 1, 1))
+ kfd->device_info.needs_pci_atomics = true;
+ }
+ } else {
+ kfd->device_info.doorbell_size = 4;
+ kfd->device_info.ih_ring_entry_size = 4 * sizeof(uint32_t);
+ kfd->device_info.event_interrupt_class = &event_interrupt_class_cik;
+ kfd->device_info.num_sdma_queues_per_engine = 2;
+
+ if (asic_type != CHIP_KAVERI &&
+ asic_type != CHIP_HAWAII &&
+ asic_type != CHIP_TONGA)
+ kfd->device_info.supports_cwsr = true;
+
+ if (asic_type == CHIP_KAVERI ||
+ asic_type == CHIP_CARRIZO)
+ kfd->device_info.needs_iommu_device = true;
+
+ if (asic_type != CHIP_HAWAII && !vf)
+ kfd->device_info.needs_pci_atomics = true;
+ }
+}
+
+struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
+{
+ struct kfd_dev *kfd = NULL;
+ const struct kfd2kgd_calls *f2g = NULL;
struct pci_dev *pdev = adev->pdev;
+ uint32_t gfx_target_version = 0;
switch (adev->asic_type) {
#ifdef KFD_SUPPORT_IOMMU_V2
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_KAVERI:
- if (vf)
- device_info = NULL;
- else
- device_info = &kaveri_device_info;
- f2g = &gfx_v7_kfd2kgd;
+ gfx_target_version = 70000;
+ if (!vf)
+ f2g = &gfx_v7_kfd2kgd;
break;
#endif
case CHIP_CARRIZO:
- if (vf)
- device_info = NULL;
- else
- device_info = &carrizo_device_info;
- f2g = &gfx_v8_kfd2kgd;
+ gfx_target_version = 80001;
+ if (!vf)
+ f2g = &gfx_v8_kfd2kgd;
break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_HAWAII:
- if (vf)
- device_info = NULL;
- else
- device_info = &hawaii_device_info;
- f2g = &gfx_v7_kfd2kgd;
+ gfx_target_version = 70001;
+ if (!amdgpu_exp_hw_support)
+ pr_info(
+ "KFD support on Hawaii is experimental. See modparam exp_hw_support\n"
+ );
+ else if (!vf)
+ f2g = &gfx_v7_kfd2kgd;
break;
#endif
case CHIP_TONGA:
- if (vf)
- device_info = NULL;
- else
- device_info = &tonga_device_info;
- f2g = &gfx_v8_kfd2kgd;
+ gfx_target_version = 80002;
+ if (!vf)
+ f2g = &gfx_v8_kfd2kgd;
break;
case CHIP_FIJI:
- if (vf)
- device_info = &fiji_vf_device_info;
- else
- device_info = &fiji_device_info;
+ gfx_target_version = 80003;
f2g = &gfx_v8_kfd2kgd;
break;
case CHIP_POLARIS10:
- if (vf)
- device_info = &polaris10_vf_device_info;
- else
- device_info = &polaris10_device_info;
+ gfx_target_version = 80003;
f2g = &gfx_v8_kfd2kgd;
break;
case CHIP_POLARIS11:
- if (vf)
- device_info = NULL;
- else
- device_info = &polaris11_device_info;
- f2g = &gfx_v8_kfd2kgd;
+ gfx_target_version = 80003;
+ if (!vf)
+ f2g = &gfx_v8_kfd2kgd;
break;
case CHIP_POLARIS12:
- if (vf)
- device_info = NULL;
- else
- device_info = &polaris12_device_info;
- f2g = &gfx_v8_kfd2kgd;
+ gfx_target_version = 80003;
+ if (!vf)
+ f2g = &gfx_v8_kfd2kgd;
break;
case CHIP_VEGAM:
- if (vf)
- device_info = NULL;
- else
- device_info = &vegam_device_info;
- f2g = &gfx_v8_kfd2kgd;
+ gfx_target_version = 80003;
+ if (!vf)
+ f2g = &gfx_v8_kfd2kgd;
break;
default:
switch (adev->ip_versions[GC_HWIP][0]) {
+ /* Vega 10 */
case IP_VERSION(9, 0, 1):
- if (vf)
- device_info = &vega10_vf_device_info;
- else
- device_info = &vega10_device_info;
+ gfx_target_version = 90000;
f2g = &gfx_v9_kfd2kgd;
break;
#ifdef KFD_SUPPORT_IOMMU_V2
+ /* Raven */
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 2):
- if (vf)
- device_info = NULL;
- else
- device_info = &raven_device_info;
- f2g = &gfx_v9_kfd2kgd;
+ gfx_target_version = 90002;
+ if (!vf)
+ f2g = &gfx_v9_kfd2kgd;
break;
#endif
+ /* Vega12 */
case IP_VERSION(9, 2, 1):
- if (vf)
- device_info = NULL;
- else
- device_info = &vega12_device_info;
- f2g = &gfx_v9_kfd2kgd;
+ gfx_target_version = 90004;
+ if (!vf)
+ f2g = &gfx_v9_kfd2kgd;
break;
+ /* Renoir */
case IP_VERSION(9, 3, 0):
- if (vf)
- device_info = NULL;
- else
- device_info = &renoir_device_info;
- f2g = &gfx_v9_kfd2kgd;
+ gfx_target_version = 90012;
+ if (!vf)
+ f2g = &gfx_v9_kfd2kgd;
break;
+ /* Vega20 */
case IP_VERSION(9, 4, 0):
- if (vf)
- device_info = NULL;
- else
- device_info = &vega20_device_info;
- f2g = &gfx_v9_kfd2kgd;
+ gfx_target_version = 90006;
+ if (!vf)
+ f2g = &gfx_v9_kfd2kgd;
break;
+ /* Arcturus */
case IP_VERSION(9, 4, 1):
- device_info = &arcturus_device_info;
+ gfx_target_version = 90008;
f2g = &arcturus_kfd2kgd;
break;
+ /* Aldebaran */
case IP_VERSION(9, 4, 2):
- device_info = &aldebaran_device_info;
+ gfx_target_version = 90010;
f2g = &aldebaran_kfd2kgd;
break;
+ /* Navi10 */
case IP_VERSION(10, 1, 10):
- if (vf)
- device_info = NULL;
- else
- device_info = &navi10_device_info;
- f2g = &gfx_v10_kfd2kgd;
+ gfx_target_version = 100100;
+ if (!vf)
+ f2g = &gfx_v10_kfd2kgd;
break;
+ /* Navi12 */
case IP_VERSION(10, 1, 2):
- device_info = &navi12_device_info;
+ gfx_target_version = 100101;
f2g = &gfx_v10_kfd2kgd;
break;
+ /* Navi14 */
case IP_VERSION(10, 1, 1):
- if (vf)
- device_info = NULL;
- else
- device_info = &navi14_device_info;
- f2g = &gfx_v10_kfd2kgd;
+ gfx_target_version = 100102;
+ if (!vf)
+ f2g = &gfx_v10_kfd2kgd;
break;
+ /* Cyan Skillfish */
case IP_VERSION(10, 1, 3):
- if (vf)
- device_info = NULL;
- else
- device_info = &cyan_skillfish_device_info;
- f2g = &gfx_v10_kfd2kgd;
+ gfx_target_version = 100103;
+ if (!vf)
+ f2g = &gfx_v10_kfd2kgd;
break;
+ /* Sienna Cichlid */
case IP_VERSION(10, 3, 0):
- device_info = &sienna_cichlid_device_info;
+ gfx_target_version = 100300;
f2g = &gfx_v10_3_kfd2kgd;
break;
+ /* Navy Flounder */
case IP_VERSION(10, 3, 2):
- device_info = &navy_flounder_device_info;
+ gfx_target_version = 100301;
f2g = &gfx_v10_3_kfd2kgd;
break;
+ /* Van Gogh */
case IP_VERSION(10, 3, 1):
- if (vf)
- device_info = NULL;
- else
- device_info = &vangogh_device_info;
- f2g = &gfx_v10_3_kfd2kgd;
+ gfx_target_version = 100303;
+ if (!vf)
+ f2g = &gfx_v10_3_kfd2kgd;
break;
+ /* Dimgrey Cavefish */
case IP_VERSION(10, 3, 4):
- device_info = &dimgrey_cavefish_device_info;
+ gfx_target_version = 100302;
f2g = &gfx_v10_3_kfd2kgd;
break;
+ /* Beige Goby */
case IP_VERSION(10, 3, 5):
- device_info = &beige_goby_device_info;
+ gfx_target_version = 100304;
f2g = &gfx_v10_3_kfd2kgd;
break;
+ /* Yellow Carp */
case IP_VERSION(10, 3, 3):
- if (vf)
- device_info = NULL;
- else
- device_info = &yellow_carp_device_info;
- f2g = &gfx_v10_3_kfd2kgd;
+ gfx_target_version = 100305;
+ if (!vf)
+ f2g = &gfx_v10_3_kfd2kgd;
break;
default:
- return NULL;
+ break;
}
break;
}
- if (!device_info || !f2g) {
- dev_err(kfd_device, "%s %s not supported in kfd\n",
- amdgpu_asic_name[adev->asic_type], vf ? "VF" : "");
+ if (!f2g) {
+ if (adev->ip_versions[GC_HWIP][0])
+ dev_err(kfd_device, "GC IP %06x %s not supported in kfd\n",
+ adev->ip_versions[GC_HWIP][0], vf ? "VF" : "");
+ else
+ dev_err(kfd_device, "%s %s not supported in kfd\n",
+ amdgpu_asic_name[adev->asic_type], vf ? "VF" : "");
return NULL;
}
@@ -824,8 +364,8 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf)
if (!kfd)
return NULL;
- kfd->kgd = kgd;
- kfd->device_info = device_info;
+ kfd->adev = adev;
+ kfd_device_info_init(kfd, vf, gfx_target_version);
kfd->pdev = pdev;
kfd->init_complete = false;
kfd->kfd2kgd = f2g;
@@ -844,24 +384,24 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf)
static void kfd_cwsr_init(struct kfd_dev *kfd)
{
- if (cwsr_enable && kfd->device_info->supports_cwsr) {
- if (kfd->device_info->asic_family < CHIP_VEGA10) {
+ if (cwsr_enable && kfd->device_info.supports_cwsr) {
+ if (KFD_GC_VERSION(kfd) < IP_VERSION(9, 0, 1)) {
BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_gfx8_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
- } else if (kfd->device_info->asic_family == CHIP_ARCTURUS) {
+ } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)) {
BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_arcturus_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex);
- } else if (kfd->device_info->asic_family == CHIP_ALDEBARAN) {
+ } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)) {
BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_aldebaran_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_aldebaran_hex);
- } else if (kfd->device_info->asic_family < CHIP_NAVI10) {
+ } else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) {
BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_gfx9_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
- } else if (kfd->device_info->asic_family < CHIP_SIENNA_CICHLID) {
+ } else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 3, 0)) {
BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_nv1x_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex);
@@ -882,18 +422,17 @@ static int kfd_gws_init(struct kfd_dev *kfd)
if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
return 0;
- if (hws_gws_support
- || (kfd->device_info->asic_family == CHIP_VEGA10
- && kfd->mec2_fw_version >= 0x81b3)
- || (kfd->device_info->asic_family >= CHIP_VEGA12
- && kfd->device_info->asic_family <= CHIP_RAVEN
- && kfd->mec2_fw_version >= 0x1b3)
- || (kfd->device_info->asic_family == CHIP_ARCTURUS
- && kfd->mec2_fw_version >= 0x30)
- || (kfd->device_info->asic_family == CHIP_ALDEBARAN
- && kfd->mec2_fw_version >= 0x28))
- ret = amdgpu_amdkfd_alloc_gws(kfd->kgd,
- amdgpu_amdkfd_get_num_gws(kfd->kgd), &kfd->gws);
+ if (hws_gws_support || (KFD_IS_SOC15(kfd) &&
+ ((KFD_GC_VERSION(kfd) == IP_VERSION(9, 0, 1)
+ && kfd->mec2_fw_version >= 0x81b3) ||
+ (KFD_GC_VERSION(kfd) <= IP_VERSION(9, 4, 0)
+ && kfd->mec2_fw_version >= 0x1b3) ||
+ (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)
+ && kfd->mec2_fw_version >= 0x30) ||
+ (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)
+ && kfd->mec2_fw_version >= 0x28))))
+ ret = amdgpu_amdkfd_alloc_gws(kfd->adev,
+ kfd->adev->gds.gws_size, &kfd->gws);
return ret;
}
@@ -910,11 +449,11 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
unsigned int size, map_process_packet_size;
kfd->ddev = ddev;
- kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
+ kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
KGD_ENGINE_MEC1);
- kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
+ kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
KGD_ENGINE_MEC2);
- kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
+ kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
KGD_ENGINE_SDMA1);
kfd->shared_resources = *gpu_resources;
@@ -927,16 +466,16 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
* 32 and 64-bit requests are possible and must be
* supported.
*/
- kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->kgd);
+ kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->adev);
if (!kfd->pci_atomic_requested &&
- kfd->device_info->needs_pci_atomics &&
- (!kfd->device_info->no_atomic_fw_version ||
- kfd->mec_fw_version < kfd->device_info->no_atomic_fw_version)) {
+ kfd->device_info.needs_pci_atomics &&
+ (!kfd->device_info.no_atomic_fw_version ||
+ kfd->mec_fw_version < kfd->device_info.no_atomic_fw_version)) {
dev_info(kfd_device,
"skipped device %x:%x, PCI rejects atomics %d<%d\n",
kfd->pdev->vendor, kfd->pdev->device,
kfd->mec_fw_version,
- kfd->device_info->no_atomic_fw_version);
+ kfd->device_info.no_atomic_fw_version);
return false;
}
@@ -953,16 +492,15 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
/* calculate max size of mqds needed for queues */
size = max_num_of_queues_per_device *
- kfd->device_info->mqd_size_aligned;
+ kfd->device_info.mqd_size_aligned;
/*
* calculate max size of runlist packet.
* There can be only 2 packets at once
*/
- map_process_packet_size =
- kfd->device_info->asic_family == CHIP_ALDEBARAN ?
+ map_process_packet_size = KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) ?
sizeof(struct pm4_mes_map_process_aldebaran) :
- sizeof(struct pm4_mes_map_process);
+ sizeof(struct pm4_mes_map_process);
size += (KFD_MAX_NUM_OF_PROCESSES * map_process_packet_size +
max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
+ sizeof(struct pm4_mes_runlist)) * 2;
@@ -974,7 +512,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
size += 512 * 1024;
if (amdgpu_amdkfd_alloc_gtt_mem(
- kfd->kgd, size, &kfd->gtt_mem,
+ kfd->adev, size, &kfd->gtt_mem,
&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
false)) {
dev_err(kfd_device, "Could not allocate %d bytes\n", size);
@@ -995,9 +533,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
goto kfd_doorbell_error;
}
- kfd->hive_id = amdgpu_amdkfd_get_hive_id(kfd->kgd);
+ kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;
- kfd->noretry = amdgpu_amdkfd_get_noretry(kfd->kgd);
+ kfd->noretry = kfd->adev->gmc.noretry;
if (kfd_interrupt_init(kfd)) {
dev_err(kfd_device, "Error initializing interrupts\n");
@@ -1015,7 +553,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
*/
if (kfd_gws_init(kfd)) {
dev_err(kfd_device, "Could not allocate %d gws\n",
- amdgpu_amdkfd_get_num_gws(kfd->kgd));
+ kfd->adev->gds.gws_size);
goto gws_error;
}
@@ -1030,7 +568,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd_cwsr_init(kfd);
- svm_migrate_init((struct amdgpu_device *)kfd->kgd);
+ svm_migrate_init(kfd->adev);
if(kgd2kfd_resume_iommu(kfd))
goto device_iommu_error;
@@ -1068,10 +606,10 @@ kfd_interrupt_error:
kfd_doorbell_error:
kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
- amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
+ amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
alloc_gtt_mem_failure:
if (kfd->gws)
- amdgpu_amdkfd_free_gws(kfd->kgd, kfd->gws);
+ amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws);
dev_err(kfd_device,
"device %x:%x NOT added due to errors\n",
kfd->pdev->vendor, kfd->pdev->device);
@@ -1088,9 +626,9 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
kfd_doorbell_fini(kfd);
ida_destroy(&kfd->doorbell_ida);
kfd_gtt_sa_fini(kfd);
- amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
+ amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
if (kfd->gws)
- amdgpu_amdkfd_free_gws(kfd->kgd, kfd->gws);
+ amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws);
}
kfree(kfd);
@@ -1229,7 +767,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
if (!kfd->init_complete)
return;
- if (kfd->device_info->ih_ring_entry_size > sizeof(patched_ihre)) {
+ if (kfd->device_info.ih_ring_entry_size > sizeof(patched_ihre)) {
dev_err_once(kfd_device, "Ring entry too small\n");
return;
}
@@ -1526,7 +1064,7 @@ void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
void kfd_inc_compute_active(struct kfd_dev *kfd)
{
if (atomic_inc_return(&kfd->compute_profile) == 1)
- amdgpu_amdkfd_set_compute_idle(kfd->kgd, false);
+ amdgpu_amdkfd_set_compute_idle(kfd->adev, false);
}
void kfd_dec_compute_active(struct kfd_dev *kfd)
@@ -1534,7 +1072,7 @@ void kfd_dec_compute_active(struct kfd_dev *kfd)
int count = atomic_dec_return(&kfd->compute_profile);
if (count == 0)
- amdgpu_amdkfd_set_compute_idle(kfd->kgd, true);
+ amdgpu_amdkfd_set_compute_idle(kfd->adev, true);
WARN_ONCE(count < 0, "Compute profile ref. count error");
}
@@ -1544,6 +1082,26 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask);
}
+/* kfd_get_num_sdma_engines returns the number of PCIe-optimized SDMA engines
+ * and kfd_get_num_xgmi_sdma_engines returns the number of XGMI SDMA engines.
+ * When the device has more than two engines, we reserve two for PCIe to enable
+ * full-duplex and use the rest as XGMI.
+ */
+unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev)
+{
+ /* If XGMI is not supported, all SDMA engines are PCIe */
+ if (!kdev->adev->gmc.xgmi.supported)
+ return kdev->adev->sdma.num_instances;
+
+ return min(kdev->adev->sdma.num_instances, 2);
+}
+
+unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev)
+{
+ /* After reserving engines for PCIe, the rest are XGMI */
+ return kdev->adev->sdma.num_instances - kfd_get_num_sdma_engines(kdev);
+}
+
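A small sketch (illustrative, hypothetical helper name) of the split the two helpers above implement, using the kernel's min():

/* Hypothetical example: 8 SDMA instances with XGMI -> 2 PCIe + 6 XGMI engines;
 * the same device without XGMI -> 8 PCIe + 0 XGMI engines.
 */
static void example_sdma_split(unsigned int num_instances, bool xgmi_supported,
			       unsigned int *pcie, unsigned int *xgmi)
{
	*pcie = xgmi_supported ? min(num_instances, 2u) : num_instances;
	*xgmi = num_instances - *pcie;
}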
#if defined(CONFIG_DEBUG_FS)
/* This function will send a package to HIQ to hang the HWS
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 93e33dd84dd4..19890e350107 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -47,7 +47,7 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm,
uint32_t filter_param);
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
- uint32_t filter_param);
+ uint32_t filter_param, bool reset);
static int map_queues_cpsch(struct device_queue_manager *dqm);
@@ -99,38 +99,29 @@ unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
return dqm->dev->shared_resources.num_pipe_per_mec;
}
-static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm)
-{
- return dqm->dev->device_info->num_sdma_engines;
-}
-
-static unsigned int get_num_xgmi_sdma_engines(struct device_queue_manager *dqm)
-{
- return dqm->dev->device_info->num_xgmi_sdma_engines;
-}
-
static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
{
- return get_num_sdma_engines(dqm) + get_num_xgmi_sdma_engines(dqm);
+ return kfd_get_num_sdma_engines(dqm->dev) +
+ kfd_get_num_xgmi_sdma_engines(dqm->dev);
}
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
{
- return dqm->dev->device_info->num_sdma_engines
- * dqm->dev->device_info->num_sdma_queues_per_engine;
+ return kfd_get_num_sdma_engines(dqm->dev) *
+ dqm->dev->device_info.num_sdma_queues_per_engine;
}
unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
{
- return dqm->dev->device_info->num_xgmi_sdma_engines
- * dqm->dev->device_info->num_sdma_queues_per_engine;
+ return kfd_get_num_xgmi_sdma_engines(dqm->dev) *
+ dqm->dev->device_info.num_sdma_queues_per_engine;
}
void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
return dqm->dev->kfd2kgd->program_sh_mem_settings(
- dqm->dev->kgd, qpd->vmid,
+ dqm->dev->adev, qpd->vmid,
qpd->sh_mem_config,
qpd->sh_mem_ape1_base,
qpd->sh_mem_ape1_limit,
@@ -157,7 +148,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
{
struct kfd_dev *dev = qpd->dqm->dev;
- if (!KFD_IS_SOC15(dev->device_info->asic_family)) {
+ if (!KFD_IS_SOC15(dev)) {
/* On pre-SOC15 chips we need to use the queue ID to
* preserve the user mode ABI.
*/
@@ -202,7 +193,7 @@ static void deallocate_doorbell(struct qcm_process_device *qpd,
unsigned int old;
struct kfd_dev *dev = qpd->dqm->dev;
- if (!KFD_IS_SOC15(dev->device_info->asic_family) ||
+ if (!KFD_IS_SOC15(dev) ||
q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
return;
@@ -216,7 +207,7 @@ static void program_trap_handler_settings(struct device_queue_manager *dqm,
{
if (dqm->dev->kfd2kgd->program_trap_handler_settings)
dqm->dev->kfd2kgd->program_trap_handler_settings(
- dqm->dev->kgd, qpd->vmid,
+ dqm->dev->adev, qpd->vmid,
qpd->tba_addr, qpd->tma_addr);
}
@@ -250,21 +241,20 @@ static int allocate_vmid(struct device_queue_manager *dqm,
program_sh_mem_settings(dqm, qpd);
- if (dqm->dev->device_info->asic_family >= CHIP_VEGA10 &&
- dqm->dev->cwsr_enabled)
+ if (KFD_IS_SOC15(dqm->dev) && dqm->dev->cwsr_enabled)
program_trap_handler_settings(dqm, qpd);
/* qpd->page_table_base is set earlier when register_process()
* is called, i.e. when the first queue is created.
*/
- dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd,
+ dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->adev,
qpd->vmid,
qpd->page_table_base);
/* invalidate the VM context after pasid and vmid mapping is set up */
kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);
if (dqm->dev->kfd2kgd->set_scratch_backing_va)
- dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->kgd,
+ dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->adev,
qpd->sh_hidden_private_base, qpd->vmid);
return 0;
@@ -283,7 +273,7 @@ static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
if (ret)
return ret;
- return amdgpu_amdkfd_submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
+ return amdgpu_amdkfd_submit_ib(kdev->adev, KGD_ENGINE_MEC1, qpd->vmid,
qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
pmf->release_mem_size / sizeof(uint32_t));
}
@@ -293,7 +283,7 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
struct queue *q)
{
/* On GFX v7, CP doesn't flush TC at dequeue */
- if (q->device->device_info->asic_family == CHIP_HAWAII)
+ if (q->device->adev->asic_type == CHIP_HAWAII)
if (flush_texture_cache_nocpsch(q->device, qpd))
pr_err("Failed to flush TC\n");
@@ -580,7 +570,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q,
/* Make sure the queue is unmapped before updating the MQD */
if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
retval = unmap_queues_cpsch(dqm,
- KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false);
if (retval) {
pr_err("unmap queue failed\n");
goto out_unlock;
@@ -776,7 +766,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
if (!list_empty(&qpd->queues_list)) {
dqm->dev->kfd2kgd->set_vm_context_page_table_base(
- dqm->dev->kgd,
+ dqm->dev->adev,
qpd->vmid,
qpd->page_table_base);
kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
@@ -954,7 +944,7 @@ set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid,
unsigned int vmid)
{
return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
- dqm->dev->kgd, pasid, vmid);
+ dqm->dev->adev, pasid, vmid);
}
static void init_interrupts(struct device_queue_manager *dqm)
@@ -963,7 +953,7 @@ static void init_interrupts(struct device_queue_manager *dqm)
for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
if (is_pipe_enabled(dqm, 0, i))
- dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
+ dqm->dev->kfd2kgd->init_interrupts(dqm->dev->adev, i);
}
static int initialize_nocpsch(struct device_queue_manager *dqm)
@@ -1017,7 +1007,7 @@ static int start_nocpsch(struct device_queue_manager *dqm)
pr_info("SW scheduler is used");
init_interrupts(dqm);
- if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
+ if (dqm->dev->adev->asic_type == CHIP_HAWAII)
return pm_init(&dqm->packet_mgr, dqm);
dqm->sched_running = true;
@@ -1026,7 +1016,7 @@ static int start_nocpsch(struct device_queue_manager *dqm)
static int stop_nocpsch(struct device_queue_manager *dqm)
{
- if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
+ if (dqm->dev->adev->asic_type == CHIP_HAWAII)
pm_uninit(&dqm->packet_mgr, false);
dqm->sched_running = false;
@@ -1055,9 +1045,9 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
dqm->sdma_bitmap &= ~(1ULL << bit);
q->sdma_id = bit;
q->properties.sdma_engine_id = q->sdma_id %
- get_num_sdma_engines(dqm);
+ kfd_get_num_sdma_engines(dqm->dev);
q->properties.sdma_queue_id = q->sdma_id /
- get_num_sdma_engines(dqm);
+ kfd_get_num_sdma_engines(dqm->dev);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
if (dqm->xgmi_sdma_bitmap == 0) {
pr_err("No more XGMI SDMA queue to allocate\n");
@@ -1072,10 +1062,11 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
* assumes the first N engines are always
* PCIe-optimized ones
*/
- q->properties.sdma_engine_id = get_num_sdma_engines(dqm) +
- q->sdma_id % get_num_xgmi_sdma_engines(dqm);
+ q->properties.sdma_engine_id =
+ kfd_get_num_sdma_engines(dqm->dev) +
+ q->sdma_id % kfd_get_num_xgmi_sdma_engines(dqm->dev);
q->properties.sdma_queue_id = q->sdma_id /
- get_num_xgmi_sdma_engines(dqm);
+ kfd_get_num_xgmi_sdma_engines(dqm->dev);
}
pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
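As a worked example of the mapping above (illustrative, assuming 2 PCIe-optimized and 2 XGMI SDMA engines): a PCIe queue with sdma_id 5 lands on engine 1, queue 2, while an XGMI queue with sdma_id 5 lands on engine 3, queue 2. A hypothetical sketch of the XGMI branch:

/* Illustrative only: XGMI placement with 2 PCIe and 2 XGMI SDMA engines. */
static void example_xgmi_queue_placement(unsigned int sdma_id,
					 unsigned int *engine, unsigned int *queue)
{
	const unsigned int num_pcie = 2, num_xgmi = 2;

	*engine = num_pcie + sdma_id % num_xgmi;	/* sdma_id 5 -> engine 3 */
	*queue = sdma_id / num_xgmi;			/* sdma_id 5 -> queue 2 */
}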
@@ -1132,7 +1123,7 @@ static int set_sched_resources(struct device_queue_manager *dqm)
res.queue_mask |= 1ull
<< amdgpu_queue_mask_bit_to_set_resource_bit(
- (struct amdgpu_device *)dqm->dev->kgd, i);
+ dqm->dev->adev, i);
}
res.gws_mask = ~0ull;
res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;
@@ -1232,7 +1223,7 @@ static int stop_cpsch(struct device_queue_manager *dqm)
}
if (!dqm->is_hws_hang)
- unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+ unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, false);
hanging = dqm->is_hws_hang || dqm->is_resetting;
dqm->sched_running = false;
@@ -1428,7 +1419,7 @@ static int map_queues_cpsch(struct device_queue_manager *dqm)
/* dqm->lock mutex has to be locked before calling this function */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
- uint32_t filter_param)
+ uint32_t filter_param, bool reset)
{
int retval = 0;
struct mqd_manager *mqd_mgr;
@@ -1441,7 +1432,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
return retval;
retval = pm_send_unmap_queue(&dqm->packet_mgr, KFD_QUEUE_TYPE_COMPUTE,
- filter, filter_param, false, 0);
+ filter, filter_param, reset, 0);
if (retval)
return retval;
@@ -1485,6 +1476,21 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
return retval;
}
+/* only for compute queues */
+static int reset_queues_cpsch(struct device_queue_manager *dqm,
+ uint16_t pasid)
+{
+ int retval;
+
+ dqm_lock(dqm);
+
+ retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_BY_PASID,
+ pasid, true);
+
+ dqm_unlock(dqm);
+ return retval;
+}
+
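The new hook is reached through dqm->ops; the real caller is the GFX v9 interrupt handler added later in this patch, but a minimal caller sketch (hypothetical function name) looks like:

/* Sketch only: reset all user queues of a process that consumed RAS poison. */
static int example_reset_poisoned_queues(struct kfd_dev *dev, uint16_t pasid)
{
	if (!dev->dqm->ops.reset_queues)
		return -EINVAL;

	return dev->dqm->ops.reset_queues(dev->dqm, pasid);
}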
/* dqm->lock mutex has to be locked before calling this function */
static int execute_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
@@ -1494,7 +1500,7 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm,
if (dqm->is_hws_hang)
return -EIO;
- retval = unmap_queues_cpsch(dqm, filter, filter_param);
+ retval = unmap_queues_cpsch(dqm, filter, filter_param, false);
if (retval)
return retval;
@@ -1847,10 +1853,10 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
get_num_all_sdma_engines(dqm) *
- dev->device_info->num_sdma_queues_per_engine +
+ dev->device_info.num_sdma_queues_per_engine +
dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
- retval = amdgpu_amdkfd_alloc_gtt_mem(dev->kgd, size,
+ retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, size,
&(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
(void *)&(mem_obj->cpu_ptr), false);
@@ -1867,7 +1873,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
if (!dqm)
return NULL;
- switch (dev->device_info->asic_family) {
+ switch (dev->adev->asic_type) {
/* HWS is not available on Hawaii. */
case CHIP_HAWAII:
/* HWS depends on CWSR for timely dequeue. CWSR is not
@@ -1905,6 +1911,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
dqm->ops.evict_process_queues = evict_process_queues_cpsch;
dqm->ops.restore_process_queues = restore_process_queues_cpsch;
dqm->ops.get_wave_state = get_wave_state;
+ dqm->ops.reset_queues = reset_queues_cpsch;
break;
case KFD_SCHED_POLICY_NO_HWS:
/* initialize dqm for no cp scheduling */
@@ -1930,7 +1937,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
goto out_free;
}
- switch (dev->device_info->asic_family) {
+ switch (dev->adev->asic_type) {
case CHIP_CARRIZO:
device_queue_manager_init_vi(&dqm->asic_ops);
break;
@@ -1952,31 +1959,16 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
device_queue_manager_init_vi_tonga(&dqm->asic_ops);
break;
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- case CHIP_RENOIR:
- case CHIP_ARCTURUS:
- case CHIP_ALDEBARAN:
- device_queue_manager_init_v9(&dqm->asic_ops);
- break;
- case CHIP_NAVI10:
- case CHIP_NAVI12:
- case CHIP_NAVI14:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
- case CHIP_CYAN_SKILLFISH:
- device_queue_manager_init_v10_navi10(&dqm->asic_ops);
- break;
default:
- WARN(1, "Unexpected ASIC family %u",
- dev->device_info->asic_family);
- goto out_free;
+ if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
+ device_queue_manager_init_v10_navi10(&dqm->asic_ops);
+ else if (KFD_GC_VERSION(dev) >= IP_VERSION(9, 0, 1))
+ device_queue_manager_init_v9(&dqm->asic_ops);
+ else {
+ WARN(1, "Unexpected ASIC family %u",
+ dev->adev->asic_type);
+ goto out_free;
+ }
}
if (init_mqd_managers(dqm))
@@ -2000,7 +1992,7 @@ static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
{
WARN(!mqd, "No hiq sdma mqd trunk to free");
- amdgpu_amdkfd_free_gtt_mem(dev->kgd, mqd->gtt_mem);
+ amdgpu_amdkfd_free_gtt_mem(dev->adev, mqd->gtt_mem);
}
void device_queue_manager_uninit(struct device_queue_manager *dqm)
@@ -2031,7 +2023,7 @@ static void kfd_process_hw_exception(struct work_struct *work)
{
struct device_queue_manager *dqm = container_of(work,
struct device_queue_manager, hw_exception_work);
- amdgpu_amdkfd_gpu_reset(dqm->dev->kgd);
+ amdgpu_amdkfd_gpu_reset(dqm->dev->adev);
}
#if defined(CONFIG_DEBUG_FS)
@@ -2070,7 +2062,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
return 0;
}
- r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd,
+ r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev,
KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE,
&dump, &n_regs);
if (!r) {
@@ -2092,7 +2084,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
continue;
r = dqm->dev->kfd2kgd->hqd_dump(
- dqm->dev->kgd, pipe, queue, &dump, &n_regs);
+ dqm->dev->adev, pipe, queue, &dump, &n_regs);
if (r)
break;
@@ -2106,10 +2098,10 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) {
for (queue = 0;
- queue < dqm->dev->device_info->num_sdma_queues_per_engine;
+ queue < dqm->dev->device_info.num_sdma_queues_per_engine;
queue++) {
r = dqm->dev->kfd2kgd->hqd_sdma_dump(
- dqm->dev->kgd, pipe, queue, &dump, &n_regs);
+ dqm->dev->adev, pipe, queue, &dump, &n_regs);
if (r)
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 499fc0ea387f..e145e4deb53a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -81,6 +81,8 @@ struct device_process_node {
*
* @get_wave_state: Retrieves context save state and optionally copies the
* control stack, if kept in the MQD, to the given userspace address.
+ *
+ * @reset_queues: reset queues which consumed RAS poison
*/
struct device_queue_manager_ops {
@@ -134,6 +136,9 @@ struct device_queue_manager_ops {
void __user *ctl_stack,
u32 *ctl_stack_used_size,
u32 *save_area_used_size);
+
+ int (*reset_queues)(struct device_queue_manager *dqm,
+ uint16_t pasid);
};
struct device_queue_manager_asic_ops {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
index b5c3d13643f1..f20434d9980e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
@@ -62,7 +62,7 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
- if (dqm->dev->device_info->asic_family == CHIP_ALDEBARAN) {
+ if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2)) {
/* Aldebaran can safely support different XNACK modes
* per process
*/
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index 768d153acff4..0dbcf54657ed 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -48,7 +48,7 @@
/* # of doorbell bytes allocated for each process. */
size_t kfd_doorbell_process_slice(struct kfd_dev *kfd)
{
- return roundup(kfd->device_info->doorbell_size *
+ return roundup(kfd->device_info.doorbell_size *
KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
PAGE_SIZE);
}
@@ -180,7 +180,7 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
if (inx >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
return NULL;
- inx *= kfd->device_info->doorbell_size / sizeof(u32);
+ inx *= kfd->device_info.doorbell_size / sizeof(u32);
/*
* Calculating the kernel doorbell offset using the first
@@ -201,7 +201,7 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
unsigned int inx;
inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr)
- * sizeof(u32) / kfd->device_info->doorbell_size;
+ * sizeof(u32) / kfd->device_info.doorbell_size;
mutex_lock(&kfd->doorbell_mutex);
__clear_bit(inx, kfd->doorbell_available_index);
@@ -239,7 +239,7 @@ unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
return kfd->doorbell_base_dw_offset +
pdd->doorbell_index
* kfd_doorbell_process_slice(kfd) / sizeof(u32) +
- doorbell_id * kfd->device_info->doorbell_size / sizeof(u32);
+ doorbell_id * kfd->device_info.doorbell_size / sizeof(u32);
}
uint64_t kfd_get_number_elems(struct kfd_dev *kfd)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 3eea4edee355..afe72dd11325 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -935,8 +935,10 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, u32 pasid,
/* Workaround on Raven to not kill the process when memory is freed
* before IOMMU is able to finish processing all the excessive PPRs
*/
- if (dev->device_info->asic_family != CHIP_RAVEN &&
- dev->device_info->asic_family != CHIP_RENOIR) {
+
+ if (KFD_GC_VERSION(dev) != IP_VERSION(9, 1, 0) &&
+ KFD_GC_VERSION(dev) != IP_VERSION(9, 2, 2) &&
+ KFD_GC_VERSION(dev) != IP_VERSION(9, 3, 0)) {
mutex_lock(&p->event_mutex);
/* Lookup events by type and signal them */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index d1388896f9c1..2e2b7ceb71db 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -394,7 +394,7 @@ int kfd_init_apertures(struct kfd_process *process)
pdd->gpuvm_base = pdd->gpuvm_limit = 0;
pdd->scratch_base = pdd->scratch_limit = 0;
} else {
- switch (dev->device_info->asic_family) {
+ switch (dev->adev->asic_type) {
case CHIP_KAVERI:
case CHIP_HAWAII:
case CHIP_CARRIZO:
@@ -406,29 +406,14 @@ int kfd_init_apertures(struct kfd_process *process)
case CHIP_VEGAM:
kfd_init_apertures_vi(pdd, id);
break;
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- case CHIP_RENOIR:
- case CHIP_ARCTURUS:
- case CHIP_ALDEBARAN:
- case CHIP_NAVI10:
- case CHIP_NAVI12:
- case CHIP_NAVI14:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
- case CHIP_CYAN_SKILLFISH:
- kfd_init_apertures_v9(pdd, id);
- break;
default:
- WARN(1, "Unexpected ASIC family %u",
- dev->device_info->asic_family);
- return -EINVAL;
+ if (KFD_GC_VERSION(dev) >= IP_VERSION(9, 0, 1))
+ kfd_init_apertures_v9(pdd, id);
+ else {
+ WARN(1, "Unexpected ASIC family %u",
+ dev->adev->asic_type);
+ return -EINVAL;
+ }
}
if (!dev->use_iommu_v2) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 543e7ea75593..b8ac28fb1231 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -89,6 +89,44 @@ enum SQ_INTERRUPT_ERROR_TYPE {
#define KFD_SQ_INT_DATA__ERR_TYPE_MASK 0xF00000
#define KFD_SQ_INT_DATA__ERR_TYPE__SHIFT 20
+static void event_interrupt_poison_consumption(struct kfd_dev *dev,
+ uint16_t pasid, uint16_t source_id)
+{
+ int ret = -EINVAL;
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+
+ if (!p)
+ return;
+
+ /* all queues of a process will be unmapped at once */
+ if (atomic_read(&p->poison)) {
+ kfd_unref_process(p);
+ return;
+ }
+
+ atomic_set(&p->poison, 1);
+ kfd_unref_process(p);
+
+ switch (source_id) {
+ case SOC15_INTSRC_SQ_INTERRUPT_MSG:
+ if (dev->dqm->ops.reset_queues)
+ ret = dev->dqm->ops.reset_queues(dev->dqm, pasid);
+ break;
+ case SOC15_INTSRC_SDMA_ECC:
+ default:
+ break;
+ }
+
+ kfd_signal_poison_consumed_event(dev, pasid);
+
+ /* If resetting the queues succeeded, do page retirement without a GPU
+ * reset; if it failed, fall back to a GPU reset.
+ */
+ if (!ret)
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, false);
+ else
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true);
+}
+
static bool event_interrupt_isr_v9(struct kfd_dev *dev,
const uint32_t *ih_ring_entry,
uint32_t *patched_ihre,
@@ -135,7 +173,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
*patched_flag = true;
memcpy(patched_ihre, ih_ring_entry,
- dev->device_info->ih_ring_entry_size);
+ dev->device_info.ih_ring_entry_size);
pasid = dev->dqm->vmid_pasid[vmid];
@@ -230,8 +268,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
sq_intr_err);
if (sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) {
- kfd_signal_poison_consumed_event(dev, pasid);
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->kgd);
+ event_interrupt_poison_consumption(dev, pasid, source_id);
return;
}
break;
@@ -252,8 +289,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
if (source_id == SOC15_INTSRC_SDMA_TRAP) {
kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28);
} else if (source_id == SOC15_INTSRC_SDMA_ECC) {
- kfd_signal_poison_consumed_event(dev, pasid);
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->kgd);
+ event_interrupt_poison_consumption(dev, pasid, source_id);
return;
}
} else if (client_id == SOC15_IH_CLIENTID_VMC ||
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
index bc47f6a44456..81887c2013c9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -54,7 +54,7 @@ int kfd_interrupt_init(struct kfd_dev *kfd)
int r;
r = kfifo_alloc(&kfd->ih_fifo,
- KFD_IH_NUM_ENTRIES * kfd->device_info->ih_ring_entry_size,
+ KFD_IH_NUM_ENTRIES * kfd->device_info.ih_ring_entry_size,
GFP_KERNEL);
if (r) {
dev_err(kfd_chardev(), "Failed to allocate IH fifo\n");
@@ -114,8 +114,8 @@ bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry)
int count;
count = kfifo_in(&kfd->ih_fifo, ih_ring_entry,
- kfd->device_info->ih_ring_entry_size);
- if (count != kfd->device_info->ih_ring_entry_size) {
+ kfd->device_info.ih_ring_entry_size);
+ if (count != kfd->device_info.ih_ring_entry_size) {
dev_err_ratelimited(kfd_chardev(),
"Interrupt ring overflow, dropping interrupt %d\n",
count);
@@ -133,11 +133,11 @@ static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry)
int count;
count = kfifo_out(&kfd->ih_fifo, ih_ring_entry,
- kfd->device_info->ih_ring_entry_size);
+ kfd->device_info.ih_ring_entry_size);
- WARN_ON(count && count != kfd->device_info->ih_ring_entry_size);
+ WARN_ON(count && count != kfd->device_info.ih_ring_entry_size);
- return count == kfd->device_info->ih_ring_entry_size;
+ return count == kfd->device_info.ih_ring_entry_size;
}
static void interrupt_wq(struct work_struct *work)
@@ -146,13 +146,13 @@ static void interrupt_wq(struct work_struct *work)
interrupt_work);
uint32_t ih_ring_entry[KFD_MAX_RING_ENTRY_SIZE];
- if (dev->device_info->ih_ring_entry_size > sizeof(ih_ring_entry)) {
+ if (dev->device_info.ih_ring_entry_size > sizeof(ih_ring_entry)) {
dev_err_once(kfd_chardev(), "Ring entry too small\n");
return;
}
while (dequeue_ih_ring_entry(dev, ih_ring_entry))
- dev->device_info->event_interrupt_class->interrupt_wq(dev,
+ dev->device_info.event_interrupt_class->interrupt_wq(dev,
ih_ring_entry);
}
@@ -163,7 +163,7 @@ bool interrupt_is_wanted(struct kfd_dev *dev,
/* integer and bitwise OR so there is no boolean short-circuiting */
unsigned int wanted = 0;
- wanted |= dev->device_info->event_interrupt_class->interrupt_isr(dev,
+ wanted |= dev->device_info.event_interrupt_class->interrupt_isr(dev,
ih_ring_entry, patched_ihre, flag);
return wanted != 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index 73f2257acc23..66ad8d0b8f7f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -89,7 +89,7 @@ int kfd_iommu_device_init(struct kfd_dev *kfd)
}
pasid_limit = min_t(unsigned int,
- (unsigned int)(1 << kfd->device_info->max_pasid_bits),
+ (unsigned int)(1 << kfd->device_info.max_pasid_bits),
iommu_info.max_pasids);
if (!kfd_set_pasid_limit(pasid_limit)) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 64b4ac339904..16f8bc4ca7f6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -91,7 +91,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev,
kq->pq_gpu_addr = kq->pq->gpu_addr;
/* For CIK family asics, kq->eop_mem is not needed */
- if (dev->device_info->asic_family > CHIP_MULLINS) {
+ if (dev->adev->asic_type > CHIP_MULLINS) {
retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
if (retval != 0)
goto err_eop_allocate_vidmem;
@@ -111,7 +111,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev,
kq->rptr_kernel = kq->rptr_mem->cpu_ptr;
kq->rptr_gpu_addr = kq->rptr_mem->gpu_addr;
- retval = kfd_gtt_sa_allocate(dev, dev->device_info->doorbell_size,
+ retval = kfd_gtt_sa_allocate(dev, dev->device_info.doorbell_size,
&kq->wptr_mem);
if (retval != 0)
@@ -297,7 +297,7 @@ void kq_submit_packet(struct kernel_queue *kq)
}
pr_debug("\n");
#endif
- if (kq->dev->device_info->doorbell_size == 8) {
+ if (kq->dev->device_info.doorbell_size == 8) {
*kq->wptr64_kernel = kq->pending_wptr64;
write_kernel_doorbell64(kq->queue->properties.doorbell_ptr,
kq->pending_wptr64);
@@ -310,7 +310,7 @@ void kq_submit_packet(struct kernel_queue *kq)
void kq_rollback_packet(struct kernel_queue *kq)
{
- if (kq->dev->device_info->doorbell_size == 8) {
+ if (kq->dev->device_info.doorbell_size == 8) {
kq->pending_wptr64 = *kq->wptr64_kernel;
kq->pending_wptr = *kq->wptr_kernel %
(kq->queue->properties.queue_size / 4);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 9b9c2b9bf2ef..ed5385137f48 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -108,8 +108,8 @@ error_free:
* svm_migrate_copy_memory_gart - sdma copy data between ram and vram
*
* @adev: amdgpu device the sdma ring running
- * @src: source page address array
- * @dst: destination page address array
+ * @sys: system DMA pointer to be copied
+ * @vram: vram destination DMA pointer
* @npages: number of pages to copy
* @direction: enum MIGRATION_COPY_DIR
* @mfence: output, sdma fence to signal after sdma is done
@@ -549,7 +549,7 @@ static void svm_migrate_page_free(struct page *page)
if (svm_bo) {
pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref));
- svm_range_bo_unref(svm_bo);
+ svm_range_bo_unref_async(svm_bo);
}
}
@@ -938,7 +938,7 @@ int svm_migrate_init(struct amdgpu_device *adev)
void *r;
/* Page migration works on Vega10 or newer */
- if (kfddev->device_info->asic_family < CHIP_VEGA10)
+ if (!KFD_IS_SOC15(kfddev))
return -EINVAL;
pgmap = &kfddev->pgmap;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index c021519af810..e2825ad4d699 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -71,7 +71,7 @@ struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_dev *dev,
return NULL;
offset = (q->sdma_engine_id *
- dev->device_info->num_sdma_queues_per_engine +
+ dev->device_info.num_sdma_queues_per_engine +
q->sdma_queue_id) *
dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size;
@@ -100,7 +100,7 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
struct kfd_cu_info cu_info;
uint32_t cu_per_sh[KFD_MAX_NUM_SE][KFD_MAX_NUM_SH_PER_SE] = {0};
int i, se, sh, cu;
- amdgpu_amdkfd_get_cu_info(mm->dev->kgd, &cu_info);
+ amdgpu_amdkfd_get_cu_info(mm->dev->adev, &cu_info);
if (cu_mask_count > cu_info.cu_active_number)
cu_mask_count = cu_info.cu_active_number;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 8128f4d312f1..e9a8e21e144e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -171,7 +171,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
uint32_t wptr_mask = (uint32_t)((p->queue_size / 4) - 1);
- return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
+ return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
wptr_shift, wptr_mask, mms);
}
@@ -180,7 +180,7 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
struct queue_properties *p, struct mm_struct *mms)
{
- return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd,
+ return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd,
(uint32_t __user *)p->write_ptr,
mms);
}
@@ -276,7 +276,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id)
{
- return mm->dev->kfd2kgd->hqd_destroy(mm->dev->kgd, mqd, type, timeout,
+ return mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, mqd, type, timeout,
pipe_id, queue_id);
}
@@ -289,7 +289,7 @@ static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id)
{
- return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
+ return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout);
}
static bool is_occupied(struct mqd_manager *mm, void *mqd,
@@ -297,7 +297,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd,
uint32_t queue_id)
{
- return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address,
+ return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->adev, queue_address,
pipe_id, queue_id);
}
@@ -306,7 +306,7 @@ static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
uint64_t queue_address, uint32_t pipe_id,
uint32_t queue_id)
{
- return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
+ return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd);
}
/*
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index 270160fc401b..d74d8a6ac27a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -148,7 +148,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
- r = mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
+ r = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
wptr_shift, 0, mms);
return r;
@@ -158,7 +158,7 @@ static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
struct queue_properties *p, struct mm_struct *mms)
{
- return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id,
+ return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id,
queue_id, p->doorbell_off);
}
@@ -239,7 +239,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
uint32_t queue_id)
{
return mm->dev->kfd2kgd->hqd_destroy
- (mm->dev->kgd, mqd, type, timeout,
+ (mm->dev->adev, mqd, type, timeout,
pipe_id, queue_id);
}
@@ -254,7 +254,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd,
uint32_t queue_id)
{
return mm->dev->kfd2kgd->hqd_is_occupied(
- mm->dev->kgd, queue_address,
+ mm->dev->adev, queue_address,
pipe_id, queue_id);
}
@@ -320,7 +320,7 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
struct queue_properties *p, struct mm_struct *mms)
{
- return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd,
+ return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd,
(uint32_t __user *)p->write_ptr,
mms);
}
@@ -363,14 +363,14 @@ static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id)
{
- return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
+ return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout);
}
static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
uint64_t queue_address, uint32_t pipe_id,
uint32_t queue_id)
{
- return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
+ return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd);
}
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 4e5932f54b5a..326eb2285029 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -108,7 +108,7 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
if (!mqd_mem_obj)
return NULL;
- retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd,
+ retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->adev,
ALIGN(q->ctl_stack_size, PAGE_SIZE) +
ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
&(mqd_mem_obj->gtt_mem),
@@ -199,7 +199,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
- return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
+ return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
wptr_shift, 0, mms);
}
@@ -208,7 +208,7 @@ static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
struct queue_properties *p, struct mm_struct *mms)
{
- return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id,
+ return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id,
queue_id, p->doorbell_off);
}
@@ -291,7 +291,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
uint32_t queue_id)
{
return mm->dev->kfd2kgd->hqd_destroy
- (mm->dev->kgd, mqd, type, timeout,
+ (mm->dev->adev, mqd, type, timeout,
pipe_id, queue_id);
}
@@ -301,7 +301,7 @@ static void free_mqd(struct mqd_manager *mm, void *mqd,
struct kfd_dev *kfd = mm->dev;
if (mqd_mem_obj->gtt_mem) {
- amdgpu_amdkfd_free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem);
+ amdgpu_amdkfd_free_gtt_mem(kfd->adev, mqd_mem_obj->gtt_mem);
kfree(mqd_mem_obj);
} else {
kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
@@ -313,7 +313,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd,
uint32_t queue_id)
{
return mm->dev->kfd2kgd->hqd_is_occupied(
- mm->dev->kgd, queue_address,
+ mm->dev->adev, queue_address,
pipe_id, queue_id);
}
@@ -375,7 +375,7 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
struct queue_properties *p, struct mm_struct *mms)
{
- return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd,
+ return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd,
(uint32_t __user *)p->write_ptr,
mms);
}
@@ -418,14 +418,14 @@ static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id)
{
- return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
+ return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout);
}
static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
uint64_t queue_address, uint32_t pipe_id,
uint32_t queue_id)
{
- return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
+ return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd);
}
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index cd9220eb8a7a..d456e950ce1d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -162,7 +162,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
uint32_t wptr_mask = (uint32_t)((p->queue_size / 4) - 1);
- return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
+ return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
wptr_shift, wptr_mask, mms);
}
@@ -265,7 +265,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
uint32_t queue_id)
{
return mm->dev->kfd2kgd->hqd_destroy
- (mm->dev->kgd, mqd, type, timeout,
+ (mm->dev->adev, mqd, type, timeout,
pipe_id, queue_id);
}
@@ -280,7 +280,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd,
uint32_t queue_id)
{
return mm->dev->kfd2kgd->hqd_is_occupied(
- mm->dev->kgd, queue_address,
+ mm->dev->adev, queue_address,
pipe_id, queue_id);
}
@@ -347,7 +347,7 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
struct queue_properties *p, struct mm_struct *mms)
{
- return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd,
+ return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd,
(uint32_t __user *)p->write_ptr,
mms);
}
@@ -389,14 +389,14 @@ static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id)
{
- return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
+ return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout);
}
static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
uint64_t queue_address, uint32_t pipe_id,
uint32_t queue_id)
{
- return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
+ return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd);
}
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index e547f1f8c49f..1439420925a0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -223,7 +223,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
- switch (dqm->dev->device_info->asic_family) {
+ switch (dqm->dev->adev->asic_type) {
case CHIP_KAVERI:
case CHIP_HAWAII:
/* PM4 packet structures on CIK are the same as on VI */
@@ -236,31 +236,16 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
case CHIP_VEGAM:
pm->pmf = &kfd_vi_pm_funcs;
break;
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- case CHIP_RENOIR:
- case CHIP_ARCTURUS:
- case CHIP_NAVI10:
- case CHIP_NAVI12:
- case CHIP_NAVI14:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
- case CHIP_CYAN_SKILLFISH:
- pm->pmf = &kfd_v9_pm_funcs;
- break;
- case CHIP_ALDEBARAN:
- pm->pmf = &kfd_aldebaran_pm_funcs;
- break;
default:
- WARN(1, "Unexpected ASIC family %u",
- dqm->dev->device_info->asic_family);
- return -EINVAL;
+ if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2))
+ pm->pmf = &kfd_aldebaran_pm_funcs;
+ else if (KFD_GC_VERSION(dqm->dev) >= IP_VERSION(9, 0, 1))
+ pm->pmf = &kfd_v9_pm_funcs;
+ else {
+ WARN(1, "Unexpected ASIC family %u",
+ dqm->dev->adev->asic_type);
+ return -EINVAL;
+ }
}
pm->dqm = dqm;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
index 08442e7d9944..3c0658e32e93 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
@@ -110,8 +110,8 @@ static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
return 0;
}
-int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
- struct scheduling_resources *res)
+static int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
+ struct scheduling_resources *res)
{
struct pm4_mes_set_resources *packet;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 94e92c0812db..ea68f3b3a4e9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -183,7 +183,8 @@ enum cache_policy {
cache_policy_noncoherent
};
-#define KFD_IS_SOC15(chip) ((chip) >= CHIP_VEGA10)
+#define KFD_GC_VERSION(dev) ((dev)->adev->ip_versions[GC_HWIP][0])
+#define KFD_IS_SOC15(dev) ((KFD_GC_VERSION(dev)) >= (IP_VERSION(9, 0, 1)))
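A short sketch of how these macros are meant to be used, assuming amdgpu's usual IP_VERSION encoding of ((major << 16) | (minor << 8) | revision) so that ordered comparisons across generations are valid; the helper name is hypothetical:

/* Illustrative only: GFX10.1.1 and newer take the Navi10 DQM asic_ops path. */
static inline bool example_is_gfx10_plus(struct kfd_dev *dev)
{
	return KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1);
}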
struct kfd_event_interrupt_class {
bool (*interrupt_isr)(struct kfd_dev *dev,
@@ -194,8 +195,6 @@ struct kfd_event_interrupt_class {
};
struct kfd_device_info {
- enum amd_asic_type asic_family;
- const char *asic_name;
uint32_t gfx_target_version;
const struct kfd_event_interrupt_class *event_interrupt_class;
unsigned int max_pasid_bits;
@@ -208,11 +207,12 @@ struct kfd_device_info {
bool needs_iommu_device;
bool needs_pci_atomics;
uint32_t no_atomic_fw_version;
- unsigned int num_sdma_engines;
- unsigned int num_xgmi_sdma_engines;
unsigned int num_sdma_queues_per_engine;
};
+unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev);
+unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev);
+
struct kfd_mem_obj {
uint32_t range_start;
uint32_t range_end;
@@ -228,9 +228,9 @@ struct kfd_vmid_info {
};
struct kfd_dev {
- struct kgd_dev *kgd;
+ struct amdgpu_device *adev;
- const struct kfd_device_info *device_info;
+ struct kfd_device_info device_info;
struct pci_dev *pdev;
struct drm_device *ddev;
@@ -766,7 +766,7 @@ struct svm_range_list {
struct list_head deferred_range_list;
spinlock_t deferred_list_lock;
atomic_t evicted_ranges;
- bool drain_pagefaults;
+ atomic_t drain_pagefaults;
struct delayed_work restore_work;
DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE);
struct task_struct *faulting_task;
@@ -856,6 +856,8 @@ struct kfd_process {
struct svm_range_list svms;
bool xnack_enabled;
+
+ atomic_t poison;
};
#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
@@ -891,7 +893,7 @@ struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id);
-int kfd_process_gpuid_from_kgd(struct kfd_process *p,
+int kfd_process_gpuid_from_adev(struct kfd_process *p,
struct amdgpu_device *adev, uint32_t *gpuid,
uint32_t *gpuidx);
static inline int kfd_process_gpuid_from_gpuidx(struct kfd_process *p,
@@ -984,7 +986,7 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
-struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd);
+struct kfd_dev *kfd_device_by_adev(const struct amdgpu_device *adev);
int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
int kfd_numa_node_to_apic_id(int numa_node_id);
void kfd_double_confirm_iommu_support(struct kfd_dev *gpu);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index b993011cfa64..f1930ff2c74a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -251,14 +251,13 @@ cleanup:
}
/**
- * @kfd_get_cu_occupancy - Collect number of waves in-flight on this device
+ * kfd_get_cu_occupancy - Collect number of waves in-flight on this device
* by current process. Translates acquired wave count into number of compute units
* that are occupied.
*
- * @atr: Handle of attribute that allows reporting of wave count. The attribute
+ * @attr: Handle of attribute that allows reporting of wave count. The attribute
* handle encapsulates GPU device it is associated with, thereby allowing collection
* of waves in flight, etc
- *
* @buffer: Handle of user provided buffer updated with wave count
*
* Return: Number of bytes written to user buffer or an error value
@@ -288,7 +287,7 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
/* Collect wave count from device if it supports */
wave_cnt = 0;
max_waves_per_cu = 0;
- dev->kfd2kgd->get_cu_occupancy(dev->kgd, proc->pasid, &wave_cnt,
+ dev->kfd2kgd->get_cu_occupancy(dev->adev, proc->pasid, &wave_cnt,
&max_waves_per_cu);
/* Translate wave count to number of compute units */
@@ -692,12 +691,12 @@ static void kfd_process_free_gpuvm(struct kgd_mem *mem,
struct kfd_dev *dev = pdd->dev;
if (kptr) {
- amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(dev->kgd, mem);
+ amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(dev->adev, mem);
kptr = NULL;
}
- amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->drm_priv);
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, pdd->drm_priv,
+ amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->adev, mem, pdd->drm_priv);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, mem, pdd->drm_priv,
NULL);
}
@@ -714,24 +713,24 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
struct kfd_dev *kdev = pdd->dev;
int err;
- err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
+ err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size,
pdd->drm_priv, mem, NULL, flags);
if (err)
goto err_alloc_mem;
- err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, *mem,
+ err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->adev, *mem,
pdd->drm_priv, NULL);
if (err)
goto err_map_mem;
- err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, *mem, true);
+ err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->adev, *mem, true);
if (err) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
goto sync_memory_failed;
}
if (kptr) {
- err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd,
+ err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->adev,
(struct kgd_mem *)*mem, kptr, NULL);
if (err) {
pr_debug("Map GTT BO to kernel failed\n");
@@ -742,10 +741,10 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
return err;
sync_memory_failed:
- amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->kgd, *mem, pdd->drm_priv);
+ amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->adev, *mem, pdd->drm_priv);
err_map_mem:
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, *mem, pdd->drm_priv,
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->adev, *mem, pdd->drm_priv,
NULL);
err_alloc_mem:
*mem = NULL;
@@ -940,10 +939,10 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
if (!peer_pdd->drm_priv)
continue;
amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
- peer_pdd->dev->kgd, mem, peer_pdd->drm_priv);
+ peer_pdd->dev->adev, mem, peer_pdd->drm_priv);
}
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem,
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, mem,
pdd->drm_priv, NULL);
kfd_process_device_remove_obj_handle(pdd, id);
}
@@ -974,7 +973,7 @@ static void kfd_process_kunmap_signal_bo(struct kfd_process *p)
if (!mem)
goto out;
- amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kdev->kgd, mem);
+ amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kdev->adev, mem);
out:
mutex_unlock(&p->mutex);
@@ -1003,7 +1002,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
if (pdd->drm_file) {
amdgpu_amdkfd_gpuvm_release_process_vm(
- pdd->dev->kgd, pdd->drm_priv);
+ pdd->dev->adev, pdd->drm_priv);
fput(pdd->drm_file);
}
@@ -1011,7 +1010,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
get_order(KFD_CWSR_TBA_TMA_SIZE));
- kfree(pdd->qpd.doorbell_bitmap);
+ bitmap_free(pdd->qpd.doorbell_bitmap);
idr_destroy(&pdd->alloc_idr);
kfd_free_process_doorbells(pdd->dev, pdd->doorbell_index);
@@ -1317,14 +1316,13 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
* support the SVM APIs and don't need to be considered
* for the XNACK mode selection.
*/
- if (dev->device_info->asic_family < CHIP_VEGA10)
+ if (!KFD_IS_SOC15(dev))
continue;
/* Aldebaran can always support XNACK because it can support
* per-process XNACK mode selection. But let the dev->noretry
* setting still influence the default XNACK mode.
*/
- if (supported &&
- dev->device_info->asic_family == CHIP_ALDEBARAN)
+ if (supported && KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2))
continue;
/* GFXv10 and later GPUs do not support shader preemption
@@ -1332,7 +1330,7 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
* management and memory-manager-related preemptions or
* even deadlocks.
*/
- if (dev->device_info->asic_family >= CHIP_NAVI10)
+ if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
return false;
if (dev->noretry)
@@ -1431,12 +1429,11 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd,
int range_start = dev->shared_resources.non_cp_doorbells_start;
int range_end = dev->shared_resources.non_cp_doorbells_end;
- if (!KFD_IS_SOC15(dev->device_info->asic_family))
+ if (!KFD_IS_SOC15(dev))
return 0;
- qpd->doorbell_bitmap =
- kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
- BITS_PER_BYTE), GFP_KERNEL);
+ qpd->doorbell_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
+ GFP_KERNEL);
if (!qpd->doorbell_bitmap)
return -ENOMEM;
@@ -1448,9 +1445,9 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd,
for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
if (i >= range_start && i <= range_end) {
- set_bit(i, qpd->doorbell_bitmap);
- set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
- qpd->doorbell_bitmap);
+ __set_bit(i, qpd->doorbell_bitmap);
+ __set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
+ qpd->doorbell_bitmap);
}
}
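For reference, the hunks above replace an open-coded kzalloc(DIV_ROUND_UP(nbits, BITS_PER_BYTE)) allocation with the dedicated bitmap helpers and switch to the non-atomic __set_bit() while the bitmap is still private. A minimal sketch of that pattern follows; the structure and function names are invented for illustration and are not part of the patch.

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Illustrative only: a per-object bitmap sized in bits rather than bytes. */
struct example_ctx {
	unsigned long *slots;
};

static int example_ctx_init(struct example_ctx *ctx, unsigned int nbits)
{
	ctx->slots = bitmap_zalloc(nbits, GFP_KERNEL);
	if (!ctx->slots)
		return -ENOMEM;

	/* Non-atomic set is fine while no one else can see the bitmap yet. */
	__set_bit(0, ctx->slots);
	return 0;
}

static void example_ctx_fini(struct example_ctx *ctx)
{
	bitmap_free(ctx->slots);
	ctx->slots = NULL;
}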
@@ -1547,7 +1544,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
dev = pdd->dev;
ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
- dev->kgd, drm_file, p->pasid,
+ dev->adev, drm_file, p->pasid,
&p->kgd_process_info, &p->ef);
if (ret) {
pr_err("Failed to create process VM object\n");
@@ -1779,14 +1776,13 @@ int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id)
}
int
-kfd_process_gpuid_from_kgd(struct kfd_process *p, struct amdgpu_device *adev,
+kfd_process_gpuid_from_adev(struct kfd_process *p, struct amdgpu_device *adev,
uint32_t *gpuid, uint32_t *gpuidx)
{
- struct kgd_dev *kgd = (struct kgd_dev *)adev;
int i;
for (i = 0; i < p->n_pdds; i++)
- if (p->pdds[i] && p->pdds[i]->dev->kgd == kgd) {
+ if (p->pdds[i] && p->pdds[i]->dev->adev == adev) {
*gpuid = p->pdds[i]->dev->id;
*gpuidx = i;
return 0;
@@ -1951,10 +1947,10 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
* only happens when the first queue is created.
*/
if (pdd->qpd.vmid)
- amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->kgd,
+ amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->adev,
pdd->qpd.vmid);
} else {
- amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->kgd,
+ amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->adev,
pdd->process->pasid, type);
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 3627e7ac161b..5e5c84a8e1ef 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -118,7 +118,7 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
return ret;
pqn->q->gws = mem;
- pdd->qpd.num_gws = gws ? amdgpu_amdkfd_get_num_gws(dev->kgd) : 0;
+ pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0;
return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
pqn->q, NULL);
@@ -135,9 +135,8 @@ void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
{
INIT_LIST_HEAD(&pqm->queues);
- pqm->queue_slot_bitmap =
- kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
- BITS_PER_BYTE), GFP_KERNEL);
+ pqm->queue_slot_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
+ GFP_KERNEL);
if (!pqm->queue_slot_bitmap)
return -ENOMEM;
pqm->process = p;
@@ -159,7 +158,7 @@ void pqm_uninit(struct process_queue_manager *pqm)
kfree(pqn);
}
- kfree(pqm->queue_slot_bitmap);
+ bitmap_free(pqm->queue_slot_bitmap);
pqm->queue_slot_bitmap = NULL;
}
@@ -220,7 +219,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
* Hence we also check the type as well
*/
if ((pdd->qpd.is_debug) || (type == KFD_QUEUE_TYPE_DIQ))
- max_queues = dev->device_info->max_no_of_hqd/2;
+ max_queues = dev->device_info.max_no_of_hqd/2;
if (pdd->qpd.queue_count >= max_queues)
return -ENOSPC;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
index ed4bc5f844ce..deae12dc777d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
@@ -207,7 +207,6 @@ void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset)
void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev,
uint64_t throttle_bitmask)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)dev->kgd;
/*
* ThermalThrottle msg = throttle_bitmask(8):
* thermal_interrupt_count(16):
@@ -223,14 +222,13 @@ void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev,
len = snprintf(fifo_in, sizeof(fifo_in), "%x %llx:%llx\n",
KFD_SMI_EVENT_THERMAL_THROTTLE, throttle_bitmask,
- atomic64_read(&adev->smu.throttle_int_counter));
+ atomic64_read(&dev->adev->smu.throttle_int_counter));
add_event_to_kfifo(dev, KFD_SMI_EVENT_THERMAL_THROTTLE, fifo_in, len);
}
void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)dev->kgd;
struct amdgpu_task_info task_info;
/* VmFault msg = (hex)uint32_pid(8) + :(1) + task name(16) = 25 */
/* 1 byte event + 1 byte space + 25 bytes msg + 1 byte \n +
@@ -243,7 +241,7 @@ void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid)
return;
memset(&task_info, 0, sizeof(struct amdgpu_task_info));
- amdgpu_vm_get_task_info(adev, pasid, &task_info);
+ amdgpu_vm_get_task_info(dev->adev, pasid, &task_info);
/* Report VM faults from user applications, not retry from kernel */
if (!task_info.pid)
return;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 16137c4247bb..aa5ee91cd595 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -193,7 +193,6 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
struct kfd_process_device *pdd;
- struct amdgpu_device *adev;
pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
pdd = kfd_process_device_from_gpuidx(p, gpuidx);
@@ -201,9 +200,8 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
pr_debug("failed to find device idx %d\n", gpuidx);
return -EINVAL;
}
- adev = (struct amdgpu_device *)pdd->dev->kgd;
- r = svm_range_dma_map_dev(adev, prange, offset, npages,
+ r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
hmm_pfns, gpuidx);
if (r)
break;
@@ -334,6 +332,8 @@ static void svm_range_bo_release(struct kref *kref)
struct svm_range_bo *svm_bo;
svm_bo = container_of(kref, struct svm_range_bo, kref);
+ pr_debug("svm_bo 0x%p\n", svm_bo);
+
spin_lock(&svm_bo->list_lock);
while (!list_empty(&svm_bo->range_list)) {
struct svm_range *prange =
@@ -367,12 +367,33 @@ static void svm_range_bo_release(struct kref *kref)
kfree(svm_bo);
}
-void svm_range_bo_unref(struct svm_range_bo *svm_bo)
+static void svm_range_bo_wq_release(struct work_struct *work)
{
- if (!svm_bo)
- return;
+ struct svm_range_bo *svm_bo;
+
+ svm_bo = container_of(work, struct svm_range_bo, release_work);
+ svm_range_bo_release(&svm_bo->kref);
+}
+
+static void svm_range_bo_release_async(struct kref *kref)
+{
+ struct svm_range_bo *svm_bo;
+
+ svm_bo = container_of(kref, struct svm_range_bo, kref);
+ pr_debug("svm_bo 0x%p\n", svm_bo);
+ INIT_WORK(&svm_bo->release_work, svm_range_bo_wq_release);
+ schedule_work(&svm_bo->release_work);
+}
+
+void svm_range_bo_unref_async(struct svm_range_bo *svm_bo)
+{
+ kref_put(&svm_bo->kref, svm_range_bo_release_async);
+}
- kref_put(&svm_bo->kref, svm_range_bo_release);
+static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
+{
+ if (svm_bo)
+ kref_put(&svm_bo->kref, svm_range_bo_release);
}
static bool
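As an aside, the asynchronous unref added above follows a common kernel idiom: when the final kref_put() may run in a context that cannot perform the teardown directly (for example while holding locks the release path needs), the release callback only schedules a work item and the real cleanup runs later in process context. A minimal sketch of that idiom, with hypothetical names, looks roughly like this:

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_obj {
	struct kref kref;
	struct work_struct release_work;
};

/* Synchronous release: runs the actual teardown. */
static void example_obj_release(struct kref *kref)
{
	struct example_obj *obj = container_of(kref, struct example_obj, kref);

	kfree(obj);
}

static void example_obj_release_worker(struct work_struct *work)
{
	struct example_obj *obj = container_of(work, struct example_obj,
					       release_work);

	example_obj_release(&obj->kref);
}

/* Asynchronous release: defer the teardown to a workqueue. */
static void example_obj_release_async(struct kref *kref)
{
	struct example_obj *obj = container_of(kref, struct example_obj, kref);

	INIT_WORK(&obj->release_work, example_obj_release_worker);
	schedule_work(&obj->release_work);
}

static void example_obj_put_async(struct example_obj *obj)
{
	if (obj)
		kref_put(&obj->kref, example_obj_release_async);
}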
@@ -581,7 +602,7 @@ svm_range_get_adev_by_id(struct svm_range *prange, uint32_t gpu_id)
return NULL;
}
- return (struct amdgpu_device *)pdd->dev->kgd;
+ return pdd->dev->adev;
}
struct kfd_process_device *
@@ -593,7 +614,7 @@ svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev)
p = container_of(prange->svms, struct kfd_process, svms);
- r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpu_idx);
+ r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpu_idx);
if (r) {
pr_debug("failed to get device id by adev %p\n", adev);
return NULL;
@@ -706,6 +727,61 @@ svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
}
}
+static bool
+svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
+ uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
+{
+ uint32_t i;
+ int gpuidx;
+
+ for (i = 0; i < nattr; i++) {
+ switch (attrs[i].type) {
+ case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
+ if (prange->preferred_loc != attrs[i].value)
+ return false;
+ break;
+ case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
+ /* Prefetch should always trigger a migration even
+ * if the value of the attribute didn't change.
+ */
+ return false;
+ case KFD_IOCTL_SVM_ATTR_ACCESS:
+ case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
+ case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
+ gpuidx = kfd_process_gpuidx_from_gpuid(p,
+ attrs[i].value);
+ if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
+ if (test_bit(gpuidx, prange->bitmap_access) ||
+ test_bit(gpuidx, prange->bitmap_aip))
+ return false;
+ } else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
+ if (!test_bit(gpuidx, prange->bitmap_access))
+ return false;
+ } else {
+ if (!test_bit(gpuidx, prange->bitmap_aip))
+ return false;
+ }
+ break;
+ case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
+ if ((prange->flags & attrs[i].value) != attrs[i].value)
+ return false;
+ break;
+ case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
+ if ((prange->flags & attrs[i].value) != 0)
+ return false;
+ break;
+ case KFD_IOCTL_SVM_ATTR_GRANULARITY:
+ if (prange->granularity != attrs[i].value)
+ return false;
+ break;
+ default:
+ WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
+ }
+ }
+
+ return true;
+}
+
/**
* svm_range_debug_dump - print all range information from svms
* @svms: svm range list header
@@ -743,14 +819,6 @@ static void svm_range_debug_dump(struct svm_range_list *svms)
}
}
-static bool
-svm_range_is_same_attrs(struct svm_range *old, struct svm_range *new)
-{
- return (old->prefetch_loc == new->prefetch_loc &&
- old->flags == new->flags &&
- old->granularity == new->granularity);
-}
-
static int
svm_range_split_array(void *ppnew, void *ppold, size_t size,
uint64_t old_start, uint64_t old_n,
@@ -943,7 +1011,7 @@ svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
}
static int
-svm_range_split_tail(struct svm_range *prange, struct svm_range *new,
+svm_range_split_tail(struct svm_range *prange,
uint64_t new_last, struct list_head *insert_list)
{
struct svm_range *tail;
@@ -955,7 +1023,7 @@ svm_range_split_tail(struct svm_range *prange, struct svm_range *new,
}
static int
-svm_range_split_head(struct svm_range *prange, struct svm_range *new,
+svm_range_split_head(struct svm_range *prange,
uint64_t new_start, struct list_head *insert_list)
{
struct svm_range *head;
@@ -1053,8 +1121,8 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
if (domain == SVM_RANGE_VRAM_DOMAIN)
bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
- switch (adev->asic_type) {
- case CHIP_ARCTURUS:
+ switch (KFD_GC_VERSION(adev->kfd.dev)) {
+ case IP_VERSION(9, 4, 1):
if (domain == SVM_RANGE_VRAM_DOMAIN) {
if (bo_adev == adev) {
mapping_flags |= coherent ?
@@ -1070,7 +1138,7 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
}
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(9, 4, 2):
if (domain == SVM_RANGE_VRAM_DOMAIN) {
if (bo_adev == adev) {
mapping_flags |= coherent ?
@@ -1129,7 +1197,6 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
struct kfd_process_device *pdd;
struct dma_fence *fence = NULL;
- struct amdgpu_device *adev;
struct kfd_process *p;
uint32_t gpuidx;
int r = 0;
@@ -1145,9 +1212,9 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
pr_debug("failed to find device idx %d\n", gpuidx);
return -EINVAL;
}
- adev = (struct amdgpu_device *)pdd->dev->kgd;
- r = svm_range_unmap_from_gpu(adev, drm_priv_to_vm(pdd->drm_priv),
+ r = svm_range_unmap_from_gpu(pdd->dev->adev,
+ drm_priv_to_vm(pdd->drm_priv),
start, last, &fence);
if (r)
break;
@@ -1159,7 +1226,7 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
if (r)
break;
}
- amdgpu_amdkfd_flush_gpu_tlb_pasid((struct kgd_dev *)adev,
+ amdgpu_amdkfd_flush_gpu_tlb_pasid(pdd->dev->adev,
p->pasid, TLB_FLUSH_HEAVYWEIGHT);
}
@@ -1172,7 +1239,6 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned long npages, bool readonly, dma_addr_t *dma_addr,
struct amdgpu_device *bo_adev, struct dma_fence **fence)
{
- struct amdgpu_bo_va bo_va;
bool table_freed = false;
uint64_t pte_flags;
unsigned long last_start;
@@ -1185,9 +1251,6 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
last_start, last_start + npages - 1, readonly);
- if (prange->svm_bo && prange->ttm_res)
- bo_va.is_xgmi = amdgpu_xgmi_same_hive(adev, bo_adev);
-
for (i = offset; i < offset + npages; i++) {
last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
@@ -1243,8 +1306,7 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct kfd_process *p;
p = container_of(prange->svms, struct kfd_process, svms);
- amdgpu_amdkfd_flush_gpu_tlb_pasid((struct kgd_dev *)adev,
- p->pasid, TLB_FLUSH_LEGACY);
+ amdgpu_amdkfd_flush_gpu_tlb_pasid(adev, p->pasid, TLB_FLUSH_LEGACY);
}
out:
return r;
@@ -1257,7 +1319,6 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
{
struct kfd_process_device *pdd;
struct amdgpu_device *bo_adev;
- struct amdgpu_device *adev;
struct kfd_process *p;
struct dma_fence *fence = NULL;
uint32_t gpuidx;
@@ -1276,19 +1337,18 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
pr_debug("failed to find device idx %d\n", gpuidx);
return -EINVAL;
}
- adev = (struct amdgpu_device *)pdd->dev->kgd;
pdd = kfd_bind_process_to_device(pdd->dev, p);
if (IS_ERR(pdd))
return -EINVAL;
- if (bo_adev && adev != bo_adev &&
- !amdgpu_xgmi_same_hive(adev, bo_adev)) {
+ if (bo_adev && pdd->dev->adev != bo_adev &&
+ !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
pr_debug("cannot map to device idx %d\n", gpuidx);
continue;
}
- r = svm_range_map_to_gpu(adev, drm_priv_to_vm(pdd->drm_priv),
+ r = svm_range_map_to_gpu(pdd->dev->adev, drm_priv_to_vm(pdd->drm_priv),
prange, offset, npages, readonly,
prange->dma_addr[gpuidx],
bo_adev, wait ? &fence : NULL);
@@ -1322,7 +1382,6 @@ struct svm_validate_context {
static int svm_range_reserve_bos(struct svm_validate_context *ctx)
{
struct kfd_process_device *pdd;
- struct amdgpu_device *adev;
struct amdgpu_vm *vm;
uint32_t gpuidx;
int r;
@@ -1334,7 +1393,6 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx)
pr_debug("failed to find device idx %d\n", gpuidx);
return -EINVAL;
}
- adev = (struct amdgpu_device *)pdd->dev->kgd;
vm = drm_priv_to_vm(pdd->drm_priv);
ctx->tv[gpuidx].bo = &vm->root.bo->tbo;
@@ -1356,9 +1414,9 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx)
r = -EINVAL;
goto unreserve_out;
}
- adev = (struct amdgpu_device *)pdd->dev->kgd;
- r = amdgpu_vm_validate_pt_bos(adev, drm_priv_to_vm(pdd->drm_priv),
+ r = amdgpu_vm_validate_pt_bos(pdd->dev->adev,
+ drm_priv_to_vm(pdd->drm_priv),
svm_range_bo_validate, NULL);
if (r) {
pr_debug("failed %d validate pt bos\n", r);
@@ -1381,12 +1439,10 @@ static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
{
struct kfd_process_device *pdd;
- struct amdgpu_device *adev;
pdd = kfd_process_device_from_gpuidx(p, gpuidx);
- adev = (struct amdgpu_device *)pdd->dev->kgd;
- return SVM_ADEV_PGMAP_OWNER(adev);
+ return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
}
/*
@@ -1574,7 +1630,6 @@ retry_flush_work:
static void svm_range_restore_work(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
- struct amdkfd_process_info *process_info;
struct svm_range_list *svms;
struct svm_range *prange;
struct kfd_process *p;
@@ -1594,12 +1649,10 @@ static void svm_range_restore_work(struct work_struct *work)
* the lifetime of this thread, kfd_process and mm will be valid.
*/
p = container_of(svms, struct kfd_process, svms);
- process_info = p->kgd_process_info;
mm = p->mm;
if (!mm)
return;
- mutex_lock(&process_info->lock);
svm_range_list_lock_and_flush_work(svms, mm);
mutex_lock(&svms->lock);
@@ -1652,7 +1705,6 @@ static void svm_range_restore_work(struct work_struct *work)
out_reschedule:
mutex_unlock(&svms->lock);
mmap_write_unlock(mm);
- mutex_unlock(&process_info->lock);
/* If validation failed, reschedule another attempt */
if (evicted_ranges) {
@@ -1664,6 +1716,10 @@ out_reschedule:
/**
* svm_range_evict - evict svm range
+ * @prange: svm range structure
+ * @mm: current process mm_struct
+ * @start: first page address of the range being evicted
+ * @last: last page address of the range being evicted
*
* Stop all queues of the process to ensure the GPU doesn't access the memory, then
* return to let the CPU evict the buffer and proceed with the CPU page table update.
@@ -1768,46 +1824,49 @@ static struct svm_range *svm_range_clone(struct svm_range *old)
}
/**
- * svm_range_handle_overlap - split overlap ranges
- * @svms: svm range list header
- * @new: range added with this attributes
- * @start: range added start address, in pages
- * @last: range last address, in pages
- * @update_list: output, the ranges attributes are updated. For set_attr, this
- * will do validation and map to GPUs. For unmap, this will be
- * removed and unmap from GPUs
- * @insert_list: output, the ranges will be inserted into svms, attributes are
- * not changes. For set_attr, this will add into svms.
- * @remove_list:output, the ranges will be removed from svms
- * @left: the remaining range after overlap, For set_attr, this will be added
- * as new range.
+ * svm_range_add - add svm range and handle overlap
+ * @p: the range add to this process svms
+ * @start: page size aligned
+ * @size: page size aligned
+ * @nattr: number of attributes
+ * @attrs: array of attributes
+ * @update_list: output, the ranges need validate and update GPU mapping
+ * @insert_list: output, the ranges need insert to svms
+ * @remove_list: output, the ranges are replaced and need remove from svms
*
- * Total have 5 overlap cases.
+ * Check if the virtual address range has overlap with any existing ranges,
+ * split partly overlapping ranges and add new ranges in the gaps. All changes
+ * should be applied to the range_list and interval tree transactionally. If
+ * any range split or allocation fails, the entire update fails. Therefore any
+ * existing overlapping svm_ranges are cloned and the original svm_ranges left
+ * unchanged.
*
- * This function handles overlap of an address interval with existing
- * struct svm_ranges for applying new attributes. This may require
- * splitting existing struct svm_ranges. All changes should be applied to
- * the range_list and interval tree transactionally. If any split operation
- * fails, the entire update fails. Therefore the existing overlapping
- * svm_ranges are cloned and the original svm_ranges left unchanged. If the
- * transaction succeeds, the modified clones are added and the originals
- * freed. Otherwise the clones are removed and the old svm_ranges remain.
+ * If the transaction succeeds, the caller can update and insert clones and
+ * new ranges, then free the originals.
*
- * Context: The caller must hold svms->lock
+ * Otherwise the caller can free the clones and new ranges, while the old
+ * svm_ranges remain unchanged.
+ *
+ * Context: Process context, caller must hold svms->lock
+ *
+ * Return:
+ * 0 - OK, otherwise error code
*/
static int
-svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new,
- unsigned long start, unsigned long last,
- struct list_head *update_list,
- struct list_head *insert_list,
- struct list_head *remove_list,
- unsigned long *left)
+svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
+ uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
+ struct list_head *update_list, struct list_head *insert_list,
+ struct list_head *remove_list)
{
+ unsigned long last = start + size - 1UL;
+ struct svm_range_list *svms = &p->svms;
struct interval_tree_node *node;
struct svm_range *prange;
struct svm_range *tmp;
int r = 0;
+ pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last);
+
INIT_LIST_HEAD(update_list);
INIT_LIST_HEAD(insert_list);
INIT_LIST_HEAD(remove_list);
@@ -1815,18 +1874,24 @@ svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new,
node = interval_tree_iter_first(&svms->objects, start, last);
while (node) {
struct interval_tree_node *next;
- struct svm_range *old;
unsigned long next_start;
pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
node->last);
- old = container_of(node, struct svm_range, it_node);
+ prange = container_of(node, struct svm_range, it_node);
next = interval_tree_iter_next(node, start, last);
next_start = min(node->last, last) + 1;
- if (node->start < start || node->last > last) {
- /* node intersects the updated range, clone+split it */
+ if (svm_range_is_same_attrs(p, prange, nattr, attrs)) {
+ /* nothing to do */
+ } else if (node->start < start || node->last > last) {
+ /* node intersects the update range and its attributes
+ * will change. Clone and split it, apply updates only
+ * to the overlapping part
+ */
+ struct svm_range *old = prange;
+
prange = svm_range_clone(old);
if (!prange) {
r = -ENOMEM;
@@ -1835,17 +1900,18 @@ svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new,
list_add(&old->remove_list, remove_list);
list_add(&prange->insert_list, insert_list);
+ list_add(&prange->update_list, update_list);
if (node->start < start) {
pr_debug("change old range start\n");
- r = svm_range_split_head(prange, new, start,
+ r = svm_range_split_head(prange, start,
insert_list);
if (r)
goto out;
}
if (node->last > last) {
pr_debug("change old range last\n");
- r = svm_range_split_tail(prange, new, last,
+ r = svm_range_split_tail(prange, last,
insert_list);
if (r)
goto out;
@@ -1854,16 +1920,12 @@ svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new,
/* The node is contained within start..last,
* just update it
*/
- prange = old;
- }
-
- if (!svm_range_is_same_attrs(prange, new))
list_add(&prange->update_list, update_list);
+ }
/* insert a new node if needed */
if (node->start > start) {
- prange = svm_range_new(prange->svms, start,
- node->start - 1);
+ prange = svm_range_new(svms, start, node->start - 1);
if (!prange) {
r = -ENOMEM;
goto out;
@@ -1877,8 +1939,16 @@ svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new,
start = next_start;
}
- if (left && start <= last)
- *left = last - start + 1;
+ /* add a final range at the end if needed */
+ if (start <= last) {
+ prange = svm_range_new(svms, start, last);
+ if (!prange) {
+ r = -ENOMEM;
+ goto out;
+ }
+ list_add(&prange->insert_list, insert_list);
+ list_add(&prange->update_list, update_list);
+ }
out:
if (r)
@@ -1966,23 +2036,30 @@ svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange)
static void svm_range_drain_retry_fault(struct svm_range_list *svms)
{
struct kfd_process_device *pdd;
- struct amdgpu_device *adev;
struct kfd_process *p;
+ int drain;
uint32_t i;
p = container_of(svms, struct kfd_process, svms);
+restart:
+ drain = atomic_read(&svms->drain_pagefaults);
+ if (!drain)
+ return;
+
for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
pdd = p->pdds[i];
if (!pdd)
continue;
pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
- adev = (struct amdgpu_device *)pdd->dev->kgd;
- amdgpu_ih_wait_on_checkpoint_process(adev, &adev->irq.ih1);
+ amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
+ &pdd->dev->adev->irq.ih1);
pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
}
+ if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
+ goto restart;
}
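The drain loop above implements a small counter protocol: requesters bump an atomic counter, the drainer snapshots it, does the work, and clears it with atomic_cmpxchg() only if no new request raced in, restarting otherwise. Stripped of the KFD specifics, and with names invented here for illustration, the protocol looks like:

#include <linux/atomic.h>

static atomic_t pending_drains = ATOMIC_INIT(0);

/* Requester side: ask for (another) drain pass. */
static void request_drain(void)
{
	atomic_inc(&pending_drains);
}

/* Drainer side: keep going until no request raced in behind us. */
static void drain_all(void)
{
	int snapshot;

restart:
	snapshot = atomic_read(&pending_drains);
	if (!snapshot)
		return;

	/* ... perform the actual draining work here ... */

	/* Clear only if the count is unchanged, otherwise go around again. */
	if (atomic_cmpxchg(&pending_drains, snapshot, 0) != snapshot)
		goto restart;
}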
static void svm_range_deferred_list_work(struct work_struct *work)
@@ -1990,43 +2067,41 @@ static void svm_range_deferred_list_work(struct work_struct *work)
struct svm_range_list *svms;
struct svm_range *prange;
struct mm_struct *mm;
+ struct kfd_process *p;
svms = container_of(work, struct svm_range_list, deferred_list_work);
pr_debug("enter svms 0x%p\n", svms);
+ p = container_of(svms, struct kfd_process, svms);
+ /* Avoid the mm going away while inserting the mmu notifier */
+ mm = get_task_mm(p->lead_thread);
+ if (!mm) {
+ pr_debug("svms 0x%p process mm gone\n", svms);
+ return;
+ }
+retry:
+ mmap_write_lock(mm);
+
+ /* Checking for the need to drain retry faults must be inside
+ * mmap write lock to serialize with munmap notifiers.
+ */
+ if (unlikely(atomic_read(&svms->drain_pagefaults))) {
+ mmap_write_unlock(mm);
+ svm_range_drain_retry_fault(svms);
+ goto retry;
+ }
+
spin_lock(&svms->deferred_list_lock);
while (!list_empty(&svms->deferred_range_list)) {
prange = list_first_entry(&svms->deferred_range_list,
struct svm_range, deferred_list);
+ list_del_init(&prange->deferred_list);
spin_unlock(&svms->deferred_list_lock);
+
pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
prange->start, prange->last, prange->work_item.op);
- mm = prange->work_item.mm;
-retry:
- mmap_write_lock(mm);
mutex_lock(&svms->lock);
-
- /* Checking for the need to drain retry faults must be in
- * mmap write lock to serialize with munmap notifiers.
- *
- * Remove from deferred_list must be inside mmap write lock,
- * otherwise, svm_range_list_lock_and_flush_work may hold mmap
- * write lock, and continue because deferred_list is empty, then
- * deferred_list handle is blocked by mmap write lock.
- */
- spin_lock(&svms->deferred_list_lock);
- if (unlikely(svms->drain_pagefaults)) {
- svms->drain_pagefaults = false;
- spin_unlock(&svms->deferred_list_lock);
- mutex_unlock(&svms->lock);
- mmap_write_unlock(mm);
- svm_range_drain_retry_fault(svms);
- goto retry;
- }
- list_del_init(&prange->deferred_list);
- spin_unlock(&svms->deferred_list_lock);
-
mutex_lock(&prange->migrate_mutex);
while (!list_empty(&prange->child_list)) {
struct svm_range *pchild;
@@ -2042,12 +2117,13 @@ retry:
svm_range_handle_list_op(svms, prange);
mutex_unlock(&svms->lock);
- mmap_write_unlock(mm);
spin_lock(&svms->deferred_list_lock);
}
spin_unlock(&svms->deferred_list_lock);
+ mmap_write_unlock(mm);
+ mmput(mm);
pr_debug("exit svms 0x%p\n", svms);
}
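The reworked deferred worker above also pins the address space for its whole run instead of trusting an mm pointer cached in each work item: get_task_mm() returns NULL if the process is already exiting, or takes a reference that keeps the mm_struct valid until mmput(). In schematic form, assuming a hypothetical worker:

#include <linux/mmap_lock.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>

static void example_worker_body(struct task_struct *lead_thread)
{
	struct mm_struct *mm;

	mm = get_task_mm(lead_thread);	/* NULL if the mm is already gone */
	if (!mm)
		return;

	mmap_write_lock(mm);
	/* ... operate on VMAs / interval notifiers here ... */
	mmap_write_unlock(mm);

	mmput(mm);
}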
@@ -2056,12 +2132,6 @@ svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
struct mm_struct *mm, enum svm_work_list_ops op)
{
spin_lock(&svms->deferred_list_lock);
- /* Make sure pending page faults are drained in the deferred worker
- * before the range is freed to avoid straggler interrupts on
- * unmapped memory causing "phantom faults".
- */
- if (op == SVM_OP_UNMAP_RANGE)
- svms->drain_pagefaults = true;
/* if prange is on the deferred list */
if (!list_empty(&prange->deferred_list)) {
pr_debug("update exist prange 0x%p work op %d\n", prange, op);
@@ -2140,6 +2210,12 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
prange, prange->start, prange->last, start, last);
+ /* Make sure pending page faults are drained in the deferred worker
+ * before the range is freed to avoid straggler interrupts on
+ * unmapped memory causing "phantom faults".
+ */
+ atomic_inc(&svms->drain_pagefaults);
+
unmap_parent = start <= prange->start && last >= prange->last;
list_for_each_entry(pchild, &prange->child_list, child_list) {
@@ -2169,6 +2245,9 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
/**
* svm_range_cpu_invalidate_pagetables - interval notifier callback
+ * @mni: mmu_interval_notifier struct
+ * @range: mmu_notifier_range struct
+ * @cur_seq: value to pass to mmu_interval_set_seq()
*
* If event is MMU_NOTIFY_UNMAP, this is from CPU unmap range, otherwise, it
* is from migration, or CPU page invalidation callback.
@@ -2198,8 +2277,8 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
start = mni->interval_tree.start;
last = mni->interval_tree.last;
- start = (start > range->start ? start : range->start) >> PAGE_SHIFT;
- last = (last < (range->end - 1) ? last : range->end - 1) >> PAGE_SHIFT;
+ start = max(start, range->start) >> PAGE_SHIFT;
+ last = min(last, range->end - 1) >> PAGE_SHIFT;
pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n",
start, last, range->start >> PAGE_SHIFT,
(range->end - 1) >> PAGE_SHIFT,
@@ -2301,7 +2380,7 @@ svm_range_best_restore_location(struct svm_range *prange,
p = container_of(prange->svms, struct kfd_process, svms);
- r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, gpuidx);
+ r = kfd_process_gpuid_from_adev(p, adev, &gpuid, gpuidx);
if (r < 0) {
pr_debug("failed to get gpuid from kgd\n");
return -1;
@@ -2478,7 +2557,7 @@ svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
pr_debug("Failed to create prange in address [0x%llx]\n", addr);
return NULL;
}
- if (kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx)) {
+ if (kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx)) {
pr_debug("failed to get gpuid from kgd\n");
svm_range_free(prange);
return NULL;
@@ -2545,7 +2624,7 @@ svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
uint32_t gpuid;
int r;
- r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx);
+ r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx);
if (r < 0)
return;
}
@@ -2559,20 +2638,13 @@ svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
}
static bool
-svm_fault_allowed(struct mm_struct *mm, uint64_t addr, bool write_fault)
+svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
{
unsigned long requested = VM_READ;
- struct vm_area_struct *vma;
if (write_fault)
requested |= VM_WRITE;
- vma = find_vma(mm, addr << PAGE_SHIFT);
- if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
- pr_debug("address 0x%llx VMA is removed\n", addr);
- return true;
- }
-
pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
vma->vm_flags);
return (vma->vm_flags & requested) == requested;
@@ -2590,6 +2662,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
int32_t best_loc;
int32_t gpuidx = MAX_GPU_INSTANCE;
bool write_locked = false;
+ struct vm_area_struct *vma;
int r = 0;
if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) {
@@ -2600,7 +2673,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
p = kfd_lookup_process_by_pasid(pasid);
if (!p) {
pr_debug("kfd process not founded pasid 0x%x\n", pasid);
- return -ESRCH;
+ return 0;
}
if (!p->xnack_enabled) {
pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
@@ -2611,10 +2684,19 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
+ if (atomic_read(&svms->drain_pagefaults)) {
+ pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
+ r = 0;
+ goto out;
+ }
+
+ /* p->lead_thread is available as kfd_process_wq_release flushes the work
+ * before releasing task ref.
+ */
mm = get_task_mm(p->lead_thread);
if (!mm) {
pr_debug("svms 0x%p failed to get mm\n", svms);
- r = -ESRCH;
+ r = 0;
goto out;
}
@@ -2652,6 +2734,7 @@ retry_write_locked:
if (svm_range_skip_recover(prange)) {
amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
+ r = 0;
goto out_unlock_range;
}
@@ -2660,10 +2743,21 @@ retry_write_locked:
if (timestamp < AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING) {
pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
svms, prange->start, prange->last);
+ r = 0;
goto out_unlock_range;
}
- if (!svm_fault_allowed(mm, addr, write_fault)) {
+ /* __do_munmap removed VMA, return success as we are handling stale
+ * retry fault.
+ */
+ vma = find_vma(mm, addr << PAGE_SHIFT);
+ if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
+ pr_debug("address 0x%llx VMA is removed\n", addr);
+ r = 0;
+ goto out_unlock_range;
+ }
+
+ if (!svm_fault_allowed(vma, write_fault)) {
pr_debug("fault addr 0x%llx no %s permission\n", addr,
write_fault ? "write" : "read");
r = -EPERM;
@@ -2741,6 +2835,14 @@ void svm_range_list_fini(struct kfd_process *p)
/* Ensure list work is finished before process is destroyed */
flush_work(&p->svms.deferred_list_work);
+ /*
+ * Ensure no retry fault comes in afterwards, as the page fault handler will
+ * not find the kfd process and take the mm lock to recover the fault.
+ */
+ atomic_inc(&p->svms.drain_pagefaults);
+ svm_range_drain_retry_fault(&p->svms);
+
+
list_for_each_entry_safe(prange, next, &p->svms.list, list) {
svm_range_unlink(prange);
svm_range_remove_notifier(prange);
@@ -2761,6 +2863,7 @@ int svm_range_list_init(struct kfd_process *p)
mutex_init(&svms->lock);
INIT_LIST_HEAD(&svms->list);
atomic_set(&svms->evicted_ranges, 0);
+ atomic_set(&svms->drain_pagefaults, 0);
INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
INIT_LIST_HEAD(&svms->deferred_range_list);
@@ -2868,59 +2971,6 @@ svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
}
/**
- * svm_range_add - add svm range and handle overlap
- * @p: the range add to this process svms
- * @start: page size aligned
- * @size: page size aligned
- * @nattr: number of attributes
- * @attrs: array of attributes
- * @update_list: output, the ranges need validate and update GPU mapping
- * @insert_list: output, the ranges need insert to svms
- * @remove_list: output, the ranges are replaced and need remove from svms
- *
- * Check if the virtual address range has overlap with the registered ranges,
- * split the overlapped range, copy and adjust pages address and vram nodes in
- * old and new ranges.
- *
- * Context: Process context, caller must hold svms->lock
- *
- * Return:
- * 0 - OK, otherwise error code
- */
-static int
-svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
- uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
- struct list_head *update_list, struct list_head *insert_list,
- struct list_head *remove_list)
-{
- uint64_t last = start + size - 1UL;
- struct svm_range_list *svms;
- struct svm_range new = {0};
- struct svm_range *prange;
- unsigned long left = 0;
- int r = 0;
-
- pr_debug("svms 0x%p [0x%llx 0x%llx]\n", &p->svms, start, last);
-
- svm_range_apply_attrs(p, &new, nattr, attrs);
-
- svms = &p->svms;
-
- r = svm_range_handle_overlap(svms, &new, start, last, update_list,
- insert_list, remove_list, &left);
- if (r)
- return r;
-
- if (left) {
- prange = svm_range_new(svms, last - left + 1, last);
- list_add(&prange->insert_list, insert_list);
- list_add(&prange->update_list, update_list);
- }
-
- return 0;
-}
-
-/**
* svm_range_best_prefetch_location - decide the best prefetch location
* @prange: svm range structure
*
@@ -2953,7 +3003,6 @@ svm_range_best_prefetch_location(struct svm_range *prange)
uint32_t best_loc = prange->prefetch_loc;
struct kfd_process_device *pdd;
struct amdgpu_device *bo_adev;
- struct amdgpu_device *adev;
struct kfd_process *p;
uint32_t gpuidx;
@@ -2981,12 +3030,11 @@ svm_range_best_prefetch_location(struct svm_range *prange)
pr_debug("failed to get device by idx 0x%x\n", gpuidx);
continue;
}
- adev = (struct amdgpu_device *)pdd->dev->kgd;
- if (adev == bo_adev)
+ if (pdd->dev->adev == bo_adev)
continue;
- if (!amdgpu_xgmi_same_hive(adev, bo_adev)) {
+ if (!amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
best_loc = 0;
break;
}
@@ -3150,7 +3198,6 @@ static int
svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
{
- struct amdkfd_process_info *process_info = p->kgd_process_info;
struct mm_struct *mm = current->mm;
struct list_head update_list;
struct list_head insert_list;
@@ -3169,8 +3216,6 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
svms = &p->svms;
- mutex_lock(&process_info->lock);
-
svm_range_list_lock_and_flush_work(svms, mm);
r = svm_range_is_valid(p, start, size);
@@ -3246,8 +3291,6 @@ out_unlock_range:
mutex_unlock(&svms->lock);
mmap_read_unlock(mm);
out:
- mutex_unlock(&process_info->lock);
-
pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
&p->svms, start, start + size - 1, r);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 6dc91c33e80f..2f8a95e86dcb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -48,6 +48,7 @@ struct svm_range_bo {
struct work_struct eviction_work;
struct svm_range_list *svms;
uint32_t evicting;
+ struct work_struct release_work;
};
enum svm_work_list_ops {
@@ -195,7 +196,7 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s
*/
#define KFD_IS_SVM_API_SUPPORTED(dev) ((dev)->pgmap.type != 0)
-void svm_range_bo_unref(struct svm_range_bo *svm_bo);
+void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);
#else
struct kfd_process;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index dd593ad0614a..948fbb39336e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -113,7 +113,7 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
return device;
}
-struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd)
+struct kfd_dev *kfd_device_by_adev(const struct amdgpu_device *adev)
{
struct kfd_topology_device *top_dev;
struct kfd_dev *device = NULL;
@@ -121,7 +121,7 @@ struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd)
down_read(&topology_lock);
list_for_each_entry(top_dev, &topology_device_list, list)
- if (top_dev->gpu && top_dev->gpu->kgd == kgd) {
+ if (top_dev->gpu && top_dev->gpu->adev == adev) {
device = top_dev->gpu;
break;
}
@@ -503,7 +503,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
if (dev->gpu) {
log_max_watch_addr =
- __ilog2_u32(dev->gpu->device_info->num_of_watch_points);
+ __ilog2_u32(dev->gpu->device_info.num_of_watch_points);
if (log_max_watch_addr) {
dev->node_props.capability |=
@@ -515,7 +515,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
HSA_CAP_WATCH_POINTS_TOTALBITS_MASK);
}
- if (dev->gpu->device_info->asic_family == CHIP_TONGA)
+ if (dev->gpu->adev->asic_type == CHIP_TONGA)
dev->node_props.capability |=
HSA_CAP_AQL_QUEUE_DOUBLE_MAP;
@@ -531,7 +531,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version",
dev->gpu->sdma_fw_version);
sysfs_show_64bit_prop(buffer, offs, "unique_id",
- amdgpu_amdkfd_get_unique_id(dev->gpu->kgd));
+ dev->gpu->adev->unique_id);
}
@@ -1106,7 +1106,7 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
if (!gpu)
return 0;
- amdgpu_amdkfd_get_local_mem_info(gpu->kgd, &local_mem_info);
+ amdgpu_amdkfd_get_local_mem_info(gpu->adev, &local_mem_info);
local_mem_size = local_mem_info.local_mem_size_private +
local_mem_info.local_mem_size_public;
@@ -1189,7 +1189,7 @@ static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev)
* for APUs - If CRAT from ACPI reports more than one bank, then
* all the banks will report the same mem_clk_max information
*/
- amdgpu_amdkfd_get_local_mem_info(dev->gpu->kgd, &local_mem_info);
+ amdgpu_amdkfd_get_local_mem_info(dev->gpu->adev, &local_mem_info);
list_for_each_entry(mem, &dev->mem_props, list)
mem->mem_clk_max = local_mem_info.mem_clk_max;
@@ -1217,8 +1217,7 @@ static void kfd_set_iolink_no_atomics(struct kfd_topology_device *dev,
/* set gpu (dev) flags. */
} else {
if (!dev->gpu->pci_atomic_requested ||
- dev->gpu->device_info->asic_family ==
- CHIP_HAWAII)
+ dev->gpu->adev->asic_type == CHIP_HAWAII)
link->flags |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;
}
@@ -1239,7 +1238,7 @@ static void kfd_set_iolink_non_coherent(struct kfd_topology_device *to_dev,
*/
if (inbound_link->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS ||
(inbound_link->iolink_type == CRAT_IOLINK_TYPE_XGMI &&
- to_dev->gpu->device_info->asic_family == CHIP_VEGA20)) {
+ KFD_GC_VERSION(to_dev->gpu) == IP_VERSION(9, 4, 0))) {
outbound_link->flags |= CRAT_IOLINK_FLAGS_NON_COHERENT;
inbound_link->flags |= CRAT_IOLINK_FLAGS_NON_COHERENT;
}
@@ -1286,7 +1285,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
void *crat_image = NULL;
size_t image_size = 0;
int proximity_domain;
- struct amdgpu_device *adev;
+ int i;
+ const char *asic_name = amdgpu_asic_name[gpu->adev->asic_type];
INIT_LIST_HEAD(&temp_topology_device_list);
@@ -1296,10 +1296,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
proximity_domain = atomic_inc_return(&topology_crat_proximity_domain);
- adev = (struct amdgpu_device *)(gpu->kgd);
-
/* Include the CPU in xGMI hive if xGMI connected by assigning it the hive ID. */
- if (gpu->hive_id && adev->gmc.xgmi.connected_to_cpu) {
+ if (gpu->hive_id && gpu->adev->gmc.xgmi.connected_to_cpu) {
struct kfd_topology_device *top_dev;
down_read(&topology_lock);
@@ -1372,45 +1370,48 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
* needed for the topology
*/
- amdgpu_amdkfd_get_cu_info(dev->gpu->kgd, &cu_info);
+ amdgpu_amdkfd_get_cu_info(dev->gpu->adev, &cu_info);
- strncpy(dev->node_props.name, gpu->device_info->asic_name,
- KFD_TOPOLOGY_PUBLIC_NAME_SIZE);
+ for (i = 0; i < KFD_TOPOLOGY_PUBLIC_NAME_SIZE-1; i++) {
+ dev->node_props.name[i] = __tolower(asic_name[i]);
+ if (asic_name[i] == '\0')
+ break;
+ }
+ dev->node_props.name[i] = '\0';
dev->node_props.simd_arrays_per_engine =
cu_info.num_shader_arrays_per_engine;
- dev->node_props.gfx_target_version = gpu->device_info->gfx_target_version;
+ dev->node_props.gfx_target_version = gpu->device_info.gfx_target_version;
dev->node_props.vendor_id = gpu->pdev->vendor;
dev->node_props.device_id = gpu->pdev->device;
dev->node_props.capability |=
- ((amdgpu_amdkfd_get_asic_rev_id(dev->gpu->kgd) <<
- HSA_CAP_ASIC_REVISION_SHIFT) &
+ ((dev->gpu->adev->rev_id << HSA_CAP_ASIC_REVISION_SHIFT) &
HSA_CAP_ASIC_REVISION_MASK);
dev->node_props.location_id = pci_dev_id(gpu->pdev);
dev->node_props.domain = pci_domain_nr(gpu->pdev->bus);
dev->node_props.max_engine_clk_fcompute =
- amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->kgd);
+ amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->adev);
dev->node_props.max_engine_clk_ccompute =
cpufreq_quick_get_max(0) / 1000;
dev->node_props.drm_render_minor =
gpu->shared_resources.drm_render_minor;
dev->node_props.hive_id = gpu->hive_id;
- dev->node_props.num_sdma_engines = gpu->device_info->num_sdma_engines;
+ dev->node_props.num_sdma_engines = kfd_get_num_sdma_engines(gpu);
dev->node_props.num_sdma_xgmi_engines =
- gpu->device_info->num_xgmi_sdma_engines;
+ kfd_get_num_xgmi_sdma_engines(gpu);
dev->node_props.num_sdma_queues_per_engine =
- gpu->device_info->num_sdma_queues_per_engine;
+ gpu->device_info.num_sdma_queues_per_engine;
dev->node_props.num_gws = (dev->gpu->gws &&
dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
- amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0;
+ dev->gpu->adev->gds.gws_size : 0;
dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm);
kfd_fill_mem_clk_max_info(dev);
kfd_fill_iolink_non_crat_info(dev);
- switch (dev->gpu->device_info->asic_family) {
+ switch (dev->gpu->adev->asic_type) {
case CHIP_KAVERI:
case CHIP_HAWAII:
case CHIP_TONGA:
@@ -1429,30 +1430,14 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
break;
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- case CHIP_RENOIR:
- case CHIP_ARCTURUS:
- case CHIP_ALDEBARAN:
- case CHIP_NAVI10:
- case CHIP_NAVI12:
- case CHIP_NAVI14:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
- case CHIP_CYAN_SKILLFISH:
- dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
- HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
- HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
- break;
default:
- WARN(1, "Unexpected ASIC family %u",
- dev->gpu->device_info->asic_family);
+ if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(9, 0, 1))
+ dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
+ HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
+ HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
+ else
+ WARN(1, "Unexpected ASIC family %u",
+ dev->gpu->adev->asic_type);
}
/*
@@ -1469,7 +1454,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
* because it doesn't consider masked out CUs
* max_waves_per_simd: Carrizo reports wrong max_waves_per_simd
*/
- if (dev->gpu->device_info->asic_family == CHIP_CARRIZO) {
+ if (dev->gpu->adev->asic_type == CHIP_CARRIZO) {
dev->node_props.simd_count =
cu_info.simd_per_cu * cu_info.cu_active_number;
dev->node_props.max_waves_per_simd = 10;
@@ -1477,16 +1462,17 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
/* kfd only concerns sram ecc on GFX and HBM ecc on UMC */
dev->node_props.capability |=
- ((adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0) ?
+ ((dev->gpu->adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0) ?
HSA_CAP_SRAM_EDCSUPPORTED : 0;
- dev->node_props.capability |= ((adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ?
+ dev->node_props.capability |=
+ ((dev->gpu->adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ?
HSA_CAP_MEM_EDCSUPPORTED : 0;
- if (adev->asic_type != CHIP_VEGA10)
- dev->node_props.capability |= (adev->ras_enabled != 0) ?
+ if (KFD_GC_VERSION(dev->gpu) != IP_VERSION(9, 0, 1))
+ dev->node_props.capability |= (dev->gpu->adev->ras_enabled != 0) ?
HSA_CAP_RASEVENTNOTIFY : 0;
- if (KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev))
+ if (KFD_IS_SVM_API_SUPPORTED(dev->gpu->adev->kfd.dev))
dev->node_props.capability |= HSA_CAP_SVMAPI_SUPPORTED;
kfd_debug_print_topology();
@@ -1592,7 +1578,7 @@ void kfd_double_confirm_iommu_support(struct kfd_dev *gpu)
gpu->use_iommu_v2 = false;
- if (!gpu->device_info->needs_iommu_device)
+ if (!gpu->device_info.needs_iommu_device)
return;
down_read(&topology_lock);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index a8db017c9b8e..f0cc59d2fd5d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -25,38 +25,11 @@
#include <linux/types.h>
#include <linux/list.h>
+#include <linux/kfd_sysfs.h>
#include "kfd_crat.h"
#define KFD_TOPOLOGY_PUBLIC_NAME_SIZE 32
-#define HSA_CAP_HOT_PLUGGABLE 0x00000001
-#define HSA_CAP_ATS_PRESENT 0x00000002
-#define HSA_CAP_SHARED_WITH_GRAPHICS 0x00000004
-#define HSA_CAP_QUEUE_SIZE_POW2 0x00000008
-#define HSA_CAP_QUEUE_SIZE_32BIT 0x00000010
-#define HSA_CAP_QUEUE_IDLE_EVENT 0x00000020
-#define HSA_CAP_VA_LIMIT 0x00000040
-#define HSA_CAP_WATCH_POINTS_SUPPORTED 0x00000080
-#define HSA_CAP_WATCH_POINTS_TOTALBITS_MASK 0x00000f00
-#define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT 8
-#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK 0x00003000
-#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT 12
-
-#define HSA_CAP_DOORBELL_TYPE_PRE_1_0 0x0
-#define HSA_CAP_DOORBELL_TYPE_1_0 0x1
-#define HSA_CAP_DOORBELL_TYPE_2_0 0x2
-#define HSA_CAP_AQL_QUEUE_DOUBLE_MAP 0x00004000
-
-#define HSA_CAP_RESERVED_WAS_SRAM_EDCSUPPORTED 0x00080000 /* Old buggy user mode depends on this being 0 */
-#define HSA_CAP_MEM_EDCSUPPORTED 0x00100000
-#define HSA_CAP_RASEVENTNOTIFY 0x00200000
-#define HSA_CAP_ASIC_REVISION_MASK 0x03c00000
-#define HSA_CAP_ASIC_REVISION_SHIFT 22
-#define HSA_CAP_SRAM_EDCSUPPORTED 0x04000000
-#define HSA_CAP_SVMAPI_SUPPORTED 0x08000000
-#define HSA_CAP_FLAGS_COHERENTHOSTACCESS 0x10000000
-#define HSA_CAP_RESERVED 0xe00f8000
-
struct kfd_node_properties {
uint64_t hive_id;
uint32_t cpu_cores_count;
@@ -93,17 +66,6 @@ struct kfd_node_properties {
char name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
};
-#define HSA_MEM_HEAP_TYPE_SYSTEM 0
-#define HSA_MEM_HEAP_TYPE_FB_PUBLIC 1
-#define HSA_MEM_HEAP_TYPE_FB_PRIVATE 2
-#define HSA_MEM_HEAP_TYPE_GPU_GDS 3
-#define HSA_MEM_HEAP_TYPE_GPU_LDS 4
-#define HSA_MEM_HEAP_TYPE_GPU_SCRATCH 5
-
-#define HSA_MEM_FLAGS_HOT_PLUGGABLE 0x00000001
-#define HSA_MEM_FLAGS_NON_VOLATILE 0x00000002
-#define HSA_MEM_FLAGS_RESERVED 0xfffffffc
-
struct kfd_mem_properties {
struct list_head list;
uint32_t heap_type;
@@ -116,12 +78,6 @@ struct kfd_mem_properties {
struct attribute attr;
};
-#define HSA_CACHE_TYPE_DATA 0x00000001
-#define HSA_CACHE_TYPE_INSTRUCTION 0x00000002
-#define HSA_CACHE_TYPE_CPU 0x00000004
-#define HSA_CACHE_TYPE_HSACU 0x00000008
-#define HSA_CACHE_TYPE_RESERVED 0xfffffff0
-
struct kfd_cache_properties {
struct list_head list;
uint32_t processor_id_low;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4595c59f2bf0..2f0b14f8f833 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -51,6 +51,7 @@
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"
+#include "amdgpu_atombios.h"
#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
@@ -623,7 +624,7 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
/**
- * dmub_aux_setconfig_reply_callback - Callback for AUX or SET_CONFIG command.
+ * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
* @adev: amdgpu_device pointer
* @notify: dmub notification structure
*
@@ -631,7 +632,8 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
* Copies dmub notification to DM, which is to be read by the AUX command
* issuing thread, and also signals the event to wake up the thread.
*/
-void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
+static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
+ struct dmub_notification *notify)
{
if (adev->dm.dmub_notify)
memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
@@ -647,7 +649,8 @@ void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notific
* Dmub Hpd interrupt processing callback. Gets the display index through the
* link index and calls the helper to do the processing.
*/
-void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
+static void dmub_hpd_callback(struct amdgpu_device *adev,
+ struct dmub_notification *notify)
{
struct amdgpu_dm_connector *aconnector;
struct amdgpu_dm_connector *hpd_aconnector = NULL;
@@ -704,8 +707,10 @@ void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *not
* to dmub interrupt handling thread
* Return: true if successfully registered, false if there is existing registration
*/
-bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
-dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
+static bool register_dmub_notify_callback(struct amdgpu_device *adev,
+ enum dmub_notification_type type,
+ dmub_notify_interrupt_callback_t callback,
+ bool dmub_int_thread_offload)
{
if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
adev->dm.dmub_callback[type] = callback;
@@ -789,8 +794,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
plink = adev->dm.dc->links[notify.link_index];
if (plink) {
plink->hpd_status =
- notify.hpd_status ==
- DP_HPD_PLUG ? true : false;
+ notify.hpd_status == DP_HPD_PLUG;
}
}
queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
@@ -1050,6 +1054,11 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
return 0;
}
+ /* Reset DMCUB if it was previously running - before we overwrite its memory. */
+ status = dmub_srv_hw_reset(dmub_srv);
+ if (status != DMUB_STATUS_OK)
+ DRM_WARN("Error resetting DMUB HW: %d\n", status);
+
hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
fw_inst_const = dmub_fw->data +
@@ -1453,8 +1462,21 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
init_data.flags.edp_no_power_sequencing = true;
+#ifdef CONFIG_DRM_AMD_DC_DCN
+ if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
+ init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
+ if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
+ init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
+#endif
+
init_data.flags.power_down_display_on_boot = true;
+ if (check_seamless_boot_capability(adev)) {
+ init_data.flags.power_down_display_on_boot = false;
+ init_data.flags.allow_seamless_boot_optimization = true;
+ DRM_INFO("Seamless boot condition check passed\n");
+ }
+
INIT_LIST_HEAD(&adev->dm.da_list);
/* Display Core create. */
adev->dm.dc = dc_create(&init_data);
@@ -1479,8 +1501,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
adev->dm.dc->debug.disable_stutter = true;
- if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
+ if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
adev->dm.dc->debug.disable_dsc = true;
+ adev->dm.dc->debug.disable_dsc_edp = true;
+ }
if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
adev->dm.dc->debug.disable_clock_gate = true;
@@ -2303,14 +2327,6 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
goto fail;
}
-
- res = dc_validate_global_state(dc, context, false);
-
- if (res != DC_OK) {
- DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
- goto fail;
- }
-
res = dc_commit_state(dc, context);
fail:
@@ -2561,6 +2577,23 @@ static int dm_resume(void *handle)
if (amdgpu_in_reset(adev)) {
dc_state = dm->cached_dc_state;
+ /*
+ * The dc->current_state is backed up into dm->cached_dc_state
+ * before we commit 0 streams.
+ *
+ * DC will clear link encoder assignments on the real state
+ * but the changes won't propagate over to the copy we made
+ * before the 0 streams commit.
+ *
+ * DC expects that link encoder assignments are *not* valid
+ * when committing a state, so as a workaround it needs to be
+ * cleared here.
+ */
+ link_enc_cfg_init(dm->dc, dc_state);
+
+ if (dc_enable_dmub_notifications(adev->dm.dc))
+ amdgpu_dm_outbox_init(adev);
+
r = dm_dmub_hw_init(adev);
if (r)
DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@@ -2572,20 +2605,11 @@ static int dm_resume(void *handle)
for (i = 0; i < dc_state->stream_count; i++) {
dc_state->streams[i]->mode_changed = true;
- for (j = 0; j < dc_state->stream_status->plane_count; j++) {
- dc_state->stream_status->plane_states[j]->update_flags.raw
+ for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
+ dc_state->stream_status[i].plane_states[j]->update_flags.raw
= 0xffffffff;
}
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- /*
- * Resource allocation happens for link encoders for newer ASIC in
- * dc_validate_global_state, so we need to revalidate it.
- *
- * This shouldn't fail (it passed once before), so warn if it does.
- */
- WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
-#endif
WARN_ON(!dc_commit_state(dm->dc, dc_state));
@@ -2608,6 +2632,10 @@ static int dm_resume(void *handle)
/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
dc_resource_state_construct(dm->dc, dm_state->context);
+ /* Re-enable outbox interrupts for DPIA. */
+ if (dc_enable_dmub_notifications(adev->dm.dc))
+ amdgpu_dm_outbox_init(adev);
+
/* Before powering on DC we need to re-initialize DMUB. */
r = dm_dmub_hw_init(adev);
if (r)
@@ -2938,13 +2966,12 @@ void amdgpu_dm_update_connector_after_detect(
aconnector->edid =
(struct edid *)sink->dc_edid.raw_edid;
- drm_connector_update_edid_property(connector,
- aconnector->edid);
if (aconnector->dc_link->aux_mode)
drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
aconnector->edid);
}
+ drm_connector_update_edid_property(connector, aconnector->edid);
amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
update_connector_ext_caps(aconnector);
} else {
@@ -3909,6 +3936,9 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
caps = dm->backlight_caps[bl_idx];
dm->brightness[bl_idx] = user_brightness;
+ /* update scratch register */
+ if (bl_idx == 0)
+ amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
link = (struct dc_link *)dm->backlight_link[bl_idx];
@@ -4251,6 +4281,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
}
+ /*
+ * Disable vblank IRQs aggressively for power-saving.
+ *
+ * TODO: Fix vblank control helpers to delay PSR entry to allow this when PSR
+ * is also supported.
+ */
+ adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled;
+
/* Software is initialized. Now we can register interrupt handlers. */
switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
@@ -6036,11 +6074,72 @@ static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
{
stream->timing.flags.DSC = 0;
- if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
- dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
- aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
- aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
- dsc_caps);
+ if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ sink->sink_signal == SIGNAL_TYPE_EDP)) {
+ if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
+ sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+ dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
+ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
+ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
+ dsc_caps);
+ }
+}
+
+static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
+ struct dc_sink *sink, struct dc_stream_state *stream,
+ struct dsc_dec_dpcd_caps *dsc_caps,
+ uint32_t max_dsc_target_bpp_limit_override)
+{
+ const struct dc_link_settings *verified_link_cap = NULL;
+ uint32_t link_bw_in_kbps;
+ uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
+ struct dc *dc = sink->ctx->dc;
+ struct dc_dsc_bw_range bw_range = {0};
+ struct dc_dsc_config dsc_cfg = {0};
+
+ verified_link_cap = dc_link_get_link_cap(stream->link);
+ link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
+ edp_min_bpp_x16 = 8 * 16;
+ edp_max_bpp_x16 = 8 * 16;
+
+ if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
+ edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
+
+ if (edp_max_bpp_x16 < edp_min_bpp_x16)
+ edp_min_bpp_x16 = edp_max_bpp_x16;
+
+ if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
+ dc->debug.dsc_min_slice_height_override,
+ edp_min_bpp_x16, edp_max_bpp_x16,
+ dsc_caps,
+ &stream->timing,
+ &bw_range)) {
+
+ if (bw_range.max_kbps < link_bw_in_kbps) {
+ if (dc_dsc_compute_config(dc->res_pool->dscs[0],
+ dsc_caps,
+ dc->debug.dsc_min_slice_height_override,
+ max_dsc_target_bpp_limit_override,
+ 0,
+ &stream->timing,
+ &dsc_cfg)) {
+ stream->timing.dsc_cfg = dsc_cfg;
+ stream->timing.flags.DSC = 1;
+ stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
+ }
+ return;
+ }
+ }
+
+ if (dc_dsc_compute_config(dc->res_pool->dscs[0],
+ dsc_caps,
+ dc->debug.dsc_min_slice_height_override,
+ max_dsc_target_bpp_limit_override,
+ link_bw_in_kbps,
+ &stream->timing,
+ &dsc_cfg)) {
+ stream->timing.dsc_cfg = dsc_cfg;
+ stream->timing.flags.DSC = 1;
}
}
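
The eDP branch above first builds a target bits-per-pixel window before asking DC for a DSC config: the floor is a fixed 8 bpp (in the x16 fixed-point units DC uses), the ceiling is clamped to the panel's reported edp_max_bits_per_pixel, and the floor is pulled down if the ceiling ends up below it. A minimal standalone sketch of just that clamping step (plain C, hypothetical function name, not DC's API):

#include <stdint.h>
#include <stdio.h>

/* Model of the bpp-x16 window set up in apply_dsc_policy_for_edp().
 * Values are bits-per-pixel in 1/16 units, as used by DC's DSC code. */
static void edp_dsc_bpp_window(uint32_t panel_max_bpp_x16,
			       uint32_t *min_x16, uint32_t *max_x16)
{
	*min_x16 = 8 * 16;                  /* fixed 8 bpp floor */
	*max_x16 = 8 * 16;                  /* ceiling starts at the same 8 bpp */

	if (*max_x16 > panel_max_bpp_x16)   /* honour the panel's own limit */
		*max_x16 = panel_max_bpp_x16;

	if (*max_x16 < *min_x16)            /* never let the floor exceed the ceiling */
		*min_x16 = *max_x16;
}

int main(void)
{
	uint32_t lo, hi;

	edp_dsc_bpp_window(6 * 16, &lo, &hi);     /* panel caps DSC at 6 bpp */
	printf("window: %u..%u (x16)\n", lo, hi); /* prints 96..96 */
	return 0;
}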
@@ -6051,6 +6150,9 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
struct drm_connector *drm_connector = &aconnector->base;
uint32_t link_bandwidth_kbps;
uint32_t max_dsc_target_bpp_limit_override = 0;
+ struct dc *dc = sink->ctx->dc;
+ uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
+ uint32_t dsc_max_supported_bw_in_kbps;
link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
dc_link_get_link_cap(aconnector->dc_link));
@@ -6063,17 +6165,43 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_dsc_policy_set_enable_dsc_when_not_needed(
aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
- if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
+ dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
- if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
+ apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
+
+ } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
+ if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
dsc_caps,
aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
max_dsc_target_bpp_limit_override,
link_bandwidth_kbps,
&stream->timing,
&stream->timing.dsc_cfg)) {
- stream->timing.flags.DSC = 1;
- DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
+ stream->timing.flags.DSC = 1;
+ DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
+ __func__, drm_connector->name);
+ }
+ } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
+ timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+ max_supported_bw_in_kbps = link_bandwidth_kbps;
+ dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
+
+ if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
+ max_supported_bw_in_kbps > 0 &&
+ dsc_max_supported_bw_in_kbps > 0)
+ if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
+ dsc_caps,
+ aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
+ max_dsc_target_bpp_limit_override,
+ dsc_max_supported_bw_in_kbps,
+ &stream->timing,
+ &stream->timing.dsc_cfg)) {
+ stream->timing.flags.DSC = 1;
+ DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
+ __func__, drm_connector->name);
+ }
}
}
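
For the DP-to-HDMI PCON branch, DSC is only attempted when the uncompressed timing needs more bandwidth than the link provides; both caps default to the link bandwidth here. A standalone sketch of that gate, with made-up bandwidth figures (an HBR2 x4 link carries roughly 17.28 Gbps of payload after 8b/10b):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Gate modelled on the PCON branch above: only bother with DSC when the
 * uncompressed timing would not fit on the link. Bandwidths in kbps. */
static bool pcon_needs_dsc(uint32_t timing_bw_kbps, uint32_t link_bw_kbps)
{
	uint32_t max_supported = link_bw_kbps;      /* no tighter cap known here */
	uint32_t dsc_max_supported = link_bw_kbps;  /* DSC target bandwidth */

	return timing_bw_kbps > max_supported &&
	       max_supported > 0 &&
	       dsc_max_supported > 0;
}

int main(void)
{
	/* a timing needing ~20 Gbps over an HBR2 x4 link (~17.28 Gbps) */
	printf("%d\n", pcon_needs_dsc(20000000, 17280000)); /* 1: compress */
	printf("%d\n", pcon_needs_dsc(8000000, 17280000));  /* 0: fits as-is */
	return 0;
}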
@@ -8217,15 +8345,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
break;
case DRM_MODE_CONNECTOR_DisplayPort:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
- if (link->is_dig_mapping_flexible &&
- link->dc->res_pool->funcs->link_encs_assign) {
- link->link_enc =
- link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
- if (!link->link_enc)
- link->link_enc =
- link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
- }
-
+ link->link_enc = dp_get_link_enc(link);
+ ASSERT(link->link_enc);
if (link->link_enc)
aconnector->base.ycbcr_420_allowed =
link->link_enc->features.dp_ycbcr420_supported ? true : false;
@@ -10628,6 +10749,24 @@ static int dm_update_plane_state(struct dc *dc,
return ret;
}
+static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
+ int *src_w, int *src_h)
+{
+ switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
+ case DRM_MODE_ROTATE_90:
+ case DRM_MODE_ROTATE_270:
+ *src_w = plane_state->src_h >> 16;
+ *src_h = plane_state->src_w >> 16;
+ break;
+ case DRM_MODE_ROTATE_0:
+ case DRM_MODE_ROTATE_180:
+ default:
+ *src_w = plane_state->src_w >> 16;
+ *src_h = plane_state->src_h >> 16;
+ break;
+ }
+}
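
dm_get_oriented_plane_size() swaps the 16.16 fixed-point src_w/src_h for 90/270-degree rotations so the x1000 scale ratios computed a few lines below compare like with like. A self-contained sketch of the swap and the ratio calculation (the struct is a hypothetical stand-in for drm_plane_state, not the real one):

#include <stdint.h>
#include <stdio.h>

/* Minimal stand-ins for the DRM plane state fields used here (hypothetical). */
struct fake_plane_state {
	uint32_t src_w, src_h;   /* 16.16 fixed point, as in drm_plane_state */
	uint32_t crtc_w, crtc_h; /* destination size in pixels */
	unsigned int rotation;   /* plain degrees for this sketch: 0/90/180/270 */
};

static void oriented_src_size(const struct fake_plane_state *s, int *w, int *h)
{
	if (s->rotation == 90 || s->rotation == 270) {
		*w = s->src_h >> 16;   /* width and height swap under 90/270 */
		*h = s->src_w >> 16;
	} else {
		*w = s->src_w >> 16;
		*h = s->src_h >> 16;
	}
}

int main(void)
{
	struct fake_plane_state cursor = {
		.src_w = 64 << 16, .src_h = 64 << 16,
		.crtc_w = 64, .crtc_h = 64, .rotation = 90,
	};
	int sw, sh;

	oriented_src_size(&cursor, &sw, &sh);
	/* Same x1000 ratio the atomic check compares against the underlying plane. */
	printf("scale %d x %d (per mille)\n",
	       (int)(cursor.crtc_w * 1000 / sw), (int)(cursor.crtc_h * 1000 / sh));
	return 0;
}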
+
static int dm_check_crtc_cursor(struct drm_atomic_state *state,
struct drm_crtc *crtc,
struct drm_crtc_state *new_crtc_state)
@@ -10636,6 +10775,8 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
struct drm_plane_state *new_cursor_state, *new_underlying_state;
int i;
int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
+ int cursor_src_w, cursor_src_h;
+ int underlying_src_w, underlying_src_h;
/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
* cursor per pipe but it's going to inherit the scaling and
@@ -10647,10 +10788,9 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
return 0;
}
- cursor_scale_w = new_cursor_state->crtc_w * 1000 /
- (new_cursor_state->src_w >> 16);
- cursor_scale_h = new_cursor_state->crtc_h * 1000 /
- (new_cursor_state->src_h >> 16);
+ dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
+ cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
+ cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
/* Narrow down to non-cursor planes on the same CRTC as the cursor */
@@ -10661,10 +10801,10 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
if (!new_underlying_state->fb)
continue;
- underlying_scale_w = new_underlying_state->crtc_w * 1000 /
- (new_underlying_state->src_w >> 16);
- underlying_scale_h = new_underlying_state->crtc_h * 1000 /
- (new_underlying_state->src_h >> 16);
+ dm_get_oriented_plane_size(new_underlying_state,
+ &underlying_src_w, &underlying_src_h);
+ underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
+ underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
if (cursor_scale_w != underlying_scale_w ||
cursor_scale_h != underlying_scale_h) {
@@ -10759,8 +10899,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
trace_amdgpu_dm_atomic_check_begin(state);
ret = drm_atomic_helper_check_modeset(dev, state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
goto fail;
+ }
/* Check connector changes */
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
@@ -10776,6 +10918,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
if (IS_ERR(new_crtc_state)) {
+ DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
ret = PTR_ERR(new_crtc_state);
goto fail;
}
@@ -10790,8 +10933,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
ret = add_affected_mst_dsc_crtcs(state, crtc);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
goto fail;
+ }
}
}
}
@@ -10806,19 +10951,25 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
continue;
ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
goto fail;
+ }
if (!new_crtc_state->enable)
continue;
ret = drm_atomic_add_affected_connectors(state, crtc);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
goto fail;
+ }
ret = drm_atomic_add_affected_planes(state, crtc);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
goto fail;
+ }
if (dm_old_crtc_state->dsc_force_changed)
new_crtc_state->mode_changed = true;
@@ -10855,6 +11006,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (IS_ERR(new_plane_state)) {
ret = PTR_ERR(new_plane_state);
+ DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
goto fail;
}
}
@@ -10867,8 +11019,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
new_plane_state,
false,
&lock_and_validation_needed);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
goto fail;
+ }
}
/* Disable all crtcs which require disable */
@@ -10878,8 +11032,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
new_crtc_state,
false,
&lock_and_validation_needed);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
goto fail;
+ }
}
/* Enable all crtcs which require enable */
@@ -10889,8 +11045,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
new_crtc_state,
true,
&lock_and_validation_needed);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
goto fail;
+ }
}
/* Add new/modified planes */
@@ -10900,20 +11058,26 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
new_plane_state,
true,
&lock_and_validation_needed);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
goto fail;
+ }
}
/* Run this here since we want to validate the streams we created */
ret = drm_atomic_helper_check_planes(dev, state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
goto fail;
+ }
/* Check cursor planes scaling */
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
goto fail;
+ }
}
if (state->legacy_cursor_update) {
@@ -11000,20 +11164,28 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
*/
if (lock_and_validation_needed) {
ret = dm_atomic_get_state(state, &dm_state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
goto fail;
+ }
ret = do_aquire_global_lock(dev, state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
goto fail;
+ }
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars))
+ if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
+ DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
goto fail;
+ }
ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
goto fail;
+ }
#endif
/*
@@ -11023,12 +11195,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
* to get stuck in an infinite loop and hang eventually.
*/
ret = drm_dp_mst_atomic_check(state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
goto fail;
- status = dc_validate_global_state(dc, dm_state->context, false);
+ }
+ status = dc_validate_global_state(dc, dm_state->context, true);
if (status != DC_OK) {
- drm_dbg_atomic(dev,
- "DC global validation failure: %s (%d)",
+ DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
dc_status_to_str(status), status);
ret = -EINVAL;
goto fail;
@@ -11150,7 +11323,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
input->offset = offset;
input->length = length;
- input->total_length = total_length;
+ input->cea_total_length = total_length;
memcpy(input->payload, data, length);
res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
@@ -11457,8 +11630,10 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
return value;
}
-int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
- uint8_t status_type, uint32_t *operation_result)
+static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
+ struct dc_context *ctx,
+ uint8_t status_type,
+ uint32_t *operation_result)
{
struct amdgpu_device *adev = ctx->driver_context;
int return_status = -1;
@@ -11529,3 +11704,24 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context
ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
(uint32_t *)operation_result);
}
+
+/*
+ * Check whether seamless boot is supported.
+ *
+ * So far we only support seamless boot on CHIP_VANGOGH.
+ * If everything goes well, we may consider expanding
+ * seamless boot to other ASICs.
+ */
+bool check_seamless_boot_capability(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_VANGOGH:
+ if (!adev->mman.keep_stolen_vga_memory)
+ return true;
+ break;
+ default:
+ break;
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 37e61a88d49e..c98e402eab0c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -50,9 +50,9 @@
#define AMDGPU_DMUB_NOTIFICATION_MAX 5
-/**
+/*
* DMUB Async to Sync Mechanism Status
- **/
+ */
#define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1
#define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2
#define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3
@@ -731,4 +731,7 @@ extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux,
struct dc_context *ctx, unsigned int link_index,
void *payload, void *operation_result);
+
+bool check_seamless_boot_capability(struct amdgpu_device *adev);
+
#endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index a022e5bb30a5..a71177305bcd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -285,8 +285,12 @@ static int __set_input_tf(struct dc_transfer_func *func,
}
/**
+ * amdgpu_dm_verify_lut_sizes
+ * @crtc_state: the DRM CRTC state
+ *
* Verifies that the Degamma and Gamma LUTs attached to the |crtc_state| are of
* the expected size.
+ *
* Returns 0 on success.
*/
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index cce062adc439..8a441a22c46e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -314,6 +314,14 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
ret = -EINVAL;
goto cleanup;
}
+
+ if ((aconn->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
+ (aconn->base.connector_type != DRM_MODE_CONNECTOR_eDP)) {
+ DRM_DEBUG_DRIVER("No DP connector available for CRC source\n");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
}
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 0277685864c5..26719efa5396 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -824,6 +824,48 @@ static int dmub_fw_state_show(struct seq_file *m, void *data)
return seq_write(m, state_base, state_size);
}
+/* psr_capability_show() - show eDP panel PSR capability
+ *
+ * The read function: psr_capability_show
+ * Shows if sink has PSR capability or not.
+ * If yes - the PSR version is appended
+ *
+ * cat /sys/kernel/debug/dri/0/eDP-X/psr_capability
+ *
+ * Expected output:
+ * "Sink support: no\n" - if panel doesn't support PSR
+ * "Sink support: yes [0x01]\n" - if panel supports PSR1
+ * "Driver support: no\n" - if driver doesn't support PSR
+ * "Driver support: yes [0x01]\n" - if driver supports PSR1
+ */
+static int psr_capability_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct dc_link *link = aconnector->dc_link;
+
+ if (!link)
+ return -ENODEV;
+
+ if (link->type == dc_connection_none)
+ return -ENODEV;
+
+ if (!(link->connector_signal & SIGNAL_TYPE_EDP))
+ return -ENODEV;
+
+ seq_printf(m, "Sink support: %s", yesno(link->dpcd_caps.psr_caps.psr_version != 0));
+ if (link->dpcd_caps.psr_caps.psr_version)
+ seq_printf(m, " [0x%02x]", link->dpcd_caps.psr_caps.psr_version);
+ seq_puts(m, "\n");
+
+ seq_printf(m, "Driver support: %s", yesno(link->psr_settings.psr_feature_enabled));
+ if (link->psr_settings.psr_version)
+ seq_printf(m, " [0x%02x]", link->psr_settings.psr_version);
+ seq_puts(m, "\n");
+
+ return 0;
+}
+
/*
* Returns the current and maximum output bpc for the connector.
* Example usage: cat /sys/kernel/debug/dri/0/DP-1/output_bpc
@@ -2467,6 +2509,7 @@ DEFINE_SHOW_ATTRIBUTE(dp_lttpr_status);
DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
#endif
DEFINE_SHOW_ATTRIBUTE(internal_display);
+DEFINE_SHOW_ATTRIBUTE(psr_capability);
static const struct file_operations dp_dsc_clock_en_debugfs_fops = {
.owner = THIS_MODULE,
@@ -2712,6 +2755,138 @@ static const struct {
{"internal_display", &internal_display_fops}
};
+/*
+ * Returns the customized link rates supported by this eDP panel.
+ * Example usage: cat /sys/kernel/debug/dri/0/eDP-x/ilr_setting
+ */
+static int edp_ilr_show(struct seq_file *m, void *unused)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(m->private);
+ struct dc_link *link = aconnector->dc_link;
+ uint8_t supported_link_rates[16];
+ uint32_t link_rate_in_khz;
+ uint32_t entry = 0;
+ uint8_t dpcd_rev;
+
+ memset(supported_link_rates, 0, sizeof(supported_link_rates));
+ dm_helpers_dp_read_dpcd(link->ctx, link, DP_SUPPORTED_LINK_RATES,
+ supported_link_rates, sizeof(supported_link_rates));
+
+ dpcd_rev = link->dpcd_caps.dpcd_rev.raw;
+
+ if (dpcd_rev >= DP_DPCD_REV_13 &&
+ (supported_link_rates[entry+1] != 0 || supported_link_rates[entry] != 0)) {
+
+ for (entry = 0; entry < 16; entry += 2) {
+ link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 +
+ supported_link_rates[entry]) * 200;
+ seq_printf(m, "[%d] %d kHz\n", entry/2, link_rate_in_khz);
+ }
+ } else {
+ seq_printf(m, "ILR is not supported by this eDP panel.\n");
+ }
+
+ return 0;
+}
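
The ILR table read above is eight little-endian 16-bit DPCD entries in units of 200 kHz, which is where the `* 0x100 + ... * 200` arithmetic comes from. A standalone sketch of the decoding with made-up register contents:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical DP_SUPPORTED_LINK_RATES contents: little-endian 16-bit
	 * values in 200 kHz units; 0x1FA4 = 8100 -> 1,620,000 kHz (RBR),
	 * 0x2A30 = 10800 -> 2,160,000 kHz. Unused entries stay zero. */
	uint8_t raw[16] = { 0xA4, 0x1F, 0x30, 0x2A };

	for (int entry = 0; entry < 16; entry += 2) {
		uint32_t rate_khz = (raw[entry + 1] * 0x100 + raw[entry]) * 200;

		if (!rate_khz)
			break;
		printf("[%d] %u kHz\n", entry / 2, rate_khz);
	}
	return 0;
}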
+
+/*
+ * Set a supported customized link rate on the eDP panel.
+ *
+ * echo <lane_count> <link_rate option> > ilr_setting
+ *
+ * for example, supported ILR : [0] 1620000 kHz [1] 2160000 kHz [2] 2430000 kHz ...
+ * echo 4 1 > /sys/kernel/debug/dri/0/eDP-x/ilr_setting
+ * to set 4 lanes and 2.16 GHz
+ */
+static ssize_t edp_ilr_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ struct amdgpu_device *adev = drm_to_adev(connector->base.dev);
+ struct dc *dc = (struct dc *)link->dc;
+ struct dc_link_settings prefer_link_settings;
+ char *wr_buf = NULL;
+ const uint32_t wr_buf_size = 40;
+ /* 0: lane_count; 1: link_rate */
+ int max_param_num = 2;
+ uint8_t param_nums = 0;
+ long param[2];
+ bool valid_input = true;
+
+ if (size == 0)
+ return -EINVAL;
+
+ wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+ if (!wr_buf)
+ return -ENOMEM;
+
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+ (long *)param, buf,
+ max_param_num,
+ &param_nums)) {
+ kfree(wr_buf);
+ return -EINVAL;
+ }
+
+ if (param_nums <= 0) {
+ kfree(wr_buf);
+ return -EINVAL;
+ }
+
+ switch (param[0]) {
+ case LANE_COUNT_ONE:
+ case LANE_COUNT_TWO:
+ case LANE_COUNT_FOUR:
+ break;
+ default:
+ valid_input = false;
+ break;
+ }
+
+ if (param[1] >= link->dpcd_caps.edp_supported_link_rates_count)
+ valid_input = false;
+
+ if (!valid_input) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("Invalid Input value. No HW will be programmed\n");
+ prefer_link_settings.use_link_rate_set = false;
+ dc_link_set_preferred_training_settings(dc, NULL, NULL, link, true);
+ return size;
+ }
+
+ /* save the user-forced lane_count and link_rate to preferred settings;
+ * spread spectrum will not be changed
+ */
+ prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
+ prefer_link_settings.lane_count = param[0];
+ prefer_link_settings.use_link_rate_set = true;
+ prefer_link_settings.link_rate_set = param[1];
+ prefer_link_settings.link_rate = link->dpcd_caps.edp_supported_link_rates[param[1]];
+
+ mutex_lock(&adev->dm.dc_lock);
+ dc_link_set_preferred_training_settings(dc, &prefer_link_settings,
+ NULL, link, false);
+ mutex_unlock(&adev->dm.dc_lock);
+
+ kfree(wr_buf);
+ return size;
+}
+
+static int edp_ilr_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, edp_ilr_show, inode->i_private);
+}
+
+static const struct file_operations edp_ilr_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = edp_ilr_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = edp_ilr_write
+};
+
void connector_debugfs_init(struct amdgpu_dm_connector *connector)
{
int i;
@@ -2726,11 +2901,14 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
}
}
if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) {
+ debugfs_create_file_unsafe("psr_capability", 0444, dir, connector, &psr_capability_fops);
debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops);
debugfs_create_file("amdgpu_current_backlight_pwm", 0444, dir, connector,
&current_backlight_fops);
debugfs_create_file("amdgpu_target_backlight_pwm", 0444, dir, connector,
&target_backlight_fops);
+ debugfs_create_file("ilr_setting", 0644, dir, connector,
+ &edp_ilr_debugfs_fops);
}
for (i = 0; i < ARRAY_SIZE(connector_debugfs_entries); i++) {
@@ -2909,10 +3087,13 @@ static int crc_win_update_set(void *data, u64 val)
struct amdgpu_device *adev = drm_to_adev(new_crtc->dev);
struct crc_rd_work *crc_rd_wrk = adev->dm.crc_rd_wrk;
+ if (!crc_rd_wrk)
+ return 0;
+
if (val) {
spin_lock_irq(&adev_to_drm(adev)->event_lock);
spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
- if (crc_rd_wrk && crc_rd_wrk->crtc) {
+ if (crc_rd_wrk->crtc) {
old_crtc = crc_rd_wrk->crtc;
old_acrtc = to_amdgpu_crtc(old_crtc);
}
@@ -3190,6 +3371,32 @@ static int disable_hpd_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(disable_hpd_ops, disable_hpd_get,
disable_hpd_set, "%llu\n");
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+/*
+ * Temporary w/a to force sst sequence in M42D DP2 mst receiver
+ * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dp_set_mst_en_for_sst
+ */
+static int dp_force_sst_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = data;
+
+ adev->dm.dc->debug.set_mst_en_for_sst = val;
+
+ return 0;
+}
+
+static int dp_force_sst_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = data;
+
+ *val = adev->dm.dc->debug.set_mst_en_for_sst;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(dp_set_mst_en_for_sst_ops, dp_force_sst_get,
+ dp_force_sst_set, "%llu\n");
+#endif
+
/*
* Sets the DC visual confirm debug option from the given string.
* Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_visual_confirm
@@ -3299,6 +3506,10 @@ void dtn_debugfs_init(struct amdgpu_device *adev)
adev, &mst_topo_fops);
debugfs_create_file("amdgpu_dm_dtn_log", 0644, root, adev,
&dtn_log_fops);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ debugfs_create_file("amdgpu_dm_dp_set_mst_en_for_sst", 0644, root, adev,
+ &dp_set_mst_en_for_sst_ops);
+#endif
debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root, adev,
&visual_confirm_fops);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 8cbeeb7c986d..29f07c26d080 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -83,16 +83,17 @@ static int amdgpu_dm_patch_edid_caps(struct dc_edid_caps *edid_caps)
* void
* */
enum dc_edid_status dm_helpers_parse_edid_caps(
- struct dc_context *ctx,
+ struct dc_link *link,
const struct dc_edid *edid,
struct dc_edid_caps *edid_caps)
{
+ struct amdgpu_dm_connector *aconnector = link->priv;
+ struct drm_connector *connector = &aconnector->base;
struct edid *edid_buf = (struct edid *) edid->raw_edid;
struct cea_sad *sads;
int sad_count = -1;
int sadb_count = -1;
int i = 0;
- int j = 0;
uint8_t *sadb = NULL;
enum dc_edid_status result = EDID_OK;
@@ -111,23 +112,11 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
edid_caps->manufacture_week = edid_buf->mfg_week;
edid_caps->manufacture_year = edid_buf->mfg_year;
- /* One of the four detailed_timings stores the monitor name. It's
- * stored in an array of length 13. */
- for (i = 0; i < 4; i++) {
- if (edid_buf->detailed_timings[i].data.other_data.type == 0xfc) {
- while (j < 13 && edid_buf->detailed_timings[i].data.other_data.data.str.str[j]) {
- if (edid_buf->detailed_timings[i].data.other_data.data.str.str[j] == '\n')
- break;
-
- edid_caps->display_name[j] =
- edid_buf->detailed_timings[i].data.other_data.data.str.str[j];
- j++;
- }
- }
- }
+ drm_edid_get_monitor_name(edid_buf,
+ edid_caps->display_name,
+ AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
- edid_caps->edid_hdmi = drm_detect_hdmi_monitor(
- (struct edid *) edid->raw_edid);
+ edid_caps->edid_hdmi = connector->display_info.is_hdmi;
sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
if (sad_count <= 0)
@@ -584,9 +573,18 @@ bool dm_helpers_dp_write_dsc_enable(
ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1);
}
- if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT) {
- ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
- DC_LOG_DC("Send DSC %s to sst display\n", enable_dsc ? "enable" : "disable");
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
+#endif
+ ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
+ DC_LOG_DC("Send DSC %s to SST RX\n", enable_dsc ? "enable" : "disable");
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ } else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
+ ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
+ DC_LOG_DC("Send DSC %s to DP-HDMI PCON\n", enable_dsc ? "enable" : "disable");
+ }
+#endif
}
return (ret > 0);
@@ -650,14 +648,8 @@ enum dc_edid_status dm_helpers_read_local_edid(
/* We don't need the original edid anymore */
kfree(edid);
- /* connector->display_info will be parsed from EDID and saved
- * into drm_connector->display_info from edid by call stack
- * below:
- * drm_parse_ycbcr420_deep_color_info
- * drm_parse_hdmi_forum_vsdb
- * drm_parse_cea_ext
- * drm_add_display_info
- * drm_connector_update_edid_property
+ /* connector->display_info is parsed from EDID and saved
+ * into drm_connector->display_info
*
* drm_connector->display_info will be used by amdgpu_dm funcs,
* like fill_stream_properties_from_drm_display_mode
@@ -665,7 +657,7 @@ enum dc_edid_status dm_helpers_read_local_edid(
amdgpu_dm_update_connector_after_detect(aconnector);
edid_status = dm_helpers_parse_edid_caps(
- ctx,
+ link,
&sink->dc_edid,
&sink->edid_caps);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 32a5ce09a62a..cc34a35d0bcb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -36,6 +36,8 @@
#include "dm_helpers.h"
#include "dc_link_ddc.h"
+#include "ddc_service_types.h"
+#include "dpcd_defs.h"
#include "i2caux_interface.h"
#include "dmub_cmd.h"
@@ -157,6 +159,16 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
};
#if defined(CONFIG_DRM_AMD_DC_DCN)
+static bool needs_dsc_aux_workaround(struct dc_link *link)
+{
+ if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+ (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) &&
+ link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2)
+ return true;
+
+ return false;
+}
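
needs_dsc_aux_workaround() narrows the old unconditional dock workaround to a specific branch device (OUI 0x90CC24) on DPCD 1.2 or 1.4 with at least two downstream sinks. A flattened standalone sketch of the predicate; the struct and the literal DPCD revision values are assumptions for illustration only:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical flattened view of the DPCD fields tested above. */
struct branch_caps {
	unsigned int branch_dev_id;   /* branch OUI, e.g. 0x90CC24 for the affected dock */
	unsigned char dpcd_rev;       /* assumed raw values: 0x12 or 0x14 */
	unsigned char sink_count;
};

static bool needs_dsc_aux_workaround(const struct branch_caps *c)
{
	return c->branch_dev_id == 0x90CC24 &&
	       (c->dpcd_rev == 0x14 || c->dpcd_rev == 0x12) &&
	       c->sink_count >= 2;
}

int main(void)
{
	struct branch_caps dock = { 0x90CC24, 0x14, 2 };

	printf("%d\n", needs_dsc_aux_workaround(&dock)); /* 1: use the dock's AUX for DSC */
	return 0;
}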
+
static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
{
struct dc_sink *dc_sink = aconnector->dc_sink;
@@ -166,7 +178,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
u8 *dsc_branch_dec_caps = NULL;
aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
-#if defined(CONFIG_HP_HOOK_WORKAROUND)
+
/*
* drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs
* because it only check the dsc/fec caps of the "port variable" and not the dock
@@ -176,10 +188,10 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
* Workaround: explicitly check the use case above and use the mst dock's aux as dsc_aux
*
*/
-
- if (!aconnector->dsc_aux && !port->parent->port_parent)
+ if (!aconnector->dsc_aux && !port->parent->port_parent &&
+ needs_dsc_aux_workaround(aconnector->dc_link))
aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux;
-#endif
+
if (!aconnector->dsc_aux)
return false;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index c022e56f9459..c510638b4f99 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -26,6 +26,73 @@
#include "amdgpu_dm_psr.h"
#include "dc.h"
#include "dm_helpers.h"
+#include "amdgpu_dm.h"
+
+static bool link_get_psr_caps(struct dc_link *link)
+{
+ uint8_t psr_dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
+ uint8_t edp_rev_dpcd_data;
+
+
+
+ if (!dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
+ psr_dpcd_data, sizeof(psr_dpcd_data)))
+ return false;
+
+ if (!dm_helpers_dp_read_dpcd(NULL, link, DP_EDP_DPCD_REV,
+ &edp_rev_dpcd_data, sizeof(edp_rev_dpcd_data)))
+ return false;
+
+ link->dpcd_caps.psr_caps.psr_version = psr_dpcd_data[0];
+ link->dpcd_caps.psr_caps.edp_revision = edp_rev_dpcd_data;
+
+#ifdef CONFIG_DRM_AMD_DC_DCN
+ if (link->dpcd_caps.psr_caps.psr_version > 0x1) {
+ uint8_t alpm_dpcd_data;
+ uint8_t su_granularity_dpcd_data;
+
+ if (!dm_helpers_dp_read_dpcd(NULL, link, DP_RECEIVER_ALPM_CAP,
+ &alpm_dpcd_data, sizeof(alpm_dpcd_data)))
+ return false;
+
+ if (!dm_helpers_dp_read_dpcd(NULL, link, DP_PSR2_SU_Y_GRANULARITY,
+ &su_granularity_dpcd_data, sizeof(su_granularity_dpcd_data)))
+ return false;
+
+ link->dpcd_caps.psr_caps.y_coordinate_required = psr_dpcd_data[1] & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
+ link->dpcd_caps.psr_caps.su_granularity_required = psr_dpcd_data[1] & DP_PSR2_SU_GRANULARITY_REQUIRED;
+
+ link->dpcd_caps.psr_caps.alpm_cap = alpm_dpcd_data & DP_ALPM_CAP;
+ link->dpcd_caps.psr_caps.standby_support = alpm_dpcd_data & (1 << 1);
+
+ link->dpcd_caps.psr_caps.su_y_granularity = su_granularity_dpcd_data;
+ }
+#endif
+ return true;
+}
+
+#ifdef CONFIG_DRM_AMD_DC_DCN
+static bool link_supports_psrsu(struct dc_link *link)
+{
+ struct dc *dc = link->ctx->dc;
+
+ if (!dc->caps.dmcub_support)
+ return false;
+
+ if (dc->ctx->dce_version < DCN_VERSION_3_1)
+ return false;
+
+ if (!link->dpcd_caps.psr_caps.alpm_cap ||
+ !link->dpcd_caps.psr_caps.y_coordinate_required)
+ return false;
+
+ if (link->dpcd_caps.psr_caps.su_granularity_required &&
+ !link->dpcd_caps.psr_caps.su_y_granularity)
+ return false;
+
+ return true;
+}
+#endif
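
Taken together, link_get_psr_caps() and link_supports_psrsu() decide between DC_PSR_VERSION_SU_1 and DC_PSR_VERSION_1: PSR-SU additionally needs DMCUB, DCN 3.1 or newer, ALPM plus the Y-coordinate requirement, and a reported Y granularity whenever the sink asks for one. A condensed standalone sketch of that decision (the caps struct is hypothetical, not DC's):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical condensed view of the PSR caps used above. */
struct psr_caps {
	unsigned char psr_version;        /* 0 = unsupported, 1 = PSR1, >1 = PSR2+ */
	bool alpm_cap;
	bool y_coordinate_required;
	bool su_granularity_required;
	unsigned char su_y_granularity;
	bool asic_has_dmcub;              /* stands in for dc->caps.dmcub_support */
	bool asic_is_dcn31_or_newer;      /* stands in for the DCN_VERSION_3_1 check */
};

enum psr_version { PSR_UNSUPPORTED, PSR_1, PSR_SU_1 };

static enum psr_version pick_psr_version(const struct psr_caps *c)
{
	if (c->psr_version == 0)
		return PSR_UNSUPPORTED;

	if (c->asic_has_dmcub && c->asic_is_dcn31_or_newer &&
	    c->alpm_cap && c->y_coordinate_required &&
	    (!c->su_granularity_required || c->su_y_granularity))
		return PSR_SU_1;

	return PSR_1;
}

int main(void)
{
	struct psr_caps panel = {
		.psr_version = 3, .alpm_cap = true, .y_coordinate_required = true,
		.su_granularity_required = false,
		.asic_has_dmcub = true, .asic_is_dcn31_or_newer = true,
	};

	printf("%d\n", pick_psr_version(&panel)); /* 2 -> PSR-SU selected */
	return 0;
}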
/*
* amdgpu_dm_set_psr_caps() - set link psr capabilities
@@ -34,26 +101,34 @@
*/
void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
- uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
-
if (!(link->connector_signal & SIGNAL_TYPE_EDP))
return;
+
if (link->type == dc_connection_none)
return;
- if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
- dpcd_data, sizeof(dpcd_data))) {
- link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
-
- if (dpcd_data[0] == 0) {
- link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
- link->psr_settings.psr_feature_enabled = false;
- } else {
+
+ if (!link_get_psr_caps(link)) {
+ DRM_ERROR("amdgpu: Failed to read PSR Caps!\n");
+ return;
+ }
+
+ if (link->dpcd_caps.psr_caps.psr_version == 0) {
+ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+ link->psr_settings.psr_feature_enabled = false;
+
+ } else {
+#ifdef CONFIG_DRM_AMD_DC_DCN
+ if (link_supports_psrsu(link))
+ link->psr_settings.psr_version = DC_PSR_VERSION_SU_1;
+ else
+#endif
link->psr_settings.psr_version = DC_PSR_VERSION_1;
- link->psr_settings.psr_feature_enabled = true;
- }
- DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
+ link->psr_settings.psr_feature_enabled = true;
}
+
+ DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
+
}
/*
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index a4bef4364afd..1e385d55e7fb 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -2995,7 +2995,7 @@ static bool bios_parser2_construct(
&bp->object_info_tbl.revision);
if (bp->object_info_tbl.revision.major == 1
- && bp->object_info_tbl.revision.minor >= 4) {
+ && bp->object_info_tbl.revision.minor == 4) {
struct display_object_info_table_v1_4 *tbl_v1_4;
tbl_v1_4 = GET_IMAGE(struct display_object_info_table_v1_4,
@@ -3004,8 +3004,10 @@ static bool bios_parser2_construct(
return false;
bp->object_info_tbl.v1_4 = tbl_v1_4;
- } else
+ } else {
+ ASSERT(0);
return false;
+ }
dal_firmware_parser_init_cmd_tbl(bp);
dal_bios_parser_init_cmd_tbl_helper2(&bp->cmd_helper, dce_version);
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 6b248cd2a461..ec19678a0702 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -739,7 +739,9 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
hack_force_pipe_split(v, context->streams[0]->timing.pix_clk_100hz);
}
-unsigned int get_highest_allowed_voltage_level(uint32_t chip_family, uint32_t hw_internal_rev, uint32_t pci_revision_id)
+static unsigned int get_highest_allowed_voltage_level(uint32_t chip_family,
+ uint32_t hw_internal_rev,
+ uint32_t pci_revision_id)
{
/* for low power RV2 variants, the highest voltage level we want is 0 */
if ((chip_family == FAMILY_RV) &&
@@ -763,7 +765,7 @@ unsigned int get_highest_allowed_voltage_level(uint32_t chip_family, uint32_t hw
return 4;
}
-bool dcn_validate_bandwidth(
+bool dcn10_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 26f96ee32472..9200c8ce02ba 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -308,8 +308,7 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
case FAMILY_NV:
if (ASICREV_IS_SIENNA_CICHLID_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
dcn3_clk_mgr_destroy(clk_mgr);
- }
- if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
+ } else if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
dcn3_clk_mgr_destroy(clk_mgr);
}
if (ASICREV_IS_BEIGE_GOBY_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
index 76ec8ec92efd..60761ff3cbf1 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
@@ -34,7 +34,7 @@
#include "rv1_clk_mgr_vbios_smu.h"
#include "rv1_clk_mgr_clk.h"
-void rv1_init_clocks(struct clk_mgr *clk_mgr)
+static void rv1_init_clocks(struct clk_mgr *clk_mgr)
{
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
index fe18bb9e19aa..06bab24d8e27 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
@@ -28,6 +28,8 @@
#include "reg_helper.h"
#include <linux/delay.h>
+#include "rv1_clk_mgr_vbios_smu.h"
+
#define MAX_INSTANCE 5
#define MAX_SEGMENT 5
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 2108bff49d4e..9f35f2e8f971 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -409,7 +409,7 @@ void dcn2_init_clocks(struct clk_mgr *clk_mgr)
clk_mgr->clks.prev_p_state_change_support = true;
}
-void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base)
+static void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct pp_smu_funcs_nv *pp_smu = NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
index db9950244c7b..fbdd0a92d146 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
@@ -74,42 +74,6 @@ static const struct clk_mgr_mask clk_mgr_mask = {
CLK_COMMON_MASK_SH_LIST_DCN201_BASE(_MASK)
};
-void dcn201_update_clocks_vbios(struct clk_mgr *clk_mgr,
- struct dc_state *context,
- bool safe_to_lower)
-{
- struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
-
- bool update_dppclk = false;
- bool update_dispclk = false;
-
- if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->clks.dppclk_khz)) {
- clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz;
- update_dppclk = true;
- }
-
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr->clks.dispclk_khz)) {
- clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
- update_dispclk = true;
- }
-
- if (update_dppclk || update_dispclk) {
- struct bp_set_dce_clock_parameters dce_clk_params;
- struct dc_bios *bp = clk_mgr->ctx->dc_bios;
-
- if (update_dispclk) {
- memset(&dce_clk_params, 0, sizeof(dce_clk_params));
- dce_clk_params.target_clock_frequency = new_clocks->dispclk_khz;
- dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
- dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
- bp->funcs->set_dce_clock(bp, &dce_clk_params);
- }
- /* currently there is no DCECLOCK_TYPE_DPPCLK type defined in VBIOS interface.
- * vbios program DPPCLK to the same DispCLK limitation
- */
- }
-}
-
static void dcn201_init_clocks(struct clk_mgr *clk_mgr)
{
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
@@ -126,10 +90,8 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base,
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
- int display_count;
bool update_dppclk = false;
bool update_dispclk = false;
- bool enter_display_off = false;
bool dpp_clock_lowered = false;
bool force_reset = false;
bool p_state_change_support;
@@ -145,10 +107,7 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base,
dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
}
- display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
-
- if (display_count == 0)
- enter_display_off = true;
+ clk_mgr_helper_get_active_display_cnt(dc, context);
if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz))
clk_mgr_base->clks.phyclk_khz = new_clocks->phyclk_khz;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index ac2d4c4f04e4..fbda42313bfe 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -56,9 +56,7 @@
/* TODO: evaluate how to lower or disable all dcn clocks in screen off case */
-int rn_get_active_display_cnt_wa(
- struct dc *dc,
- struct dc_state *context)
+static int rn_get_active_display_cnt_wa(struct dc *dc, struct dc_state *context)
{
int i, display_count;
bool tmds_present = false;
@@ -77,7 +75,8 @@ int rn_get_active_display_cnt_wa(
const struct dc_link *link = dc->links[i];
/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
- if (link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ if (link->link_enc->funcs->is_dig_enabled &&
+ link->link_enc->funcs->is_dig_enabled(link->link_enc))
display_count++;
}
@@ -88,7 +87,7 @@ int rn_get_active_display_cnt_wa(
return display_count;
}
-void rn_set_low_power_state(struct clk_mgr *clk_mgr_base)
+static void rn_set_low_power_state(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
@@ -122,7 +121,7 @@ static void rn_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
}
-void rn_update_clocks(struct clk_mgr *clk_mgr_base,
+static void rn_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
{
@@ -437,25 +436,14 @@ static void rn_dump_clk_registers(struct clk_state_registers_and_bypass *regs_an
}
}
-/* This function produce translated logical clk state values*/
-void rn_get_clk_states(struct clk_mgr *clk_mgr_base, struct clk_states *s)
-{
- struct clk_state_registers_and_bypass sb = { 0 };
- struct clk_log_info log_info = { 0 };
-
- rn_dump_clk_registers(&sb, clk_mgr_base, &log_info);
-
- s->dprefclk_khz = sb.dprefclk * 1000;
-}
-
-void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base)
+static void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
rn_vbios_smu_enable_pme_wa(clk_mgr);
}
-void rn_init_clocks(struct clk_mgr *clk_mgr)
+static void rn_init_clocks(struct clk_mgr *clk_mgr)
{
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
// Assumption is that boot state always supports pstate
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
index 9f7eed6688c4..8161a6ae410d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
@@ -33,6 +33,8 @@
#include "mp/mp_12_0_0_offset.h"
#include "mp/mp_12_0_0_sh_mask.h"
+#include "rn_clk_mgr_vbios_smu.h"
+
#define REG(reg_name) \
(MP0_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
@@ -86,7 +88,9 @@ static uint32_t rn_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsig
}
-int rn_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, unsigned int msg_id, unsigned int param)
+static int rn_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
+ unsigned int msg_id,
+ unsigned int param)
{
uint32_t result;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index 1861a147a7fa..f977f29907df 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -252,6 +252,7 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
bool update_dispclk = false;
bool enter_display_off = false;
bool dpp_clock_lowered = false;
+ bool update_pstate_unsupported_clk = false;
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
bool force_reset = false;
bool update_uclk = false;
@@ -299,13 +300,28 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
- if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
+
+ // invalidate the current P-State forced min in certain dc_mode_softmax situations
+ if (dc->clk_mgr->dc_mode_softmax_enabled && safe_to_lower && !p_state_change_support) {
+ if ((new_clocks->dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) !=
+ (clk_mgr_base->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000))
+ update_pstate_unsupported_clk = true;
+ }
+
+ if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support) ||
+ update_pstate_unsupported_clk) {
clk_mgr_base->clks.p_state_change_support = p_state_change_support;
/* to disable P-State switching, set UCLK min = max */
- if (!clk_mgr_base->clks.p_state_change_support)
- dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
+ if (!clk_mgr_base->clks.p_state_change_support) {
+ if (dc->clk_mgr->dc_mode_softmax_enabled &&
+ new_clocks->dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
+ dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
+ dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
+ else
+ dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
+ }
}
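
When P-State switching has to be disabled, the hunk above now pins UCLK to the DC-mode soft max instead of the table max whenever the requested DRAM clock fits under it, and the new update_pstate_unsupported_clk flag forces a reprogram when a clock change crosses that boundary while P-State stays unsupported. A standalone sketch of the hard-min choice with made-up frequencies:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Pick the UCLK hard-min (in MHz) used to disable P-State switching,
 * modelled on the dcn3_update_clocks() hunk above. Values are hypothetical. */
static uint32_t uclk_hard_min_when_pstate_off(bool softmax_enabled,
					      uint32_t new_dramclk_khz,
					      uint32_t softmax_memclk_mhz,
					      uint32_t table_max_memclk_mhz)
{
	if (softmax_enabled && new_dramclk_khz <= softmax_memclk_mhz * 1000)
		return softmax_memclk_mhz;      /* stay at the DC-mode soft max */
	return table_max_memclk_mhz;            /* otherwise pin to the table max */
}

int main(void)
{
	/* 800 MHz request under a 1000 MHz softmax, 1600 MHz table max */
	printf("%u MHz\n", uclk_hard_min_when_pstate_off(true, 800000, 1000, 1600));
	/* 1200 MHz request exceeds the softmax -> full table max */
	printf("%u MHz\n", uclk_hard_min_when_pstate_off(true, 1200000, 1000, 1600));
	return 0;
}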
/* Always update saved value, even if new value not set due to P-State switching unsupported */
@@ -421,6 +437,24 @@ static void dcn3_set_hard_max_memclk(struct clk_mgr *clk_mgr_base)
clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
}
+static void dcn3_set_max_memclk(struct clk_mgr *clk_mgr_base, unsigned int memclk_mhz)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ if (!clk_mgr->smu_present)
+ return;
+
+ dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, memclk_mhz);
+}
+static void dcn3_set_min_memclk(struct clk_mgr *clk_mgr_base, unsigned int memclk_mhz)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ if (!clk_mgr->smu_present)
+ return;
+ dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, memclk_mhz);
+}
+
/* Get current memclk states, update bounding box */
static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
{
@@ -436,6 +470,8 @@ static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
&num_levels);
clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
+ clk_mgr_base->bw_params->dc_mode_softmax_memclk = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK);
+
/* Refresh bounding box */
clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
clk_mgr->base.ctx->dc, clk_mgr_base->bw_params);
@@ -505,6 +541,8 @@ static struct clk_mgr_funcs dcn3_funcs = {
.notify_wm_ranges = dcn3_notify_wm_ranges,
.set_hard_min_memclk = dcn3_set_hard_min_memclk,
.set_hard_max_memclk = dcn3_set_hard_max_memclk,
+ .set_max_memclk = dcn3_set_max_memclk,
+ .set_min_memclk = dcn3_set_min_memclk,
.get_memclk_states_from_smu = dcn3_get_memclk_states_from_smu,
.are_clock_states_equal = dcn3_are_clock_states_equal,
.enable_pme_wa = dcn3_enable_pme_wa,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
index 6ea642615854..d9920d91838d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
@@ -88,9 +88,9 @@ static uint32_t dcn301_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, u
return res_val;
}
-int dcn301_smu_send_msg_with_param(
- struct clk_mgr_internal *clk_mgr,
- unsigned int msg_id, unsigned int param)
+static int dcn301_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
+ unsigned int msg_id,
+ unsigned int param)
{
uint32_t result;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index 3eee32faa208..48005def1164 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -89,9 +89,9 @@ static int vg_get_active_display_cnt_wa(
return display_count;
}
-void vg_update_clocks(struct clk_mgr *clk_mgr_base,
- struct dc_state *context,
- bool safe_to_lower)
+static void vg_update_clocks(struct clk_mgr *clk_mgr_base,
+ struct dc_state *context,
+ bool safe_to_lower)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
@@ -367,18 +367,6 @@ static void vg_dump_clk_registers(struct clk_state_registers_and_bypass *regs_an
}
}
-/* This function produce translated logical clk state values*/
-void vg_get_clk_states(struct clk_mgr *clk_mgr_base, struct clk_states *s)
-{
-
- struct clk_state_registers_and_bypass sb = { 0 };
- struct clk_log_info log_info = { 0 };
-
- vg_dump_clk_registers(&sb, clk_mgr_base, &log_info);
-
- s->dprefclk_khz = sb.dprefclk * 1000;
-}
-
static void vg_enable_pme_wa(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
@@ -386,7 +374,7 @@ static void vg_enable_pme_wa(struct clk_mgr *clk_mgr_base)
dcn301_smu_enable_pme_wa(clk_mgr);
}
-void vg_init_clocks(struct clk_mgr *clk_mgr)
+static void vg_init_clocks(struct clk_mgr *clk_mgr)
{
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
// Assumption is that boot state always supports pstate
@@ -753,7 +741,7 @@ void vg_clk_mgr_construct(
sizeof(struct watermarks),
&clk_mgr->smu_wm_set.mc_address.quad_part);
- if (clk_mgr->smu_wm_set.wm_set == 0) {
+ if (!clk_mgr->smu_wm_set.wm_set) {
clk_mgr->smu_wm_set.wm_set = &dummy_wms;
clk_mgr->smu_wm_set.mc_address.quad_part = 0;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index f4c9a458ace8..4162ce40089b 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -66,7 +66,7 @@
#define TO_CLK_MGR_DCN31(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn31, base)
-int dcn31_get_active_display_cnt_wa(
+static int dcn31_get_active_display_cnt_wa(
struct dc *dc,
struct dc_state *context)
{
@@ -118,7 +118,7 @@ static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
}
}
-static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
+void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
{
@@ -158,6 +158,7 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
union display_idle_optimization_u idle_info = { 0 };
idle_info.idle_info.df_request_disabled = 1;
idle_info.idle_info.phy_ref_clk_off = 1;
+ idle_info.idle_info.s0i2_rdy = 1;
dcn31_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
/* update power state */
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
@@ -284,7 +285,7 @@ static void dcn31_enable_pme_wa(struct clk_mgr *clk_mgr_base)
dcn31_smu_enable_pme_wa(clk_mgr);
}
-static void dcn31_init_clocks(struct clk_mgr *clk_mgr)
+void dcn31_init_clocks(struct clk_mgr *clk_mgr)
{
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
// Assumption is that boot state always supports pstate
@@ -294,7 +295,7 @@ static void dcn31_init_clocks(struct clk_mgr *clk_mgr)
clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
}
-static bool dcn31_are_clock_states_equal(struct dc_clocks *a,
+bool dcn31_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b)
{
if (a->dispclk_khz != b->dispclk_khz)
@@ -540,10 +541,9 @@ static unsigned int find_clk_for_voltage(
return clock;
}
-void dcn31_clk_mgr_helper_populate_bw_params(
- struct clk_mgr_internal *clk_mgr,
- struct integrated_info *bios_info,
- const DpmClocks_t *clock_table)
+static void dcn31_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr,
+ struct integrated_info *bios_info,
+ const DpmClocks_t *clock_table)
{
int i, j;
struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h
index f8f100535526..961b10a49486 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h
@@ -39,6 +39,13 @@ struct clk_mgr_dcn31 {
struct dcn31_smu_watermark_set smu_wm_set;
};
+bool dcn31_are_clock_states_equal(struct dc_clocks *a,
+ struct dc_clocks *b);
+void dcn31_init_clocks(struct clk_mgr *clk_mgr);
+void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
+ struct dc_state *context,
+ bool safe_to_lower);
+
void dcn31_clk_mgr_construct(struct dc_context *ctx,
struct clk_mgr_dcn31 *clk_mgr,
struct pp_smu_funcs *pp_smu,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
index 8c2b77eb9459..b7ace235a2d5 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
@@ -95,9 +95,9 @@ static uint32_t dcn31_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un
return res_val;
}
-int dcn31_smu_send_msg_with_param(
- struct clk_mgr_internal *clk_mgr,
- unsigned int msg_id, unsigned int param)
+static int dcn31_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
+ unsigned int msg_id,
+ unsigned int param)
{
uint32_t result;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 0ded4decee05..91c4874473d6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -221,9 +221,9 @@ static bool create_links(
link = link_create(&link_init_params);
if (link) {
- dc->links[dc->link_count] = link;
- link->dc = dc;
- ++dc->link_count;
+ dc->links[dc->link_count] = link;
+ link->dc = dc;
+ ++dc->link_count;
}
}
@@ -274,24 +274,6 @@ static bool create_links(
goto failed_alloc;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) &&
- dc->caps.dp_hpo &&
- link->dc->res_pool->res_cap->num_hpo_dp_link_encoder > 0) {
- /* FPGA case - Allocate HPO DP link encoder */
- if (i < link->dc->res_pool->res_cap->num_hpo_dp_link_encoder) {
- link->hpo_dp_link_enc = link->dc->res_pool->hpo_dp_link_enc[i];
-
- if (link->hpo_dp_link_enc == NULL) {
- BREAK_TO_DEBUGGER();
- goto failed_alloc;
- }
- link->hpo_dp_link_enc->hpd_source = link->link_enc->hpd_source;
- link->hpo_dp_link_enc->transmitter = link->link_enc->transmitter;
- }
- }
-#endif
-
link->link_status.dpcd_caps = &link->dpcd_caps;
enc_init.ctx = dc->ctx;
@@ -808,6 +790,10 @@ void dc_stream_set_static_screen_params(struct dc *dc,
static void dc_destruct(struct dc *dc)
{
+ // reset link encoder assignment table on destruct
+ if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
+ link_enc_cfg_init(dc, dc->current_state);
+
if (dc->current_state) {
dc_release_state(dc->current_state);
dc->current_state = NULL;
@@ -1016,8 +1002,6 @@ static bool dc_construct(struct dc *dc,
goto fail;
}
- dc_resource_state_construct(dc, dc->current_state);
-
if (!create_links(dc, init_params->num_virtual_links))
goto fail;
@@ -1027,8 +1011,7 @@ static bool dc_construct(struct dc *dc,
if (!create_link_encoders(dc))
goto fail;
- /* Initialise DIG link encoder resource tracking variables. */
- link_enc_cfg_init(dc, dc->current_state);
+ dc_resource_state_construct(dc, dc->current_state);
return true;
@@ -1421,22 +1404,29 @@ static void program_timing_sync(
status->timing_sync_info.master = false;
}
- /* remove any other unblanked pipes as they have already been synced */
- for (j = j + 1; j < group_size; j++) {
- bool is_blanked;
- if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
- is_blanked =
- pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
- else
- is_blanked =
- pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
- if (!is_blanked) {
- group_size--;
- pipe_set[j] = pipe_set[group_size];
- j--;
+ /* remove any other pipes that have already been synced */
+ if (dc->config.use_pipe_ctx_sync_logic) {
+ /* check pipe's syncd to decide which pipes to remove */
+ for (j = 1; j < group_size; j++) {
+ if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
+ group_size--;
+ pipe_set[j] = pipe_set[group_size];
+ j--;
+ } else
+ /* link slave pipe's syncd with master pipe */
+ pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
}
- }
+ } else {
+ /* remove any other pipes by checking valid plane */
+ for (j = j + 1; j < group_size; j++) {
+ if (pipe_set[j]->plane_state) {
+ group_size--;
+ pipe_set[j] = pipe_set[group_size];
+ j--;
+ }
+ }
+ }
if (group_size > 1) {
if (sync_type == TIMING_SYNCHRONIZABLE) {
@@ -1830,6 +1820,19 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
dc_stream_log(dc, stream);
}
+ /*
+ * Previous validation was performed with fast_validation = true and
+ * the full DML state required for hardware programming was skipped.
+ *
+ * Re-validate here to calculate these parameters / watermarks.
+ */
+ result = dc_validate_global_state(dc, context, false);
+ if (result != DC_OK) {
+ DC_LOG_ERROR("DC commit global validation failure: %s (%d)",
+ dc_status_to_str(result), result);
+ return result;
+ }
+
result = dc_commit_state_no_check(dc, context);
return (result == DC_OK);
@@ -2870,7 +2873,8 @@ static void commit_planes_for_stream(struct dc *dc,
#endif
if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
- if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
+ if (top_pipe_to_program &&
+ top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
if (should_use_dmub_lock(stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
@@ -2979,12 +2983,12 @@ static void commit_planes_for_stream(struct dc *dc,
#ifdef CONFIG_DRM_AMD_DC_DCN
if (dc->debug.validate_dml_output) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx cur_pipe = context->res_ctx.pipe_ctx[i];
- if (cur_pipe.stream == NULL)
+ struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
+ if (cur_pipe->stream == NULL)
continue;
- cur_pipe.plane_res.hubp->funcs->validate_dml_output(
- cur_pipe.plane_res.hubp, dc->ctx,
+ cur_pipe->plane_res.hubp->funcs->validate_dml_output(
+ cur_pipe->plane_res.hubp, dc->ctx,
&context->res_ctx.pipe_ctx[i].rq_regs,
&context->res_ctx.pipe_ctx[i].dlg_regs,
&context->res_ctx.pipe_ctx[i].ttu_regs);
@@ -3426,7 +3430,7 @@ struct dc_sink *dc_link_add_remote_sink(
goto fail_add_sink;
edid_status = dm_helpers_parse_edid_caps(
- link->ctx,
+ link,
&dc_sink->dc_edid,
&dc_sink->edid_caps);
@@ -3583,6 +3587,98 @@ void dc_lock_memory_clock_frequency(struct dc *dc)
core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
}
+static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
+{
+ struct dc_state *context = dc->current_state;
+ struct hubp *hubp;
+ struct pipe_ctx *pipe;
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream != NULL) {
+ dc->hwss.disable_pixel_data(dc, pipe, true);
+
+ // wait for double buffer
+ pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
+ pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
+ pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
+
+ hubp = pipe->plane_res.hubp;
+ hubp->funcs->set_blank_regs(hubp, true);
+ }
+ }
+
+ dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
+ dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream != NULL) {
+ dc->hwss.disable_pixel_data(dc, pipe, false);
+
+ hubp = pipe->plane_res.hubp;
+ hubp->funcs->set_blank_regs(hubp, false);
+ }
+ }
+}
+
+
+/**
+ * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode
+ * @dc: pointer to dc of the dm calling this
+ * @enable: True = transition to DC mode, false = transition back to AC mode
+ *
+ * Some SoCs define additional clock limits when in DC mode. DM should
+ * invoke this function when the platform undergoes a power source transition
+ * so DC can apply/unapply the limit. This interface may be disruptive to
+ * the onscreen content.
+ *
+ * Context: Triggered by OS through DM interface, or manually by escape calls.
+ * Need to hold the dc lock when doing so.
+ *
+ * Return: none (void function)
+ *
+ */
+void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
+{
+ uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev;
+ unsigned int softMax, maxDPM, funcMin;
+ bool p_state_change_support;
+
+ if (!ASICREV_IS_BEIGE_GOBY_P(hw_internal_rev))
+ return;
+
+ softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
+ maxDPM = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz;
+ funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
+ p_state_change_support = dc->clk_mgr->clks.p_state_change_support;
+
+ if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
+ if (p_state_change_support) {
+ if (funcMin <= softMax)
+ dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
+ // else: No-Op
+ } else {
+ if (funcMin <= softMax)
+ blank_and_force_memclk(dc, true, softMax);
+ // else: No-Op
+ }
+ } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
+ if (p_state_change_support) {
+ if (funcMin <= softMax)
+ dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
+ // else: No-Op
+ } else {
+ if (funcMin <= softMax)
+ blank_and_force_memclk(dc, true, maxDPM);
+ // else: No-Op
+ }
+ }
+ dc->clk_mgr->dc_mode_softmax_enabled = enable;
+}
bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
struct dc_cursor_attributes *cursor_attr)
{
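The dc_enable_dcmode_clk_limit() interface added above is meant to be driven by the DM whenever the platform switches between AC and DC power. A minimal, hypothetical DM-side sketch of that wiring is shown below; it is not part of this patch, and the handler name and the lock parameter are illustrative assumptions (the kernel-doc only requires that the dc lock be held across the call):

static void example_dm_on_power_source_change(struct dc *dc,
					      struct mutex *dc_lock,
					      bool on_battery)
{
	/* Hold the dc lock across the call, as the kernel-doc requests. */
	mutex_lock(dc_lock);
	dc_enable_dcmode_clk_limit(dc, on_battery);
	mutex_unlock(dc_lock);
}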
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 21be2a684393..643762542e4d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -422,6 +422,8 @@ char *dc_status_to_str(enum dc_status status)
return "The operation is not supported.";
case DC_UNSUPPORTED_VALUE:
return "The value specified is not supported.";
+ case DC_NO_LINK_ENC_RESOURCE:
+ return "No link encoder resource";
case DC_ERROR_UNEXPECTED:
return "Unexpected error";
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 60544788e911..dc1380b6c5e0 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -66,31 +66,6 @@
/*******************************************************************************
* Private functions
******************************************************************************/
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-static bool add_dp_hpo_link_encoder_to_link(struct dc_link *link)
-{
- struct hpo_dp_link_encoder *enc = resource_get_unused_hpo_dp_link_encoder(
- link->dc->res_pool);
-
- if (!link->hpo_dp_link_enc && enc) {
- link->hpo_dp_link_enc = enc;
- link->hpo_dp_link_enc->transmitter = link->link_enc->transmitter;
- link->hpo_dp_link_enc->hpd_source = link->link_enc->hpd_source;
- }
-
- return (link->hpo_dp_link_enc != NULL);
-}
-
-static void remove_dp_hpo_link_encoder_from_link(struct dc_link *link)
-{
- if (link->hpo_dp_link_enc) {
- link->hpo_dp_link_enc->hpd_source = HPD_SOURCEID_UNKNOWN;
- link->hpo_dp_link_enc->transmitter = TRANSMITTER_UNKNOWN;
- link->hpo_dp_link_enc = NULL;
- }
-}
-#endif
-
static void dc_link_destruct(struct dc_link *link)
{
int i;
@@ -118,12 +93,6 @@ static void dc_link_destruct(struct dc_link *link)
link->link_enc->funcs->destroy(&link->link_enc);
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (link->hpo_dp_link_enc) {
- remove_dp_hpo_link_encoder_from_link(link);
- }
-#endif
-
if (link->local_sink)
dc_sink_release(link->local_sink);
@@ -270,10 +239,10 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
/* Link may not have physical HPD pin. */
if (link->ep_type != DISPLAY_ENDPOINT_PHY) {
- if (link->hpd_status)
- *type = dc_connection_single;
- else
+ if (link->is_hpd_pending || !link->hpd_status)
*type = dc_connection_none;
+ else
+ *type = dc_connection_single;
return true;
}
@@ -758,6 +727,18 @@ static bool detect_dp(struct dc_link *link,
dal_ddc_service_set_transaction_type(link->ddc,
sink_caps->transaction_type);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* Apply workaround for tunneled MST on certain USB4 docks. Always use DSC
+ * if the dock reports DSC support.
+ */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ link->type == dc_connection_mst_branch &&
+ link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+ link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT &&
+ !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around)
+ link->wa_flags.dpia_mst_dsc_always_on = true;
+#endif
+
#if defined(CONFIG_DRM_AMD_DC_HDCP)
/* In case of fallback to SST when topology discovery below fails
* HDCP caps will be queried again later by the upper layer (caller
@@ -869,6 +850,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
enum dc_connection_type pre_connection_type = dc_connection_none;
bool perform_dp_seamless_boot = false;
const uint32_t post_oui_delay = 30; // 30ms
+ struct link_resource link_res = { 0 };
DC_LOGGER_INIT(link->ctx->logger);
@@ -963,7 +945,10 @@ static bool dc_link_detect_helper(struct dc_link *link,
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dp_get_link_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING)
- add_dp_hpo_link_encoder_to_link(link);
+ link_res.hpo_dp_link_enc = resource_get_hpo_dp_link_enc_for_det_lt(
+ &link->dc->current_state->res_ctx,
+ link->dc->res_pool,
+ link);
#endif
if (link->type == dc_connection_mst_branch) {
@@ -974,7 +959,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
* empty which leads to allocate_mst_payload() has "0"
* pbn_per_slot value leading to exception on dc_fixpt_div()
*/
- dp_verify_mst_link_cap(link);
+ dp_verify_mst_link_cap(link, &link_res);
/*
* This call will initiate MST topology discovery. Which
@@ -1138,6 +1123,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
// verify link cap for SST non-seamless boot
if (!perform_dp_seamless_boot)
dp_verify_link_cap_with_retries(link,
+ &link_res,
&link->reported_link_cap,
LINK_TRAINING_MAX_VERIFY_RETRY);
} else {
@@ -1203,6 +1189,10 @@ static bool dc_link_detect_helper(struct dc_link *link,
LINK_INFO("link=%d, mst branch is now Disconnected\n",
link->link_index);
+ /* Disable workaround which keeps DSC on for tunneled MST on certain USB4 docks. */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ link->wa_flags.dpia_mst_dsc_always_on = false;
+
dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
link->mst_stream_alloc_table.stream_count = 0;
@@ -1828,6 +1818,8 @@ static void enable_stream_features(struct pipe_ctx *pipe_ctx)
union down_spread_ctrl old_downspread;
union down_spread_ctrl new_downspread;
+ memset(&old_downspread, 0, sizeof(old_downspread));
+
core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL,
&old_downspread.raw, sizeof(old_downspread));
@@ -1999,6 +1991,57 @@ static enum dc_status enable_link_dp_mst(
return enable_link_dp(state, pipe_ctx);
}
+void dc_link_blank_all_dp_displays(struct dc *dc)
+{
+ unsigned int i;
+ uint8_t dpcd_power_state = '\0';
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if ((dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) ||
+ (dc->links[i]->priv == NULL) || (dc->links[i]->local_sink == NULL))
+ continue;
+
+ /* DP 2.0 spec requires that we read LTTPR caps first */
+ dp_retrieve_lttpr_cap(dc->links[i]);
+ /* if any of the displays are lit up, turn them off */
+ status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
+ &dpcd_power_state, sizeof(dpcd_power_state));
+
+ if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0)
+ dc_link_blank_dp_stream(dc->links[i], true);
+ }
+
+}
+
+void dc_link_blank_dp_stream(struct dc_link *link, bool hw_init)
+{
+ unsigned int j;
+ struct dc *dc = link->ctx->dc;
+ enum signal_type signal = link->connector_signal;
+
+ if ((signal == SIGNAL_TYPE_EDP) ||
+ (signal == SIGNAL_TYPE_DISPLAY_PORT)) {
+ if (link->ep_type == DISPLAY_ENDPOINT_PHY &&
+ link->link_enc->funcs->get_dig_frontend &&
+ link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
+ unsigned int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc);
+
+ if (fe != ENGINE_ID_UNKNOWN)
+ for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
+ if (fe == dc->res_pool->stream_enc[j]->id) {
+ dc->res_pool->stream_enc[j]->funcs->dp_blank(link,
+ dc->res_pool->stream_enc[j]);
+ break;
+ }
+ }
+ }
+
+ if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)
+ dp_receiver_power_ctrl(link, false);
+ }
+}
+
static bool get_ext_hdmi_settings(struct pipe_ctx *pipe_ctx,
enum engine_id eng_id,
struct ext_hdmi_settings *settings)
@@ -2436,7 +2479,8 @@ static void write_i2c_redriver_setting(
DC_LOG_DEBUG("Set redriver failed");
}
-static void disable_link(struct dc_link *link, enum signal_type signal)
+static void disable_link(struct dc_link *link, const struct link_resource *link_res,
+ enum signal_type signal)
{
/*
* TODO: implement call for dp_set_hw_test_pattern
@@ -2455,20 +2499,20 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
struct dc_link_settings link_settings = link->cur_link_settings;
#endif
if (dc_is_dp_sst_signal(signal))
- dp_disable_link_phy(link, signal);
+ dp_disable_link_phy(link, link_res, signal);
else
- dp_disable_link_phy_mst(link, signal);
+ dp_disable_link_phy_mst(link, link_res, signal);
if (dc_is_dp_sst_signal(signal) ||
link->mst_stream_alloc_table.stream_count == 0) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING) {
dp_set_fec_enable(link, false);
- dp_set_fec_ready(link, false);
+ dp_set_fec_ready(link, link_res, false);
}
#else
dp_set_fec_enable(link, false);
- dp_set_fec_ready(link, false);
+ dp_set_fec_ready(link, link_res, false);
#endif
}
} else {
@@ -2579,7 +2623,7 @@ static enum dc_status enable_link(
* new link settings.
*/
if (link->link_status.link_active) {
- disable_link(link, pipe_ctx->stream->signal);
+ disable_link(link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
}
switch (pipe_ctx->stream->signal) {
@@ -2699,8 +2743,23 @@ static bool dp_active_dongle_validate_timing(
return false;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter
+ struct dc_crtc_timing outputTiming = *timing;
+
+ if (timing->flags.DSC && !timing->dsc_cfg.is_frl)
+ /* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */
+ outputTiming.flags.DSC = 0;
+ if (dc_bandwidth_in_kbps_from_timing(&outputTiming) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps)
+ return false;
+ } else { // DP to HDMI TMDS converter
+ if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
+ return false;
+ }
+#else
if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
return false;
+#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
}
@@ -2946,7 +3005,7 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active
link->psr_settings.psr_power_opt = *power_opts;
if (psr != NULL && link->psr_settings.psr_feature_enabled && psr->funcs->psr_set_power_opt)
- psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt);
+ psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt, panel_inst);
}
/* Enable or Disable PSR */
@@ -3334,11 +3393,12 @@ static void dc_log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_tim
/*
* Payload allocation/deallocation for SST introduced in DP2.0
*/
-enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx, bool allocate)
+static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx,
+ bool allocate)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
- struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc;
+ struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc;
struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc;
struct link_mst_stream_allocation_table proposed_table = {0};
struct fixed31_32 avg_time_slots_per_mtp;
@@ -3420,7 +3480,7 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
struct link_encoder *link_encoder = NULL;
struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
#if defined(CONFIG_DRM_AMD_DC_DCN)
- struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc;
+ struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc;
struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc;
#endif
struct dp_mst_stream_allocation_table proposed_table = {0};
@@ -3750,7 +3810,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
struct link_encoder *link_encoder = NULL;
struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
#if defined(CONFIG_DRM_AMD_DC_DCN)
- struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc;
+ struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc;
struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc;
#endif
struct dp_mst_stream_allocation_table proposed_table = {0};
@@ -3913,9 +3973,6 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct link_encoder *link_enc = NULL;
- struct dc_state *state = pipe_ctx->stream->ctx->dc->current_state;
- struct link_enc_assignment link_enc_assign;
- int i;
#endif
if (cp_psp && cp_psp->funcs.update_stream_config) {
@@ -3929,12 +3986,9 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst;
#if defined(CONFIG_DRM_AMD_DC_DCN)
config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA;
-
+
if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY ||
pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
- link_enc = pipe_ctx->stream->link->link_enc;
- config.dio_output_type = pipe_ctx->stream->link->ep_type;
- config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY)
link_enc = pipe_ctx->stream->link->link_enc;
else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
@@ -3943,18 +3997,15 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
pipe_ctx->stream->ctx->dc,
pipe_ctx->stream);
}
+ ASSERT(link_enc);
+
// Initialize PHY ID with ABCDE - 01234 mapping except when it is B0
config.phy_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
- //look up the link_enc_assignment for the current pipe_ctx
- for (i = 0; i < state->stream_count; i++) {
- if (pipe_ctx->stream == state->streams[i]) {
- link_enc_assign = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
- }
- }
// Add flag to guard new A0 DIG mapping
- if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true) {
- config.dig_be = link_enc_assign.eng_id;
+ if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true &&
+ pipe_ctx->stream->link->dc->ctx->dce_version == DCN_VERSION_3_1) {
+ config.dig_be = link_enc->preferred_engine;
config.dio_output_type = pipe_ctx->stream->link->ep_type;
config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
} else {
@@ -3966,10 +4017,8 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true &&
link_enc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
- link_enc = link_enc_assign.stream->link_enc;
-
// enum ID 1-4 maps to DPIA PHY ID 0-3
- config.phy_idx = link_enc_assign.ep_id.link_id.enum_id - ENUM_ID_1;
+ config.phy_idx = pipe_ctx->stream->link->link_id.enum_id - ENUM_ID_1;
} else { // for non DPIA mode over B0, ABCDE maps to 01564
switch (link_enc->transmitter) {
@@ -4006,7 +4055,8 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
if (is_dp_128b_132b_signal(pipe_ctx)) {
config.stream_enc_idx = pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0;
- config.link_enc_idx = pipe_ctx->stream->link->hpo_dp_link_enc->inst;
+
+ config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst;
config.dp2_enabled = 1;
}
#endif
@@ -4037,7 +4087,7 @@ static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pi
stream->link->cur_link_settings = link_settings;
/* Enable clock, Configure lane count, and Enable Link Encoder*/
- enable_dp_hpo_output(stream->link, &stream->link->cur_link_settings);
+ enable_dp_hpo_output(stream->link, &pipe_ctx->link_res, &stream->link->cur_link_settings);
#ifdef DIAGS_BUILD
/* Workaround for FPGA HPO capture DP link data:
@@ -4087,12 +4137,12 @@ static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pi
proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
}
- stream->link->hpo_dp_link_enc->funcs->update_stream_allocation_table(
- stream->link->hpo_dp_link_enc,
+ pipe_ctx->link_res.hpo_dp_link_enc->funcs->update_stream_allocation_table(
+ pipe_ctx->link_res.hpo_dp_link_enc,
&proposed_table);
- stream->link->hpo_dp_link_enc->funcs->set_throttled_vcp_size(
- stream->link->hpo_dp_link_enc,
+ pipe_ctx->link_res.hpo_dp_link_enc->funcs->set_throttled_vcp_size(
+ pipe_ctx->link_res.hpo_dp_link_enc,
pipe_ctx->stream_res.hpo_dp_stream_enc->inst,
avg_time_slots_per_mtp);
@@ -4242,7 +4292,8 @@ void core_link_enable_stream(
/* eDP lit up by bios already, no need to enable again. */
if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
apply_edp_fast_boot_optimization &&
- !pipe_ctx->stream->timing.flags.DSC) {
+ !pipe_ctx->stream->timing.flags.DSC &&
+ !pipe_ctx->next_odm_pipe) {
pipe_ctx->stream->dpms_off = false;
#if defined(CONFIG_DRM_AMD_DC_HDCP)
update_psp_stream_config(pipe_ctx, false);
@@ -4280,7 +4331,8 @@ void core_link_enable_stream(
if (status != DC_FAIL_DP_LINK_TRAINING ||
pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
if (false == stream->link->link_status.link_active)
- disable_link(stream->link, pipe_ctx->stream->signal);
+ disable_link(stream->link, &pipe_ctx->link_res,
+ pipe_ctx->stream->signal);
BREAK_TO_DEBUGGER();
return;
}
@@ -4429,14 +4481,14 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
* state machine.
* In DP2 or MST mode, our encoder will stay video active
*/
- disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
+ disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
dc->hwss.disable_stream(pipe_ctx);
} else {
dc->hwss.disable_stream(pipe_ctx);
- disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
+ disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
}
#else
- disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
+ disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
dc->hwss.disable_stream(pipe_ctx);
#endif
@@ -4519,16 +4571,22 @@ void dc_link_set_drive_settings(struct dc *dc,
{
int i;
+ struct pipe_ctx *pipe = NULL;
+ const struct link_resource *link_res;
- for (i = 0; i < dc->link_count; i++) {
- if (dc->links[i] == link)
- break;
- }
+ link_res = dc_link_get_cur_link_res(link);
- if (i >= dc->link_count)
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->stream && pipe->stream->link) {
+ if (pipe->stream->link == link)
+ break;
+ }
+ }
+ if (pipe && link_res)
+ dc_link_dp_set_drive_settings(pipe->stream->link, link_res, lt_settings);
+ else
ASSERT_CRITICAL(false);
-
- dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
}
void dc_link_set_preferred_link_settings(struct dc *dc,
@@ -4589,11 +4647,9 @@ void dc_link_set_preferred_training_settings(struct dc *dc,
if (link_setting != NULL) {
link->preferred_link_setting = *link_setting;
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (dp_get_link_encoding_format(link_setting) ==
- DP_128b_132b_ENCODING && !link->hpo_dp_link_enc) {
- if (!add_dp_hpo_link_encoder_to_link(link))
- memset(&link->preferred_link_setting, 0, sizeof(link->preferred_link_setting));
- }
+ if (dp_get_link_encoding_format(link_setting) == DP_128b_132b_ENCODING)
+ /* TODO: add dc update for acquiring link res */
+ skip_immediate_retrain = true;
#endif
} else {
link->preferred_link_setting.lane_count = LANE_COUNT_UNKNOWN;
@@ -4720,6 +4776,9 @@ void dc_link_overwrite_extended_receiver_cap(
bool dc_link_is_fec_supported(const struct dc_link *link)
{
+ /* TODO - use asic cap instead of link_enc->features
+ * we no longer know which link enc to use for this link before commit
+ */
struct link_encoder *link_enc = NULL;
/* Links supporting dynamically assigned link encoder will be assigned next
@@ -4749,6 +4808,8 @@ bool dc_link_should_enable_fec(const struct dc_link *link)
link->local_sink &&
link->local_sink->edid_caps.panel_patch.disable_fec) ||
(link->connector_signal == SIGNAL_TYPE_EDP
+ // enable FEC for EDP if DSC is supported
+ && link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT == false
))
is_fec_disable = true;
@@ -4812,3 +4873,125 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
return kbps;
}
+
+const struct link_resource *dc_link_get_cur_link_res(const struct dc_link *link)
+{
+ int i;
+ struct pipe_ctx *pipe = NULL;
+ const struct link_resource *link_res = NULL;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->stream && pipe->stream->link && pipe->top_pipe == NULL) {
+ if (pipe->stream->link == link) {
+ link_res = &pipe->link_res;
+ break;
+ }
+ }
+ }
+
+ return link_res;
+}
+
+/**
+ * dc_get_cur_link_res_map() - take a snapshot of current link resource allocation state
+ * @dc: pointer to dc of the dm calling this
+ * @map: a dc link resource snapshot defined internally to dc.
+ *
+ * DM needs to capture a snapshot of current link resource allocation mapping
+ * and store it in its persistent storage.
+ *
+ * Some of the link resource is using first come first serve policy.
+ * The allocation mapping depends on original hotplug order. This information
+ * is lost after driver is loaded next time. The snapshot is used in order to
+ * restore link resource to its previous state so user will get consistent
+ * link capability allocation across reboot.
+ *
+ * Return: none (void function)
+ *
+ */
+void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map)
+{
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct dc_link *link;
+ uint8_t i;
+ uint32_t hpo_dp_recycle_map = 0;
+
+ *map = 0;
+
+ if (dc->caps.dp_hpo) {
+ for (i = 0; i < dc->caps.max_links; i++) {
+ link = dc->links[i];
+ if (link->link_status.link_active &&
+ dp_get_link_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING &&
+ dp_get_link_encoding_format(&link->cur_link_settings) != DP_128b_132b_ENCODING)
+ /* hpo dp link encoder is considered recycled when RX reports 128b/132b
+ * encoding capability but the current link doesn't use it.
+ */
+ hpo_dp_recycle_map |= (1 << i);
+ }
+ *map |= (hpo_dp_recycle_map << LINK_RES_HPO_DP_REC_MAP__SHIFT);
+ }
+#endif
+}
+
+/**
+ * dc_restore_link_res_map() - restore link resource allocation state from a snapshot
+ * @dc: pointer to dc of the dm calling this
+ * @map: a dc link resource snapshot defined internally to dc.
+ *
+ * DM needs to call this function after initial link detection on boot and
+ * before the first stream commit to restore the link resource allocation
+ * state from the previous boot session.
+ *
+ * Some of the link resources use a first come, first served policy.
+ * The allocation mapping depends on the original hotplug order, and this
+ * information is lost the next time the driver is loaded. The snapshot is
+ * used to restore link resources to their previous state so the user gets
+ * consistent link capability allocation across reboots.
+ *
+ * Return: none (void function)
+ *
+ */
+void dc_restore_link_res_map(const struct dc *dc, uint32_t *map)
+{
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct dc_link *link;
+ uint8_t i;
+ unsigned int available_hpo_dp_count;
+ uint32_t hpo_dp_recycle_map = (*map & LINK_RES_HPO_DP_REC_MAP__MASK)
+ >> LINK_RES_HPO_DP_REC_MAP__SHIFT;
+
+ if (dc->caps.dp_hpo) {
+ available_hpo_dp_count = dc->res_pool->hpo_dp_link_enc_count;
+ /* remove excess 128b/132b encoding support for not recycled links */
+ for (i = 0; i < dc->caps.max_links; i++) {
+ if ((hpo_dp_recycle_map & (1 << i)) == 0) {
+ link = dc->links[i];
+ if (link->type != dc_connection_none &&
+ dp_get_link_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) {
+ if (available_hpo_dp_count > 0)
+ available_hpo_dp_count--;
+ else
+ /* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */
+ link->verified_link_cap.link_rate = LINK_RATE_HIGH3;
+ }
+ }
+ }
+ /* remove excess 128b/132b encoding support for recycled links */
+ for (i = 0; i < dc->caps.max_links; i++) {
+ if ((hpo_dp_recycle_map & (1 << i)) != 0) {
+ link = dc->links[i];
+ if (link->type != dc_connection_none &&
+ dp_get_link_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) {
+ if (available_hpo_dp_count > 0)
+ available_hpo_dp_count--;
+ else
+ /* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */
+ link->verified_link_cap.link_rate = LINK_RATE_HIGH3;
+ }
+ }
+ }
+ }
+#endif
+}
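The two snapshot helpers added above, dc_get_cur_link_res_map() and dc_restore_link_res_map(), are intended to be bracketed around a driver reload by the DM, per their kernel-doc. A minimal, hypothetical sketch follows; it is not part of this patch, and the static variable and function names are placeholders for whatever persistent storage and hooks the DM actually uses:

static uint32_t example_saved_link_res_map;	/* stand-in for DM persistent storage */

/* Called before the driver is torn down, so the map survives the reload. */
static void example_dm_save_link_res_map(const struct dc *dc)
{
	dc_get_cur_link_res_map(dc, &example_saved_link_res_map);
}

/* Called after initial link detection on boot, before the first stream commit. */
static void example_dm_restore_link_res_map(const struct dc *dc)
{
	dc_restore_link_res_map(dc, &example_saved_link_res_map);
}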
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 60539b1f2a80..24dc662ec3e4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -626,7 +626,7 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
do {
struct aux_payload current_payload;
bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >=
- payload->length ? true : false;
+ payload->length;
uint32_t payload_length = is_end_of_payload ?
payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index cb7bf9148904..05e216524370 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -100,6 +100,7 @@ static const struct dp_lt_fallback_entry dp_lt_fallbacks[] = {
#endif
static bool decide_fallback_link_setting(
+ struct dc_link *link,
struct dc_link_settings initial_link_settings,
struct dc_link_settings *current_link_setting,
enum link_training_result training_result);
@@ -398,6 +399,223 @@ static uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings)
}
#endif
+static void vendor_specific_lttpr_wa_one_start(struct dc_link *link)
+{
+ const uint8_t vendor_lttpr_write_data[4] = {0x1, 0x50, 0x63, 0xff};
+ const uint8_t offset = dp_convert_to_count(
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ uint32_t vendor_lttpr_write_address = 0xF004F;
+
+ if (offset != 0xFF)
+ vendor_lttpr_write_address +=
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+
+ /* W/A for certain LTTPR to reset their lane settings, part one of two */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data[0],
+ sizeof(vendor_lttpr_write_data));
+}
+
+static void vendor_specific_lttpr_wa_one_end(
+ struct dc_link *link,
+ uint8_t retry_count)
+{
+ const uint8_t vendor_lttpr_write_data[4] = {0x1, 0x50, 0x63, 0x0};
+ const uint8_t offset = dp_convert_to_count(
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ uint32_t vendor_lttpr_write_address = 0xF004F;
+
+ if (!retry_count) {
+ if (offset != 0xFF)
+ vendor_lttpr_write_address +=
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+
+ /* W/A for certain LTTPR to reset their lane settings, part two of two */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data[0],
+ sizeof(vendor_lttpr_write_data));
+ }
+}
+
+static void vendor_specific_lttpr_wa_one_two(
+ struct dc_link *link,
+ const uint8_t rate)
+{
+ if (link->apply_vendor_specific_lttpr_link_rate_wa) {
+ uint8_t toggle_rate = 0x0;
+
+ if (rate == 0x6)
+ toggle_rate = 0xA;
+ else
+ toggle_rate = 0x6;
+
+ if (link->vendor_specific_lttpr_link_rate_wa == rate) {
+ /* W/A for certain LTTPR to reset internal state for link training */
+ core_link_write_dpcd(
+ link,
+ DP_LINK_BW_SET,
+ &toggle_rate,
+ 1);
+ }
+
+ /* Store the last attempted link rate for this link */
+ link->vendor_specific_lttpr_link_rate_wa = rate;
+ }
+}
+
+static void vendor_specific_lttpr_wa_three(
+ struct dc_link *link,
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX])
+{
+ const uint8_t vendor_lttpr_write_data_vs[3] = {0x0, 0x53, 0x63};
+ const uint8_t vendor_lttpr_write_data_pe[3] = {0x0, 0x54, 0x63};
+ const uint8_t offset = dp_convert_to_count(
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ uint32_t vendor_lttpr_write_address = 0xF004F;
+ uint32_t vendor_lttpr_read_address = 0xF0053;
+ uint8_t dprx_vs = 0;
+ uint8_t dprx_pe = 0;
+ uint8_t lane;
+
+ if (offset != 0xFF) {
+ vendor_lttpr_write_address +=
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ vendor_lttpr_read_address +=
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ }
+
+ /* W/A to read lane settings requested by DPRX */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_vs[0],
+ sizeof(vendor_lttpr_write_data_vs));
+ core_link_read_dpcd(
+ link,
+ vendor_lttpr_read_address,
+ &dprx_vs,
+ 1);
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_pe[0],
+ sizeof(vendor_lttpr_write_data_pe));
+ core_link_read_dpcd(
+ link,
+ vendor_lttpr_read_address,
+ &dprx_pe,
+ 1);
+
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_LANE = (dprx_vs >> (2 * lane)) & 0x3;
+ dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_LANE = (dprx_pe >> (2 * lane)) & 0x3;
+ }
+}
+
+static void vendor_specific_lttpr_wa_three_dpcd(
+ struct dc_link *link,
+ union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX])
+{
+ union lane_adjust lane_adjust[LANE_COUNT_DP_MAX];
+ uint8_t lane = 0;
+
+ vendor_specific_lttpr_wa_three(link, lane_adjust);
+
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET = lane_adjust[lane].bits.VOLTAGE_SWING_LANE;
+ dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET = lane_adjust[lane].bits.PRE_EMPHASIS_LANE;
+ }
+}
+
+static void vendor_specific_lttpr_wa_four(
+ struct dc_link *link,
+ bool apply_wa)
+{
+ const uint8_t vendor_lttpr_write_data_one[4] = {0x1, 0x55, 0x63, 0x8};
+ const uint8_t vendor_lttpr_write_data_two[4] = {0x1, 0x55, 0x63, 0x0};
+ const uint8_t offset = dp_convert_to_count(
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ uint32_t vendor_lttpr_write_address = 0xF004F;
+#if defined(CONFIG_DRM_AMD_DC_DP2_0)
+ uint8_t sink_status = 0;
+ uint8_t i;
+#endif
+
+ if (offset != 0xFF)
+ vendor_lttpr_write_address +=
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+
+ /* W/A to pass through DPCD write of TPS=0 to DPRX */
+ if (apply_wa) {
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_one[0],
+ sizeof(vendor_lttpr_write_data_one));
+ }
+
+ /* clear training pattern set */
+ dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
+
+ if (apply_wa) {
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_two[0],
+ sizeof(vendor_lttpr_write_data_two));
+ }
+
+#if defined(CONFIG_DRM_AMD_DC_DP2_0)
+ /* poll for intra-hop disable */
+ for (i = 0; i < 10; i++) {
+ if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) &&
+ (sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0)
+ break;
+ udelay(1000);
+ }
+#endif
+}
+
+static void vendor_specific_lttpr_wa_five(
+ struct dc_link *link,
+ const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX],
+ uint8_t lane_count)
+{
+ const uint32_t vendor_lttpr_write_address = 0xF004F;
+ const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF};
+ uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
+ uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
+ uint8_t lane = 0;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ vendor_lttpr_write_data_vs[3] |=
+ dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
+ vendor_lttpr_write_data_pe[3] |=
+ dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
+ }
+
+ /* Force LTTPR to output desired VS and PE */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_reset[0],
+ sizeof(vendor_lttpr_write_data_reset));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_vs[0],
+ sizeof(vendor_lttpr_write_data_vs));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_pe[0],
+ sizeof(vendor_lttpr_write_data_pe));
+}
+
enum dc_status dpcd_set_link_settings(
struct dc_link *link,
const struct link_training_settings *lt_settings)
@@ -430,7 +648,7 @@ enum dc_status dpcd_set_link_settings(
status = core_link_write_dpcd(link, DP_LANE_COUNT_SET,
&lane_count_set.raw, 1);
- if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 &&
lt_settings->link_settings.use_link_rate_set == true) {
rate = 0;
/* WA for some MUX chips that will power down with eDP and lose supported
@@ -452,6 +670,15 @@ enum dc_status dpcd_set_link_settings(
#else
rate = (uint8_t) (lt_settings->link_settings.link_rate);
#endif
+ if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
+ (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
+ vendor_specific_lttpr_wa_one_start(link);
+
+ if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
+ (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN))
+ vendor_specific_lttpr_wa_one_two(link, rate);
+
status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
}
@@ -1024,6 +1251,7 @@ bool dp_is_max_vs_reached(
static bool perform_post_lt_adj_req_sequence(
struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings)
{
enum dc_lane_count lane_count =
@@ -1087,6 +1315,7 @@ static bool perform_post_lt_adj_req_sequence(
lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
dc_link_dp_set_drive_settings(link,
+ link_res,
lt_settings);
break;
}
@@ -1161,6 +1390,7 @@ enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count,
static enum link_training_result perform_channel_equalization_sequence(
struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings,
uint32_t offset)
{
@@ -1183,12 +1413,12 @@ static enum link_training_result perform_channel_equalization_sequence(
tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
#endif
- dp_set_hw_training_pattern(link, tr_pattern, offset);
+ dp_set_hw_training_pattern(link, link_res, tr_pattern, offset);
for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
retries_ch_eq++) {
- dp_set_hw_lane_settings(link, lt_settings, offset);
+ dp_set_hw_lane_settings(link, link_res, lt_settings, offset);
/* 2. update DPCD*/
if (!retries_ch_eq)
@@ -1211,6 +1441,12 @@ static enum link_training_result perform_channel_equalization_sequence(
dp_translate_training_aux_read_interval(
link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]);
+ if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
+ (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+ wait_time_microsec = 16000;
+ }
+
dp_wait_for_training_aux_rd_interval(
link,
wait_time_microsec);
@@ -1246,18 +1482,20 @@ static enum link_training_result perform_channel_equalization_sequence(
}
static void start_clock_recovery_pattern_early(struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings,
uint32_t offset)
{
DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS1. Wait 400us.\n",
__func__);
- dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, offset);
- dp_set_hw_lane_settings(link, lt_settings, offset);
+ dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset);
+ dp_set_hw_lane_settings(link, link_res, lt_settings, offset);
udelay(400);
}
static enum link_training_result perform_clock_recovery_sequence(
struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings,
uint32_t offset)
{
@@ -1273,7 +1511,7 @@ static enum link_training_result perform_clock_recovery_sequence(
retry_count = 0;
if (!link->ctx->dc->work_arounds.lt_early_cr_pattern)
- dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, offset);
+ dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset);
/* najeeb - The synaptics MST hub can put the LT in
* infinite loop by switching the VS
@@ -1290,6 +1528,7 @@ static enum link_training_result perform_clock_recovery_sequence(
/* 1. call HWSS to set lane settings*/
dp_set_hw_lane_settings(
link,
+ link_res,
lt_settings,
offset);
@@ -1311,8 +1550,10 @@ static enum link_training_result perform_clock_recovery_sequence(
/* 3. wait receiver to lock-on*/
wait_time_microsec = lt_settings->cr_pattern_time;
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
- wait_time_microsec = TRAINING_AUX_RD_INTERVAL;
+ if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
+ (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)) {
+ wait_time_microsec = 16000;
+ }
dp_wait_for_training_aux_rd_interval(
link,
@@ -1329,6 +1570,13 @@ static enum link_training_result perform_clock_recovery_sequence(
dpcd_lane_adjust,
offset);
+ if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
+ (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+ vendor_specific_lttpr_wa_one_end(link, retry_count);
+ vendor_specific_lttpr_wa_three(link, dpcd_lane_adjust);
+ }
+
/* 5. check CR done*/
if (dp_is_cr_done(lane_count, dpcd_lane_status))
return LINK_TRAINING_SUCCESS;
@@ -1379,13 +1627,14 @@ static enum link_training_result perform_clock_recovery_sequence(
static inline enum link_training_result dp_transition_to_video_idle(
struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings,
enum link_training_result status)
{
union lane_count_set lane_count_set = {0};
/* 4. mainlink output idle pattern*/
- dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+ dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
/*
* 5. post training adjust if required
@@ -1409,7 +1658,7 @@ static inline enum link_training_result dp_transition_to_video_idle(
}
if (status == LINK_TRAINING_SUCCESS &&
- perform_post_lt_adj_req_sequence(link, lt_settings) == false)
+ perform_post_lt_adj_req_sequence(link, link_res, lt_settings) == false)
status = LINK_TRAINING_LQA_FAIL;
lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count;
@@ -1852,10 +2101,11 @@ static void print_status_message(
void dc_link_dp_set_drive_settings(
struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings)
{
/* program ASIC PHY settings*/
- dp_set_hw_lane_settings(link, lt_settings, DPRX);
+ dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
dp_hw_to_dpcd_lane_settings(lt_settings,
lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
@@ -1866,6 +2116,7 @@ void dc_link_dp_set_drive_settings(
bool dc_link_dp_perform_link_training_skip_aux(
struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_setting)
{
struct link_training_settings lt_settings = {0};
@@ -1882,10 +2133,10 @@ bool dc_link_dp_perform_link_training_skip_aux(
/* 1. Perform_clock_recovery_sequence. */
/* transmit training pattern for clock recovery */
- dp_set_hw_training_pattern(link, lt_settings.pattern_for_cr, DPRX);
+ dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_cr, DPRX);
/* call HWSS to set lane settings*/
- dp_set_hw_lane_settings(link, &lt_settings, DPRX);
+ dp_set_hw_lane_settings(link, link_res, &lt_settings, DPRX);
/* wait receiver to lock-on*/
dp_wait_for_training_aux_rd_interval(link, lt_settings.cr_pattern_time);
@@ -1893,10 +2144,10 @@ bool dc_link_dp_perform_link_training_skip_aux(
/* 2. Perform_channel_equalization_sequence. */
/* transmit training pattern for channel equalization. */
- dp_set_hw_training_pattern(link, lt_settings.pattern_for_eq, DPRX);
+ dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_eq, DPRX);
/* call HWSS to set lane settings*/
- dp_set_hw_lane_settings(link, &lt_settings, DPRX);
+ dp_set_hw_lane_settings(link, link_res, &lt_settings, DPRX);
/* wait receiver to lock-on. */
dp_wait_for_training_aux_rd_interval(link, lt_settings.eq_pattern_time);
@@ -1904,7 +2155,7 @@ bool dc_link_dp_perform_link_training_skip_aux(
/* 3. Perform_link_training_int. */
/* Mainlink output idle pattern. */
- dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+ dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
print_status_message(link, &lt_settings, LINK_TRAINING_SUCCESS);
@@ -1985,6 +2236,7 @@ static void dpcd_128b_132b_get_aux_rd_interval(struct dc_link *link,
static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings)
{
uint8_t loop_count;
@@ -1996,7 +2248,7 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
/* Transmit 128b/132b_TPS1 over Main-Link */
- dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, DPRX);
+ dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, DPRX);
/* Set TRAINING_PATTERN_SET to 01h */
dpcd_set_training_pattern(link, lt_settings->pattern_for_cr);
@@ -2006,8 +2258,8 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
&dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
- dp_set_hw_lane_settings(link, lt_settings, DPRX);
- dp_set_hw_training_pattern(link, lt_settings->pattern_for_eq, DPRX);
+ dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
+ dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_eq, DPRX);
/* Set loop counter to start from 1 */
loop_count = 1;
@@ -2034,7 +2286,7 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
} else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
status = DP_128b_132b_LT_FAILED;
} else {
- dp_set_hw_lane_settings(link, lt_settings, DPRX);
+ dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
dpcd_set_lane_settings(link, lt_settings, DPRX);
}
loop_count++;
@@ -2063,6 +2315,7 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
static enum link_training_result dp_perform_128b_132b_cds_done_sequence(
struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings)
{
/* Assumption: assume hardware has transmitted eq pattern */
@@ -2099,6 +2352,7 @@ static enum link_training_result dp_perform_128b_132b_cds_done_sequence(
static enum link_training_result dp_perform_8b_10b_link_training(
struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings)
{
enum link_training_result status = LINK_TRAINING_SUCCESS;
@@ -2108,7 +2362,7 @@ static enum link_training_result dp_perform_8b_10b_link_training(
uint8_t lane = 0;
if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
- start_clock_recovery_pattern_early(link, lt_settings, DPRX);
+ start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX);
/* 1. set link rate, lane count and spread. */
dpcd_set_link_settings(link, lt_settings);
@@ -2122,12 +2376,13 @@ static enum link_training_result dp_perform_8b_10b_link_training(
for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS);
repeater_id--) {
- status = perform_clock_recovery_sequence(link, lt_settings, repeater_id);
+ status = perform_clock_recovery_sequence(link, link_res, lt_settings, repeater_id);
if (status != LINK_TRAINING_SUCCESS)
break;
status = perform_channel_equalization_sequence(link,
+ link_res,
lt_settings,
repeater_id);
@@ -2138,13 +2393,14 @@ static enum link_training_result dp_perform_8b_10b_link_training(
}
for (lane = 0; lane < (uint8_t)lt_settings->link_settings.lane_count; lane++)
- lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET = VOLTAGE_SWING_LEVEL0;
+ lt_settings->dpcd_lane_settings[lane].raw = 0;
}
if (status == LINK_TRAINING_SUCCESS) {
- status = perform_clock_recovery_sequence(link, lt_settings, DPRX);
+ status = perform_clock_recovery_sequence(link, link_res, lt_settings, DPRX);
if (status == LINK_TRAINING_SUCCESS) {
status = perform_channel_equalization_sequence(link,
+ link_res,
lt_settings,
DPRX);
}
@@ -2156,6 +2412,7 @@ static enum link_training_result dp_perform_8b_10b_link_training(
#if defined(CONFIG_DRM_AMD_DC_DCN)
static enum link_training_result dp_perform_128b_132b_link_training(
struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings)
{
enum link_training_result result = LINK_TRAINING_SUCCESS;
@@ -2167,23 +2424,358 @@ static enum link_training_result dp_perform_128b_132b_link_training(
decide_8b_10b_training_settings(link,
&lt_settings->link_settings,
&legacy_settings);
- return dp_perform_8b_10b_link_training(link, &legacy_settings);
+ return dp_perform_8b_10b_link_training(link, link_res, &legacy_settings);
}
dpcd_set_link_settings(link, lt_settings);
if (result == LINK_TRAINING_SUCCESS)
- result = dp_perform_128b_132b_channel_eq_done_sequence(link, lt_settings);
+ result = dp_perform_128b_132b_channel_eq_done_sequence(link, link_res, lt_settings);
if (result == LINK_TRAINING_SUCCESS)
- result = dp_perform_128b_132b_cds_done_sequence(link, lt_settings);
+ result = dp_perform_128b_132b_cds_done_sequence(link, link_res, lt_settings);
return result;
}
#endif
+static enum link_training_result dc_link_dp_perform_fixed_vs_pe_training_sequence(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings)
+{
+ const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF};
+ const uint8_t offset = dp_convert_to_count(
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0};
+ const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68};
+ uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
+ uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
+ uint32_t vendor_lttpr_write_address = 0xF004F;
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+ uint8_t lane = 0;
+ union down_spread_ctrl downspread = {0};
+ union lane_count_set lane_count_set = {0};
+ uint8_t toggle_rate;
+ uint8_t rate;
+
+ /* Only 8b/10b is supported */
+ ASSERT(dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING);
+
+ if (offset != 0xFF) {
+ vendor_lttpr_write_address +=
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ }
+
+ /* Vendor specific: Reset lane settings */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_reset[0],
+ sizeof(vendor_lttpr_write_data_reset));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_vs[0],
+ sizeof(vendor_lttpr_write_data_vs));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_pe[0],
+ sizeof(vendor_lttpr_write_data_pe));
+
+ /* Vendor specific: Enable intercept */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_intercept_en[0],
+ sizeof(vendor_lttpr_write_data_intercept_en));
+
+ /* 1. set link rate, lane count and spread. */
+
+ downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread);
+
+ lane_count_set.bits.LANE_COUNT_SET =
+ lt_settings->link_settings.lane_count;
+
+ lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing;
+ lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
+
+
+ if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) {
+ lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
+ link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
+ }
+
+ core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
+ &downspread.raw, sizeof(downspread));
+
+ core_link_write_dpcd(link, DP_LANE_COUNT_SET,
+ &lane_count_set.raw, 1);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ rate = get_dpcd_link_rate(&lt_settings->link_settings);
+#else
+ rate = (uint8_t) (lt_settings->link_settings.link_rate);
+#endif
+
+ /* Vendor specific: Toggle link rate */
+ toggle_rate = (rate == 0x6) ? 0xA : 0x6;
+
+ if (link->vendor_specific_lttpr_link_rate_wa == rate) {
+ core_link_write_dpcd(
+ link,
+ DP_LINK_BW_SET,
+ &toggle_rate,
+ 1);
+ }
+
+ link->vendor_specific_lttpr_link_rate_wa = rate;
+
+ core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
+
+ DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n",
+ __func__,
+ DP_LINK_BW_SET,
+ lt_settings->link_settings.link_rate,
+ DP_LANE_COUNT_SET,
+ lt_settings->link_settings.lane_count,
+ lt_settings->enhanced_framing,
+ DP_DOWNSPREAD_CTRL,
+ lt_settings->link_settings.link_spread);
+
+ /* 2. Perform link training */
+
+ /* Perform Clock Recovery Sequence */
+ if (status == LINK_TRAINING_SUCCESS) {
+ uint32_t retries_cr;
+ uint32_t retry_count;
+ uint32_t wait_time_microsec;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
+ union lane_align_status_updated dpcd_lane_status_updated;
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
+
+ retries_cr = 0;
+ retry_count = 0;
+
+ while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
+ (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
+
+ memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
+ memset(&dpcd_lane_status_updated, '\0',
+ sizeof(dpcd_lane_status_updated));
+
+ /* 1. call HWSS to set lane settings */
+ dp_set_hw_lane_settings(
+ link,
+ link_res,
+ lt_settings,
+ 0);
+
+ /* 2. update DPCD of the receiver */
+ if (!retry_count) {
+ /* EPR #361076 - write as a 5-byte burst,
+ * but only for the 1-st iteration.
+ */
+ dpcd_set_lt_pattern_and_lane_settings(
+ link,
+ lt_settings,
+ lt_settings->pattern_for_cr,
+ 0);
+ /* Vendor specific: Disable intercept */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_intercept_dis[0],
+ sizeof(vendor_lttpr_write_data_intercept_dis));
+ } else {
+ vendor_lttpr_write_data_vs[3] = 0;
+ vendor_lttpr_write_data_pe[3] = 0;
+
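+ /* Pack each lane's 2-bit VS/PE value into byte 3 of the vendor payload */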
+ for (lane = 0; lane < lane_count; lane++) {
+ vendor_lttpr_write_data_vs[3] |=
+ lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
+ vendor_lttpr_write_data_pe[3] |=
+ lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
+ }
+
+ /* Vendor specific: Update VS and PE to DPRX requested value */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_vs[0],
+ sizeof(vendor_lttpr_write_data_vs));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_pe[0],
+ sizeof(vendor_lttpr_write_data_pe));
+
+ dpcd_set_lane_settings(
+ link,
+ lt_settings,
+ 0);
+ }
+
+ /* 3. wait for the receiver to lock on */
+ wait_time_microsec = lt_settings->cr_pattern_time;
+
+ dp_wait_for_training_aux_rd_interval(
+ link,
+ wait_time_microsec);
+
+ /* 4. Read lane status and requested drive
+ * settings as set by the sink
+ */
+ dp_get_lane_status_and_lane_adjust(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ dpcd_lane_adjust,
+ 0);
+
+ /* 5. check CR done*/
+ if (dp_is_cr_done(lane_count, dpcd_lane_status)) {
+ status = LINK_TRAINING_SUCCESS;
+ break;
+ }
+
+ /* 6. max VS reached*/
+ if (dp_is_max_vs_reached(lt_settings))
+ break;
+
+ /* 7. same lane settings */
+ /* Note: settings are the same for all lanes,
+ * so comparing first lane is sufficient
+ */
+ if (lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
+ dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE)
+ retries_cr++;
+ else
+ retries_cr = 0;
+
+ /* 8. update VS/PE/PC2 in lt_settings*/
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ retry_count++;
+ }
+
+ if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
+ ASSERT(0);
+ DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue",
+ __func__,
+ LINK_TRAINING_MAX_CR_RETRY);
+
+ }
+
+ status = dp_get_cr_failure(lane_count, dpcd_lane_status);
+ }
+
+ /* Perform Channel EQ Sequence */
+ if (status == LINK_TRAINING_SUCCESS) {
+ enum dc_dp_training_pattern tr_pattern;
+ uint32_t retries_ch_eq;
+ uint32_t wait_time_microsec;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+ union lane_align_status_updated dpcd_lane_status_updated = {0};
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
+
+ /* Note: also check that TPS4 is a supported feature*/
+ tr_pattern = lt_settings->pattern_for_eq;
+
+ dp_set_hw_training_pattern(link, link_res, tr_pattern, 0);
+
+ status = LINK_TRAINING_EQ_FAIL_EQ;
+
+ for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
+ retries_ch_eq++) {
+
+ dp_set_hw_lane_settings(link, link_res, lt_settings, 0);
+
+ vendor_lttpr_write_data_vs[3] = 0;
+ vendor_lttpr_write_data_pe[3] = 0;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ vendor_lttpr_write_data_vs[3] |=
+ lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
+ vendor_lttpr_write_data_pe[3] |=
+ lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
+ }
+
+ /* Vendor specific: Update VS and PE to DPRX requested value */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_vs[0],
+ sizeof(vendor_lttpr_write_data_vs));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_pe[0],
+ sizeof(vendor_lttpr_write_data_pe));
+
+ /* 2. update DPCD*/
+ if (!retries_ch_eq)
+ /* EPR #361076 - write as a 5-byte burst,
+ * but only for the first iteration
+ */
+
+ dpcd_set_lt_pattern_and_lane_settings(
+ link,
+ lt_settings,
+ tr_pattern, 0);
+ else
+ dpcd_set_lane_settings(link, lt_settings, 0);
+
+ /* 3. wait for the receiver to lock on */
+ wait_time_microsec = lt_settings->eq_pattern_time;
+
+ dp_wait_for_training_aux_rd_interval(
+ link,
+ wait_time_microsec);
+
+ /* 4. Read lane status and requested
+ * drive settings as set by the sink
+ */
+ dp_get_lane_status_and_lane_adjust(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ dpcd_lane_adjust,
+ 0);
+
+ /* 5. check CR done*/
+ if (!dp_is_cr_done(lane_count, dpcd_lane_status)) {
+ status = LINK_TRAINING_EQ_FAIL_CR;
+ break;
+ }
+
+ /* 6. check CHEQ done*/
+ if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
+ dp_is_symbol_locked(lane_count, dpcd_lane_status) &&
+ dp_is_interlane_aligned(dpcd_lane_status_updated)) {
+ status = LINK_TRAINING_SUCCESS;
+ break;
+ }
+
+ /* 7. update VS/PE/PC2 in lt_settings*/
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ }
+ }
+
+ return status;
+}
+
+
enum link_training_result dc_link_dp_perform_link_training(
struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_settings,
bool skip_video_pattern)
{
@@ -2203,30 +2795,51 @@ enum link_training_result dc_link_dp_perform_link_training(
&lt_settings);
/* reset previous training states */
- dpcd_exit_training_mode(link);
+ if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
+ (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+ link->apply_vendor_specific_lttpr_link_rate_wa = true;
+ vendor_specific_lttpr_wa_four(link, true);
+ } else {
+ dpcd_exit_training_mode(link);
+ }
/* configure link prior to entering training mode */
dpcd_configure_lttpr_mode(link, &lt_settings);
- dp_set_fec_ready(link, lt_settings.should_set_fec_ready);
+ dp_set_fec_ready(link, link_res, lt_settings.should_set_fec_ready);
dpcd_configure_channel_coding(link, &lt_settings);
/* enter training mode:
* Per DP specs starting from here, DPTX device shall not issue
* Non-LT AUX transactions inside training mode.
*/
- if (encoding == DP_8b_10b_ENCODING)
- status = dp_perform_8b_10b_link_training(link, &lt_settings);
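+ /* Route to the fixed VS/PE training sequence when the DP_FIXED_VS_EN
+ * retimer is present in transparent LTTPR mode and the vendor-specific
+ * LTTPR workaround path is not in use.
+ */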
+ if (!link->dc->debug.apply_vendor_specific_lttpr_wa &&
+ (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
+ status = dc_link_dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings);
+ else if (encoding == DP_8b_10b_ENCODING)
+ status = dp_perform_8b_10b_link_training(link, link_res, &lt_settings);
#if defined(CONFIG_DRM_AMD_DC_DCN)
else if (encoding == DP_128b_132b_ENCODING)
- status = dp_perform_128b_132b_link_training(link, &lt_settings);
+ status = dp_perform_128b_132b_link_training(link, link_res, &lt_settings);
#endif
else
ASSERT(0);
- /* exit training mode and switch to video idle */
- dpcd_exit_training_mode(link);
+ /* exit training mode */
+ if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
+ (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+ link->apply_vendor_specific_lttpr_link_rate_wa = false;
+ vendor_specific_lttpr_wa_four(link, (status != LINK_TRAINING_SUCCESS));
+ } else {
+ dpcd_exit_training_mode(link);
+ }
+
+ /* switch to video idle */
if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern)
status = dp_transition_to_video_idle(link,
+ link_res,
&lt_settings,
status);
@@ -2278,6 +2891,7 @@ bool perform_link_training_with_retries(
dp_enable_link_phy(
link,
+ &pipe_ctx->link_res,
signal,
pipe_ctx->clock_source->id,
&current_setting);
@@ -2305,23 +2919,24 @@ bool perform_link_training_with_retries(
dp_set_panel_mode(link, panel_mode);
if (link->aux_access_disabled) {
- dc_link_dp_perform_link_training_skip_aux(link, &current_setting);
+ dc_link_dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &current_setting);
return true;
} else {
/** @todo Consolidate USB4 DP and DPx.x training. */
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
status = dc_link_dpia_perform_link_training(link,
- &current_setting,
- skip_video_pattern);
+ &pipe_ctx->link_res,
+ &current_setting,
+ skip_video_pattern);
/* Transmit idle pattern once training successful. */
if (status == LINK_TRAINING_SUCCESS)
- dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE,
- NULL, 0);
+ dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
} else {
status = dc_link_dp_perform_link_training(link,
- &current_setting,
- skip_video_pattern);
+ &pipe_ctx->link_res,
+ &current_setting,
+ skip_video_pattern);
}
if (status == LINK_TRAINING_SUCCESS)
@@ -2336,7 +2951,7 @@ bool perform_link_training_with_retries(
DC_LOG_WARNING("%s: Link training attempt %u of %d failed\n",
__func__, (unsigned int)j + 1, attempts);
- dp_disable_link_phy(link, signal);
+ dp_disable_link_phy(link, &pipe_ctx->link_res, signal);
/* Abort link training if failure due to sink being unplugged. */
if (status == LINK_TRAINING_ABORT) {
@@ -2349,7 +2964,7 @@ bool perform_link_training_with_retries(
uint32_t req_bw;
uint32_t link_bw;
- decide_fallback_link_setting(*link_setting, &current_setting, status);
+ decide_fallback_link_setting(link, *link_setting, &current_setting, status);
/* Fail link training if reduced link bandwidth no longer meets
* stream requirements.
*/
@@ -2385,12 +3000,13 @@ static enum clock_source_id get_clock_source_id(struct dc_link *link)
return dp_cs_id;
}
-static void set_dp_mst_mode(struct dc_link *link, bool mst_enable)
+static void set_dp_mst_mode(struct dc_link *link, const struct link_resource *link_res,
+ bool mst_enable)
{
if (mst_enable == false &&
link->type == dc_connection_mst_branch) {
/* Disable MST on link. Use only local sink. */
- dp_disable_link_phy_mst(link, link->connector_signal);
+ dp_disable_link_phy_mst(link, link_res, link->connector_signal);
link->type = dc_connection_single;
link->local_sink = link->remote_sinks[0];
@@ -2401,7 +3017,7 @@ static void set_dp_mst_mode(struct dc_link *link, bool mst_enable)
link->type == dc_connection_single &&
link->remote_sinks[0] != NULL) {
/* Re-enable MST on link. */
- dp_disable_link_phy(link, link->connector_signal);
+ dp_disable_link_phy(link, link_res, link->connector_signal);
dp_enable_mst_on_sink(link, true);
link->type = dc_connection_mst_branch;
@@ -2427,6 +3043,7 @@ bool dc_link_dp_sync_lt_begin(struct dc_link *link)
enum link_training_result dc_link_dp_sync_lt_attempt(
struct dc_link *link,
+ const struct link_resource *link_res,
struct dc_link_settings *link_settings,
struct dc_link_training_overrides *lt_overrides)
{
@@ -2446,14 +3063,14 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
&lt_settings);
/* Setup MST Mode */
if (lt_overrides->mst_enable)
- set_dp_mst_mode(link, *lt_overrides->mst_enable);
+ set_dp_mst_mode(link, link_res, *lt_overrides->mst_enable);
/* Disable link */
- dp_disable_link_phy(link, link->connector_signal);
+ dp_disable_link_phy(link, link_res, link->connector_signal);
/* Enable link */
dp_cs_id = get_clock_source_id(link);
- dp_enable_link_phy(link, link->connector_signal,
+ dp_enable_link_phy(link, link_res, link->connector_signal,
dp_cs_id, link_settings);
/* Set FEC enable */
@@ -2461,7 +3078,7 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
#endif
fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable;
- dp_set_fec_ready(link, fec_enable);
+ dp_set_fec_ready(link, NULL, fec_enable);
#if defined(CONFIG_DRM_AMD_DC_DCN)
}
#endif
@@ -2478,7 +3095,7 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
/* Attempt to train with given link training settings */
if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
- start_clock_recovery_pattern_early(link, &lt_settings, DPRX);
+ start_clock_recovery_pattern_early(link, link_res, &lt_settings, DPRX);
/* Set link rate, lane count and spread. */
dpcd_set_link_settings(link, &lt_settings);
@@ -2486,9 +3103,10 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
/* 2. perform link training (set link training done
* to false is done as well)
*/
- lt_status = perform_clock_recovery_sequence(link, &lt_settings, DPRX);
+ lt_status = perform_clock_recovery_sequence(link, link_res, &lt_settings, DPRX);
if (lt_status == LINK_TRAINING_SUCCESS) {
lt_status = perform_channel_equalization_sequence(link,
+ link_res,
&lt_settings,
DPRX);
}
@@ -2509,11 +3127,11 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down)
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dc_link_settings link_settings = link->cur_link_settings;
#endif
- dp_disable_link_phy(link, link->connector_signal);
+ dp_disable_link_phy(link, NULL, link->connector_signal);
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING)
#endif
- dp_set_fec_ready(link, false);
+ dp_set_fec_ready(link, NULL, false);
}
link->sync_lt_in_progress = false;
@@ -2568,7 +3186,8 @@ bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_
return false;
}
-static struct dc_link_settings get_max_link_cap(struct dc_link *link)
+static struct dc_link_settings get_max_link_cap(struct dc_link *link,
+ const struct link_resource *link_res)
{
struct dc_link_settings max_link_cap = {0};
#if defined(CONFIG_DRM_AMD_DC_DCN)
@@ -2592,9 +3211,11 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
if (link_enc)
link_enc->funcs->get_max_link_cap(link_enc, &max_link_cap);
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (max_link_cap.link_rate >= LINK_RATE_UHBR10 &&
- !link->hpo_dp_link_enc)
- max_link_cap.link_rate = LINK_RATE_HIGH3;
+ if (max_link_cap.link_rate >= LINK_RATE_UHBR10) {
+ if (!link_res->hpo_dp_link_enc ||
+ link->dc->debug.disable_uhbr)
+ max_link_cap.link_rate = LINK_RATE_HIGH3;
+ }
#endif
/* Lower link settings based on sink's link cap */
@@ -2612,7 +3233,7 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
* account for lttpr repeaters cap
* notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3).
*/
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+ if (link->lttpr_mode != LTTPR_MODE_NON_LTTPR) {
if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
@@ -2751,6 +3372,7 @@ bool hpd_rx_irq_check_link_loss_status(
bool dp_verify_link_cap(
struct dc_link *link,
+ const struct link_resource *link_res,
struct dc_link_settings *known_limit_link_setting,
int *fail_count)
{
@@ -2768,7 +3390,7 @@ bool dp_verify_link_cap(
/* link training starts with the maximum common settings
* supported by both sink and ASIC.
*/
- max_link_cap = get_max_link_cap(link);
+ max_link_cap = get_max_link_cap(link, link_res);
initial_link_settings = get_common_supported_link_settings(
*known_limit_link_setting,
max_link_cap);
@@ -2808,7 +3430,7 @@ bool dp_verify_link_cap(
* find the physical link capability
*/
/* disable PHY done possible by BIOS, will be done by driver itself */
- dp_disable_link_phy(link, link->connector_signal);
+ dp_disable_link_phy(link, link_res, link->connector_signal);
dp_cs_id = get_clock_source_id(link);
@@ -2820,8 +3442,8 @@ bool dp_verify_link_cap(
*/
if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C &&
link->dc->debug.usbc_combo_phy_reset_wa) {
- dp_enable_link_phy(link, link->connector_signal, dp_cs_id, cur);
- dp_disable_link_phy(link, link->connector_signal);
+ dp_enable_link_phy(link, link_res, link->connector_signal, dp_cs_id, cur);
+ dp_disable_link_phy(link, link_res, link->connector_signal);
}
do {
@@ -2832,6 +3454,7 @@ bool dp_verify_link_cap(
dp_enable_link_phy(
link,
+ link_res,
link->connector_signal,
dp_cs_id,
cur);
@@ -2842,6 +3465,7 @@ bool dp_verify_link_cap(
else {
status = dc_link_dp_perform_link_training(
link,
+ link_res,
cur,
skip_video_pattern);
if (status == LINK_TRAINING_SUCCESS)
@@ -2863,8 +3487,8 @@ bool dp_verify_link_cap(
* setting or before returning we'll enable it later
* based on the actual mode we're driving
*/
- dp_disable_link_phy(link, link->connector_signal);
- } while (!success && decide_fallback_link_setting(
+ dp_disable_link_phy(link, link_res, link->connector_signal);
+ } while (!success && decide_fallback_link_setting(link,
initial_link_settings, cur, status));
/* Link Training failed for all Link Settings
@@ -2887,6 +3511,7 @@ bool dp_verify_link_cap(
bool dp_verify_link_cap_with_retries(
struct dc_link *link,
+ const struct link_resource *link_res,
struct dc_link_settings *known_limit_link_setting,
int attempts)
{
@@ -2904,7 +3529,7 @@ bool dp_verify_link_cap_with_retries(
link->verified_link_cap.link_rate = LINK_RATE_LOW;
link->verified_link_cap.link_spread = LINK_SPREAD_DISABLED;
break;
- } else if (dp_verify_link_cap(link,
+ } else if (dp_verify_link_cap(link, link_res,
known_limit_link_setting,
&fail_count) && fail_count == 0) {
success = true;
@@ -2916,13 +3541,13 @@ bool dp_verify_link_cap_with_retries(
}
bool dp_verify_mst_link_cap(
- struct dc_link *link)
+ struct dc_link *link, const struct link_resource *link_res)
{
struct dc_link_settings max_link_cap = {0};
if (dp_get_link_encoding_format(&link->reported_link_cap) ==
DP_8b_10b_ENCODING) {
- max_link_cap = get_max_link_cap(link);
+ max_link_cap = get_max_link_cap(link, link_res);
link->verified_link_cap = get_common_supported_link_settings(
link->reported_link_cap,
max_link_cap);
@@ -2931,6 +3556,7 @@ bool dp_verify_mst_link_cap(
else if (dp_get_link_encoding_format(&link->reported_link_cap) ==
DP_128b_132b_ENCODING) {
dp_verify_link_cap_with_retries(link,
+ link_res,
&link->reported_link_cap,
LINK_TRAINING_MAX_VERIFY_RETRY);
}
@@ -3116,6 +3742,7 @@ static bool decide_fallback_link_setting_max_bw_policy(
* and no further fallback could be done
*/
static bool decide_fallback_link_setting(
+ struct dc_link *link,
struct dc_link_settings initial_link_settings,
struct dc_link_settings *current_link_setting,
enum link_training_result training_result)
@@ -3123,7 +3750,8 @@ static bool decide_fallback_link_setting(
if (!current_link_setting)
return false;
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (dp_get_link_encoding_format(&initial_link_settings) == DP_128b_132b_ENCODING)
+ if (dp_get_link_encoding_format(&initial_link_settings) == DP_128b_132b_ENCODING ||
+ link->dc->debug.force_dp2_lt_fallback_method)
return decide_fallback_link_setting_max_bw_policy(&initial_link_settings,
current_link_setting);
#endif
@@ -3346,6 +3974,148 @@ bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *lin
return false;
}
+static bool decide_edp_link_settings_with_dsc(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ uint32_t req_bw,
+ enum dc_link_rate max_link_rate)
+{
+ struct dc_link_settings initial_link_setting;
+ struct dc_link_settings current_link_setting;
+ uint32_t link_bw;
+
+ unsigned int policy = 0;
+
+ policy = link->ctx->dc->debug.force_dsc_edp_policy;
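+ /* Non-zero policy: minimize lane count by raising the link rate first;
+ * zero policy: minimize link rate by adding lanes first.
+ */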
+ if (max_link_rate == LINK_RATE_UNKNOWN)
+ max_link_rate = link->verified_link_cap.link_rate;
+ /*
+ * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
+ * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
+ */
+ if ((link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 ||
+ link->dpcd_caps.edp_supported_link_rates_count == 0)) {
+ /* for DSC enabled case, we search for minimum lane count */
+ memset(&initial_link_setting, 0, sizeof(initial_link_setting));
+ initial_link_setting.lane_count = LANE_COUNT_ONE;
+ initial_link_setting.link_rate = LINK_RATE_LOW;
+ initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
+ initial_link_setting.use_link_rate_set = false;
+ initial_link_setting.link_rate_set = 0;
+ current_link_setting = initial_link_setting;
+ if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
+ return false;
+
+ /* search for the minimum link setting that:
+ * 1. is supported according to the link training result
+ * 2. could support the b/w requested by the timing
+ */
+ while (current_link_setting.link_rate <=
+ max_link_rate) {
+ link_bw = dc_link_bandwidth_kbps(
+ link,
+ &current_link_setting);
+ if (req_bw <= link_bw) {
+ *link_setting = current_link_setting;
+ return true;
+ }
+ if (policy) {
+ /* minimize lane */
+ if (current_link_setting.link_rate < max_link_rate) {
+ current_link_setting.link_rate =
+ increase_link_rate(
+ current_link_setting.link_rate);
+ } else {
+ if (current_link_setting.lane_count <
+ link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ current_link_setting.link_rate = initial_link_setting.link_rate;
+ } else
+ break;
+ }
+ } else {
+ /* minimize link rate */
+ if (current_link_setting.lane_count <
+ link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ } else {
+ current_link_setting.link_rate =
+ increase_link_rate(
+ current_link_setting.link_rate);
+ current_link_setting.lane_count =
+ initial_link_setting.lane_count;
+ }
+ }
+ }
+ return false;
+ }
+
+ /* optimized eDP link rates (link rate table) are supported */
+ memset(&initial_link_setting, 0, sizeof(initial_link_setting));
+ initial_link_setting.lane_count = LANE_COUNT_ONE;
+ initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0];
+ initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
+ initial_link_setting.use_link_rate_set = true;
+ initial_link_setting.link_rate_set = 0;
+ current_link_setting = initial_link_setting;
+
+ /* search for the minimum link setting that:
+ * 1. is supported according to the link training result
+ * 2. could support the b/w requested by the timing
+ */
+ while (current_link_setting.link_rate <=
+ max_link_rate) {
+ link_bw = dc_link_bandwidth_kbps(
+ link,
+ &current_link_setting);
+ if (req_bw <= link_bw) {
+ *link_setting = current_link_setting;
+ return true;
+ }
+ if (policy) {
+ /* minimize lane */
+ if (current_link_setting.link_rate_set <
+ link->dpcd_caps.edp_supported_link_rates_count
+ && current_link_setting.link_rate < max_link_rate) {
+ current_link_setting.link_rate_set++;
+ current_link_setting.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+ } else {
+ if (current_link_setting.lane_count < link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ current_link_setting.link_rate_set = initial_link_setting.link_rate_set;
+ current_link_setting.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+ } else
+ break;
+ }
+ } else {
+ /* minimize link rate */
+ if (current_link_setting.lane_count <
+ link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ } else {
+ if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+ current_link_setting.link_rate_set++;
+ current_link_setting.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+ current_link_setting.lane_count =
+ initial_link_setting.lane_count;
+ } else
+ break;
+ }
+ }
+ }
+ return false;
+}
+
static bool decide_mst_link_settings(const struct dc_link *link, struct dc_link_settings *link_setting)
{
*link_setting = link->verified_link_cap;
@@ -3380,7 +4150,25 @@ void decide_link_settings(struct dc_stream_state *stream,
if (decide_mst_link_settings(link, link_setting))
return;
} else if (link->connector_signal == SIGNAL_TYPE_EDP) {
- if (decide_edp_link_settings(link, link_setting, req_bw))
+ /* enable edp link optimization for DSC eDP case */
+ if (stream->timing.flags.DSC) {
+ enum dc_link_rate max_link_rate = LINK_RATE_UNKNOWN;
+
+ if (link->ctx->dc->debug.force_dsc_edp_policy) {
+ /* calculate the max link rate cap from the timing's non-DSC bandwidth */
+ struct dc_link_settings tmp_link_setting;
+ struct dc_crtc_timing tmp_timing = stream->timing;
+ uint32_t orig_req_bw;
+
+ tmp_link_setting.link_rate = LINK_RATE_UNKNOWN;
+ tmp_timing.flags.DSC = 0;
+ orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing);
+ decide_edp_link_settings(link, &tmp_link_setting, orig_req_bw);
+ max_link_rate = tmp_link_setting.link_rate;
+ }
+ if (decide_edp_link_settings_with_dsc(link, link_setting, req_bw, max_link_rate))
+ return;
+ } else if (decide_edp_link_settings(link, link_setting, req_bw))
return;
} else if (decide_dp_link_settings(link, link_setting, req_bw))
return;
@@ -3421,7 +4209,6 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
&psr_configuration.raw,
sizeof(psr_configuration.raw));
-
if (psr_configuration.bits.ENABLE) {
unsigned char dpcdbuf[3] = {0};
union psr_error_status psr_error_status;
@@ -3453,10 +4240,12 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
sizeof(psr_error_status.raw));
/* PSR error, disable and re-enable PSR */
- allow_active = false;
- dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
- allow_active = true;
- dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
+ if (link->psr_settings.psr_allow_active) {
+ allow_active = false;
+ dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
+ allow_active = true;
+ dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
+ }
return true;
} else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS ==
@@ -3534,6 +4323,13 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
&dpcd_lane_adjustment[0].raw,
sizeof(dpcd_lane_adjustment));
+ if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
+ (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
+ vendor_specific_lttpr_wa_three_dpcd(
+ link,
+ link_training_settings.dpcd_lane_settings);
+
/*get post cursor 2 parameters
* For DP 1.1a or earlier, this DPCD register's value is 0
* For DP 1.2 or later:
@@ -4153,6 +4949,56 @@ static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc)
return -1;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
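+/* Map the 3-bit encoded HDMI FRL bandwidth code (e.g. MAX_ENCODED_LINK_BW_SUPPORT)
+ * to a link bandwidth in kbps; unrecognized codes return 0.
+ */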
+uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw)
+{
+ switch (bw) {
+ case 0b001:
+ return 9000000;
+ case 0b010:
+ return 18000000;
+ case 0b011:
+ return 24000000;
+ case 0b100:
+ return 32000000;
+ case 0b101:
+ return 40000000;
+ case 0b110:
+ return 48000000;
+ }
+
+ return 0;
+}
+
+/**
+ * Return PCON's post FRL link training supported BW if it is non-zero, otherwise return max_supported_frl_bw.
+ */
+static uint32_t intersect_frl_link_bw_support(
+ const uint32_t max_supported_frl_bw_in_kbps,
+ const union hdmi_encoded_link_bw hdmi_encoded_link_bw)
+{
+ uint32_t supported_bw_in_kbps = max_supported_frl_bw_in_kbps;
+
+ // HDMI_ENCODED_LINK_BW bits are only valid if HDMI Link Configuration bit is 1 (FRL mode)
+ if (hdmi_encoded_link_bw.bits.FRL_MODE) {
+ if (hdmi_encoded_link_bw.bits.BW_48Gbps)
+ supported_bw_in_kbps = 48000000;
+ else if (hdmi_encoded_link_bw.bits.BW_40Gbps)
+ supported_bw_in_kbps = 40000000;
+ else if (hdmi_encoded_link_bw.bits.BW_32Gbps)
+ supported_bw_in_kbps = 32000000;
+ else if (hdmi_encoded_link_bw.bits.BW_24Gbps)
+ supported_bw_in_kbps = 24000000;
+ else if (hdmi_encoded_link_bw.bits.BW_18Gbps)
+ supported_bw_in_kbps = 18000000;
+ else if (hdmi_encoded_link_bw.bits.BW_9Gbps)
+ supported_bw_in_kbps = 9000000;
+ }
+
+ return supported_bw_in_kbps;
+}
+#endif
+
static void read_dp_device_vendor_id(struct dc_link *link)
{
struct dp_device_vendor_id dp_id;
@@ -4264,6 +5110,27 @@ static void get_active_converter_info(
translate_dpcd_max_bpc(
hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (link->dc->caps.hdmi_frl_pcon_support) {
+ union hdmi_encoded_link_bw hdmi_encoded_link_bw;
+
+ link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps =
+ dc_link_bw_kbps_from_raw_frl_link_rate_data(
+ hdmi_color_caps.bits.MAX_ENCODED_LINK_BW_SUPPORT);
+
+ // Intersect reported max link bw support with the supported link rate post FRL link training
+ if (core_link_read_dpcd(link, DP_PCON_HDMI_POST_FRL_STATUS,
+ &hdmi_encoded_link_bw.raw, sizeof(hdmi_encoded_link_bw)) == DC_OK) {
+ link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = intersect_frl_link_bw_support(
+ link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps,
+ hdmi_encoded_link_bw);
+ }
+
+ if (link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps > 0)
+ link->dpcd_caps.dongle_caps.extendedCapValid = true;
+ }
+#endif
+
if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz != 0)
link->dpcd_caps.dongle_caps.extendedCapValid = true;
}
@@ -4454,7 +5321,7 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
lttpr_dpcd_data,
sizeof(lttpr_dpcd_data));
if (status != DC_OK) {
- dm_error("%s: Read LTTPR caps data failed.\n", __func__);
+ DC_LOG_DP2("%s: Read LTTPR caps data failed.\n", __func__);
return false;
}
@@ -5218,7 +6085,7 @@ bool dc_link_dp_set_test_pattern(
DP_TEST_PATTERN_VIDEO_MODE) {
/* Set CRTC Test Pattern */
set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);
- dp_set_hw_test_pattern(link, test_pattern,
+ dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern,
(uint8_t *)p_custom_pattern,
(uint32_t)cust_pattern_size);
@@ -5240,8 +6107,18 @@ bool dc_link_dp_set_test_pattern(
if (is_dp_phy_pattern(test_pattern)) {
/* Set DPCD Lane Settings before running test pattern */
if (p_link_settings != NULL) {
- dp_set_hw_lane_settings(link, p_link_settings, DPRX);
- dpcd_set_lane_settings(link, p_link_settings, DPRX);
+ if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
+ (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+ dpcd_set_lane_settings(link, p_link_settings, DPRX);
+ vendor_specific_lttpr_wa_five(
+ link,
+ p_link_settings->dpcd_lane_settings,
+ p_link_settings->link_settings.lane_count);
+ } else {
+ dp_set_hw_lane_settings(link, &pipe_ctx->link_res, p_link_settings, DPRX);
+ dpcd_set_lane_settings(link, p_link_settings, DPRX);
+ }
}
/* Blank stream if running test pattern */
@@ -5254,7 +6131,7 @@ bool dc_link_dp_set_test_pattern(
pipes->stream_res.stream_enc->funcs->dp_blank(link, pipe_ctx->stream_res.stream_enc);
}
- dp_set_hw_test_pattern(link, test_pattern,
+ dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern,
(uint8_t *)p_custom_pattern,
(uint32_t)cust_pattern_size);
@@ -5574,7 +6451,7 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
return DP_PANEL_MODE_DEFAULT;
}
-enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready)
+enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource *link_res, bool ready)
{
/* FEC has to be "set ready" before the link training.
* The policy is to always train with FEC
@@ -5665,6 +6542,23 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
}
}
+struct link_encoder *dp_get_link_enc(struct dc_link *link)
+{
+ struct link_encoder *link_enc;
+
+ link_enc = link->link_enc;
+ if (link->is_dig_mapping_flexible &&
+ link->dc->res_pool->funcs->link_encs_assign) {
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc,
+ link);
+ if (!link->link_enc)
+ link_enc = link_enc_cfg_get_next_avail_link_enc(
+ link->ctx->dc);
+ }
+
+ return link_enc;
+}
+
void dpcd_set_source_specific_data(struct dc_link *link)
{
if (!link->dc->vendor_signature.is_valid) {
@@ -5885,7 +6779,10 @@ bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timin
req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing);
- decide_edp_link_settings(link, &link_setting, req_bw);
+ if (!crtc_timing->flags.DSC)
+ decide_edp_link_settings(link, &link_setting, req_bw);
+ else
+ decide_edp_link_settings_with_dsc(link, &link_setting, req_bw, LINK_RATE_UNKNOWN);
if (link->dpcd_caps.edp_supported_link_rates[link_rate_set] != link_setting.link_rate ||
lane_count_set.bits.LANE_COUNT_SET != link_setting.lane_count) {
@@ -6121,8 +7018,21 @@ struct fixed31_32 calculate_sst_avg_time_slots_per_mtp(
bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx)
{
+ /* If this assert is hit then we have a link encoder dynamic management issue */
+ ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true);
return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
- pipe_ctx->stream->link->hpo_dp_link_enc &&
+ pipe_ctx->link_res.hpo_dp_link_enc &&
dc_is_dp_signal(pipe_ctx->stream->signal));
}
#endif
+
+void edp_panel_backlight_power_on(struct dc_link *link)
+{
+ if (link->connector_signal != SIGNAL_TYPE_EDP)
+ return;
+
+ link->dc->hwss.edp_power_control(link, true);
+ link->dc->hwss.edp_wait_for_hpd_ready(link, true);
+ if (link->dc->hwss.edp_backlight_control)
+ link->dc->hwss.edp_backlight_control(link, true);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
index b1c9f77d6bf4..0e95bc5df4e7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
@@ -77,7 +77,9 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
* @param[in] link_setting Lane count, link rate and downspread control.
* @param[out] lt_settings Link settings and drive settings (voltage swing and pre-emphasis).
*/
-static enum link_training_result dpia_configure_link(struct dc_link *link,
+static enum link_training_result dpia_configure_link(
+ struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_setting,
struct link_training_settings *lt_settings)
{
@@ -94,25 +96,25 @@ static enum link_training_result dpia_configure_link(struct dc_link *link,
lt_settings);
status = dpcd_configure_channel_coding(link, lt_settings);
- if (status != DC_OK && !link->hpd_status)
+ if (status != DC_OK && link->is_hpd_pending)
return LINK_TRAINING_ABORT;
/* Configure lttpr mode */
status = dpcd_configure_lttpr_mode(link, lt_settings);
- if (status != DC_OK && !link->hpd_status)
+ if (status != DC_OK && link->is_hpd_pending)
return LINK_TRAINING_ABORT;
/* Set link rate, lane count and spread. */
status = dpcd_set_link_settings(link, lt_settings);
- if (status != DC_OK && !link->hpd_status)
+ if (status != DC_OK && link->is_hpd_pending)
return LINK_TRAINING_ABORT;
if (link->preferred_training_settings.fec_enable)
fec_enable = *link->preferred_training_settings.fec_enable;
else
fec_enable = true;
- status = dp_set_fec_ready(link, fec_enable);
- if (status != DC_OK && !link->hpd_status)
+ status = dp_set_fec_ready(link, link_res, fec_enable);
+ if (status != DC_OK && link->is_hpd_pending)
return LINK_TRAINING_ABORT;
return LINK_TRAINING_SUCCESS;
@@ -252,7 +254,9 @@ static enum dc_status dpcd_set_lt_pattern(struct dc_link *link,
* @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
* @param hop The Hop in display path. DPRX = 0.
*/
-static enum link_training_result dpia_training_cr_non_transparent(struct dc_link *link,
+static enum link_training_result dpia_training_cr_non_transparent(
+ struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings,
uint32_t hop)
{
@@ -388,7 +392,7 @@ static enum link_training_result dpia_training_cr_non_transparent(struct dc_link
}
/* Abort link training if clock recovery failed due to HPD unplug. */
- if (!link->hpd_status)
+ if (link->is_hpd_pending)
result = LINK_TRAINING_ABORT;
DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n"
@@ -411,7 +415,9 @@ static enum link_training_result dpia_training_cr_non_transparent(struct dc_link
* @param link DPIA link being trained.
* @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
*/
-static enum link_training_result dpia_training_cr_transparent(struct dc_link *link,
+static enum link_training_result dpia_training_cr_transparent(
+ struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings)
{
enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0;
@@ -490,7 +496,7 @@ static enum link_training_result dpia_training_cr_transparent(struct dc_link *li
}
/* Abort link training if clock recovery failed due to HPD unplug. */
- if (!link->hpd_status)
+ if (link->is_hpd_pending)
result = LINK_TRAINING_ABORT;
DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n"
@@ -511,16 +517,18 @@ static enum link_training_result dpia_training_cr_transparent(struct dc_link *li
* @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
* @param hop The Hop in display path. DPRX = 0.
*/
-static enum link_training_result dpia_training_cr_phase(struct dc_link *link,
+static enum link_training_result dpia_training_cr_phase(
+ struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings,
uint32_t hop)
{
enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0;
if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
- result = dpia_training_cr_non_transparent(link, lt_settings, hop);
+ result = dpia_training_cr_non_transparent(link, link_res, lt_settings, hop);
else
- result = dpia_training_cr_transparent(link, lt_settings);
+ result = dpia_training_cr_transparent(link, link_res, lt_settings);
return result;
}
@@ -561,7 +569,9 @@ static uint32_t dpia_get_eq_aux_rd_interval(const struct dc_link *link,
* @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
* @param hop The Hop in display path. DPRX = 0.
*/
-static enum link_training_result dpia_training_eq_non_transparent(struct dc_link *link,
+static enum link_training_result dpia_training_eq_non_transparent(
+ struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings,
uint32_t hop)
{
@@ -675,7 +685,7 @@ static enum link_training_result dpia_training_eq_non_transparent(struct dc_link
}
/* Abort link training if equalization failed due to HPD unplug. */
- if (!link->hpd_status)
+ if (link->is_hpd_pending)
result = LINK_TRAINING_ABORT;
DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n"
@@ -700,7 +710,9 @@ static enum link_training_result dpia_training_eq_non_transparent(struct dc_link
* @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
* @param hop The Hop in display path. DPRX = 0.
*/
-static enum link_training_result dpia_training_eq_transparent(struct dc_link *link,
+static enum link_training_result dpia_training_eq_transparent(
+ struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings)
{
enum link_training_result result = LINK_TRAINING_EQ_FAIL_EQ;
@@ -758,7 +770,7 @@ static enum link_training_result dpia_training_eq_transparent(struct dc_link *li
}
/* Abort link training if equalization failed due to HPD unplug. */
- if (!link->hpd_status)
+ if (link->is_hpd_pending)
result = LINK_TRAINING_ABORT;
DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n"
@@ -779,16 +791,18 @@ static enum link_training_result dpia_training_eq_transparent(struct dc_link *li
* @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
* @param hop The Hop in display path. DPRX = 0.
*/
-static enum link_training_result dpia_training_eq_phase(struct dc_link *link,
+static enum link_training_result dpia_training_eq_phase(
+ struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings,
uint32_t hop)
{
enum link_training_result result;
if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
- result = dpia_training_eq_non_transparent(link, lt_settings, hop);
+ result = dpia_training_eq_non_transparent(link, link_res, lt_settings, hop);
else
- result = dpia_training_eq_transparent(link, lt_settings);
+ result = dpia_training_eq_transparent(link, link_res, lt_settings);
return result;
}
@@ -892,10 +906,10 @@ static void dpia_training_abort(struct dc_link *link, uint32_t hop)
__func__,
link->link_id.enum_id - ENUM_ID_1,
link->lttpr_mode,
- link->hpd_status);
+ link->is_hpd_pending);
/* Abandon clean-up if sink unplugged. */
- if (!link->hpd_status)
+ if (link->is_hpd_pending)
return;
if (hop != DPRX)
@@ -908,7 +922,9 @@ static void dpia_training_abort(struct dc_link *link, uint32_t hop)
core_link_send_set_config(link, DPIA_SET_CFG_SET_LINK, data);
}
-enum link_training_result dc_link_dpia_perform_link_training(struct dc_link *link,
+enum link_training_result dc_link_dpia_perform_link_training(
+ struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_setting,
bool skip_video_pattern)
{
@@ -918,7 +934,7 @@ enum link_training_result dc_link_dpia_perform_link_training(struct dc_link *lin
int8_t repeater_id; /* Current hop. */
/* Configure link as prescribed in link_setting and set LTTPR mode. */
- result = dpia_configure_link(link, link_setting, &lt_settings);
+ result = dpia_configure_link(link, link_res, link_setting, &lt_settings);
if (result != LINK_TRAINING_SUCCESS)
return result;
@@ -930,12 +946,12 @@ enum link_training_result dc_link_dpia_perform_link_training(struct dc_link *lin
*/
for (repeater_id = repeater_cnt; repeater_id >= 0; repeater_id--) {
/* Clock recovery. */
- result = dpia_training_cr_phase(link, &lt_settings, repeater_id);
+ result = dpia_training_cr_phase(link, link_res, &lt_settings, repeater_id);
if (result != LINK_TRAINING_SUCCESS)
break;
/* Equalization. */
- result = dpia_training_eq_phase(link, &lt_settings, repeater_id);
+ result = dpia_training_eq_phase(link, link_res, &lt_settings, repeater_id);
if (result != LINK_TRAINING_SUCCESS)
break;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
index 25e48a8cbb78..a55944da8d53 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
@@ -118,7 +118,10 @@ static void remove_link_enc_assignment(
*/
if (get_stream_using_link_enc(state, eng_id) == NULL)
state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] = eng_id;
+
stream->link_enc = NULL;
+ state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id = ENGINE_ID_UNKNOWN;
+ state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream = NULL;
break;
}
}
@@ -148,6 +151,7 @@ static void add_link_enc_assignment(
.ep_type = stream->link->ep_type},
.eng_id = eng_id,
.stream = stream};
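+ /* The assignment holds a reference on the stream; it is released when
+ * the assignment is cleared (see clear_enc_assignments()).
+ */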
+ dc_stream_retain(stream);
state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] = ENGINE_ID_UNKNOWN;
stream->link_enc = stream->ctx->dc->res_pool->link_encoders[eng_idx];
break;
@@ -227,7 +231,7 @@ static struct link_encoder *get_link_enc_used_by_link(
.link_id = link->link_id,
.ep_type = link->ep_type};
- for (i = 0; i < state->stream_count; i++) {
+ for (i = 0; i < MAX_PIPES; i++) {
struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
if (assignment.valid == true && are_ep_ids_equal(&assignment.ep_id, &ep_id))
@@ -237,28 +241,18 @@ static struct link_encoder *get_link_enc_used_by_link(
return link_enc;
}
/* Clear all link encoder assignments. */
-static void clear_enc_assignments(struct dc_state *state)
+static void clear_enc_assignments(const struct dc *dc, struct dc_state *state)
{
int i;
- enum engine_id eng_id;
- struct dc_stream_state *stream;
for (i = 0; i < MAX_PIPES; i++) {
state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid = false;
- eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id;
- stream = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream;
- if (eng_id != ENGINE_ID_UNKNOWN)
- state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_id - ENGINE_ID_DIGA] = eng_id;
- if (stream)
- stream->link_enc = NULL;
+ state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id = ENGINE_ID_UNKNOWN;
+ if (state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream != NULL) {
+ dc_stream_release(state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream);
+ state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream = NULL;
+ }
}
-}
-
-void link_enc_cfg_init(
- struct dc *dc,
- struct dc_state *state)
-{
- int i;
for (i = 0; i < dc->res_pool->res_cap->num_dig_link_enc; i++) {
if (dc->res_pool->link_encoders[i])
@@ -266,8 +260,13 @@ void link_enc_cfg_init(
else
state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i] = ENGINE_ID_UNKNOWN;
}
+}
- clear_enc_assignments(state);
+void link_enc_cfg_init(
+ const struct dc *dc,
+ struct dc_state *state)
+{
+ clear_enc_assignments(dc, state);
state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
}
@@ -284,12 +283,9 @@ void link_enc_cfg_link_encs_assign(
ASSERT(state->stream_count == stream_count);
- if (stream_count == 0)
- clear_enc_assignments(state);
-
/* Release DIG link encoder resources before running assignment algorithm. */
- for (i = 0; i < stream_count; i++)
- dc->res_pool->funcs->link_enc_unassign(state, streams[i]);
+ for (i = 0; i < dc->current_state->stream_count; i++)
+ dc->res_pool->funcs->link_enc_unassign(state, dc->current_state->streams[i]);
for (i = 0; i < MAX_PIPES; i++)
ASSERT(state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid == false);
@@ -544,6 +540,7 @@ bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state)
uint8_t dig_stream_count = 0;
int matching_stream_ptrs = 0;
int eng_ids_per_ep_id[MAX_PIPES] = {0};
+ int valid_bitmap = 0;
/* (1) No. valid entries same as stream count. */
for (i = 0; i < MAX_PIPES; i++) {
@@ -625,5 +622,15 @@ bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state)
is_valid = valid_entries && valid_stream_ptrs && valid_uniqueness && valid_avail && valid_streams;
ASSERT(is_valid);
+ if (is_valid == false) {
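+ /* One bit per check: bit0 entries, bit1 stream pointers, bit2 uniqueness,
+ * bit3 encoder availability, bit4 streams; a set bit means the check passed.
+ */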
+ valid_bitmap =
+ (valid_entries & 0x1) |
+ ((valid_stream_ptrs & 0x1) << 1) |
+ ((valid_uniqueness & 0x1) << 2) |
+ ((valid_avail & 0x1) << 3) |
+ ((valid_streams & 0x1) << 4);
+ dm_error("Invalid link encoder assignments: 0x%x\n", valid_bitmap);
+ }
+
return is_valid;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 368e834c6809..45d03d3a95c3 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -71,6 +71,7 @@ void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode)
void dp_enable_link_phy(
struct dc_link *link,
+ const struct link_resource *link_res,
enum signal_type signal,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings)
@@ -135,7 +136,7 @@ void dp_enable_link_phy(
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
- enable_dp_hpo_output(link, link_settings);
+ enable_dp_hpo_output(link, link_res, link_settings);
} else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
if (dc_is_dp_sst_signal(signal)) {
link_enc->funcs->enable_dp_output(
@@ -236,12 +237,13 @@ bool edp_receiver_ready_T7(struct dc_link *link)
return result;
}
-void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
+void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_res,
+ enum signal_type signal)
{
struct dc *dc = link->ctx->dc;
struct dmcu *dmcu = dc->res_pool->dmcu;
#if defined(CONFIG_DRM_AMD_DC_DCN)
- struct hpo_dp_link_encoder *hpo_link_enc = link->hpo_dp_link_enc;
+ struct hpo_dp_link_encoder *hpo_link_enc = link_res->hpo_dp_link_enc;
#endif
struct link_encoder *link_enc;
@@ -260,7 +262,7 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
link->dc->hwss.edp_backlight_control(link, false);
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING)
- disable_dp_hpo_output(link, signal);
+ disable_dp_hpo_output(link, link_res, signal);
else
link_enc->funcs->disable_output(link_enc, signal);
#else
@@ -274,7 +276,7 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING &&
hpo_link_enc)
- disable_dp_hpo_output(link, signal);
+ disable_dp_hpo_output(link, link_res, signal);
else
link_enc->funcs->disable_output(link_enc, signal);
#else
@@ -294,13 +296,14 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
}
-void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal)
+void dp_disable_link_phy_mst(struct dc_link *link, const struct link_resource *link_res,
+ enum signal_type signal)
{
/* MST disable link only when no stream uses the link */
if (link->mst_stream_alloc_table.stream_count > 0)
return;
- dp_disable_link_phy(link, signal);
+ dp_disable_link_phy(link, link_res, signal);
/* set the sink to SST mode after disabling the link */
dp_enable_mst_on_sink(link, false);
@@ -308,6 +311,7 @@ void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal)
bool dp_set_hw_training_pattern(
struct dc_link *link,
+ const struct link_resource *link_res,
enum dc_dp_training_pattern pattern,
uint32_t offset)
{
@@ -338,7 +342,7 @@ bool dp_set_hw_training_pattern(
break;
}
- dp_set_hw_test_pattern(link, test_pattern, NULL, 0);
+ dp_set_hw_test_pattern(link, link_res, test_pattern, NULL, 0);
return true;
}
@@ -349,6 +353,7 @@ bool dp_set_hw_training_pattern(
#endif
void dp_set_hw_lane_settings(
struct dc_link *link,
+ const struct link_resource *link_res,
const struct link_training_settings *link_settings,
uint32_t offset)
{
@@ -361,8 +366,8 @@ void dp_set_hw_lane_settings(
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dp_get_link_encoding_format(&link_settings->link_settings) ==
DP_128b_132b_ENCODING) {
- link->hpo_dp_link_enc->funcs->set_ffe(
- link->hpo_dp_link_enc,
+ link_res->hpo_dp_link_enc->funcs->set_ffe(
+ link_res->hpo_dp_link_enc,
&link_settings->link_settings,
link_settings->lane_settings[0].FFE_PRESET.raw);
} else if (dp_get_link_encoding_format(&link_settings->link_settings)
@@ -379,6 +384,7 @@ void dp_set_hw_lane_settings(
void dp_set_hw_test_pattern(
struct dc_link *link,
+ const struct link_resource *link_res,
enum dp_test_pattern test_pattern,
uint8_t *custom_pattern,
uint32_t custom_pattern_size)
@@ -406,8 +412,8 @@ void dp_set_hw_test_pattern(
#if defined(CONFIG_DRM_AMD_DC_DCN)
switch (link_encoding_format) {
case DP_128b_132b_ENCODING:
- link->hpo_dp_link_enc->funcs->set_link_test_pattern(
- link->hpo_dp_link_enc, &pattern_param);
+ link_res->hpo_dp_link_enc->funcs->set_link_test_pattern(
+ link_res->hpo_dp_link_enc, &pattern_param);
break;
case DP_8b_10b_ENCODING:
ASSERT(encoder);
@@ -446,7 +452,7 @@ void dp_retrain_link_dp_test(struct dc_link *link,
pipes[i].stream_res.stream_enc);
/* disable any test pattern that might be active */
- dp_set_hw_test_pattern(link,
+ dp_set_hw_test_pattern(link, &pipes[i].link_res,
DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
dp_receiver_power_ctrl(link, false);
@@ -763,7 +769,9 @@ static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
}
}
-void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *link_settings)
+void enable_dp_hpo_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct dc_link_settings *link_settings)
{
const struct dc *dc = link->dc;
enum phyd32clk_clock_source phyd32clk;
@@ -789,10 +797,11 @@ void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *l
}
} else {
/* DP2.0 HW: call transmitter control to enable PHY */
- link->hpo_dp_link_enc->funcs->enable_link_phy(
- link->hpo_dp_link_enc,
+ link_res->hpo_dp_link_enc->funcs->enable_link_phy(
+ link_res->hpo_dp_link_enc,
link_settings,
- link->link_enc->transmitter);
+ link->link_enc->transmitter,
+ link->link_enc->hpd_source);
}
/* DCCG muxing and DTBCLK DTO */
@@ -806,24 +815,26 @@ void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *l
phyd32clk = get_phyd32clk_src(link);
dc->res_pool->dccg->funcs->enable_symclk32_le(
dc->res_pool->dccg,
- link->hpo_dp_link_enc->inst,
+ link_res->hpo_dp_link_enc->inst,
phyd32clk);
- link->hpo_dp_link_enc->funcs->link_enable(
- link->hpo_dp_link_enc,
- link_settings->lane_count);
+ link_res->hpo_dp_link_enc->funcs->link_enable(
+ link_res->hpo_dp_link_enc,
+ link_settings->lane_count);
}
}
-void disable_dp_hpo_output(struct dc_link *link, enum signal_type signal)
+void disable_dp_hpo_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal)
{
const struct dc *dc = link->dc;
- link->hpo_dp_link_enc->funcs->link_disable(link->hpo_dp_link_enc);
+ link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc);
if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
dc->res_pool->dccg->funcs->disable_symclk32_le(
dc->res_pool->dccg,
- link->hpo_dp_link_enc->inst);
+ link_res->hpo_dp_link_enc->inst);
dc->res_pool->dccg->funcs->set_physymclk(
dc->res_pool->dccg,
@@ -834,8 +845,8 @@ void disable_dp_hpo_output(struct dc_link *link, enum signal_type signal)
dm_set_phyd32clk(dc->ctx, 0);
} else {
/* DP2.0 HW: call transmitter control to disable PHY */
- link->hpo_dp_link_enc->funcs->disable_link_phy(
- link->hpo_dp_link_enc,
+ link_res->hpo_dp_link_enc->funcs->disable_link_phy(
+ link_res->hpo_dp_link_enc,
signal);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index c32fdccd4d92..eaeef72773f6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1664,6 +1664,10 @@ bool dc_is_stream_unchanged(
if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
return false;
+ // Only audio is left to check whether it is the same or not. This is a corner case for tiled sinks
+ if (old_stream->audio_info.mode_count != stream->audio_info.mode_count)
+ return false;
+
return true;
}
@@ -1720,6 +1724,94 @@ static void update_hpo_dp_stream_engine_usage(
res_ctx->is_hpo_dp_stream_enc_acquired[i] = acquired;
}
}
+
+static inline int find_acquired_hpo_dp_link_enc_for_link(
+ const struct resource_context *res_ctx,
+ const struct dc_link *link)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(res_ctx->hpo_dp_link_enc_to_link_idx); i++)
+ if (res_ctx->hpo_dp_link_enc_ref_cnts[i] > 0 &&
+ res_ctx->hpo_dp_link_enc_to_link_idx[i] == link->link_index)
+ return i;
+
+ return -1;
+}
+
+static inline int find_free_hpo_dp_link_enc(const struct resource_context *res_ctx,
+ const struct resource_pool *pool)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(res_ctx->hpo_dp_link_enc_ref_cnts); i++)
+ if (res_ctx->hpo_dp_link_enc_ref_cnts[i] == 0)
+ break;
+
+ return (i < ARRAY_SIZE(res_ctx->hpo_dp_link_enc_ref_cnts) &&
+ i < pool->hpo_dp_link_enc_count) ? i : -1;
+}
+
+static inline void acquire_hpo_dp_link_enc(
+ struct resource_context *res_ctx,
+ unsigned int link_index,
+ int enc_index)
+{
+ res_ctx->hpo_dp_link_enc_to_link_idx[enc_index] = link_index;
+ res_ctx->hpo_dp_link_enc_ref_cnts[enc_index] = 1;
+}
+
+static inline void retain_hpo_dp_link_enc(
+ struct resource_context *res_ctx,
+ int enc_index)
+{
+ res_ctx->hpo_dp_link_enc_ref_cnts[enc_index]++;
+}
+
+static inline void release_hpo_dp_link_enc(
+ struct resource_context *res_ctx,
+ int enc_index)
+{
+ ASSERT(res_ctx->hpo_dp_link_enc_ref_cnts[enc_index] > 0);
+ res_ctx->hpo_dp_link_enc_ref_cnts[enc_index]--;
+}
+
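+/* Assign an HPO DP link encoder to the pipe: reuse the encoder already
+ * acquired for the stream's link if one exists, otherwise acquire a free
+ * encoder from the pool. Returns false if no encoder could be assigned.
+ */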
+static bool add_hpo_dp_link_enc_to_ctx(struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_state *stream)
+{
+ int enc_index;
+
+ enc_index = find_acquired_hpo_dp_link_enc_for_link(res_ctx, stream->link);
+
+ if (enc_index >= 0) {
+ retain_hpo_dp_link_enc(res_ctx, enc_index);
+ } else {
+ enc_index = find_free_hpo_dp_link_enc(res_ctx, pool);
+ if (enc_index >= 0)
+ acquire_hpo_dp_link_enc(res_ctx, stream->link->link_index, enc_index);
+ }
+
+ if (enc_index >= 0)
+ pipe_ctx->link_res.hpo_dp_link_enc = pool->hpo_dp_link_enc[enc_index];
+
+ return pipe_ctx->link_res.hpo_dp_link_enc != NULL;
+}
+
+static void remove_hpo_dp_link_enc_from_ctx(struct resource_context *res_ctx,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_state *stream)
+{
+ int enc_index;
+
+ enc_index = find_acquired_hpo_dp_link_enc_for_link(res_ctx, stream->link);
+
+ if (enc_index >= 0) {
+ release_hpo_dp_link_enc(res_ctx, enc_index);
+ pipe_ctx->link_res.hpo_dp_link_enc = NULL;
+ }
+}
#endif
/* TODO: release audio object */
@@ -1882,6 +1974,7 @@ enum dc_status dc_remove_stream_from_ctx(
&new_ctx->res_ctx, dc->res_pool,
del_pipe->stream_res.hpo_dp_stream_enc,
false);
+ remove_hpo_dp_link_enc_from_ctx(&new_ctx->res_ctx, del_pipe, del_pipe->stream);
}
#endif
@@ -2078,7 +2171,6 @@ static void mark_seamless_boot_stream(
{
struct dc_bios *dcb = dc->ctx->dc_bios;
- /* TODO: Check Linux */
if (dc->config.allow_seamless_boot_optimization &&
!dcb->funcs->is_accelerated_mode(dcb)) {
if (dc_validate_seamless_boot_timing(dc, stream->sink, &stream->timing))
@@ -2158,6 +2250,8 @@ enum dc_status resource_map_pool_resources(
&context->res_ctx, pool,
pipe_ctx->stream_res.hpo_dp_stream_enc,
true);
+ if (!add_hpo_dp_link_enc_to_ctx(&context->res_ctx, pool, pipe_ctx, stream))
+ return DC_NO_LINK_ENC_RESOURCE;
}
}
#endif
@@ -2224,6 +2318,9 @@ void dc_resource_state_construct(
struct dc_state *dst_ctx)
{
dst_ctx->clk_mgr = dc->clk_mgr;
+
+ /* Initialise DIG link encoder resource tracking variables. */
+ link_enc_cfg_init(dc, dst_ctx);
}
@@ -2252,16 +2349,6 @@ enum dc_status dc_validate_global_state(
if (!new_ctx)
return DC_ERROR_UNEXPECTED;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-
- /*
- * Update link encoder to stream assignment.
- * TODO: Split out reason allocation from validation.
- */
- if (dc->res_pool->funcs->link_encs_assign && fast_validate == false)
- dc->res_pool->funcs->link_encs_assign(
- dc, new_ctx, new_ctx->streams, new_ctx->stream_count);
-#endif
if (dc->res_pool->funcs->validate_global) {
result = dc->res_pool->funcs->validate_global(dc, new_ctx);
@@ -2313,6 +2400,16 @@ enum dc_status dc_validate_global_state(
if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate))
result = DC_FAIL_BANDWIDTH_VALIDATE;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /*
+ * Only update link encoder to stream assignment after bandwidth validation has passed.
+ * TODO: Split out assignment and validation.
+ */
+ if (result == DC_OK && dc->res_pool->funcs->link_encs_assign && fast_validate == false)
+ dc->res_pool->funcs->link_encs_assign(
+ dc, new_ctx, new_ctx->streams, new_ctx->stream_count);
+#endif
+
return result;
}
@@ -2506,17 +2603,7 @@ static void set_avi_info_frame(
/* TODO : We should handle YCC quantization */
/* but we do not have matrix calculation */
- if (stream->qy_bit == 1) {
- if (color_space == COLOR_SPACE_SRGB ||
- color_space == COLOR_SPACE_2020_RGB_FULLRANGE)
- hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
- else if (color_space == COLOR_SPACE_SRGB_LIMITED ||
- color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE)
- hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
- else
- hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
- } else
- hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
+ hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
///VIC
format = stream->timing.timing_3d_format;
@@ -2840,6 +2927,8 @@ bool pipe_need_reprogram(
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (pipe_ctx_old->stream_res.hpo_dp_stream_enc != pipe_ctx->stream_res.hpo_dp_stream_enc)
return true;
+ if (pipe_ctx_old->link_res.hpo_dp_link_enc != pipe_ctx->link_res.hpo_dp_link_enc)
+ return true;
#endif
/* DIG link encoder resource assignment for stream changed. */
@@ -3108,21 +3197,76 @@ void get_audio_check(struct audio_info *aud_modes,
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
-struct hpo_dp_link_encoder *resource_get_unused_hpo_dp_link_encoder(
- const struct resource_pool *pool)
+struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt(
+ const struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ const struct dc_link *link)
{
- uint8_t i;
- struct hpo_dp_link_encoder *enc = NULL;
+ struct hpo_dp_link_encoder *hpo_dp_link_enc = NULL;
+ int enc_index;
- ASSERT(pool->hpo_dp_link_enc_count <= MAX_HPO_DP2_LINK_ENCODERS);
+ enc_index = find_acquired_hpo_dp_link_enc_for_link(res_ctx, link);
- for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
- if (pool->hpo_dp_link_enc[i]->transmitter == TRANSMITTER_UNKNOWN) {
- enc = pool->hpo_dp_link_enc[i];
- break;
+ if (enc_index < 0)
+ enc_index = find_free_hpo_dp_link_enc(res_ctx, pool);
+
+ if (enc_index >= 0)
+ hpo_dp_link_enc = pool->hpo_dp_link_enc[enc_index];
+
+ return hpo_dp_link_enc;
+}
+#endif
+
+void reset_syncd_pipes_from_disabled_pipes(struct dc *dc,
+ struct dc_state *context)
+{
+ int i, j;
+ struct pipe_ctx *pipe_ctx_old, *pipe_ctx, *pipe_ctx_syncd;
+
+ /* If pipe backend is reset, need to reset pipe syncd status */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe_ctx_old = &dc->current_state->res_ctx.pipe_ctx[i];
+ pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx_old->stream)
+ continue;
+
+ if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
+ continue;
+
+ if (!pipe_ctx->stream ||
+ pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
+
+ /* Reset all the syncd pipes from the disabled pipe */
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ pipe_ctx_syncd = &context->res_ctx.pipe_ctx[j];
+ if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_syncd) == pipe_ctx_old->pipe_idx) ||
+ !IS_PIPE_SYNCD_VALID(pipe_ctx_syncd))
+ SET_PIPE_SYNCD_TO_PIPE(pipe_ctx_syncd, j);
+ }
}
}
+}
+
+void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
+ struct dc_state *context,
+ uint8_t disabled_master_pipe_idx)
+{
+ int i;
+ struct pipe_ctx *pipe_ctx, *pipe_ctx_check;
+
+ pipe_ctx = &context->res_ctx.pipe_ctx[disabled_master_pipe_idx];
+ if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx) != disabled_master_pipe_idx) ||
+ !IS_PIPE_SYNCD_VALID(pipe_ctx))
+ SET_PIPE_SYNCD_TO_PIPE(pipe_ctx, disabled_master_pipe_idx);
- return enc;
+ /* For the disabled pipe, check whether any slave pipe is still synced to it and report an error */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe_ctx_check = &context->res_ctx.pipe_ctx[i];
+
+ if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_check) == disabled_master_pipe_idx) &&
+ IS_PIPE_SYNCD_VALID(pipe_ctx_check) && (i != disabled_master_pipe_idx))
+ DC_ERR("DC: Failure: pipe_idx[%d] syncd with disabled master pipe_idx[%d]\n",
+ i, disabled_master_pipe_idx);
+ }
}
-#endif
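
The hunk above replaces the old "grab any unused encoder" lookup with a small reference-counted pool keyed by link index, so several streams on the same link share one HPO DP link encoder. Below is a minimal userspace sketch of that acquire/retain/release flow; the pool size, names and main() driver are illustrative, not the driver's code.

/* Minimal sketch of the reference-counted encoder pool.
 * Pool size and names are illustrative, not taken from the driver. */
#include <stdio.h>

#define NUM_ENCS 2

static int ref_cnt[NUM_ENCS];
static unsigned int enc_to_link[NUM_ENCS];

/* Return the encoder already bound to this link, or -1. */
static int find_acquired(unsigned int link_idx)
{
    for (int i = 0; i < NUM_ENCS; i++)
        if (ref_cnt[i] > 0 && enc_to_link[i] == link_idx)
            return i;
    return -1;
}

/* Acquire an encoder for a link: reuse and retain if one is already
 * bound, otherwise take the first free slot. Returns -1 when the
 * pool is exhausted. */
static int acquire(unsigned int link_idx)
{
    int i = find_acquired(link_idx);

    if (i >= 0) {
        ref_cnt[i]++;          /* retain */
        return i;
    }
    for (i = 0; i < NUM_ENCS; i++) {
        if (ref_cnt[i] == 0) { /* free slot */
            enc_to_link[i] = link_idx;
            ref_cnt[i] = 1;
            return i;
        }
    }
    return -1;
}

static void release(int enc_idx)
{
    if (enc_idx >= 0 && ref_cnt[enc_idx] > 0)
        ref_cnt[enc_idx]--;
}

int main(void)
{
    int a = acquire(0);  /* first stream on link 0 */
    int b = acquire(0);  /* second stream on the same link reuses it */
    int c = acquire(1);  /* a different link gets its own encoder */

    printf("%d %d %d refs=%d\n", a, b, c, ref_cnt[a]); /* 0 0 1 refs=2 */
    release(b);
    release(a);
    release(c);
    return 0;
}
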
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
index a249a0e5edd0..4b5e4d8e7735 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
@@ -33,14 +33,6 @@
* Private functions
******************************************************************************/
-static void dc_sink_destruct(struct dc_sink *sink)
-{
- if (sink->dc_container_id) {
- kfree(sink->dc_container_id);
- sink->dc_container_id = NULL;
- }
-}
-
static bool dc_sink_construct(struct dc_sink *sink, const struct dc_sink_init_data *init_params)
{
@@ -75,7 +67,7 @@ void dc_sink_retain(struct dc_sink *sink)
static void dc_sink_free(struct kref *kref)
{
struct dc_sink *sink = container_of(kref, struct dc_sink, refcount);
- dc_sink_destruct(sink);
+ kfree(sink->dc_container_id);
kfree(sink);
}
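
kfree(), like free(), is specified to ignore a NULL pointer, which is why the guarded dc_sink_destruct() wrapper above could be collapsed into a single kfree() call. A small userspace analogue of the same simplification, with invented names:

/* free(NULL) (like kfree(NULL)) is a defined no-op, so the guarded
 * destructor collapses to a single call. */
#include <stdlib.h>

struct sink { char *container_id; };

static void sink_free(struct sink *s)
{
    free(s->container_id); /* safe even when container_id is NULL */
    free(s);
}

int main(void)
{
    struct sink *s = calloc(1, sizeof(*s));
    if (s)
        sink_free(s);
    return 0;
}
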
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 3aac3f4a2852..288e7b01f561 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.160"
+#define DC_VER "3.2.167"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -75,6 +75,16 @@ enum dc_plane_type {
DC_PLANE_TYPE_DCN_UNIVERSAL,
};
+// Sizes defined as multiples of 64KB
+enum det_size {
+ DET_SIZE_DEFAULT = 0,
+ DET_SIZE_192KB = 3,
+ DET_SIZE_256KB = 4,
+ DET_SIZE_320KB = 5,
+ DET_SIZE_384KB = 6
+};
+
+
struct dc_plane_cap {
enum dc_plane_type type;
uint32_t blends_with_above : 1;
@@ -187,7 +197,9 @@ struct dc_caps {
struct dc_color_caps color;
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool dp_hpo;
+ bool hdmi_frl_pcon_support;
#endif
+ bool edp_dsc_support;
bool vbios_lttpr_aware;
bool vbios_lttpr_enable;
};
@@ -332,6 +344,7 @@ struct dc_config {
uint8_t vblank_alignment_max_frame_time_diff;
bool is_asymmetric_memory;
bool is_single_rank_dimm;
+ bool use_pipe_ctx_sync_logic;
};
enum visual_confirm {
@@ -508,7 +521,9 @@ union dpia_debug_options {
uint32_t disable_dpia:1;
uint32_t force_non_lttpr:1;
uint32_t extend_aux_rd_interval:1;
- uint32_t reserved:29;
+ uint32_t disable_mst_dsc_work_around:1;
+ uint32_t hpd_delay_in_ms:12;
+ uint32_t reserved:16;
} bits;
uint32_t raw;
};
@@ -573,6 +588,8 @@ struct dc_debug_options {
bool native422_support;
bool disable_dsc;
enum visual_confirm visual_confirm;
+ int visual_confirm_rect_height;
+
bool sanity_checks;
bool max_disp_clk;
bool surface_trace;
@@ -667,11 +684,15 @@ struct dc_debug_options {
bool validate_dml_output;
bool enable_dmcub_surface_flip;
bool usbc_combo_phy_reset_wa;
+ bool disable_dsc_edp;
+ unsigned int force_dsc_edp_policy;
bool enable_dram_clock_change_one_display_vactive;
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* TODO - remove once tested */
bool legacy_dp2_lt;
bool set_mst_en_for_sst;
+ bool disable_uhbr;
+ bool force_dp2_lt_fallback_method;
#endif
union mem_low_power_enable_options enable_mem_low_power;
union root_clock_optimization_options root_clock_optimization;
@@ -684,11 +705,14 @@ struct dc_debug_options {
/* FEC/PSR1 sequence enable delay in 100us */
uint8_t fec_enable_delay_in100us;
bool enable_driver_sequence_debug;
+ enum det_size crb_alloc_policy;
+ int crb_alloc_policy_min_disp_count;
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool disable_z10;
bool enable_sw_cntl_psr;
union dpia_debug_options dpia_debug;
#endif
+ bool apply_vendor_specific_lttpr_wa;
};
struct gpu_info_soc_bounding_box_v1_0;
@@ -1289,6 +1313,11 @@ struct dc_sink_dsc_caps {
// 'true' if these are virtual DPCD's DSC caps (immediately upstream of sink in MST topology),
// 'false' if they are sink's DSC caps
bool is_virtual_dpcd_dsc;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ // 'true' if MST topology supports DSC passthrough for sink
+ // 'false' if MST topology does not support DSC passthrough
+ bool is_dsc_passthrough_supported;
+#endif
struct dsc_dec_dpcd_caps dsc_dec_caps;
};
@@ -1404,6 +1433,9 @@ void dc_unlock_memory_clock_frequency(struct dc *dc);
*/
void dc_lock_memory_clock_frequency(struct dc *dc);
+/* set soft max for memclk, to be used for AC/DC switching clock limitations */
+void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable);
+
/* cleanup on driver unload */
void dc_hardware_release(struct dc *dc);
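
The dpia_debug union above repartitions the reserved bits so the new fields still pack into one 32-bit register-style word (1+1+1+1+12+16 = 32), and det_size encodes buffer sizes as multiples of 64KB. A compilable sketch of both conventions follows; field names mirror the header, everything else is illustrative.

/* Union-of-bitfields pattern for dpia_debug and the DET size encoding
 * (value * 64KB). The widths summing to 32 bits is the point. */
#include <stdint.h>
#include <stdio.h>

union dpia_debug_sketch {
    struct {
        uint32_t disable_dpia:1;
        uint32_t force_non_lttpr:1;
        uint32_t extend_aux_rd_interval:1;
        uint32_t disable_mst_dsc_work_around:1;
        uint32_t hpd_delay_in_ms:12;   /* up to 4095 ms */
        uint32_t reserved:16;          /* 1+1+1+1+12+16 == 32 */
    } bits;
    uint32_t raw;
};

/* DET sizes are encoded as multiples of 64KB, 0 meaning "default". */
static unsigned int det_size_kb(unsigned int enc)
{
    return enc * 64; /* e.g. 3 -> 192KB, 6 -> 384KB */
}

int main(void)
{
    union dpia_debug_sketch dbg = { .raw = 0 };

    dbg.bits.hpd_delay_in_ms = 100;
    printf("raw=0x%08x det(6)=%uKB\n", dbg.raw, det_size_kb(6));
    return 0;
}
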
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 360f3199ea6f..541376fabbef 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -115,13 +115,44 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
}
}
+void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv)
+{
+ struct dmub_srv *dmub = dmub_srv->dmub;
+ struct dc_context *dc_ctx = dmub_srv->ctx;
+ enum dmub_status status = DMUB_STATUS_OK;
+
+ status = dmub_srv_clear_inbox0_ack(dmub);
+ if (status != DMUB_STATUS_OK) {
+ DC_ERROR("Error clearing INBOX0 ack: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dmub_srv);
+ }
+}
+
+void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv)
+{
+ struct dmub_srv *dmub = dmub_srv->dmub;
+ struct dc_context *dc_ctx = dmub_srv->ctx;
+ enum dmub_status status = DMUB_STATUS_OK;
+
+ status = dmub_srv_wait_for_inbox0_ack(dmub, 100000);
+ if (status != DMUB_STATUS_OK) {
+ DC_ERROR("Error waiting for INBOX0 HW Lock Ack\n");
+ dc_dmub_srv_log_diagnostic_data(dmub_srv);
+ }
+}
+
void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
union dmub_inbox0_data_register data)
{
struct dmub_srv *dmub = dmub_srv->dmub;
- if (dmub->hw_funcs.send_inbox0_cmd)
- dmub->hw_funcs.send_inbox0_cmd(dmub, data);
- // TODO: Add wait command -- poll register for ACK
+ struct dc_context *dc_ctx = dmub_srv->ctx;
+ enum dmub_status status = DMUB_STATUS_OK;
+
+ status = dmub_srv_send_inbox0_cmd(dmub, data);
+ if (status != DMUB_STATUS_OK) {
+ DC_ERROR("Error sending INBOX0 cmd\n");
+ dc_dmub_srv_log_diagnostic_data(dmub_srv);
+ }
}
bool dc_dmub_srv_cmd_with_reply_data(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd)
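
The new INBOX0 helpers give the sender a clear-ack / send / wait-ack handshake, so a stale acknowledgement from a previous command cannot be mistaken for the new one. A toy model of that polling handshake follows; the fake register, poll count and names are assumptions, not the DMUB interface.

/* Clear-ack / send / wait-ack handshake, modelled in userspace. */
#include <stdbool.h>
#include <stdio.h>

static volatile unsigned int inbox0_ack; /* stands in for the HW ack bit */

static void clear_ack(void)        { inbox0_ack = 0; }
static void send_cmd(unsigned cmd) { (void)cmd; inbox0_ack = 1; /* fw acks */ }

static bool wait_ack(unsigned int max_polls)
{
    while (max_polls--) {
        if (inbox0_ack)
            return true;
        /* a real implementation would delay between polls */
    }
    return false;
}

int main(void)
{
    clear_ack();                 /* make sure a stale ack is not consumed */
    send_cmd(0x42);
    if (!wait_ack(100000))
        fprintf(stderr, "INBOX0 ack timeout\n");
    return 0;
}
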
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 3e35eee7188c..7e4e2ec5915d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -68,6 +68,8 @@ bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_bu
void dc_dmub_trace_event_control(struct dc *dc, bool enable);
+void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv);
+void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv);
void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_data_register data);
bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *dmub_oca);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index e68e9a86a4d9..353dac420f34 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -378,7 +378,14 @@ enum dpcd_downstream_port_detailed_type {
union dwnstream_port_caps_byte2 {
struct {
uint8_t MAX_BITS_PER_COLOR_COMPONENT:2;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ uint8_t MAX_ENCODED_LINK_BW_SUPPORT:3;
+ uint8_t SOURCE_CONTROL_MODE_SUPPORT:1;
+ uint8_t CONCURRENT_LINK_BRING_UP_SEQ_SUPPORT:1;
+ uint8_t RESERVED:1;
+#else
uint8_t RESERVED:6;
+#endif
} bits;
uint8_t raw;
};
@@ -416,6 +423,30 @@ union dwnstream_port_caps_byte3_hdmi {
uint8_t raw;
};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+union hdmi_sink_encoded_link_bw_support {
+ struct {
+ uint8_t HDMI_SINK_ENCODED_LINK_BW_SUPPORT:3;
+ uint8_t RESERVED:5;
+ } bits;
+ uint8_t raw;
+};
+
+union hdmi_encoded_link_bw {
+ struct {
+ uint8_t FRL_MODE:1; // Bit 0
+ uint8_t BW_9Gbps:1;
+ uint8_t BW_18Gbps:1;
+ uint8_t BW_24Gbps:1;
+ uint8_t BW_32Gbps:1;
+ uint8_t BW_40Gbps:1;
+ uint8_t BW_48Gbps:1;
+ uint8_t RESERVED:1; // Bit 7
+ } bits;
+ uint8_t raw;
+};
+#endif
+
/*4-byte structure for detailed capabilities of a down-stream port
(DP-to-TMDS converter).*/
union dwnstream_portxcaps {
@@ -852,6 +883,15 @@ struct psr_caps {
unsigned char psr_version;
unsigned int psr_rfb_setup_time;
bool psr_exit_link_training_required;
+ unsigned char edp_revision;
+ unsigned char support_ver;
+ bool su_granularity_required;
+ bool y_coordinate_required;
+ uint8_t su_y_granularity;
+ bool alpm_cap;
+ bool standby_support;
+ uint8_t rate_control_caps;
+ unsigned int psr_power_opt_flag;
};
/* Length of router topology ID read from DPCD in bytes. */
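
hdmi_encoded_link_bw above exposes one bit per supported FRL rate (9 through 48 Gbps). A plausible decode, in the spirit of the dc_link_bw_kbps_from_raw_frl_link_rate_data() declaration added later, picks the highest set rate; the kbps table and function below are illustrative assumptions, not the driver's mapping.

/* Decode the encoded FRL link bandwidth byte to kbps (illustrative). */
#include <stdint.h>
#include <stdio.h>

static uint32_t frl_bw_kbps_from_raw(uint8_t raw)
{
    /* bits 1..6 correspond to 9/18/24/32/40/48 Gbps FRL rates */
    static const uint32_t kbps[] = {
        0, 9000000, 18000000, 24000000, 32000000, 40000000, 48000000
    };
    uint32_t best = 0;

    for (int bit = 1; bit <= 6; bit++)
        if (raw & (1u << bit))
            best = kbps[bit];
    return best;
}

int main(void)
{
    /* FRL_MODE | BW_9Gbps | BW_18Gbps set -> report 18 Gbps */
    printf("%u kbps\n", frl_bw_kbps_from_raw(0x07));
    return 0;
}
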
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 52355fe6994c..eac34f591a3f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -741,6 +741,9 @@ struct dc_dsc_config {
uint32_t version_minor; /* DSC minor version. Full version is formed as 1.version_minor. */
bool ycbcr422_simple; /* Tell DSC engine to convert YCbCr 4:2:2 to 'YCbCr 4:2:2 simple'. */
int32_t rc_buffer_size; /* DSC RC buffer block size in bytes */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ bool is_frl; /* indicate if DSC is applied based on HDMI FRL sink's capability */
+#endif
bool is_dp; /* indicate if DSC is applied based on DP's capability */
};
struct dc_crtc_timing {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 180ecd860296..c0e37ad0e26c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -30,6 +30,8 @@
#include "dc_types.h"
#include "grph_object_defs.h"
+struct link_resource;
+
enum dc_link_fec_state {
dc_link_fec_not_ready,
dc_link_fec_ready,
@@ -113,6 +115,7 @@ struct dc_link {
* DIG encoder. */
bool is_dig_mapping_flexible;
bool hpd_status; /* HPD status of link without physical HPD pin. */
+ bool is_hpd_pending; /* Indicates a newly received HPD */
bool edp_sink_present;
@@ -159,9 +162,6 @@ struct dc_link {
struct panel_cntl *panel_cntl;
struct link_encoder *link_enc;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- struct hpo_dp_link_encoder *hpo_dp_link_enc;
-#endif
struct graphics_object_id link_id;
/* Endpoint type distinguishes display endpoints which do not have entries
* in the BIOS connector table from those that do. Helps when tracking link
@@ -185,12 +185,18 @@ struct dc_link {
/* Drive settings read from integrated info table */
struct dc_lane_settings bios_forced_drive_settings;
+ /* Vendor specific LTTPR workaround variables */
+ uint8_t vendor_specific_lttpr_link_rate_wa;
+ bool apply_vendor_specific_lttpr_link_rate_wa;
+
/* MST record stream using this link */
struct link_flags {
bool dp_keep_receiver_powered;
bool dp_skip_DID2;
bool dp_skip_reset_segment;
bool dp_mot_reset_segment;
+ /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */
+ bool dpia_mst_dsc_always_on;
} wa_flags;
struct link_mst_stream_allocation_table mst_stream_alloc_table;
@@ -224,6 +230,8 @@ static inline void get_edp_links(const struct dc *dc,
*edp_num = 0;
for (i = 0; i < dc->link_count; i++) {
// report any eDP links, even unconnected DDI's
+ if (!dc->links[i])
+ continue;
if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) {
edp_links[*edp_num] = dc->links[i];
if (++(*edp_num) == MAX_NUM_EDP)
@@ -287,6 +295,10 @@ bool dc_link_setup_psr(struct dc_link *dc_link,
void dc_link_get_psr_residency(const struct dc_link *link, uint32_t *residency);
+void dc_link_blank_all_dp_displays(struct dc *dc);
+
+void dc_link_blank_dp_stream(struct dc_link *link, bool hw_init);
+
/* Request DC to detect if there is a Panel connected.
* boot - If this call is during initial boot.
* Return false for any type of detection failure or MST detection
@@ -298,7 +310,7 @@ enum dc_detect_reason {
DETECT_REASON_HPD,
DETECT_REASON_HPDRX,
DETECT_REASON_FALLBACK,
- DETECT_REASON_RETRAIN
+ DETECT_REASON_RETRAIN,
};
bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
@@ -346,14 +358,17 @@ void dc_link_remove_remote_sink(
void dc_link_dp_set_drive_settings(
struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings);
bool dc_link_dp_perform_link_training_skip_aux(
struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_setting);
enum link_training_result dc_link_dp_perform_link_training(
struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_settings,
bool skip_video_pattern);
@@ -361,6 +376,7 @@ bool dc_link_dp_sync_lt_begin(struct dc_link *link);
enum link_training_result dc_link_dp_sync_lt_attempt(
struct dc_link *link,
+ const struct link_resource *link_res,
struct dc_link_settings *link_setting,
struct dc_link_training_overrides *lt_settings);
@@ -438,6 +454,13 @@ bool dc_link_is_fec_supported(const struct dc_link *link);
bool dc_link_should_enable_fec(const struct dc_link *link);
#if defined(CONFIG_DRM_AMD_DC_DCN)
+uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw);
enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link);
#endif
+
+const struct link_resource *dc_link_get_cur_link_res(const struct dc_link *link);
+/* take a snapshot of current link resource allocation state */
+void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map);
+/* restore link resource allocation state from a snapshot */
+void dc_restore_link_res_map(const struct dc *dc, uint32_t *map);
#endif /* DC_LINK_H_ */
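
dc_get_cur_link_res_map() and dc_restore_link_res_map() let callers snapshot and restore link resource assignments around events such as suspend. The round trip below assumes a one-bit-per-link encoding purely for illustration; the real map layout is not specified here.

/* Snapshot/restore of per-link resource state into a 32-bit map
 * (one bit per link is an assumed encoding). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_LINKS 6

static void get_map(const bool *has_hpo_enc, uint32_t *map)
{
    *map = 0;
    for (int i = 0; i < MAX_LINKS; i++)
        if (has_hpo_enc[i])
            *map |= 1u << i;
}

static void restore_map(uint32_t map, bool *has_hpo_enc)
{
    for (int i = 0; i < MAX_LINKS; i++)
        has_hpo_enc[i] = !!(map & (1u << i));
}

int main(void)
{
    bool state[MAX_LINKS] = { true, false, true };
    bool restored[MAX_LINKS];
    uint32_t map;

    get_map(state, &map);       /* snapshot before suspend/reset */
    restore_map(map, restored); /* restore afterwards */
    printf("map=0x%x link0=%d link2=%d\n", map, restored[0], restored[2]);
    return 0;
}
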
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 388457ffc0a8..0285a4b38d05 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -430,6 +430,7 @@ struct dc_dongle_caps {
uint32_t dp_hdmi_max_bpc;
uint32_t dp_hdmi_max_pixel_clk_in_khz;
#if defined(CONFIG_DRM_AMD_DC_DCN)
+ uint32_t dp_hdmi_frl_max_link_bw_in_kbps;
struct dc_dongle_dfp_cap_ext dfp_cap_ext;
#endif
};
@@ -950,6 +951,7 @@ enum dc_gpu_mem_alloc_type {
enum dc_psr_version {
DC_PSR_VERSION_1 = 0,
+ DC_PSR_VERSION_SU_1 = 1,
DC_PSR_VERSION_UNSUPPORTED = 0xFFFFFFFF,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index 27218ede150a..70eaac017624 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -67,9 +67,6 @@ static void write_indirect_azalia_reg(struct audio *audio,
/* AZALIA_F0_CODEC_ENDPOINT_DATA endpoint data */
REG_SET(AZALIA_F0_CODEC_ENDPOINT_DATA, 0,
AZALIA_ENDPOINT_REG_DATA, reg_data);
-
- DC_LOG_HW_AUDIO("AUDIO:write_indirect_azalia_reg: index: %u data: %u\n",
- reg_index, reg_data);
}
static uint32_t read_indirect_azalia_reg(struct audio *audio, uint32_t reg_index)
@@ -85,9 +82,6 @@ static uint32_t read_indirect_azalia_reg(struct audio *audio, uint32_t reg_index
/* AZALIA_F0_CODEC_ENDPOINT_DATA endpoint data */
value = REG_READ(AZALIA_F0_CODEC_ENDPOINT_DATA);
- DC_LOG_HW_AUDIO("AUDIO:read_indirect_azalia_reg: index: %u data: %u\n",
- reg_index, value);
-
return value;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
index 5622d5e32d81..dbd2cfed0603 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
@@ -113,6 +113,7 @@ struct dce_audio_shift {
uint8_t DCCG_AUDIO_DTO2_USE_512FBR_DTO;
uint32_t DCCG_AUDIO_DTO0_USE_512FBR_DTO;
uint32_t DCCG_AUDIO_DTO1_USE_512FBR_DTO;
+ uint32_t CLOCK_GATING_DISABLE;
};
struct dce_audio_mask {
@@ -132,6 +133,7 @@ struct dce_audio_mask {
uint32_t DCCG_AUDIO_DTO2_USE_512FBR_DTO;
uint32_t DCCG_AUDIO_DTO0_USE_512FBR_DTO;
uint32_t DCCG_AUDIO_DTO1_USE_512FBR_DTO;
+ uint32_t CLOCK_GATING_DISABLE;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 1e77ffee71b3..f1c61d5aee6c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -788,8 +788,9 @@ static bool dce110_link_encoder_validate_hdmi_output(
crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
return false;
- if (!enc110->base.features.flags.bits.HDMI_6GB_EN &&
- adjusted_pix_clk_khz >= 300000)
+ if ((!enc110->base.features.flags.bits.HDMI_6GB_EN ||
+ enc110->base.ctx->dc->debug.hdmi20_disable) &&
+ adjusted_pix_clk_khz >= 300000)
return false;
if (enc110->base.ctx->dc->debug.hdmi20_disable &&
crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index 9baf8ca0a920..b1b2e3c6f379 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -56,8 +56,11 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
union dmub_inbox0_cmd_lock_hw hw_lock_cmd)
{
union dmub_inbox0_data_register data = { 0 };
+
data.inbox0_cmd_lock_hw = hw_lock_cmd;
+ dc_dmub_srv_clear_inbox0_ack(dmub_srv);
dc_dmub_srv_send_inbox0_cmd(dmub_srv, data);
+ dc_dmub_srv_wait_for_inbox0_ack(dmub_srv);
}
bool should_use_dmub_lock(struct dc_link *link)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 90eb8eedacf2..87ed48d5530d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -230,7 +230,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_
/**
* Set PSR power optimization flags.
*/
-static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt)
+static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt, uint8_t panel_inst)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
@@ -239,7 +239,9 @@ static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt
cmd.psr_set_power_opt.header.type = DMUB_CMD__PSR;
cmd.psr_set_power_opt.header.sub_type = DMUB_CMD__SET_PSR_POWER_OPT;
cmd.psr_set_power_opt.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_power_opt_data);
+ cmd.psr_set_power_opt.psr_set_power_opt_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
cmd.psr_set_power_opt.psr_set_power_opt_data.power_opt = power_opt;
+ cmd.psr_set_power_opt.psr_set_power_opt_data.panel_inst = panel_inst;
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
@@ -327,6 +329,16 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
copy_settings_data->fec_enable_delay_in100us = link->dc->debug.fec_enable_delay_in100us;
copy_settings_data->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
copy_settings_data->panel_inst = panel_inst;
+ copy_settings_data->dsc_enable_status = (pipe_ctx->stream->timing.flags.DSC == 1);
+
+ if (link->fec_state == dc_link_fec_enabled &&
+ (!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_1,
+ sizeof(link->dpcd_caps.sink_dev_id_str)) ||
+ !memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_2,
+ sizeof(link->dpcd_caps.sink_dev_id_str))))
+ copy_settings_data->debug.bitfields.force_wakeup_by_tps3 = 1;
+ else
+ copy_settings_data->debug.bitfields.force_wakeup_by_tps3 = 0;
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
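
The copy-settings change above enables a TPS3 wakeup workaround only when FEC is enabled and the sink's DPCD device ID string matches one of two known IDs. A sketch of that quirk-table check follows; the ID strings and lengths are placeholders, not the real DP_SINK_DEVICE_STR_ID_* values.

/* Quirk-table check on the sink device ID string (illustrative IDs). */
#include <stdbool.h>
#include <string.h>
#include <stdio.h>

#define ID_LEN 6

static bool needs_tps3_wakeup(const char id[ID_LEN], bool fec_enabled)
{
    static const char quirk_ids[][ID_LEN] = { "AAAAAA", "BBBBBB" };

    if (!fec_enabled)
        return false;
    for (unsigned i = 0; i < sizeof(quirk_ids) / sizeof(quirk_ids[0]); i++)
        if (!memcmp(id, quirk_ids[i], ID_LEN))
            return true;
    return false;
}

int main(void)
{
    printf("%d\n", needs_tps3_wakeup("AAAAAA", true));  /* 1 */
    printf("%d\n", needs_tps3_wakeup("CCCCCC", true));  /* 0 */
    return 0;
}
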
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
index 5dbd479660f1..01acc01cc191 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
@@ -46,7 +46,7 @@ struct dmub_psr_funcs {
void (*psr_force_static)(struct dmub_psr *dmub, uint8_t panel_inst);
void (*psr_get_residency)(struct dmub_psr *dmub, uint32_t *residency,
uint8_t panel_inst);
- void (*psr_set_power_opt)(struct dmub_psr *dmub, unsigned int power_opt);
+ void (*psr_set_power_opt)(struct dmub_psr *dmub, unsigned int power_opt, uint8_t panel_inst);
};
struct dmub_psr *dmub_psr_create(struct dc_context *ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 24e47df526f6..f1593186e964 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -69,6 +69,8 @@
#include "dcn10/dcn10_hw_sequencer.h"
+#include "dce110_hw_sequencer.h"
+
#define GAMMA_HW_POINTS_NUM 256
/*
@@ -1564,6 +1566,10 @@ static enum dc_status apply_single_controller_ctx_to_hw(
&pipe_ctx->stream->audio_info);
}
+ /* Make sure no other pipes are synced to the pipe being enabled */
+ if (!pipe_ctx->stream->apply_seamless_boot_optimization && dc->config.use_pipe_ctx_sync_logic)
+ check_syncd_pipes_for_disabled_master_pipe(dc, context, pipe_ctx->pipe_idx);
+
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* DCN3.1 FPGA Workaround
* Need to enable HPO DP Stream Encoder before setting OTG master enable.
@@ -1602,6 +1608,11 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.stream_enc,
pipe_ctx->stream_res.tg->inst);
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
+ pipe_ctx->stream_res.stream_enc->funcs->reset_fifo)
+ pipe_ctx->stream_res.stream_enc->funcs->reset_fifo(
+ pipe_ctx->stream_res.stream_enc);
+
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);
@@ -1655,30 +1666,12 @@ static enum dc_status apply_single_controller_ctx_to_hw(
static void power_down_encoders(struct dc *dc)
{
- int i, j;
+ int i;
for (i = 0; i < dc->link_count; i++) {
enum signal_type signal = dc->links[i]->connector_signal;
- if ((signal == SIGNAL_TYPE_EDP) ||
- (signal == SIGNAL_TYPE_DISPLAY_PORT)) {
- if (dc->links[i]->link_enc->funcs->get_dig_frontend &&
- dc->links[i]->link_enc->funcs->is_dig_enabled(dc->links[i]->link_enc)) {
- unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(
- dc->links[i]->link_enc);
-
- for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
- if (fe == dc->res_pool->stream_enc[j]->id) {
- dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
- dc->res_pool->stream_enc[j]);
- break;
- }
- }
- }
-
- if (!dc->links[i]->wa_flags.dp_keep_receiver_powered)
- dp_receiver_power_ctrl(dc->links[i], false);
- }
+ dc_link_blank_dp_stream(dc->links[i], false);
if (signal != SIGNAL_TYPE_EDP)
signal = SIGNAL_TYPE_NONE;
@@ -1805,7 +1798,6 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
struct dc_stream_state *edp_streams[MAX_NUM_EDP];
struct dc_link *edp_link_with_sink = NULL;
struct dc_link *edp_link = NULL;
- struct dc_stream_state *edp_stream = NULL;
struct dce_hwseq *hws = dc->hwseq;
int edp_with_sink_num;
int edp_num;
@@ -1826,27 +1818,29 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
get_edp_streams(context, edp_streams, &edp_stream_num);
// Check fastboot support, disable on DCE8 because of blank screens
- if (edp_num && dc->ctx->dce_version != DCE_VERSION_8_0 &&
+ if (edp_num && edp_stream_num && dc->ctx->dce_version != DCE_VERSION_8_0 &&
dc->ctx->dce_version != DCE_VERSION_8_1 &&
dc->ctx->dce_version != DCE_VERSION_8_3) {
for (i = 0; i < edp_num; i++) {
edp_link = edp_links[i];
+ if (edp_link != edp_streams[0]->link)
+ continue;
// enable fastboot if backend is enabled on eDP
- if (edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc)) {
- /* Set optimization flag on eDP stream*/
- if (edp_stream_num && edp_link->link_status.link_active) {
- edp_stream = edp_streams[0];
- can_apply_edp_fast_boot = !is_edp_ilr_optimization_required(edp_stream->link, &edp_stream->timing);
- edp_stream->apply_edp_fast_boot_optimization = can_apply_edp_fast_boot;
- if (can_apply_edp_fast_boot)
- DC_LOG_EVENT_LINK_TRAINING("eDP fast boot disabled to optimize link rate\n");
-
- break;
- }
+ if (edp_link->link_enc->funcs->is_dig_enabled &&
+ edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
+ edp_link->link_status.link_active) {
+ struct dc_stream_state *edp_stream = edp_streams[0];
+
+ can_apply_edp_fast_boot = !is_edp_ilr_optimization_required(edp_stream->link, &edp_stream->timing);
+ edp_stream->apply_edp_fast_boot_optimization = can_apply_edp_fast_boot;
+ if (can_apply_edp_fast_boot)
+ DC_LOG_EVENT_LINK_TRAINING("eDP fast boot disabled to optimize link rate\n");
+
+ break;
}
}
// We are trying to enable eDP, don't power down VDD
- if (edp_stream_num)
+ if (can_apply_edp_fast_boot)
keep_edp_vdd_on = true;
}
@@ -2307,6 +2301,10 @@ enum dc_status dce110_apply_ctx_to_hw(
enum dc_status status;
int i;
+ /* reset syncd pipes from disabled pipes */
+ if (dc->config.use_pipe_ctx_sync_logic)
+ reset_syncd_pipes_from_disabled_pipes(dc, context);
+
/* Reset old context */
/* look up the targets that have been removed since last commit */
hws->funcs.reset_hw_ctx_wrap(dc, context);
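
With use_pipe_ctx_sync_logic set, each pipe records which master pipe it is synced to; before a new context is applied, any pipe still pointing at a disabled master is re-pointed to itself, and enabling a pipe asserts that nothing is still synced to it. A reduced model of the reset step, with an invented pipe count:

/* Reset "syncd" bookkeeping for pipes whose master was disabled. */
#include <stdio.h>

#define PIPE_COUNT 4

static int syncd_to[PIPE_COUNT]; /* index of the master pipe, or self */

static void reset_syncd_from_disabled(int disabled_master)
{
    for (int j = 0; j < PIPE_COUNT; j++)
        if (syncd_to[j] == disabled_master)
            syncd_to[j] = j; /* fall back to "synced to itself" */
}

int main(void)
{
    for (int i = 0; i < PIPE_COUNT; i++)
        syncd_to[i] = 0;          /* everyone synced to pipe 0 */

    reset_syncd_from_disabled(0); /* pipe 0 backend gets reset */

    for (int i = 0; i < PIPE_COUNT; i++)
        printf("pipe %d syncd to %d\n", i, syncd_to[i]);
    return 0;
}
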
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index 91fdfcd8a14e..db7ca4b0cdb9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -119,14 +119,6 @@ void dpp_read_state(struct dpp *dpp_base,
}
}
-/* Program gamut remap in bypass mode */
-void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp)
-{
- REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
- CM_GAMUT_REMAP_MODE, 0);
- /* Gamut remap in bypass */
-}
-
#define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19))
bool dpp1_get_optimal_number_of_taps(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index 44293d66b46b..f607a0e28f14 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -39,6 +39,10 @@
#define BLACK_OFFSET_RGB_Y 0x0
#define BLACK_OFFSET_CBCR 0x8000
+#define VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT 3
+#define VISUAL_CONFIRM_RECT_HEIGHT_MIN 1
+#define VISUAL_CONFIRM_RECT_HEIGHT_MAX 10
+
#define REG(reg)\
dpp->tf_regs->reg
@@ -85,51 +89,6 @@ enum dscl_mode_sel {
DSCL_MODE_DSCL_BYPASS = 6
};
-static void dpp1_dscl_set_overscan(
- struct dcn10_dpp *dpp,
- const struct scaler_data *data)
-{
- uint32_t left = data->recout.x;
- uint32_t top = data->recout.y;
-
- int right = data->h_active - data->recout.x - data->recout.width;
- int bottom = data->v_active - data->recout.y - data->recout.height;
-
- if (right < 0) {
- BREAK_TO_DEBUGGER();
- right = 0;
- }
- if (bottom < 0) {
- BREAK_TO_DEBUGGER();
- bottom = 0;
- }
-
- REG_SET_2(DSCL_EXT_OVERSCAN_LEFT_RIGHT, 0,
- EXT_OVERSCAN_LEFT, left,
- EXT_OVERSCAN_RIGHT, right);
-
- REG_SET_2(DSCL_EXT_OVERSCAN_TOP_BOTTOM, 0,
- EXT_OVERSCAN_BOTTOM, bottom,
- EXT_OVERSCAN_TOP, top);
-}
-
-static void dpp1_dscl_set_otg_blank(
- struct dcn10_dpp *dpp, const struct scaler_data *data)
-{
- uint32_t h_blank_start = data->h_active;
- uint32_t h_blank_end = 0;
- uint32_t v_blank_start = data->v_active;
- uint32_t v_blank_end = 0;
-
- REG_SET_2(OTG_H_BLANK, 0,
- OTG_H_BLANK_START, h_blank_start,
- OTG_H_BLANK_END, h_blank_end);
-
- REG_SET_2(OTG_V_BLANK, 0,
- OTG_V_BLANK_START, v_blank_start,
- OTG_V_BLANK_END, v_blank_end);
-}
-
static int dpp1_dscl_get_pixel_depth_val(enum lb_pixel_depth depth)
{
if (depth == LB_PIXEL_DEPTH_30BPP)
@@ -551,58 +510,6 @@ static enum lb_memory_config dpp1_dscl_find_lb_memory_config(struct dcn10_dpp *d
return LB_MEMORY_CONFIG_0;
}
-void dpp1_dscl_set_scaler_auto_scale(
- struct dpp *dpp_base,
- const struct scaler_data *scl_data)
-{
- enum lb_memory_config lb_config;
- struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
- enum dscl_mode_sel dscl_mode = dpp1_dscl_get_dscl_mode(
- dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale);
- bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN
- && scl_data->format <= PIXEL_FORMAT_VIDEO_END;
-
- dpp1_dscl_set_overscan(dpp, scl_data);
-
- dpp1_dscl_set_otg_blank(dpp, scl_data);
-
- REG_UPDATE(SCL_MODE, DSCL_MODE, dscl_mode);
-
- if (dscl_mode == DSCL_MODE_DSCL_BYPASS)
- return;
-
- lb_config = dpp1_dscl_find_lb_memory_config(dpp, scl_data);
- dpp1_dscl_set_lb(dpp, &scl_data->lb_params, lb_config);
-
- if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS)
- return;
-
- /* TODO: v_min */
- REG_SET_3(DSCL_AUTOCAL, 0,
- AUTOCAL_MODE, AUTOCAL_MODE_AUTOSCALE,
- AUTOCAL_NUM_PIPE, 0,
- AUTOCAL_PIPE_ID, 0);
-
- /* Black offsets */
- if (ycbcr)
- REG_SET_2(SCL_BLACK_OFFSET, 0,
- SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y,
- SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_CBCR);
- else
-
- REG_SET_2(SCL_BLACK_OFFSET, 0,
- SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y,
- SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_RGB_Y);
-
- REG_SET_4(SCL_TAP_CONTROL, 0,
- SCL_V_NUM_TAPS, scl_data->taps.v_taps - 1,
- SCL_H_NUM_TAPS, scl_data->taps.h_taps - 1,
- SCL_V_NUM_TAPS_C, scl_data->taps.v_taps_c - 1,
- SCL_H_NUM_TAPS_C, scl_data->taps.h_taps_c - 1);
-
- dpp1_dscl_set_scl_filter(dpp, scl_data, ycbcr);
-}
-
static void dpp1_dscl_set_manual_ratio_init(
struct dcn10_dpp *dpp, const struct scaler_data *data)
@@ -685,9 +592,17 @@ static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp,
const struct rect *recout)
{
int visual_confirm_on = 0;
+ unsigned short visual_confirm_rect_height = VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT;
+
if (dpp->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE)
visual_confirm_on = 1;
+ /* Check bounds to ensure the VC bar height was set to a sane value */
+ if ((dpp->base.ctx->dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_RECT_HEIGHT_MIN) &&
+ (dpp->base.ctx->dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_RECT_HEIGHT_MAX)) {
+ visual_confirm_rect_height = dpp->base.ctx->dc->debug.visual_confirm_rect_height;
+ }
+
REG_SET_2(RECOUT_START, 0,
/* First pixel of RECOUT in the active OTG area */
RECOUT_START_X, recout->x,
@@ -699,7 +614,7 @@ static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp,
RECOUT_WIDTH, recout->width,
/* Number of RECOUT vertical lines */
RECOUT_HEIGHT, recout->height
- - visual_confirm_on * 2 * (dpp->base.inst + 1));
+ - visual_confirm_on * 2 * (dpp->base.inst + visual_confirm_rect_height));
}
/**
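
The recout hunk above clamps the debug-configurable visual-confirm bar height to [1, 10] (default 3) and shrinks the programmed RECOUT height by 2 * (pipe instance + bar height) lines when the bar is on, so stacked bars stay visible. The same arithmetic in a standalone sketch:

/* Visual-confirm bar height clamp and RECOUT height reduction. */
#include <stdio.h>

#define VC_HEIGHT_DEFAULT 3
#define VC_HEIGHT_MIN     1
#define VC_HEIGHT_MAX     10

static int clamp_vc_height(int requested)
{
    if (requested < VC_HEIGHT_MIN || requested > VC_HEIGHT_MAX)
        return VC_HEIGHT_DEFAULT;
    return requested;
}

static int recout_height(int full_height, int pipe_inst, int vc_on, int vc_h)
{
    return full_height - vc_on * 2 * (pipe_inst + vc_h);
}

int main(void)
{
    int h = clamp_vc_height(25);                  /* out of range -> 3 */
    printf("%d\n", recout_height(1080, 0, 1, h)); /* 1074 */
    return 0;
}
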
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 0b788d794fb3..f19015413ce3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -77,9 +77,9 @@
#define PGFSM_POWER_ON 0
#define PGFSM_POWER_OFF 2
-void print_microsec(struct dc_context *dc_ctx,
- struct dc_log_buffer_ctx *log_ctx,
- uint32_t ref_cycle)
+static void print_microsec(struct dc_context *dc_ctx,
+ struct dc_log_buffer_ctx *log_ctx,
+ uint32_t ref_cycle)
{
const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
static const unsigned int frac = 1000;
@@ -132,7 +132,8 @@ static void log_mpc_crc(struct dc *dc,
REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
}
-void dcn10_log_hubbub_state(struct dc *dc, struct dc_log_buffer_ctx *log_ctx)
+static void dcn10_log_hubbub_state(struct dc *dc,
+ struct dc_log_buffer_ctx *log_ctx)
{
struct dc_context *dc_ctx = dc->ctx;
struct dcn_hubbub_wm wm;
@@ -467,8 +468,6 @@ void dcn10_log_hw_state(struct dc *dc,
log_mpc_crc(dc, log_ctx);
{
- int hpo_dp_link_enc_count = 0;
-
if (pool->hpo_dp_stream_enc_count > 0) {
DTN_INFO("DP HPO S_ENC: Enabled OTG Format Depth Vid SDP Compressed Link\n");
for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
@@ -499,18 +498,14 @@ void dcn10_log_hw_state(struct dc *dc,
}
/* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
- for (i = 0; i < dc->link_count; i++)
- if (dc->links[i]->hpo_dp_link_enc)
- hpo_dp_link_enc_count++;
-
- if (hpo_dp_link_enc_count) {
+ if (pool->hpo_dp_link_enc_count) {
DTN_INFO("DP HPO L_ENC: Enabled Mode Lanes Stream Slots VC Rate X VC Rate Y\n");
- for (i = 0; i < dc->link_count; i++) {
- struct hpo_dp_link_encoder *hpo_dp_link_enc = dc->links[i]->hpo_dp_link_enc;
+ for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
+ struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
struct hpo_dp_link_enc_state hpo_dp_le_state = {0};
- if (hpo_dp_link_enc && hpo_dp_link_enc->funcs->read_state) {
+ if (hpo_dp_link_enc->funcs->read_state) {
hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
DTN_INFO("[%d]: %d %6s %d %d %d %d %d\n",
hpo_dp_link_enc->inst,
@@ -1362,11 +1357,48 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
tg->funcs->tg_init(tg);
}
+
+ /* Power gate DSCs */
+ if (hws->funcs.dsc_pg_control != NULL) {
+ uint32_t num_opps = 0;
+ uint32_t opp_id_src0 = OPP_ID_INVALID;
+ uint32_t opp_id_src1 = OPP_ID_INVALID;
+
+ // Step 1: Find the OPTC that is running and has DSC enabled
+ for (i = 0; i < dc->res_pool->res_cap->num_timing_generator; i++) {
+ uint32_t optc_dsc_state = 0;
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+
+ if (tg->funcs->is_tg_enabled(tg)) {
+ if (tg->funcs->get_dsc_status)
+ tg->funcs->get_dsc_status(tg, &optc_dsc_state);
+ // Only one OPTC can have DSC enabled, so stop once it is found.
+ // A non-zero value means DSC is enabled.
+ if (optc_dsc_state != 0) {
+ tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
+ break;
+ }
+ }
+ }
+
+ // Step 2: Power down every DSC except the one feeding the running OPTC
+ for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
+ struct dcn_dsc_state s = {0};
+
+ dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);
+
+ if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
+ s.dsc_clock_en && s.dsc_fw_en)
+ continue;
+
+ hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
+ }
+ }
}
void dcn10_init_hw(struct dc *dc)
{
- int i, j;
+ int i;
struct abm *abm = dc->res_pool->abm;
struct dmcu *dmcu = dc->res_pool->dmcu;
struct dce_hwseq *hws = dc->hwseq;
@@ -1468,43 +1500,8 @@ void dcn10_init_hw(struct dc *dc)
dmub_enable_outbox_notification(dc);
/* we want to turn off all dp displays before doing detection */
- if (dc->config.power_down_display_on_boot) {
- uint8_t dpcd_power_state = '\0';
- enum dc_status status = DC_ERROR_UNEXPECTED;
-
- for (i = 0; i < dc->link_count; i++) {
- if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
- continue;
-
- /* DP 2.0 requires that LTTPR Caps be read first */
- dp_retrieve_lttpr_cap(dc->links[i]);
-
- /*
- * If any of the displays are lit up turn them off.
- * The reason is that some MST hubs cannot be turned off
- * completely until we tell them to do so.
- * If not turned off, then displays connected to MST hub
- * won't light up.
- */
- status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
- &dpcd_power_state, sizeof(dpcd_power_state));
- if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
- /* blank dp stream before power off receiver*/
- if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
- unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc);
-
- for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
- if (fe == dc->res_pool->stream_enc[j]->id) {
- dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
- dc->res_pool->stream_enc[j]);
- break;
- }
- }
- }
- dp_receiver_power_ctrl(dc->links[i], false);
- }
- }
- }
+ if (dc->config.power_down_display_on_boot)
+ dc_link_blank_all_dp_displays(dc);
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
@@ -1637,7 +1634,7 @@ void dcn10_reset_hw_ctx_wrap(
dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
if (hws->funcs.enable_stream_gating)
- hws->funcs.enable_stream_gating(dc, pipe_ctx);
+ hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}
@@ -1970,10 +1967,9 @@ static bool wait_for_reset_trigger_to_occur(
return rc;
}
-uint64_t reduceSizeAndFraction(
- uint64_t *numerator,
- uint64_t *denominator,
- bool checkUint32Bounary)
+static uint64_t reduceSizeAndFraction(uint64_t *numerator,
+ uint64_t *denominator,
+ bool checkUint32Bounary)
{
int i;
bool ret = checkUint32Bounary == false;
@@ -2021,7 +2017,7 @@ uint64_t reduceSizeAndFraction(
return ret;
}
-bool is_low_refresh_rate(struct pipe_ctx *pipe)
+static bool is_low_refresh_rate(struct pipe_ctx *pipe)
{
uint32_t master_pipe_refresh_rate =
pipe->stream->timing.pix_clk_100hz * 100 /
@@ -2030,7 +2026,8 @@ bool is_low_refresh_rate(struct pipe_ctx *pipe)
return master_pipe_refresh_rate <= 30;
}
-uint8_t get_clock_divider(struct pipe_ctx *pipe, bool account_low_refresh_rate)
+static uint8_t get_clock_divider(struct pipe_ctx *pipe,
+ bool account_low_refresh_rate)
{
uint32_t clock_divider = 1;
uint32_t numpipes = 1;
@@ -2050,10 +2047,8 @@ uint8_t get_clock_divider(struct pipe_ctx *pipe, bool account_low_refresh_rate)
return clock_divider;
}
-int dcn10_align_pixel_clocks(
- struct dc *dc,
- int group_size,
- struct pipe_ctx *grouped_pipes[])
+static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
+ struct pipe_ctx *grouped_pipes[])
{
struct dc_context *dc_ctx = dc->ctx;
int i, master = -1, embedded = -1;
@@ -2342,7 +2337,7 @@ static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
}
-void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
+static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
struct vm_system_aperture_param apt = {0};
@@ -2624,7 +2619,7 @@ static void dcn10_update_dchubp_dpp(
/* new calculated dispclk, dppclk are stored in
* context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
* dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
- * dcn_validate_bandwidth compute new dispclk, dppclk.
+ * dcn10_validate_bandwidth computes new dispclk and dppclk.
* dispclk will put in use after optimize_bandwidth when
* ramp_up_dispclk_with_dpp is called.
* there are two places for dppclk be put in use. One location
@@ -2638,7 +2633,7 @@ static void dcn10_update_dchubp_dpp(
* for example, eDP + external dp, change resolution of DP from
* 1920x1080x144hz to 1280x960x60hz.
* before change: dispclk = 337889 dppclk = 337889
- * change mode, dcn_validate_bandwidth calculate
+ * change mode, dcn10_validate_bandwidth calculates
* dispclk = 143122 dppclk = 143122
* update_dchubp_dpp be executed before dispclk be updated,
* dispclk = 337889, but dppclk use new value dispclk /2 =
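
dcn10_init_pipes() now power-gates DSC instances at init in two steps: locate the OPP fed by the one enabled OPTC that has DSC on, then gate every DSC that is not sourcing that OPP. A simplified stand-in for that filter, with invented structures and counts:

/* Gate every DSC except the one feeding the active OPP. */
#include <stdbool.h>
#include <stdio.h>

#define NUM_DSC 4

struct dsc_state { int opp_source; bool clock_en; bool fw_en; };

static void gate_unused_dscs(const struct dsc_state dsc[NUM_DSC],
                             int active_opp)
{
    for (int i = 0; i < NUM_DSC; i++) {
        /* skip the DSC that is actively feeding the running OPTC */
        if (dsc[i].opp_source == active_opp &&
            dsc[i].clock_en && dsc[i].fw_en)
            continue;
        printf("power gating DSC %d\n", i);
    }
}

int main(void)
{
    struct dsc_state dsc[NUM_DSC] = {
        { .opp_source = 1, .clock_en = true, .fw_en = true },
    };

    gate_unused_dscs(dsc, 1); /* gates DSC 1..3, keeps DSC 0 */
    return 0;
}
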
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
index 34001a30d449..10e613ec7d24 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
@@ -78,6 +78,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.get_clock = dcn10_get_clock,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
+ .power_down = dce110_power_down,
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
.set_pipe = dce110_set_pipe,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index 2dc4b4e4ba02..f4b34c110eae 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -646,8 +646,9 @@ static bool dcn10_link_encoder_validate_hdmi_output(
crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
return false;
- if (!enc10->base.features.flags.bits.HDMI_6GB_EN &&
- adjusted_pix_clk_100hz >= 3000000)
+ if ((!enc10->base.features.flags.bits.HDMI_6GB_EN ||
+ enc10->base.ctx->dc->debug.hdmi20_disable) &&
+ adjusted_pix_clk_100hz >= 3000000)
return false;
if (enc10->base.ctx->dc->debug.hdmi20_disable &&
crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index d54d731415d7..2c409356f512 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -348,36 +348,6 @@ void opp1_program_stereo(
*/
}
-void opp1_program_oppbuf(
- struct output_pixel_processor *opp,
- struct oppbuf_params *oppbuf)
-{
- struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
-
- /* Program the oppbuf active width to be the frame width from mpc */
- REG_UPDATE(OPPBUF_CONTROL, OPPBUF_ACTIVE_WIDTH, oppbuf->active_width);
-
- /* Specifies the number of segments in multi-segment mode (DP-MSO operation)
- * description "In 1/2/4 segment mode, specifies the horizontal active width in pixels of the display panel.
- * In 4 segment split left/right mode, specifies the horizontal 1/2 active width in pixels of the display panel.
- * Used to determine segment boundaries in multi-segment mode. Used to determine the width of the vertical active space in 3D frame packed modes.
- * OPPBUF_ACTIVE_WIDTH must be integer divisible by the total number of segments."
- */
- REG_UPDATE(OPPBUF_CONTROL, OPPBUF_DISPLAY_SEGMENTATION, oppbuf->mso_segmentation);
-
- /* description "Specifies the number of overlap pixels (1-8 overlapping pixels supported), used in multi-segment mode (DP-MSO operation)" */
- REG_UPDATE(OPPBUF_CONTROL, OPPBUF_OVERLAP_PIXEL_NUM, oppbuf->mso_overlap_pixel_num);
-
- /* description "Specifies the number of times a pixel is replicated (0-15 pixel replications supported).
- * A value of 0 disables replication. The total number of times a pixel is output is OPPBUF_PIXEL_REPETITION + 1."
- */
- REG_UPDATE(OPPBUF_CONTROL, OPPBUF_PIXEL_REPETITION, oppbuf->pixel_repetition);
-
- /* Controls the number of padded pixels at the end of a segment */
- if (REG(OPPBUF_CONTROL1))
- REG_UPDATE(OPPBUF_CONTROL1, OPPBUF_NUM_SEGMENT_PADDED_PIXELS, oppbuf->num_segment_padded_pixels);
-}
-
void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable)
{
struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 3d2a2848857a..b1671b00ce40 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -132,22 +132,6 @@ void optc1_setup_vertical_interrupt2(
}
/**
- * Vupdate keepout can be set to a window to block the update lock for that pipe from changing.
- * Start offset begins with vstartup and goes for x number of clocks,
- * end offset starts from end of vupdate to x number of clocks.
- */
-void optc1_set_vupdate_keepout(struct timing_generator *optc,
- struct vupdate_keepout_params *params)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
- REG_SET_3(OTG_VUPDATE_KEEPOUT, 0,
- MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, params->start_offset,
- MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, params->end_offset,
- OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, params->enable);
-}
-
-/**
* program_timing_generator used by mode timing set
* Program CRTC Timing Registers - OTG_H_*, OTG_V_*, Pixel repetition.
* Including SYNC. Call BIOS command table to program Timings.
@@ -876,7 +860,7 @@ void optc1_set_static_screen_control(
OTG_STATIC_SCREEN_FRAME_COUNT, num_frames);
}
-void optc1_setup_manual_trigger(struct timing_generator *optc)
+static void optc1_setup_manual_trigger(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -894,7 +878,7 @@ void optc1_setup_manual_trigger(struct timing_generator *optc)
OTG_TRIGA_CLEAR, 1);
}
-void optc1_program_manual_trigger(struct timing_generator *optc)
+static void optc1_program_manual_trigger(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index f37551e00023..858b72149897 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -686,9 +686,8 @@ static struct output_pixel_processor *dcn10_opp_create(
return &opp->base;
}
-struct dce_aux *dcn10_aux_engine_create(
- struct dc_context *ctx,
- uint32_t inst)
+static struct dce_aux *dcn10_aux_engine_create(struct dc_context *ctx,
+ uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
@@ -724,9 +723,8 @@ static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
};
-struct dce_i2c_hw *dcn10_i2c_hw_create(
- struct dc_context *ctx,
- uint32_t inst)
+static struct dce_i2c_hw *dcn10_i2c_hw_create(struct dc_context *ctx,
+ uint32_t inst)
{
struct dce_i2c_hw *dce_i2c_hw =
kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
@@ -805,7 +803,7 @@ static const struct encoder_feature_support link_enc_feature = {
.flags.bits.IS_TPS4_CAPABLE = true
};
-struct link_encoder *dcn10_link_encoder_create(
+static struct link_encoder *dcn10_link_encoder_create(
const struct encoder_init_data *enc_init_data)
{
struct dcn10_link_encoder *enc10 =
@@ -847,7 +845,7 @@ static struct panel_cntl *dcn10_panel_cntl_create(const struct panel_cntl_init_d
return &panel_cntl->base;
}
-struct clock_source *dcn10_clock_source_create(
+static struct clock_source *dcn10_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
@@ -945,7 +943,7 @@ static const struct resource_create_funcs res_create_maximus_funcs = {
.create_hwseq = dcn10_hwseq_create,
};
-void dcn10_clock_source_destroy(struct clock_source **clk_src)
+static void dcn10_clock_source_destroy(struct clock_source **clk_src)
{
kfree(TO_DCE110_CLK_SRC(*clk_src));
*clk_src = NULL;
@@ -978,10 +976,8 @@ static void dcn10_resource_destruct(struct dcn10_resource_pool *pool)
pool->base.mpc = NULL;
}
- if (pool->base.hubbub != NULL) {
- kfree(pool->base.hubbub);
- pool->base.hubbub = NULL;
- }
+ kfree(pool->base.hubbub);
+ pool->base.hubbub = NULL;
for (i = 0; i < pool->base.pipe_count; i++) {
if (pool->base.opps[i] != NULL)
@@ -1011,14 +1007,10 @@ static void dcn10_resource_destruct(struct dcn10_resource_pool *pool)
for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
if (pool->base.engines[i] != NULL)
dce110_engine_destroy(&pool->base.engines[i]);
- if (pool->base.hw_i2cs[i] != NULL) {
- kfree(pool->base.hw_i2cs[i]);
- pool->base.hw_i2cs[i] = NULL;
- }
- if (pool->base.sw_i2cs[i] != NULL) {
- kfree(pool->base.sw_i2cs[i]);
- pool->base.sw_i2cs[i] = NULL;
- }
+ kfree(pool->base.hw_i2cs[i]);
+ pool->base.hw_i2cs[i] = NULL;
+ kfree(pool->base.sw_i2cs[i]);
+ pool->base.sw_i2cs[i] = NULL;
}
for (i = 0; i < pool->base.audio_count; i++) {
@@ -1128,7 +1120,7 @@ static enum dc_status build_mapped_resource(
return DC_OK;
}
-enum dc_status dcn10_add_stream_to_ctx(
+static enum dc_status dcn10_add_stream_to_ctx(
struct dc *dc,
struct dc_state *new_ctx,
struct dc_stream_state *dc_stream)
@@ -1320,7 +1312,7 @@ static const struct resource_funcs dcn10_res_pool_funcs = {
.destroy = dcn10_destroy_resource_pool,
.link_enc_create = dcn10_link_encoder_create,
.panel_cntl_create = dcn10_panel_cntl_create,
- .validate_bandwidth = dcn_validate_bandwidth,
+ .validate_bandwidth = dcn10_validate_bandwidth,
.acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer,
.validate_plane = dcn10_validate_plane,
.validate_global = dcn10_validate_global,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index b0c08ee6bc2c..bf4436d7aaab 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -902,6 +902,19 @@ void enc1_stream_encoder_stop_dp_info_packets(
}
+void enc1_stream_encoder_reset_fifo(
+ struct stream_encoder *enc)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ /* set DIG_START to 0x1 to reset FIFO */
+ REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
+ udelay(100);
+
+ /* write 0 to take the FIFO out of reset */
+ REG_UPDATE(DIG_FE_CNTL, DIG_START, 0);
+}
+
void enc1_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc)
@@ -1587,6 +1600,8 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
enc1_stream_encoder_send_immediate_sdp_message,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
+ .reset_fifo =
+ enc1_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index 687d7e4bf7ca..a146a41f68e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -626,6 +626,9 @@ void enc1_stream_encoder_send_immediate_sdp_message(
void enc1_stream_encoder_stop_dp_info_packets(
struct stream_encoder *enc);
+void enc1_stream_encoder_reset_fifo(
+ struct stream_encoder *enc);
+
void enc1_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc);
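
enc1_stream_encoder_reset_fifo() resets the DIG FIFO by asserting DIG_START, holding it briefly, then deasserting it, and is now called for DP signals before unblanking. The sequence below models those register writes with stand-in helpers rather than the real REG_UPDATE/udelay macros:

/* DIG FIFO reset: assert DIG_START, hold, then deassert. */
#include <stdint.h>
#include <stdio.h>

static uint32_t dig_fe_cntl; /* stands in for the DIG_FE_CNTL register */

static void reg_update_dig_start(uint32_t val)
{
    dig_fe_cntl = (dig_fe_cntl & ~1u) | (val & 1u);
    printf("DIG_START <- %u\n", val);
}

static void delay_us(unsigned us) { (void)us; /* placeholder for a delay */ }

static void reset_dig_fifo(void)
{
    reg_update_dig_start(1); /* put the FIFO into reset */
    delay_us(100);           /* let it settle */
    reg_update_dig_start(0); /* take the FIFO out of reset */
}

int main(void)
{
    reset_dig_fifo();
    return 0;
}
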
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index a9e420c7d75a..970b65efeac1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -251,20 +251,6 @@ static void dpp2_cnv_setup (
}
-void dpp2_cnv_set_bias_scale(
- struct dpp *dpp_base,
- struct dc_bias_and_scale *bias_and_scale)
-{
- struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
-
- REG_UPDATE(FCNV_FP_BIAS_R, FCNV_FP_BIAS_R, bias_and_scale->bias_red);
- REG_UPDATE(FCNV_FP_BIAS_G, FCNV_FP_BIAS_G, bias_and_scale->bias_green);
- REG_UPDATE(FCNV_FP_BIAS_B, FCNV_FP_BIAS_B, bias_and_scale->bias_blue);
- REG_UPDATE(FCNV_FP_SCALE_R, FCNV_FP_SCALE_R, bias_and_scale->scale_red);
- REG_UPDATE(FCNV_FP_SCALE_G, FCNV_FP_SCALE_G, bias_and_scale->scale_green);
- REG_UPDATE(FCNV_FP_SCALE_B, FCNV_FP_SCALE_B, bias_and_scale->scale_blue);
-}
-
/*compute the maximum number of lines that we can fit in the line buffer*/
void dscl2_calc_lb_num_partitions(
const struct scaler_data *scl_data,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index 79b640e202eb..ef5c4c0f4d6c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -162,6 +162,8 @@ static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_ds
REG_GET(DSCC_PPS_CONFIG2, PIC_WIDTH, &s->dsc_pic_width);
REG_GET(DSCC_PPS_CONFIG2, PIC_HEIGHT, &s->dsc_pic_height);
REG_GET(DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, &s->dsc_slice_bpg_offset);
+ REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &s->dsc_fw_en,
+ DSCRM_DSC_OPP_PIPE_SOURCE, &s->dsc_opp_source);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
index 880954ac0b02..994fb732a7cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
@@ -527,7 +527,7 @@ static const uint16_t filter_12tap_16p_183[108] = {
0, 84, 16328, 16032, 416, 1944, 1944, 416, 16032, 16328, 84, 0,
};
-const uint16_t *wbscl_get_filter_3tap_16p(struct fixed31_32 ratio)
+static const uint16_t *wbscl_get_filter_3tap_16p(struct fixed31_32 ratio)
{
if (ratio.value < dc_fixpt_one.value)
return filter_3tap_16p_upscale;
@@ -539,7 +539,7 @@ const uint16_t *wbscl_get_filter_3tap_16p(struct fixed31_32 ratio)
return filter_3tap_16p_183;
}
-const uint16_t *wbscl_get_filter_4tap_16p(struct fixed31_32 ratio)
+static const uint16_t *wbscl_get_filter_4tap_16p(struct fixed31_32 ratio)
{
if (ratio.value < dc_fixpt_one.value)
return filter_4tap_16p_upscale;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index 5adf42a7cc27..dc1752e9f461 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -192,9 +192,8 @@ void hubp2_vready_at_or_After_vsync(struct hubp *hubp,
REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value);
}
-void hubp2_program_requestor(
- struct hubp *hubp,
- struct _vcs_dpi_display_rq_regs_st *rq_regs)
+static void hubp2_program_requestor(struct hubp *hubp,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
@@ -930,6 +929,16 @@ bool hubp2_is_flip_pending(struct hubp *hubp)
void hubp2_set_blank(struct hubp *hubp, bool blank)
{
+ hubp2_set_blank_regs(hubp, blank);
+
+ if (blank) {
+ hubp->mpcc_id = 0xf;
+ hubp->opp_id = OPP_ID_INVALID;
+ }
+}
+
+void hubp2_set_blank_regs(struct hubp *hubp, bool blank)
+{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
uint32_t blank_en = blank ? 1 : 0;
@@ -951,9 +960,6 @@ void hubp2_set_blank(struct hubp *hubp, bool blank)
HUBP_NO_OUTSTANDING_REQ, 1,
1, 200);
}
-
- hubp->mpcc_id = 0xf;
- hubp->opp_id = OPP_ID_INVALID;
}
}
@@ -1285,7 +1291,7 @@ void hubp2_read_state(struct hubp *hubp)
}
-void hubp2_validate_dml_output(struct hubp *hubp,
+static void hubp2_validate_dml_output(struct hubp *hubp,
struct dc_context *ctx,
struct _vcs_dpi_display_rq_regs_st *dml_rq_regs,
struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr,
@@ -1603,6 +1609,7 @@ static struct hubp_funcs dcn20_hubp_funcs = {
.hubp_setup_interdependent = hubp2_setup_interdependent,
.hubp_set_vm_system_aperture_settings = hubp2_set_vm_system_aperture_settings,
.set_blank = hubp2_set_blank,
+ .set_blank_regs = hubp2_set_blank_regs,
.dcc_control = hubp2_dcc_control,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
index eea2254b15e4..9204c3ef323b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
@@ -330,6 +330,7 @@ void hubp2_program_surface_config(
bool hubp2_is_flip_pending(struct hubp *hubp);
void hubp2_set_blank(struct hubp *hubp, bool blank);
+void hubp2_set_blank_regs(struct hubp *hubp, bool blank);
void hubp2_cursor_set_position(
struct hubp *hubp,
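
The hubp2_set_blank() change above separates register programming from software bookkeeping: hubp2_set_blank_regs() now performs the actual HUBP blanking (including the outstanding-request wait), while only the full set_blank() path clears the hubp's mpcc_id/opp_id association. That lets sequencer code blank pixel output at the register level without tearing down pipe-topology state. A hedged usage sketch through the new hubp_funcs entry; the wrapper is illustrative:

    /* Illustrative only: blank at the register level, keeping mpcc/opp intact. */
    static void example_blank_regs_only(struct hubp *hubp, bool blank)
    {
            if (hubp->funcs->set_blank_regs)
                    hubp->funcs->set_blank_regs(hubp, blank);
            else
                    hubp->funcs->set_blank(hubp, blank); /* full blank as fallback */
    }
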
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 4f88376a118f..4991e93e5308 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -615,6 +615,11 @@ void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
pipe_ctx->pipe_idx);
}
+void dcn20_disable_pixel_data(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank)
+{
+ dcn20_blank_pixel_data(dc, pipe_ctx, blank);
+}
+
static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
int opp_cnt)
{
@@ -1080,10 +1085,8 @@ static void dcn20_power_on_plane(
}
}
-void dcn20_enable_plane(
- struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct dc_state *context)
+static void dcn20_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
{
//if (dc->debug.sanity_checks) {
// dcn10_verify_allow_pstate_change_high(dc);
@@ -1842,6 +1845,11 @@ void dcn20_optimize_bandwidth(
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
+ if (dc->clk_mgr->dc_mode_softmax_enabled)
+ if (dc->clk_mgr->clks.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
+ context->bw_ctx.bw.dcn.clk.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
+ dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
+
dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr,
context,
@@ -2270,7 +2278,7 @@ void dcn20_reset_hw_ctx_wrap(
dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
if (hws->funcs.enable_stream_gating)
- hws->funcs.enable_stream_gating(dc, pipe_ctx);
+ hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}
@@ -2406,7 +2414,7 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->map_stream_to_link(
pipe_ctx->stream_res.hpo_dp_stream_enc,
pipe_ctx->stream_res.hpo_dp_stream_enc->inst,
- link->hpo_dp_link_enc->inst);
+ pipe_ctx->link_res.hpo_dp_link_enc->inst);
}
if (!is_dp_128b_132b_signal(pipe_ctx) && link_enc)
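
Several things change in dcn20_hwseq.c: a dcn20_disable_pixel_data() wrapper around dcn20_blank_pixel_data() is added so it can be plugged into the hw_sequencer_funcs table, stream gating on reset is now driven from pipe_ctx_old (the pipe actually being torn down) rather than pipe_ctx, and the HPO stream encoder is mapped to the link encoder held in pipe_ctx->link_res instead of the one cached on the link. In addition, dcn20_optimize_bandwidth() now drops the memory-clock cap back to the DC-mode soft maximum once the new state fits under it. A sketch of that lowering decision, reusing the expressions from the hunk; the wrapper function is illustrative:

    /* Sketch of the softmax lowering applied in optimize_bandwidth. */
    static void example_lower_memclk_to_softmax(struct dc *dc, struct dc_state *context)
    {
            int softmax_khz = dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000;

            if (!dc->clk_mgr->dc_mode_softmax_enabled)
                    return;

            /* current clock is above softmax and the new state fits below it */
            if (dc->clk_mgr->clks.dramclk_khz > softmax_khz &&
                context->bw_ctx.bw.dcn.clk.dramclk_khz <= softmax_khz)
                    dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr,
                                    dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
    }

The matching raise appears in dcn30_prepare_bandwidth() further down.
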
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
index 6bba191cd33e..33a36c02b2f8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
@@ -53,6 +53,10 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx);
void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings);
void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_disable_pixel_data(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank);
void dcn20_blank_pixel_data(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
index 5cfd4b0afea5..91e4885b743e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -27,6 +27,8 @@
#include "dcn10/dcn10_hw_sequencer.h"
#include "dcn20_hwseq.h"
+#include "dcn20_init.h"
+
static const struct hw_sequencer_funcs dcn20_funcs = {
.program_gamut_remap = dcn10_program_gamut_remap,
.init_hw = dcn10_init_hw,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 947eb0df3f12..15734db0cdea 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -400,10 +400,9 @@ static void mpc20_program_ogam_pwl(
}
-void apply_DEDCN20_305_wa(
- struct mpc *mpc,
- int mpcc_id, enum dc_lut_mode current_mode,
- enum dc_lut_mode next_mode)
+static void apply_DEDCN20_305_wa(struct mpc *mpc, int mpcc_id,
+ enum dc_lut_mode current_mode,
+ enum dc_lut_mode next_mode)
{
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
@@ -525,7 +524,7 @@ static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
mpcc->sm_cfg.enable = false;
}
-struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
+static struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
{
struct mpcc *tmp_mpcc = tree->opp_list;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index c90b8516dcc1..0340fdd3f5fb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -73,21 +73,6 @@ bool optc2_enable_crtc(struct timing_generator *optc)
}
/**
- * DRR double buffering control to select buffer point
- * for V_TOTAL, H_TOTAL, VTOTAL_MIN, VTOTAL_MAX, VTOTAL_MIN_SEL and VTOTAL_MAX_SEL registers
- * Options: anytime, start of frame, dp start of frame (range timing)
- */
-void optc2_set_timing_db_mode(struct timing_generator *optc, bool enable)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
- uint32_t blank_data_double_buffer_enable = enable ? 1 : 0;
-
- REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL,
- OTG_RANGE_TIMING_DBUF_UPDATE_MODE, blank_data_double_buffer_enable);
-}
-
-/**
*For the below, I'm not sure how your GSL parameters are stored in your env,
* so I will assume a gsl_params struct for now
*/
@@ -110,30 +95,6 @@ void optc2_set_gsl(struct timing_generator *optc,
}
-/* Use the gsl allow flip as the master update lock */
-void optc2_use_gsl_as_master_update_lock(struct timing_generator *optc,
- const struct gsl_params *params)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
- REG_UPDATE(OTG_GSL_CONTROL,
- OTG_MASTER_UPDATE_LOCK_GSL_EN, params->master_update_lock_gsl_en);
-}
-
-/* You can control the GSL timing by limiting GSL to a window (X,Y) */
-void optc2_set_gsl_window(struct timing_generator *optc,
- const struct gsl_params *params)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
- REG_SET_2(OTG_GSL_WINDOW_X, 0,
- OTG_GSL_WINDOW_START_X, params->gsl_window_start_x,
- OTG_GSL_WINDOW_END_X, params->gsl_window_end_x);
- REG_SET_2(OTG_GSL_WINDOW_Y, 0,
- OTG_GSL_WINDOW_START_Y, params->gsl_window_start_y,
- OTG_GSL_WINDOW_END_Y, params->gsl_window_end_y);
-}
-
void optc2_set_gsl_source_select(
struct timing_generator *optc,
int group_idx,
@@ -156,18 +117,6 @@ void optc2_set_gsl_source_select(
}
}
-/* DSC encoder frame start controls: x = h position, line_num = # of lines from vstartup */
-void optc2_set_dsc_encoder_frame_start(struct timing_generator *optc,
- int x_position,
- int line_num)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
- REG_SET_2(OTG_DSC_START_POSITION, 0,
- OTG_DSC_START_POSITION_X, x_position,
- OTG_DSC_START_POSITION_LINE_NUM, line_num);
-}
-
/* Set DSC-related configuration.
* dsc_mode: 0 disables DSC, other values enable DSC in specified format
* sc_bytes_per_pixel: Bytes per pixel in u3.28 format
@@ -190,6 +139,19 @@ void optc2_set_dsc_config(struct timing_generator *optc,
OPTC_DSC_SLICE_WIDTH, dsc_slice_width);
}
+/* Get DSC-related configuration.
+ * dsc_mode: 0 disables DSC, other values enable DSC in specified format
+ */
+void optc2_get_dsc_status(struct timing_generator *optc,
+ uint32_t *dsc_mode)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_GET(OPTC_DATA_FORMAT_CONTROL,
+ OPTC_DSC_MODE, dsc_mode);
+}
+
+
/*TEMP: Need to figure out inheritance model here.*/
bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
{
@@ -280,8 +242,8 @@ void optc2_get_optc_source(struct timing_generator *optc,
*num_of_src_opp = 1;
}
-void optc2_set_dwb_source(struct timing_generator *optc,
- uint32_t dwb_pipe_inst)
+static void optc2_set_dwb_source(struct timing_generator *optc,
+ uint32_t dwb_pipe_inst)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -293,7 +255,7 @@ void optc2_set_dwb_source(struct timing_generator *optc,
OPTC_DWB1_SOURCE_SELECT, optc->inst);
}
-void optc2_align_vblanks(
+static void optc2_align_vblanks(
struct timing_generator *optc_master,
struct timing_generator *optc_slave,
uint32_t master_pixel_clock_100Hz,
@@ -579,6 +541,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
.get_crc = optc1_get_crc,
.configure_crc = optc2_configure_crc,
.set_dsc_config = optc2_set_dsc_config,
+ .get_dsc_status = optc2_get_dsc_status,
.set_dwb_source = optc2_set_dwb_source,
.set_odm_bypass = optc2_set_odm_bypass,
.set_odm_combine = optc2_set_odm_combine,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
index be19a6885fbf..f7968b9ca16e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
@@ -98,6 +98,9 @@ void optc2_set_dsc_config(struct timing_generator *optc,
uint32_t dsc_bytes_per_pixel,
uint32_t dsc_slice_width);
+void optc2_get_dsc_status(struct timing_generator *optc,
+ uint32_t *dsc_mode);
+
void optc2_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
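
optc2_get_dsc_status() is the read-back counterpart of optc2_set_dsc_config(): it returns the OPTC_DSC_MODE field of OPTC_DATA_FORMAT_CONTROL, where 0 means DSC is disabled and any other value selects the active DSC packing format. It is wired into timing_generator_funcs as .get_dsc_status for both DCN2.0 and DCN3.0. A hedged usage sketch, assuming the usual stream_res.tg back-pointer; the caller is illustrative:

    /* Illustrative only: query whether DSC is currently active on a pipe's OTG. */
    static bool example_is_dsc_active(struct pipe_ctx *pipe_ctx)
    {
            struct timing_generator *tg = pipe_ctx->stream_res.tg;
            uint32_t dsc_mode = 0;

            if (tg->funcs->get_dsc_status)
                    tg->funcs->get_dsc_status(tg, &dsc_mode);

            return dsc_mode != 0;   /* 0 means DSC disabled */
    }
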
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 3883f918b3bb..2bc93df023ad 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1069,7 +1069,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
@@ -3093,8 +3093,7 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
struct dc_link *link = context->streams[0]->sink->link;
- if ((link->link_index == 0 && link->psr_settings.psr_feature_enabled)
- || context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
+ if (link->link_index == 0 && context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
return DCN_ZSTATE_SUPPORT_ALLOW;
else
return DCN_ZSTATE_SUPPORT_DISALLOW;
@@ -3796,6 +3795,8 @@ static bool dcn20_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.hdmi_frl_pcon_support = true;
+
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) {
dc->debug = debug_defaults_drv;
} else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
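
The dcn20_resource.c hunks relax the default pipe-split policy to MPC_SPLIT_DYNAMIC, advertise HDMI FRL protocol-converter (PCON) support, and simplify the Z-state decision for a single eDP stream: PSR no longer forces DCN_ZSTATE_SUPPORT_ALLOW on its own, only a DML stutter period above 5 ms on the primary link does. A condensed, hedged sketch of the resulting decision; the parameter stands in for context->bw_ctx.dml.vba.StutterPeriod and the helper itself is illustrative:

    /* Sketch of the simplified single-eDP Z-state decision after this change. */
    static enum dcn_zstate_support_state example_decide_zstate(struct dc_link *link,
                    double stutter_period)
    {
            if (link->link_index == 0 && stutter_period > 5000.0)
                    return DCN_ZSTATE_SUPPORT_ALLOW;

            return DCN_ZSTATE_SUPPORT_DISALLOW;
    }
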
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index aab25ca8343a..8a70f92795c2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -593,6 +593,8 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
enc1_stream_encoder_send_immediate_sdp_message,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
+ .reset_fifo =
+ enc1_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c
index f5bf04f7da25..9a3402148fde 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c
@@ -44,7 +44,8 @@
#define DC_LOGGER \
dccg->ctx->logger
-void dccg201_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
+static void dccg201_update_dpp_dto(struct dccg *dccg, int dpp_inst,
+ int req_dppclk)
{
/* vbios handles it */
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c
index 6b6f74d4afd1..35dd4bac242a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c
@@ -55,7 +55,7 @@ static void hubp201_program_surface_config(
hubp1_program_pixel_format(hubp, format);
}
-void hubp201_program_deadline(
+static void hubp201_program_deadline(
struct hubp *hubp,
struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
@@ -63,9 +63,8 @@ void hubp201_program_deadline(
hubp1_program_deadline(hubp, dlg_attr, ttu_attr);
}
-void hubp201_program_requestor(
- struct hubp *hubp,
- struct _vcs_dpi_display_rq_regs_st *rq_regs)
+static void hubp201_program_requestor(struct hubp *hubp,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs)
{
struct dcn201_hubp *hubp201 = TO_DCN201_HUBP(hubp);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
index cfd09b3f705e..fe22530242d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
@@ -134,11 +134,12 @@ void dcn201_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
PHYSICAL_ADDRESS_LOC addr;
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct dce_hwseq *hws = dc->hwseq;
- struct dc_plane_address uma = plane_state->address;
+ struct dc_plane_address uma;
if (plane_state == NULL)
return;
+ uma = plane_state->address;
addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
plane_address_in_gpu_space_to_uma(hws, &uma);
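
The dcn201_update_plane_addr() fix above removes a dereference-before-NULL-check: uma was previously initialized from plane_state->address in its declaration, before plane_state was tested for NULL, so the copy is now deferred until after the early return. The general pattern, as a minimal self-contained sketch (types and names are generic, not taken from the driver):

    /* Anti-pattern fixed above: initializing from a pointer in the declaration
     * defeats the NULL check that follows it.
     */
    struct example_addr { int value; };

    static void example_copy_after_check(const struct example_addr *src)
    {
            struct example_addr local;      /* do NOT initialize from src here */

            if (src == NULL)
                    return;                 /* check first ...                 */

            local = *src;                   /* ... then dereference            */
            (void)local;
    }
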
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c
index a65e8f7801db..7f9ec59ef443 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c
@@ -50,8 +50,8 @@
#define IND_REG(index) \
(enc10->link_regs->index)
-void dcn201_link_encoder_get_max_link_cap(struct link_encoder *enc,
- struct dc_link_settings *link_settings)
+static void dcn201_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings)
{
uint32_t value1, value2;
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
@@ -66,7 +66,7 @@ void dcn201_link_encoder_get_max_link_cap(struct link_encoder *enc,
}
}
-bool dcn201_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+static bool dcn201_link_encoder_is_in_alt_mode(struct link_encoder *enc)
{
uint32_t value;
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
index 0fa381088d1d..0bb7d3dd53fa 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
@@ -603,7 +603,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
- .pipe_split_policy = MPC_SPLIT_AVOID,
+ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
@@ -672,9 +672,8 @@ static struct output_pixel_processor *dcn201_opp_create(
return &opp->base;
}
-struct dce_aux *dcn201_aux_engine_create(
- struct dc_context *ctx,
- uint32_t inst)
+static struct dce_aux *dcn201_aux_engine_create(struct dc_context *ctx,
+ uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC);
@@ -706,9 +705,8 @@ static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCN2(_MASK)
};
-struct dce_i2c_hw *dcn201_i2c_hw_create(
- struct dc_context *ctx,
- uint32_t inst)
+static struct dce_i2c_hw *dcn201_i2c_hw_create(struct dc_context *ctx,
+ uint32_t inst)
{
struct dce_i2c_hw *dce_i2c_hw =
kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC);
@@ -789,7 +787,7 @@ static const struct encoder_feature_support link_enc_feature = {
.flags.bits.IS_TPS4_CAPABLE = true
};
-struct link_encoder *dcn201_link_encoder_create(
+static struct link_encoder *dcn201_link_encoder_create(
const struct encoder_init_data *enc_init_data)
{
struct dcn20_link_encoder *enc20 =
@@ -811,7 +809,7 @@ struct link_encoder *dcn201_link_encoder_create(
return &enc10->base;
}
-struct clock_source *dcn201_clock_source_create(
+static struct clock_source *dcn201_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
@@ -906,7 +904,7 @@ static const struct resource_create_funcs res_create_maximus_funcs = {
.create_hwseq = dcn201_hwseq_create,
};
-void dcn201_clock_source_destroy(struct clock_source **clk_src)
+static void dcn201_clock_source_destroy(struct clock_source **clk_src)
{
kfree(TO_DCE110_CLK_SRC(*clk_src));
*clk_src = NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index 36044cb8ec83..c5e200d09038 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -680,7 +680,7 @@ void hubbub21_wm_read_state(struct hubbub *hubbub,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_chanage);
}
-void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub)
+static void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
uint32_t prog_wm_value;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index 3de1bcf9b3d8..58e459c7e7d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -183,7 +183,7 @@ static void hubp21_setup(
}
-void hubp21_set_viewport(
+static void hubp21_set_viewport(
struct hubp *hubp,
const struct rect *viewport,
const struct rect *viewport_c)
@@ -225,8 +225,8 @@ void hubp21_set_viewport(
SEC_VIEWPORT_Y_START_C, viewport_c->y);
}
-void hubp21_set_vm_system_aperture_settings(struct hubp *hubp,
- struct vm_system_aperture_param *apt)
+static void hubp21_set_vm_system_aperture_settings(struct hubp *hubp,
+ struct vm_system_aperture_param *apt)
{
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
@@ -248,7 +248,7 @@ void hubp21_set_vm_system_aperture_settings(struct hubp *hubp,
SYSTEM_ACCESS_MODE, 0x3);
}
-void hubp21_validate_dml_output(struct hubp *hubp,
+static void hubp21_validate_dml_output(struct hubp *hubp,
struct dc_context *ctx,
struct _vcs_dpi_display_rq_regs_st *dml_rq_regs,
struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr,
@@ -664,7 +664,8 @@ static void program_surface_flip_and_addr(struct hubp *hubp, struct surface_flip
flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS);
}
-void dmcub_PLAT_54186_wa(struct hubp *hubp, struct surface_flip_registers *flip_regs)
+static void dmcub_PLAT_54186_wa(struct hubp *hubp,
+ struct surface_flip_registers *flip_regs)
{
struct dc_dmub_srv *dmcub = hubp->ctx->dmub_srv;
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
@@ -697,7 +698,7 @@ void dmcub_PLAT_54186_wa(struct hubp *hubp, struct surface_flip_registers *flip_
PERF_TRACE(); // TODO: remove after performance is stable.
}
-bool hubp21_program_surface_flip_and_addr(
+static bool hubp21_program_surface_flip_and_addr(
struct hubp *hubp,
const struct dc_plane_address *address,
bool flip_immediate)
@@ -805,7 +806,7 @@ bool hubp21_program_surface_flip_and_addr(
return true;
}
-void hubp21_init(struct hubp *hubp)
+static void hubp21_init(struct hubp *hubp)
{
// DEDCN21-133: Inconsistent row starting line for flip between DPTE and Meta
// This is a chicken bit to enable the ECO fix.
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
index 54c11ba550ae..b270f0b194dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -28,6 +28,8 @@
#include "dcn20/dcn20_hwseq.h"
#include "dcn21_hwseq.h"
+#include "dcn21_init.h"
+
static const struct hw_sequencer_funcs dcn21_funcs = {
.program_gamut_remap = dcn10_program_gamut_remap,
.init_hw = dcn10_init_hw,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
index aa46c35b05a2..0a1ba6e7081c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
@@ -203,7 +203,7 @@ static bool update_cfg_data(
return true;
}
-bool dcn21_link_encoder_acquire_phy(struct link_encoder *enc)
+static bool dcn21_link_encoder_acquire_phy(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
int value;
@@ -277,7 +277,7 @@ void dcn21_link_encoder_enable_dp_output(
}
-void dcn21_link_encoder_enable_dp_mst_output(
+static void dcn21_link_encoder_enable_dp_mst_output(
struct link_encoder *enc,
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source)
@@ -288,9 +288,8 @@ void dcn21_link_encoder_enable_dp_mst_output(
dcn10_link_encoder_enable_dp_mst_output(enc, link_settings, clock_source);
}
-void dcn21_link_encoder_disable_output(
- struct link_encoder *enc,
- enum signal_type signal)
+static void dcn21_link_encoder_disable_output(struct link_encoder *enc,
+ enum signal_type signal)
{
dcn10_link_encoder_disable_output(enc, signal);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index d452a0d1777e..e5cc6bf45743 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -784,9 +784,8 @@ static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCN2(_MASK)
};
-struct dce_i2c_hw *dcn21_i2c_hw_create(
- struct dc_context *ctx,
- uint32_t inst)
+static struct dce_i2c_hw *dcn21_i2c_hw_create(struct dc_context *ctx,
+ uint32_t inst)
{
struct dce_i2c_hw *dce_i2c_hw =
kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
@@ -874,7 +873,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.clock_trace = true,
.disable_pplib_clock_request = true,
.min_disp_clk_khz = 100000,
- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
@@ -1093,7 +1092,7 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
}
}
-void dcn21_calculate_wm(
+static void dcn21_calculate_wm(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *out_pipe_cnt,
@@ -1390,7 +1389,7 @@ validate_out:
* with DC_FP_START()/DC_FP_END(). Use the same approach as for
* dcn20_validate_bandwidth in dcn20_resource.c.
*/
-bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
+static bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
bool fast_validate)
{
bool voltage_supported;
@@ -1480,8 +1479,8 @@ static struct hubbub *dcn21_hubbub_create(struct dc_context *ctx)
return &hubbub->base;
}
-struct output_pixel_processor *dcn21_opp_create(
- struct dc_context *ctx, uint32_t inst)
+static struct output_pixel_processor *dcn21_opp_create(struct dc_context *ctx,
+ uint32_t inst)
{
struct dcn20_opp *opp =
kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
@@ -1496,9 +1495,8 @@ struct output_pixel_processor *dcn21_opp_create(
return &opp->base;
}
-struct timing_generator *dcn21_timing_generator_create(
- struct dc_context *ctx,
- uint32_t instance)
+static struct timing_generator *dcn21_timing_generator_create(struct dc_context *ctx,
+ uint32_t instance)
{
struct optc *tgn10 =
kzalloc(sizeof(struct optc), GFP_KERNEL);
@@ -1518,7 +1516,7 @@ struct timing_generator *dcn21_timing_generator_create(
return &tgn10->base;
}
-struct mpc *dcn21_mpc_create(struct dc_context *ctx)
+static struct mpc *dcn21_mpc_create(struct dc_context *ctx)
{
struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc),
GFP_KERNEL);
@@ -1545,8 +1543,8 @@ static void read_dce_straps(
}
-struct display_stream_compressor *dcn21_dsc_create(
- struct dc_context *ctx, uint32_t inst)
+static struct display_stream_compressor *dcn21_dsc_create(struct dc_context *ctx,
+ uint32_t inst)
{
struct dcn20_dsc *dsc =
kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
@@ -1683,9 +1681,8 @@ static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
-struct stream_encoder *dcn21_stream_encoder_create(
- enum engine_id eng_id,
- struct dc_context *ctx)
+static struct stream_encoder *dcn21_stream_encoder_create(enum engine_id eng_id,
+ struct dc_context *ctx)
{
struct dcn10_stream_encoder *enc1 =
kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
@@ -1917,7 +1914,7 @@ static int dcn21_populate_dml_pipes_from_context(
return pipe_cnt;
}
-enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_state)
+static enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_state)
{
enum dc_status result = DC_OK;
@@ -2028,6 +2025,8 @@ static bool dcn21_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.hdmi_frl_pcon_support = true;
+
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
index ebd9c35c914f..8daa12730bc1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
@@ -50,22 +50,6 @@
enc1->base.ctx
-void convert_dc_info_packet_to_128(
- const struct dc_info_packet *info_packet,
- struct dc_info_packet_128 *info_packet_128)
-{
- unsigned int i;
-
- info_packet_128->hb0 = info_packet->hb0;
- info_packet_128->hb1 = info_packet->hb1;
- info_packet_128->hb2 = info_packet->hb2;
- info_packet_128->hb3 = info_packet->hb3;
-
- for (i = 0; i < 32; i++) {
- info_packet_128->sb[i] = info_packet->sb[i];
- }
-
-}
static void enc3_update_hdmi_info_packet(
struct dcn10_stream_encoder *enc1,
uint32_t packet_index,
@@ -489,7 +473,7 @@ static void enc3_dp_set_odm_combine(
}
/* setup stream encoder in dvi mode */
-void enc3_stream_encoder_dvi_set_stream_attribute(
+static void enc3_stream_encoder_dvi_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
bool is_dual_link)
@@ -805,6 +789,8 @@ static const struct stream_encoder_funcs dcn30_str_enc_funcs = {
enc3_stream_encoder_update_dp_info_packets,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
+ .reset_fifo =
+ enc1_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index c1d967ed6551..ab3918c0a15b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -41,8 +41,7 @@
dpp->tf_shift->field_name, dpp->tf_mask->field_name
-void dpp30_read_state(struct dpp *dpp_base,
- struct dcn_dpp_state *s)
+static void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s)
{
struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
@@ -373,7 +372,7 @@ void dpp3_set_cursor_attributes(
}
-bool dpp3_get_optimal_number_of_taps(
+static bool dpp3_get_optimal_number_of_taps(
struct dpp *dpp,
struct scaler_data *scl_data,
const struct scaling_taps *in_taps)
@@ -474,22 +473,7 @@ bool dpp3_get_optimal_number_of_taps(
return true;
}
-void dpp3_cnv_set_bias_scale(
- struct dpp *dpp_base,
- struct dc_bias_and_scale *bias_and_scale)
-{
- struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
-
- REG_UPDATE(FCNV_FP_BIAS_R, FCNV_FP_BIAS_R, bias_and_scale->bias_red);
- REG_UPDATE(FCNV_FP_BIAS_G, FCNV_FP_BIAS_G, bias_and_scale->bias_green);
- REG_UPDATE(FCNV_FP_BIAS_B, FCNV_FP_BIAS_B, bias_and_scale->bias_blue);
- REG_UPDATE(FCNV_FP_SCALE_R, FCNV_FP_SCALE_R, bias_and_scale->scale_red);
- REG_UPDATE(FCNV_FP_SCALE_G, FCNV_FP_SCALE_G, bias_and_scale->scale_green);
- REG_UPDATE(FCNV_FP_SCALE_B, FCNV_FP_SCALE_B, bias_and_scale->scale_blue);
-}
-
-void dpp3_deferred_update(
- struct dpp *dpp_base)
+static void dpp3_deferred_update(struct dpp *dpp_base)
{
int bypass_state;
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
@@ -751,8 +735,8 @@ static enum dc_lut_mode dpp3_get_blndgam_current(struct dpp *dpp_base)
return mode;
}
-bool dpp3_program_blnd_lut(
- struct dpp *dpp_base, const struct pwl_params *params)
+static bool dpp3_program_blnd_lut(struct dpp *dpp_base,
+ const struct pwl_params *params)
{
enum dc_lut_mode current_mode;
enum dc_lut_mode next_mode;
@@ -1164,9 +1148,8 @@ static void dpp3_program_shaper_lutb_settings(
}
-bool dpp3_program_shaper(
- struct dpp *dpp_base,
- const struct pwl_params *params)
+static bool dpp3_program_shaper(struct dpp *dpp_base,
+ const struct pwl_params *params)
{
enum dc_lut_mode current_mode;
enum dc_lut_mode next_mode;
@@ -1355,9 +1338,8 @@ static void dpp3_select_3dlut_ram_mask(
REG_SET(CM_3DLUT_INDEX, 0, CM_3DLUT_INDEX, 0);
}
-bool dpp3_program_3dlut(
- struct dpp *dpp_base,
- struct tetrahedral_params *params)
+static bool dpp3_program_3dlut(struct dpp *dpp_base,
+ struct tetrahedral_params *params)
{
enum dc_lut_mode mode;
bool is_17x17x17;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
index eac08926b574..6a4dcafb9bba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
@@ -490,6 +490,7 @@ static struct hubp_funcs dcn30_hubp_funcs = {
.hubp_setup_interdependent = hubp2_setup_interdependent,
.hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
.set_blank = hubp2_set_blank,
+ .set_blank_regs = hubp2_set_blank_regs,
.dcc_control = hubp3_dcc_control,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index df2717116604..1db1ca19411d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -344,6 +344,17 @@ void dcn30_enable_writeback(
dwb->funcs->enable(dwb, &wb_info->dwb_params);
}
+void dcn30_prepare_bandwidth(struct dc *dc,
+ struct dc_state *context)
+{
+ if (dc->clk_mgr->dc_mode_softmax_enabled)
+ if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
+ context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
+ dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
+
+ dcn20_prepare_bandwidth(dc, context);
+}
+
void dcn30_disable_writeback(
struct dc *dc,
unsigned int dwb_pipe_inst)
@@ -437,7 +448,7 @@ void dcn30_init_hw(struct dc *dc)
struct dce_hwseq *hws = dc->hwseq;
struct dc_bios *dcb = dc->ctx->dc_bios;
struct resource_pool *res_pool = dc->res_pool;
- int i, j;
+ int i;
int edp_num;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
@@ -534,41 +545,8 @@ void dcn30_init_hw(struct dc *dc)
hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
/* we want to turn off all dp displays before doing detection */
- if (dc->config.power_down_display_on_boot) {
- uint8_t dpcd_power_state = '\0';
- enum dc_status status = DC_ERROR_UNEXPECTED;
-
- for (i = 0; i < dc->link_count; i++) {
- if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
- continue;
- /* DP 2.0 states that LTTPR regs must be read first */
- dp_retrieve_lttpr_cap(dc->links[i]);
-
- /* if any of the displays are lit up turn them off */
- status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
- &dpcd_power_state, sizeof(dpcd_power_state));
- if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
- /* blank dp stream before power off receiver*/
- if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
- unsigned int fe;
-
- fe = dc->links[i]->link_enc->funcs->get_dig_frontend(
- dc->links[i]->link_enc);
- if (fe == ENGINE_ID_UNKNOWN)
- continue;
-
- for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
- if (fe == dc->res_pool->stream_enc[j]->id) {
- dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
- dc->res_pool->stream_enc[j]);
- break;
- }
- }
- }
- dp_receiver_power_ctrl(dc->links[i], false);
- }
- }
- }
+ if (dc->config.power_down_display_on_boot)
+ dc_link_blank_all_dp_displays(dc);
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
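
dcn30_hwseq.c gains the other half of the DC-mode softmax handling: dcn30_prepare_bandwidth() lifts the memory-clock cap back to the largest clock-table entry before calling dcn20_prepare_bandwidth() whenever the incoming state needs more than the softmax, mirroring the lowering done in dcn20_optimize_bandwidth(). The file also replaces the open-coded "blank every lit DP display on boot" loop with a single dc_link_blank_all_dp_displays() call. A sketch of the raise condition, reusing the expressions from the hunk; the wrapper is illustrative:

    /* Sketch of the prepare-side raise: lift the cap to the largest table entry
     * when the new state will exceed the DC-mode softmax.
     */
    static void example_raise_memclk_cap(struct dc *dc, struct dc_state *context)
    {
            int softmax_khz = dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000;
            int last = dc->clk_mgr->bw_params->clk_table.num_entries - 1;

            if (!dc->clk_mgr->dc_mode_softmax_enabled)
                    return;

            if (dc->clk_mgr->clks.dramclk_khz <= softmax_khz &&
                context->bw_ctx.bw.dcn.clk.dramclk_khz > softmax_khz)
                    dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr,
                            dc->clk_mgr->bw_params->clk_table.entries[last].memclk_mhz);
    }
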
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
index e9a0005288d3..73e7b690e82c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
@@ -27,7 +27,7 @@
#define __DC_HWSS_DCN30_H__
#include "hw_sequencer_private.h"
-
+#include "dcn20/dcn20_hwseq.h"
struct dc;
void dcn30_init_hw(struct dc *dc);
@@ -47,6 +47,9 @@ void dcn30_disable_writeback(
struct dc *dc,
unsigned int dwb_pipe_inst);
+void dcn30_prepare_bandwidth(struct dc *dc,
+ struct dc_state *context);
+
bool dcn30_mmhubbub_warmup(
struct dc *dc,
unsigned int num_dwb,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
index 93f32a312fee..bb347319de83 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
@@ -29,6 +29,8 @@
#include "dcn21/dcn21_hwseq.h"
#include "dcn30_hwseq.h"
+#include "dcn30_init.h"
+
static const struct hw_sequencer_funcs dcn30_funcs = {
.program_gamut_remap = dcn10_program_gamut_remap,
.init_hw = dcn30_init_hw,
@@ -53,6 +55,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.enable_audio_stream = dce110_enable_audio_stream,
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn20_disable_plane,
+ .disable_pixel_data = dcn20_disable_pixel_data,
.pipe_control_lock = dcn20_pipe_control_lock,
.interdependent_update_lock = dcn10_lock_all_pipes,
.cursor_lock = dcn10_cursor_lock,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c
index 1c4b171c68ad..7a93eff183d9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c
@@ -100,7 +100,7 @@ static void mmhubbub3_warmup_mcif(struct mcif_wb *mcif_wb,
REG_UPDATE(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_EN, false);
}
-void mmhubbub3_config_mcif_buf(struct mcif_wb *mcif_wb,
+static void mmhubbub3_config_mcif_buf(struct mcif_wb *mcif_wb,
struct mcif_buf_params *params,
unsigned int dest_height)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
index 95149734378b..0ce0d6165f43 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
@@ -1362,7 +1362,7 @@ uint32_t mpcc3_acquire_rmu(struct mpc *mpc, int mpcc_id, int rmu_idx)
return -1;
}
-int mpcc3_release_rmu(struct mpc *mpc, int mpcc_id)
+static int mpcc3_release_rmu(struct mpc *mpc, int mpcc_id)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
int rmu_idx;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index 5d9e6413d67a..f5e8916601d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -332,6 +332,7 @@ static struct timing_generator_funcs dcn30_tg_funcs = {
.get_crc = optc1_get_crc,
.configure_crc = optc2_configure_crc,
.set_dsc_config = optc3_set_dsc_config,
+ .get_dsc_status = optc2_get_dsc_status,
.set_dwb_source = NULL,
.set_odm_bypass = optc3_set_odm_bypass,
.set_odm_combine = optc3_set_odm_combine,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 79a66e0c4303..602ec9a08549 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -816,7 +816,7 @@ static const struct dc_plane_cap plane_cap = {
.argb8888 = true,
.nv12 = true,
.fp16 = true,
- .p010 = false,
+ .p010 = true,
.ayuv = false,
},
@@ -840,7 +840,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
@@ -875,7 +875,7 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = true
};
-void dcn30_dpp_destroy(struct dpp **dpp)
+static void dcn30_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
*dpp = NULL;
@@ -992,7 +992,7 @@ static struct mpc *dcn30_mpc_create(
return &mpc30->base;
}
-struct hubbub *dcn30_hubbub_create(struct dc_context *ctx)
+static struct hubbub *dcn30_hubbub_create(struct dc_context *ctx)
{
int i;
@@ -1143,9 +1143,8 @@ static struct afmt *dcn30_afmt_create(
return &afmt3->base;
}
-struct stream_encoder *dcn30_stream_encoder_create(
- enum engine_id eng_id,
- struct dc_context *ctx)
+static struct stream_encoder *dcn30_stream_encoder_create(enum engine_id eng_id,
+ struct dc_context *ctx)
{
struct dcn10_stream_encoder *enc1;
struct vpg *vpg;
@@ -1179,8 +1178,7 @@ struct stream_encoder *dcn30_stream_encoder_create(
return &enc1->base;
}
-struct dce_hwseq *dcn30_hwseq_create(
- struct dc_context *ctx)
+static struct dce_hwseq *dcn30_hwseq_create(struct dc_context *ctx)
{
struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
@@ -2639,6 +2637,8 @@ static bool dcn30_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.hdmi_frl_pcon_support = true;
+
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
index e85b695f2351..3d42a1a337ec 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
@@ -30,6 +30,8 @@
#include "dcn30/dcn30_hwseq.h"
#include "dcn301_hwseq.h"
+#include "dcn301_init.h"
+
static const struct hw_sequencer_funcs dcn301_funcs = {
.program_gamut_remap = dcn10_program_gamut_remap,
.init_hw = dcn10_init_hw,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c
index 736bda30abc3..ad0df1a72a90 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c
@@ -93,7 +93,7 @@ static unsigned int dcn301_get_16_bit_backlight_from_pwm(struct panel_cntl *pane
return (uint32_t)(current_backlight);
}
-uint32_t dcn301_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
+static uint32_t dcn301_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
{
struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(panel_cntl);
uint32_t value;
@@ -147,7 +147,7 @@ uint32_t dcn301_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
return current_backlight;
}
-void dcn301_panel_cntl_destroy(struct panel_cntl **panel_cntl)
+static void dcn301_panel_cntl_destroy(struct panel_cntl **panel_cntl)
{
struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(*panel_cntl);
@@ -155,7 +155,7 @@ void dcn301_panel_cntl_destroy(struct panel_cntl **panel_cntl)
*panel_cntl = NULL;
}
-bool dcn301_is_panel_backlight_on(struct panel_cntl *panel_cntl)
+static bool dcn301_is_panel_backlight_on(struct panel_cntl *panel_cntl)
{
struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(panel_cntl);
uint32_t value;
@@ -165,7 +165,7 @@ bool dcn301_is_panel_backlight_on(struct panel_cntl *panel_cntl)
return value;
}
-bool dcn301_is_panel_powered_on(struct panel_cntl *panel_cntl)
+static bool dcn301_is_panel_powered_on(struct panel_cntl *panel_cntl)
{
struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(panel_cntl);
uint32_t pwr_seq_state, dig_on, dig_on_ovrd;
@@ -177,7 +177,7 @@ bool dcn301_is_panel_powered_on(struct panel_cntl *panel_cntl)
return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1);
}
-void dcn301_store_backlight_level(struct panel_cntl *panel_cntl)
+static void dcn301_store_backlight_level(struct panel_cntl *panel_cntl)
{
struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(panel_cntl);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index fbaa03f26d8b..c1c6e602b06c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -656,7 +656,7 @@ static const struct dc_plane_cap plane_cap = {
.argb8888 = true,
.nv12 = true,
.fp16 = true,
- .p010 = false,
+ .p010 = true,
.ayuv = false,
},
@@ -686,7 +686,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_clock_gate = true,
.disable_pplib_clock_request = true,
.disable_pplib_wm_range = true,
- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
@@ -717,15 +717,13 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = false,
};
-void dcn301_dpp_destroy(struct dpp **dpp)
+static void dcn301_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
*dpp = NULL;
}
-struct dpp *dcn301_dpp_create(
- struct dc_context *ctx,
- uint32_t inst)
+static struct dpp *dcn301_dpp_create(struct dc_context *ctx, uint32_t inst)
{
struct dcn3_dpp *dpp =
kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL);
@@ -741,8 +739,8 @@ struct dpp *dcn301_dpp_create(
kfree(dpp);
return NULL;
}
-struct output_pixel_processor *dcn301_opp_create(
- struct dc_context *ctx, uint32_t inst)
+static struct output_pixel_processor *dcn301_opp_create(struct dc_context *ctx,
+ uint32_t inst)
{
struct dcn20_opp *opp =
kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
@@ -757,9 +755,7 @@ struct output_pixel_processor *dcn301_opp_create(
return &opp->base;
}
-struct dce_aux *dcn301_aux_engine_create(
- struct dc_context *ctx,
- uint32_t inst)
+static struct dce_aux *dcn301_aux_engine_create(struct dc_context *ctx, uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
@@ -793,9 +789,7 @@ static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCN2(_MASK)
};
-struct dce_i2c_hw *dcn301_i2c_hw_create(
- struct dc_context *ctx,
- uint32_t inst)
+static struct dce_i2c_hw *dcn301_i2c_hw_create(struct dc_context *ctx, uint32_t inst)
{
struct dce_i2c_hw *dce_i2c_hw =
kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
@@ -829,7 +823,7 @@ static struct mpc *dcn301_mpc_create(
return &mpc30->base;
}
-struct hubbub *dcn301_hubbub_create(struct dc_context *ctx)
+static struct hubbub *dcn301_hubbub_create(struct dc_context *ctx)
{
int i;
@@ -860,9 +854,8 @@ struct hubbub *dcn301_hubbub_create(struct dc_context *ctx)
return &hubbub3->base;
}
-struct timing_generator *dcn301_timing_generator_create(
- struct dc_context *ctx,
- uint32_t instance)
+static struct timing_generator *dcn301_timing_generator_create(
+ struct dc_context *ctx, uint32_t instance)
{
struct optc *tgn10 =
kzalloc(sizeof(struct optc), GFP_KERNEL);
@@ -894,7 +887,7 @@ static const struct encoder_feature_support link_enc_feature = {
.flags.bits.IS_TPS4_CAPABLE = true
};
-struct link_encoder *dcn301_link_encoder_create(
+static struct link_encoder *dcn301_link_encoder_create(
const struct encoder_init_data *enc_init_data)
{
struct dcn20_link_encoder *enc20 =
@@ -915,7 +908,7 @@ struct link_encoder *dcn301_link_encoder_create(
return &enc20->enc10.base;
}
-struct panel_cntl *dcn301_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+static struct panel_cntl *dcn301_panel_cntl_create(const struct panel_cntl_init_data *init_data)
{
struct dcn301_panel_cntl *panel_cntl =
kzalloc(sizeof(struct dcn301_panel_cntl), GFP_KERNEL);
@@ -997,9 +990,8 @@ static struct afmt *dcn301_afmt_create(
return &afmt3->base;
}
-struct stream_encoder *dcn301_stream_encoder_create(
- enum engine_id eng_id,
- struct dc_context *ctx)
+static struct stream_encoder *dcn301_stream_encoder_create(enum engine_id eng_id,
+ struct dc_context *ctx)
{
struct dcn10_stream_encoder *enc1;
struct vpg *vpg;
@@ -1033,8 +1025,7 @@ struct stream_encoder *dcn301_stream_encoder_create(
return &enc1->base;
}
-struct dce_hwseq *dcn301_hwseq_create(
- struct dc_context *ctx)
+static struct dce_hwseq *dcn301_hwseq_create(struct dc_context *ctx)
{
struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
@@ -1182,9 +1173,7 @@ static void dcn301_destruct(struct dcn301_resource_pool *pool)
dcn_dccg_destroy(&pool->base.dccg);
}
-struct hubp *dcn301_hubp_create(
- struct dc_context *ctx,
- uint32_t inst)
+static struct hubp *dcn301_hubp_create(struct dc_context *ctx, uint32_t inst)
{
struct dcn20_hubp *hubp2 =
kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
@@ -1201,7 +1190,7 @@ struct hubp *dcn301_hubp_create(
return NULL;
}
-bool dcn301_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
+static bool dcn301_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
{
int i;
uint32_t pipe_count = pool->res_cap->num_dwb;
@@ -1226,7 +1215,7 @@ bool dcn301_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
return true;
}
-bool dcn301_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
+static bool dcn301_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
{
int i;
uint32_t pipe_count = pool->res_cap->num_dwb;
@@ -1449,9 +1438,7 @@ static bool dcn301_resource_construct(
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.extended_aux_timeout_support = true;
-#ifdef CONFIG_DRM_AMD_DC_DMUB
dc->caps.dmcub_support = true;
-#endif
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
@@ -1487,6 +1474,23 @@ static bool dcn301_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ /* read VBIOS LTTPR caps */
+ if (ctx->dc_bios->funcs->get_lttpr_caps) {
+ enum bp_result bp_query_result;
+ uint8_t is_vbios_lttpr_enable = 0;
+
+ bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
+ dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
+ }
+
+ if (ctx->dc_bios->funcs->get_lttpr_interop) {
+ enum bp_result bp_query_result;
+ uint8_t is_vbios_interop_enabled = 0;
+
+ bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled);
+ dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
+ }
+
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
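
DCN3.01 (and the DCN3.02/3.03 resource files below) now read the LTTPR capability and interop flags from VBIOS during resource construction. Both queries follow the same shape: call the optional dc_bios hook if it exists, and trust the returned byte only when the query reports BP_RESULT_OK. A condensed, hedged sketch of that pattern; the helper and its function-pointer signature are inferred from the call sites in the hunks, not copied from the headers:

    /* Illustrative helper condensing the two VBIOS LTTPR queries. */
    static bool example_query_vbios_flag(struct dc_bios *bios,
                    enum bp_result (*query)(struct dc_bios *bios, uint8_t *flag))
    {
            uint8_t flag = 0;

            if (!query)
                    return false;   /* hook not provided by this VBIOS */

            return (query(bios, &flag) == BP_RESULT_OK) && flag != 0;
    }

Used that way, dc->caps.vbios_lttpr_enable would be example_query_vbios_flag(ctx->dc_bios, ctx->dc_bios->funcs->get_lttpr_caps), matching the open-coded version above.
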
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c
index d88b9011c502..eb375f30f5bc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c
@@ -29,6 +29,8 @@
#include "dc.h"
+#include "dcn302_init.h"
+
void dcn302_hw_sequencer_construct(struct dc *dc)
{
dcn30_hw_sequencer_construct(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index fcf96cf08c76..2e9cbfa7663b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -211,7 +211,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
@@ -276,7 +276,7 @@ static const struct dc_plane_cap plane_cap = {
.argb8888 = true,
.nv12 = true,
.fp16 = true,
- .p010 = false,
+ .p010 = true,
.ayuv = false,
},
.max_upscale_factor = {
@@ -1557,6 +1557,24 @@ static bool dcn302_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ /* read VBIOS LTTPR caps */
+ if (ctx->dc_bios->funcs->get_lttpr_caps) {
+ enum bp_result bp_query_result;
+ uint8_t is_vbios_lttpr_enable = 0;
+
+ bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
+ dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
+ }
+
+ if (ctx->dc_bios->funcs->get_lttpr_interop) {
+ enum bp_result bp_query_result;
+ uint8_t is_vbios_interop_enabled = 0;
+
+ bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios,
+ &is_vbios_interop_enabled);
+ dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
+ }
+
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
else
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h
index a79c54bbc899..294bd757bcb5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h
@@ -15,7 +15,11 @@
SR(DPPCLK_DTO_CTRL),\
DCCG_SRII(DTO_PARAM, DPPCLK, 0),\
DCCG_SRII(DTO_PARAM, DPPCLK, 1),\
- SR(REFCLK_CNTL)
+ SR(REFCLK_CNTL),\
+ SR(DISPCLK_FREQ_CHANGE_CNTL),\
+ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0),\
+ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 1)
+
#define DCCG_MASK_SH_LIST_DCN3_03(mask_sh) \
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 0, mask_sh),\
@@ -25,6 +29,18 @@
DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_PHASE, mask_sh),\
DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_MODULO, mask_sh),\
DCCG_SF(REFCLK_CNTL, REFCLK_CLOCK_EN, mask_sh),\
- DCCG_SF(REFCLK_CNTL, REFCLK_SRC_SEL, mask_sh)
+ DCCG_SF(REFCLK_CNTL, REFCLK_SRC_SEL, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_DELAY, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_SIZE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_FREQ_RAMP_DONE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_MAX_ERRDET_CYCLES, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_RESET, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_STATE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_OVR_EN, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_CHG_FWD_CORR_DISABLE, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 0, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 1, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 0, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 1, mask_sh)
#endif //__DCN303_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c
index aa5dbbade2bd..f499f8ab5e47 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c
@@ -9,6 +9,8 @@
#include "dcn30/dcn30_init.h"
#include "dc.h"
+#include "dcn303_init.h"
+
void dcn303_hw_sequencer_construct(struct dc *dc)
{
dcn30_hw_sequencer_construct(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index 4a9b64023675..2de687f64cf6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -193,7 +193,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
@@ -254,7 +254,7 @@ static const struct dc_plane_cap plane_cap = {
.argb8888 = true,
.nv12 = true,
.fp16 = true,
- .p010 = false,
+ .p010 = true,
.ayuv = false,
},
.max_upscale_factor = {
@@ -1500,6 +1500,23 @@ static bool dcn303_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ /* read VBIOS LTTPR caps */
+ if (ctx->dc_bios->funcs->get_lttpr_caps) {
+ enum bp_result bp_query_result;
+ uint8_t is_vbios_lttpr_enable = 0;
+
+ bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
+ dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
+ }
+
+ if (ctx->dc_bios->funcs->get_lttpr_interop) {
+ enum bp_result bp_query_result;
+ uint8_t is_vbios_interop_enabled = 0;
+
+ bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled);
+ dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
+ }
+
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
else
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
index 815481a3ef54..ea4f8e06b07c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
@@ -462,7 +462,7 @@ void dccg31_set_physymclk(
}
/* Controls the generation of pixel valid for OTG in (OTG -> HPO case) */
-void dccg31_set_dtbclk_dto(
+static void dccg31_set_dtbclk_dto(
struct dccg *dccg,
int dtbclk_inst,
int req_dtbclk_khz,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
index ee6f13bef377..71c359f9cdd2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
@@ -67,6 +67,39 @@
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#endif
+static uint8_t phy_id_from_transmitter(enum transmitter t)
+{
+ uint8_t phy_id;
+
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ phy_id = 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ phy_id = 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ phy_id = 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ phy_id = 3;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ phy_id = 4;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ phy_id = 5;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ phy_id = 6;
+ break;
+ default:
+ phy_id = 0;
+ break;
+ }
+ return phy_id;
+}
+
void dcn31_link_encoder_set_dio_phy_mux(
struct link_encoder *enc,
enum encoder_type_select sel,
@@ -141,7 +174,7 @@ void dcn31_link_encoder_set_dio_phy_mux(
}
}
-void enc31_hw_init(struct link_encoder *enc)
+static void enc31_hw_init(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
@@ -536,57 +569,45 @@ void dcn31_link_encoder_disable_output(
bool dcn31_link_encoder_is_in_alt_mode(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- uint32_t dp_alt_mode_disable;
+ struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv;
+ union dmub_rb_cmd cmd;
bool is_usb_c_alt_mode = false;
- if (enc->features.flags.bits.DP_IS_USB_C) {
- if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) {
- // [Note] no need to check hw_internal_rev once phy mux selection is ready
- REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
- } else {
- /*
- * B0 phys use a new set of registers to check whether alt mode is disabled.
- * if value == 1 alt mode is disabled, otherwise it is enabled.
- */
- if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A)
- || (enc10->base.transmitter == TRANSMITTER_UNIPHY_B)
- || (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) {
- REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
- } else {
- // [Note] need to change TRANSMITTER_UNIPHY_C/D to F/G once phy mux selection is ready
- REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
- }
- }
+ if (enc->features.flags.bits.DP_IS_USB_C && dc_dmub_srv) {
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.query_dp_alt.header.type = DMUB_CMD__VBIOS;
+ cmd.query_dp_alt.header.sub_type = DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT;
+ cmd.query_dp_alt.header.payload_bytes = sizeof(cmd.panel_cntl.data);
+ cmd.query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter);
- is_usb_c_alt_mode = (dp_alt_mode_disable == 0);
+ if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, &cmd))
+ return false;
+
+ is_usb_c_alt_mode = (cmd.query_dp_alt.data.is_dp_alt_disable == 0);
}
return is_usb_c_alt_mode;
}
-void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc,
- struct dc_link_settings *link_settings)
+void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc, struct dc_link_settings *link_settings)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- uint32_t is_in_usb_c_dp4_mode = 0;
+ struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv;
+ union dmub_rb_cmd cmd;
dcn10_link_encoder_get_max_link_cap(enc, link_settings);
- /* in usb c dp2 mode, max lane count is 2 */
- if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) {
- if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) {
- // [Note] no need to check hw_internal_rev once phy mux selection is ready
- REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
- } else {
- if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A)
- || (enc10->base.transmitter == TRANSMITTER_UNIPHY_B)
- || (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) {
- REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
- } else {
- REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
- }
- }
- if (!is_in_usb_c_dp4_mode)
+ if (enc->features.flags.bits.DP_IS_USB_C && dc_dmub_srv) {
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.query_dp_alt.header.type = DMUB_CMD__VBIOS;
+ cmd.query_dp_alt.header.sub_type = DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT;
+ cmd.query_dp_alt.header.payload_bytes = sizeof(cmd.panel_cntl.data);
+ cmd.query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter);
+
+ if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, &cmd))
+ return;
+
+ if (cmd.query_dp_alt.data.is_usb && cmd.query_dp_alt.data.is_dp4 == 0)
link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
index 6c08e21bb708..80dfaa4d4d81 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
@@ -499,7 +499,8 @@ static enum bp_result link_transmitter_control(
void dcn31_hpo_dp_link_enc_enable_dp_output(
struct hpo_dp_link_encoder *enc,
const struct dc_link_settings *link_settings,
- enum transmitter transmitter)
+ enum transmitter transmitter,
+ enum hpd_source_id hpd_source)
{
struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
struct bp_transmitter_control cntl = { 0 };
@@ -508,6 +509,9 @@ void dcn31_hpo_dp_link_enc_enable_dp_output(
/* Set the transmitter */
enc3->base.transmitter = transmitter;
+ /* Set the hpd source */
+ enc3->base.hpd_source = hpd_source;
+
/* Enable the PHY */
cntl.action = TRANSMITTER_CONTROL_ENABLE;
cntl.engine_id = ENGINE_ID_UNKNOWN;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h
index 0706ccaf6fec..e324e9b83136 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h
@@ -184,7 +184,8 @@ void hpo_dp_link_encoder31_construct(struct dcn31_hpo_dp_link_encoder *enc31,
void dcn31_hpo_dp_link_enc_enable_dp_output(
struct hpo_dp_link_encoder *enc,
const struct dc_link_settings *link_settings,
- enum transmitter transmitter);
+ enum transmitter transmitter,
+ enum hpd_source_id hpd_source);
void dcn31_hpo_dp_link_enc_disable_output(
struct hpo_dp_link_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
index 565f12dd179a..5065904c7833 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -358,8 +358,8 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute(
h_width = hw_crtc_timing.h_border_left + hw_crtc_timing.h_addressable + hw_crtc_timing.h_border_right;
v_height = hw_crtc_timing.v_border_top + hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom;
- hsp = hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? 0x80 : 0;
- vsp = hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 0x80 : 0;
+ hsp = hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? 0 : 0x80;
+ vsp = hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 0 : 0x80;
v_freq = hw_crtc_timing.pix_clk_100hz * 100;
/* MSA Packet Mapping to 32-bit Link Symbols - DP2 spec, section 2.7.4.1
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index 5dd1ce9ddb53..4206ce5bf9a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -112,7 +112,7 @@ void dcn31_init_hw(struct dc *dc)
struct dc_bios *dcb = dc->ctx->dc_bios;
struct resource_pool *res_pool = dc->res_pool;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
- int i, j;
+ int i;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
@@ -192,50 +192,13 @@ void dcn31_init_hw(struct dc *dc)
link->link_status.link_active = true;
}
- /* Power gate DSCs */
- for (i = 0; i < res_pool->res_cap->num_dsc; i++)
- if (hws->funcs.dsc_pg_control != NULL)
- hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
-
/* Enables outbox notifications for usb4 dpia */
if (dc->res_pool->usb4_dpia_count)
dmub_enable_outbox_notification(dc);
/* we want to turn off all dp displays before doing detection */
- if (dc->config.power_down_display_on_boot) {
- uint8_t dpcd_power_state = '\0';
- enum dc_status status = DC_ERROR_UNEXPECTED;
-
- for (i = 0; i < dc->link_count; i++) {
- if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
- continue;
-
- /* if any of the displays are lit up turn them off */
- status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
- &dpcd_power_state, sizeof(dpcd_power_state));
- if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
- /* blank dp stream before power off receiver*/
- if (dc->links[i]->ep_type == DISPLAY_ENDPOINT_PHY &&
- dc->links[i]->link_enc->funcs->get_dig_frontend) {
- unsigned int fe;
-
- fe = dc->links[i]->link_enc->funcs->get_dig_frontend(
- dc->links[i]->link_enc);
- if (fe == ENGINE_ID_UNKNOWN)
- continue;
-
- for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
- if (fe == dc->res_pool->stream_enc[j]->id) {
- dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
- dc->res_pool->stream_enc[j]);
- break;
- }
- }
- }
- dp_receiver_power_ctrl(dc->links[i], false);
- }
- }
- }
+ if (dc->config.power_down_display_on_boot)
+ dc_link_blank_all_dp_displays(dc);
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
@@ -602,7 +565,7 @@ void dcn31_reset_hw_ctx_wrap(
dcn31_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
if (hws->funcs.enable_stream_gating)
- hws->funcs.enable_stream_gating(dc, pipe_ctx);
+ hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index 05335a8c3c2d..d7559e5a99ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -31,6 +31,8 @@
#include "dcn301/dcn301_hwseq.h"
#include "dcn31/dcn31_hwseq.h"
+#include "dcn31_init.h"
+
static const struct hw_sequencer_funcs dcn31_funcs = {
.program_gamut_remap = dcn10_program_gamut_remap,
.init_hw = dcn31_init_hw,
@@ -101,6 +103,8 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.z10_restore = dcn31_z10_restore,
.z10_save_init = dcn31_z10_save_init,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
+ .optimize_pwr_state = dcn21_optimize_pwr_state,
+ .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
};
@@ -149,4 +153,9 @@ void dcn31_hw_sequencer_construct(struct dc *dc)
dc->hwss.init_hw = dcn20_fpga_init_hw;
dc->hwseq->funcs.init_pipes = NULL;
}
+ if (dc->debug.disable_z10) {
+ /* HW does not support z10, or SW has disabled it */
+ dc->hwss.z10_restore = NULL;
+ dc->hwss.z10_save_init = NULL;
+ }
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
index a4b1d98f0007..e8562fa11366 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
@@ -256,6 +256,7 @@ static struct timing_generator_funcs dcn31_tg_funcs = {
.get_crc = optc1_get_crc,
.configure_crc = optc2_configure_crc,
.set_dsc_config = optc3_set_dsc_config,
+ .get_dsc_status = optc2_get_dsc_status,
.set_dwb_source = NULL,
.set_odm_bypass = optc3_set_odm_bypass,
.set_odm_combine = optc31_set_odm_combine,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
index 3b3721386571..83ece02380a8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
@@ -65,7 +65,7 @@ static uint32_t dcn31_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cnt
return cmd.panel_cntl.data.current_backlight;
}
-uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
+static uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
{
struct dcn31_panel_cntl *dcn31_panel_cntl = TO_DCN31_PANEL_CNTL(panel_cntl);
struct dc_dmub_srv *dc_dmub_srv = panel_cntl->ctx->dmub_srv;
@@ -96,7 +96,7 @@ uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
return cmd.panel_cntl.data.current_backlight;
}
-void dcn31_panel_cntl_destroy(struct panel_cntl **panel_cntl)
+static void dcn31_panel_cntl_destroy(struct panel_cntl **panel_cntl)
{
struct dcn31_panel_cntl *dcn31_panel_cntl = TO_DCN31_PANEL_CNTL(*panel_cntl);
@@ -104,7 +104,7 @@ void dcn31_panel_cntl_destroy(struct panel_cntl **panel_cntl)
*panel_cntl = NULL;
}
-bool dcn31_is_panel_backlight_on(struct panel_cntl *panel_cntl)
+static bool dcn31_is_panel_backlight_on(struct panel_cntl *panel_cntl)
{
union dmub_rb_cmd cmd;
@@ -114,7 +114,7 @@ bool dcn31_is_panel_backlight_on(struct panel_cntl *panel_cntl)
return cmd.panel_cntl.data.is_backlight_on;
}
-bool dcn31_is_panel_powered_on(struct panel_cntl *panel_cntl)
+static bool dcn31_is_panel_powered_on(struct panel_cntl *panel_cntl)
{
union dmub_rb_cmd cmd;
@@ -124,7 +124,7 @@ bool dcn31_is_panel_powered_on(struct panel_cntl *panel_cntl)
return cmd.panel_cntl.data.is_powered_on;
}
-void dcn31_store_backlight_level(struct panel_cntl *panel_cntl)
+static void dcn31_store_backlight_level(struct panel_cntl *panel_cntl)
{
union dmub_rb_cmd cmd;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 18896294ae12..ba4c33500a6d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -355,6 +355,14 @@ static const struct dce110_clk_src_regs clk_src_regs[] = {
clk_src_regs(3, D),
clk_src_regs(4, E)
};
+/* pll_id is remapped in dmub; in the driver it is a logical instance */
+static const struct dce110_clk_src_regs clk_src_regs_b0[] = {
+ clk_src_regs(0, A),
+ clk_src_regs(1, B),
+ clk_src_regs(2, F),
+ clk_src_regs(3, G),
+ clk_src_regs(4, E)
+};
static const struct dce110_clk_src_shift cs_shift = {
CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
@@ -485,7 +493,8 @@ static const struct dcn31_apg_mask apg_mask = {
SE_DCN3_REG_LIST(id)\
}
-static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
+/* Some encoders won't be initialized here - but they're logical, not physical. */
+static const struct dcn10_stream_enc_registers stream_enc_regs[ENGINE_ID_COUNT] = {
stream_enc_regs(0),
stream_enc_regs(1),
stream_enc_regs(2),
@@ -968,7 +977,7 @@ static const struct dc_plane_cap plane_cap = {
.argb8888 = true,
.nv12 = true,
.fp16 = true,
- .p010 = false,
+ .p010 = true,
.ayuv = false,
},
@@ -994,7 +1003,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = false,
- .pipe_split_policy = MPC_SPLIT_AVOID,
+ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
@@ -1023,6 +1032,7 @@ static const struct dc_debug_options debug_defaults_drv = {
},
.optimize_edp_link_rate = true,
.enable_sw_cntl_psr = true,
+ .apply_vendor_specific_lttpr_wa = true,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -1270,7 +1280,7 @@ static struct link_encoder *dcn31_link_enc_create_minimal(
return &enc20->enc10.base;
}
-struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+static struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data)
{
struct dcn31_panel_cntl *panel_cntl =
kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL);
@@ -1774,6 +1784,7 @@ static int dcn31_populate_dml_pipes_from_context(
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *pipe;
+ bool upscaled = false;
dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
@@ -1785,6 +1796,11 @@ static int dcn31_populate_dml_pipes_from_context(
pipe = &res_ctx->pipe_ctx[i];
timing = &pipe->stream->timing;
+ if (pipe->plane_state &&
+ (pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height ||
+ pipe->plane_state->src_rect.width < pipe->plane_state->dst_rect.width))
+ upscaled = true;
+
/*
* Immediate flip can be set dynamically after enabling the plane.
* We need to require support for immediate flip or underflow can be
@@ -1829,6 +1845,11 @@ static int dcn31_populate_dml_pipes_from_context(
context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
pipes[0].pipe.src.unbounded_req_mode = true;
}
+ } else if (context->stream_count >= dc->debug.crb_alloc_policy_min_disp_count
+ && dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = dc->debug.crb_alloc_policy * 64;
+ } else if (context->stream_count >= 3 && upscaled) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
}
return pipe_cnt;
@@ -2199,6 +2220,8 @@ static bool dcn31_resource_construct(
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.dp_hpo = true;
+ dc->caps.hdmi_frl_pcon_support = true;
+ dc->caps.edp_dsc_support = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.is_apu = true;
@@ -2237,6 +2260,9 @@ static bool dcn31_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ /* Use pipe context based otg sync logic */
+ dc->config.use_pipe_ctx_sync_logic = true;
+
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
@@ -2276,14 +2302,27 @@ static bool dcn31_resource_construct(
dcn30_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL1,
&clk_src_regs[1], false);
- pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
+ /* move phypllx_pixclk_resync to dmub next */
+ if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+ pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
+ dcn30_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL2,
+ &clk_src_regs_b0[2], false);
+ pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
+ dcn30_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ &clk_src_regs_b0[3], false);
+ } else {
+ pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL2,
&clk_src_regs[2], false);
- pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
+ pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL3,
&clk_src_regs[3], false);
+ }
+
pool->base.clock_sources[DCN31_CLK_SRC_PLL4] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL4,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
index 416fe7a721d8..a513363b3326 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
@@ -49,4 +49,35 @@ struct resource_pool *dcn31_create_resource_pool(
const struct dc_init_data *init_data,
struct dc *dc);
+/* temp: B0-specific until the switch to dcn313 headers */
+#ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL
+#define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e
+#define regPHYPLLF_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define regPHYPLLG_PIXCLK_RESYNC_CNTL 0x005f
+#define regPHYPLLG_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+
+//PHYPLLF_PIXCLK_RESYNC_CNTL
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE_MASK 0x00000100L
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L
+
+//PHYPLLG_PIXCLK_RESYNC_CNTL
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE_MASK 0x00000100L
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L
+#endif
#endif /* _DCN31_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index 0fe66b080a03..7f94e3f70d7f 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -59,7 +59,7 @@ void dm_helpers_free_gpu_mem(
void *pvMem);
enum dc_edid_status dm_helpers_parse_edid_caps(
- struct dc_context *ctx,
+ struct dc_link *link,
const struct dc_edid *edid,
struct dc_edid_caps *edid_caps);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index 46c433c0bcb0..8bc27de4c104 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -1711,14 +1711,6 @@ void dml21_rq_dlg_get_dlg_reg(
dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx);
}
-void dml_rq_dlg_get_arb_params(struct display_mode_lib *mode_lib, display_arb_params_st *arb_param)
-{
- memset(arb_param, 0, sizeof(*arb_param));
- arb_param->max_req_outstanding = 256;
- arb_param->min_req_outstanding = 68;
- arb_param->sat_level_us = 60;
-}
-
static void calculate_ttu_cursor(
struct display_mode_lib *mode_lib,
double *refcyc_per_req_delivery_pre_cur,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index 7e937bdcea00..6feb23432f8d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -422,62 +422,8 @@ static void CalculateUrgentBurstFactor(
static void UseMinimumDCFCLK(
struct display_mode_lib *mode_lib,
- int MaxInterDCNTileRepeaters,
int MaxPrefetchMode,
- double FinalDRAMClockChangeLatency,
- double SREnterPlusExitTime,
- int ReturnBusWidth,
- int RoundTripPingLatencyCycles,
- int ReorderingBytes,
- int PixelChunkSizeInKByte,
- int MetaChunkSize,
- bool GPUVMEnable,
- int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- int NumberOfActivePlanes,
- double HostVMMinPageSize,
- int HostVMMaxNonCachedPageTableLevels,
- bool DynamicMetadataVMEnabled,
- enum immediate_flip_requirement ImmediateFlipRequirement,
- bool ProgressiveToInterlaceUnitInOPP,
- double MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation,
- double PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency,
- int VTotal[],
- int VActive[],
- int DynamicMetadataTransmittedBytes[],
- int DynamicMetadataLinesBeforeActiveRequired[],
- bool Interlace[],
- double RequiredDPPCLK[][2][DC__NUM_DPP__MAX],
- double RequiredDISPCLK[][2],
- double UrgLatency[],
- unsigned int NoOfDPP[][2][DC__NUM_DPP__MAX],
- double ProjectedDCFCLKDeepSleep[][2],
- double MaximumVStartup[][2][DC__NUM_DPP__MAX],
- double TotalVActivePixelBandwidth[][2],
- double TotalVActiveCursorBandwidth[][2],
- double TotalMetaRowBandwidth[][2],
- double TotalDPTERowBandwidth[][2],
- unsigned int TotalNumberOfActiveDPP[][2],
- unsigned int TotalNumberOfDCCActiveDPP[][2],
- int dpte_group_bytes[],
- double PrefetchLinesY[][2][DC__NUM_DPP__MAX],
- double PrefetchLinesC[][2][DC__NUM_DPP__MAX],
- int swath_width_luma_ub_all_states[][2][DC__NUM_DPP__MAX],
- int swath_width_chroma_ub_all_states[][2][DC__NUM_DPP__MAX],
- int BytePerPixelY[],
- int BytePerPixelC[],
- int HTotal[],
- double PixelClock[],
- double PDEAndMetaPTEBytesPerFrame[][2][DC__NUM_DPP__MAX],
- double DPTEBytesPerRow[][2][DC__NUM_DPP__MAX],
- double MetaRowBytes[][2][DC__NUM_DPP__MAX],
- bool DynamicMetadataEnable[],
- double VActivePixelBandwidth[][2][DC__NUM_DPP__MAX],
- double VActiveCursorBandwidth[][2][DC__NUM_DPP__MAX],
- double ReadBandwidthLuma[],
- double ReadBandwidthChroma[],
- double DCFCLKPerState[],
- double DCFCLKState[][2]);
+ int ReorderingBytes);
static void CalculatePixelDeliveryTimes(
unsigned int NumberOfActivePlanes,
@@ -3949,6 +3895,102 @@ static double TruncToValidBPP(
return BPP_INVALID;
}
+static noinline void CalculatePrefetchSchedulePerPlane(
+ struct display_mode_lib *mode_lib,
+ double HostVMInefficiencyFactor,
+ int i,
+ unsigned j,
+ unsigned k)
+{
+ struct vba_vars_st *v = &mode_lib->vba;
+ Pipe myPipe;
+
+ myPipe.DPPCLK = v->RequiredDPPCLK[i][j][k];
+ myPipe.DISPCLK = v->RequiredDISPCLK[i][j];
+ myPipe.PixelClock = v->PixelClock[k];
+ myPipe.DCFCLKDeepSleep = v->ProjectedDCFCLKDeepSleep[i][j];
+ myPipe.DPPPerPlane = v->NoOfDPP[i][j][k];
+ myPipe.ScalerEnabled = v->ScalerEnabled[k];
+ myPipe.SourceScan = v->SourceScan[k];
+ myPipe.BlockWidth256BytesY = v->Read256BlockWidthY[k];
+ myPipe.BlockHeight256BytesY = v->Read256BlockHeightY[k];
+ myPipe.BlockWidth256BytesC = v->Read256BlockWidthC[k];
+ myPipe.BlockHeight256BytesC = v->Read256BlockHeightC[k];
+ myPipe.InterlaceEnable = v->Interlace[k];
+ myPipe.NumberOfCursors = v->NumberOfCursors[k];
+ myPipe.VBlank = v->VTotal[k] - v->VActive[k];
+ myPipe.HTotal = v->HTotal[k];
+ myPipe.DCCEnable = v->DCCEnable[k];
+ myPipe.ODMCombineIsEnabled = v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1
+ || v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1;
+ myPipe.SourcePixelFormat = v->SourcePixelFormat[k];
+ myPipe.BytePerPixelY = v->BytePerPixelY[k];
+ myPipe.BytePerPixelC = v->BytePerPixelC[k];
+ myPipe.ProgressiveToInterlaceUnitInOPP = v->ProgressiveToInterlaceUnitInOPP;
+ v->NoTimeForPrefetch[i][j][k] = CalculatePrefetchSchedule(
+ mode_lib,
+ HostVMInefficiencyFactor,
+ &myPipe,
+ v->DSCDelayPerState[i][k],
+ v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater,
+ v->DPPCLKDelaySCL,
+ v->DPPCLKDelaySCLLBOnly,
+ v->DPPCLKDelayCNVCCursor,
+ v->DISPCLKDelaySubtotal,
+ v->SwathWidthYThisState[k] / v->HRatio[k],
+ v->OutputFormat[k],
+ v->MaxInterDCNTileRepeaters,
+ dml_min(v->MaxVStartup, v->MaximumVStartup[i][j][k]),
+ v->MaximumVStartup[i][j][k],
+ v->GPUVMMaxPageTableLevels,
+ v->GPUVMEnable,
+ v->HostVMEnable,
+ v->HostVMMaxNonCachedPageTableLevels,
+ v->HostVMMinPageSize,
+ v->DynamicMetadataEnable[k],
+ v->DynamicMetadataVMEnabled,
+ v->DynamicMetadataLinesBeforeActiveRequired[k],
+ v->DynamicMetadataTransmittedBytes[k],
+ v->UrgLatency[i],
+ v->ExtraLatency,
+ v->TimeCalc,
+ v->PDEAndMetaPTEBytesPerFrame[i][j][k],
+ v->MetaRowBytes[i][j][k],
+ v->DPTEBytesPerRow[i][j][k],
+ v->PrefetchLinesY[i][j][k],
+ v->SwathWidthYThisState[k],
+ v->PrefillY[k],
+ v->MaxNumSwY[k],
+ v->PrefetchLinesC[i][j][k],
+ v->SwathWidthCThisState[k],
+ v->PrefillC[k],
+ v->MaxNumSwC[k],
+ v->swath_width_luma_ub_this_state[k],
+ v->swath_width_chroma_ub_this_state[k],
+ v->SwathHeightYThisState[k],
+ v->SwathHeightCThisState[k],
+ v->TWait,
+ &v->DSTXAfterScaler[k],
+ &v->DSTYAfterScaler[k],
+ &v->LineTimesForPrefetch[k],
+ &v->PrefetchBW[k],
+ &v->LinesForMetaPTE[k],
+ &v->LinesForMetaAndDPTERow[k],
+ &v->VRatioPreY[i][j][k],
+ &v->VRatioPreC[i][j][k],
+ &v->RequiredPrefetchPixelDataBWLuma[i][j][k],
+ &v->RequiredPrefetchPixelDataBWChroma[i][j][k],
+ &v->NoTimeForDynamicMetadata[i][j][k],
+ &v->Tno_bw[k],
+ &v->prefetch_vmrow_bw[k],
+ &v->dummy7[k],
+ &v->dummy8[k],
+ &v->dummy13[k],
+ &v->VUpdateOffsetPix[k],
+ &v->VUpdateWidthPix[k],
+ &v->VReadyOffsetPix[k]);
+}
+
void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
{
struct vba_vars_st *v = &mode_lib->vba;
@@ -5079,66 +5121,8 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
- if (v->UseMinimumRequiredDCFCLK == true) {
- UseMinimumDCFCLK(
- mode_lib,
- v->MaxInterDCNTileRepeaters,
- MaxPrefetchMode,
- v->DRAMClockChangeLatency,
- v->SREnterPlusExitTime,
- v->ReturnBusWidth,
- v->RoundTripPingLatencyCycles,
- ReorderingBytes,
- v->PixelChunkSizeInKByte,
- v->MetaChunkSize,
- v->GPUVMEnable,
- v->GPUVMMaxPageTableLevels,
- v->HostVMEnable,
- v->NumberOfActivePlanes,
- v->HostVMMinPageSize,
- v->HostVMMaxNonCachedPageTableLevels,
- v->DynamicMetadataVMEnabled,
- v->ImmediateFlipRequirement[0],
- v->ProgressiveToInterlaceUnitInOPP,
- v->MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation,
- v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency,
- v->VTotal,
- v->VActive,
- v->DynamicMetadataTransmittedBytes,
- v->DynamicMetadataLinesBeforeActiveRequired,
- v->Interlace,
- v->RequiredDPPCLK,
- v->RequiredDISPCLK,
- v->UrgLatency,
- v->NoOfDPP,
- v->ProjectedDCFCLKDeepSleep,
- v->MaximumVStartup,
- v->TotalVActivePixelBandwidth,
- v->TotalVActiveCursorBandwidth,
- v->TotalMetaRowBandwidth,
- v->TotalDPTERowBandwidth,
- v->TotalNumberOfActiveDPP,
- v->TotalNumberOfDCCActiveDPP,
- v->dpte_group_bytes,
- v->PrefetchLinesY,
- v->PrefetchLinesC,
- v->swath_width_luma_ub_all_states,
- v->swath_width_chroma_ub_all_states,
- v->BytePerPixelY,
- v->BytePerPixelC,
- v->HTotal,
- v->PixelClock,
- v->PDEAndMetaPTEBytesPerFrame,
- v->DPTEBytesPerRow,
- v->MetaRowBytes,
- v->DynamicMetadataEnable,
- v->VActivePixelBandwidth,
- v->VActiveCursorBandwidth,
- v->ReadBandwidthLuma,
- v->ReadBandwidthChroma,
- v->DCFCLKPerState,
- v->DCFCLKState);
- }
+ if (v->UseMinimumRequiredDCFCLK == true)
+ UseMinimumDCFCLK(mode_lib, MaxPrefetchMode, ReorderingBytes);
for (i = 0; i < v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
@@ -5276,92 +5260,9 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->SREnterPlusExitTime);
for (k = 0; k < v->NumberOfActivePlanes; k++) {
- Pipe myPipe;
-
- myPipe.DPPCLK = v->RequiredDPPCLK[i][j][k];
- myPipe.DISPCLK = v->RequiredDISPCLK[i][j];
- myPipe.PixelClock = v->PixelClock[k];
- myPipe.DCFCLKDeepSleep = v->ProjectedDCFCLKDeepSleep[i][j];
- myPipe.DPPPerPlane = v->NoOfDPP[i][j][k];
- myPipe.ScalerEnabled = v->ScalerEnabled[k];
- myPipe.SourceScan = v->SourceScan[k];
- myPipe.BlockWidth256BytesY = v->Read256BlockWidthY[k];
- myPipe.BlockHeight256BytesY = v->Read256BlockHeightY[k];
- myPipe.BlockWidth256BytesC = v->Read256BlockWidthC[k];
- myPipe.BlockHeight256BytesC = v->Read256BlockHeightC[k];
- myPipe.InterlaceEnable = v->Interlace[k];
- myPipe.NumberOfCursors = v->NumberOfCursors[k];
- myPipe.VBlank = v->VTotal[k] - v->VActive[k];
- myPipe.HTotal = v->HTotal[k];
- myPipe.DCCEnable = v->DCCEnable[k];
- myPipe.ODMCombineIsEnabled = v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1
- || v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1;
- myPipe.SourcePixelFormat = v->SourcePixelFormat[k];
- myPipe.BytePerPixelY = v->BytePerPixelY[k];
- myPipe.BytePerPixelC = v->BytePerPixelC[k];
- myPipe.ProgressiveToInterlaceUnitInOPP = v->ProgressiveToInterlaceUnitInOPP;
- v->NoTimeForPrefetch[i][j][k] = CalculatePrefetchSchedule(
- mode_lib,
- HostVMInefficiencyFactor,
- &myPipe,
- v->DSCDelayPerState[i][k],
- v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater,
- v->DPPCLKDelaySCL,
- v->DPPCLKDelaySCLLBOnly,
- v->DPPCLKDelayCNVCCursor,
- v->DISPCLKDelaySubtotal,
- v->SwathWidthYThisState[k] / v->HRatio[k],
- v->OutputFormat[k],
- v->MaxInterDCNTileRepeaters,
- dml_min(v->MaxVStartup, v->MaximumVStartup[i][j][k]),
- v->MaximumVStartup[i][j][k],
- v->GPUVMMaxPageTableLevels,
- v->GPUVMEnable,
- v->HostVMEnable,
- v->HostVMMaxNonCachedPageTableLevels,
- v->HostVMMinPageSize,
- v->DynamicMetadataEnable[k],
- v->DynamicMetadataVMEnabled,
- v->DynamicMetadataLinesBeforeActiveRequired[k],
- v->DynamicMetadataTransmittedBytes[k],
- v->UrgLatency[i],
- v->ExtraLatency,
- v->TimeCalc,
- v->PDEAndMetaPTEBytesPerFrame[i][j][k],
- v->MetaRowBytes[i][j][k],
- v->DPTEBytesPerRow[i][j][k],
- v->PrefetchLinesY[i][j][k],
- v->SwathWidthYThisState[k],
- v->PrefillY[k],
- v->MaxNumSwY[k],
- v->PrefetchLinesC[i][j][k],
- v->SwathWidthCThisState[k],
- v->PrefillC[k],
- v->MaxNumSwC[k],
- v->swath_width_luma_ub_this_state[k],
- v->swath_width_chroma_ub_this_state[k],
- v->SwathHeightYThisState[k],
- v->SwathHeightCThisState[k],
- v->TWait,
- &v->DSTXAfterScaler[k],
- &v->DSTYAfterScaler[k],
- &v->LineTimesForPrefetch[k],
- &v->PrefetchBW[k],
- &v->LinesForMetaPTE[k],
- &v->LinesForMetaAndDPTERow[k],
- &v->VRatioPreY[i][j][k],
- &v->VRatioPreC[i][j][k],
- &v->RequiredPrefetchPixelDataBWLuma[i][j][k],
- &v->RequiredPrefetchPixelDataBWChroma[i][j][k],
- &v->NoTimeForDynamicMetadata[i][j][k],
- &v->Tno_bw[k],
- &v->prefetch_vmrow_bw[k],
- &v->dummy7[k],
- &v->dummy8[k],
- &v->dummy13[k],
- &v->VUpdateOffsetPix[k],
- &v->VUpdateWidthPix[k],
- &v->VReadyOffsetPix[k]);
+ CalculatePrefetchSchedulePerPlane(mode_lib,
+ HostVMInefficiencyFactor,
+ i, j, k);
}
for (k = 0; k < v->NumberOfActivePlanes; k++) {
@@ -7249,69 +7150,15 @@ static double CalculateUrgentLatency(
static void UseMinimumDCFCLK(
struct display_mode_lib *mode_lib,
- int MaxInterDCNTileRepeaters,
int MaxPrefetchMode,
- double FinalDRAMClockChangeLatency,
- double SREnterPlusExitTime,
- int ReturnBusWidth,
- int RoundTripPingLatencyCycles,
- int ReorderingBytes,
- int PixelChunkSizeInKByte,
- int MetaChunkSize,
- bool GPUVMEnable,
- int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- int NumberOfActivePlanes,
- double HostVMMinPageSize,
- int HostVMMaxNonCachedPageTableLevels,
- bool DynamicMetadataVMEnabled,
- enum immediate_flip_requirement ImmediateFlipRequirement,
- bool ProgressiveToInterlaceUnitInOPP,
- double MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation,
- double PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency,
- int VTotal[],
- int VActive[],
- int DynamicMetadataTransmittedBytes[],
- int DynamicMetadataLinesBeforeActiveRequired[],
- bool Interlace[],
- double RequiredDPPCLK[][2][DC__NUM_DPP__MAX],
- double RequiredDISPCLK[][2],
- double UrgLatency[],
- unsigned int NoOfDPP[][2][DC__NUM_DPP__MAX],
- double ProjectedDCFCLKDeepSleep[][2],
- double MaximumVStartup[][2][DC__NUM_DPP__MAX],
- double TotalVActivePixelBandwidth[][2],
- double TotalVActiveCursorBandwidth[][2],
- double TotalMetaRowBandwidth[][2],
- double TotalDPTERowBandwidth[][2],
- unsigned int TotalNumberOfActiveDPP[][2],
- unsigned int TotalNumberOfDCCActiveDPP[][2],
- int dpte_group_bytes[],
- double PrefetchLinesY[][2][DC__NUM_DPP__MAX],
- double PrefetchLinesC[][2][DC__NUM_DPP__MAX],
- int swath_width_luma_ub_all_states[][2][DC__NUM_DPP__MAX],
- int swath_width_chroma_ub_all_states[][2][DC__NUM_DPP__MAX],
- int BytePerPixelY[],
- int BytePerPixelC[],
- int HTotal[],
- double PixelClock[],
- double PDEAndMetaPTEBytesPerFrame[][2][DC__NUM_DPP__MAX],
- double DPTEBytesPerRow[][2][DC__NUM_DPP__MAX],
- double MetaRowBytes[][2][DC__NUM_DPP__MAX],
- bool DynamicMetadataEnable[],
- double VActivePixelBandwidth[][2][DC__NUM_DPP__MAX],
- double VActiveCursorBandwidth[][2][DC__NUM_DPP__MAX],
- double ReadBandwidthLuma[],
- double ReadBandwidthChroma[],
- double DCFCLKPerState[],
- double DCFCLKState[][2])
+ int ReorderingBytes)
{
struct vba_vars_st *v = &mode_lib->vba;
int dummy1, i, j, k;
double NormalEfficiency, dummy2, dummy3;
double TotalMaxPrefetchFlipDPTERowBandwidth[DC__VOLTAGE_STATES][2];
- NormalEfficiency = PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency / 100.0;
+ NormalEfficiency = v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency / 100.0;
for (i = 0; i < v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
double PixelDCFCLKCyclesRequiredInPrefetch[DC__NUM_DPP__MAX];
@@ -7329,61 +7176,61 @@ static void UseMinimumDCFCLK(
double MinimumTvmPlus2Tr0;
TotalMaxPrefetchFlipDPTERowBandwidth[i][j] = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
TotalMaxPrefetchFlipDPTERowBandwidth[i][j] = TotalMaxPrefetchFlipDPTERowBandwidth[i][j]
- + NoOfDPP[i][j][k] * DPTEBytesPerRow[i][j][k] / (15.75 * HTotal[k] / PixelClock[k]);
+ + v->NoOfDPP[i][j][k] * v->DPTEBytesPerRow[i][j][k] / (15.75 * v->HTotal[k] / v->PixelClock[k]);
}
- for (k = 0; k <= NumberOfActivePlanes - 1; ++k) {
- NoOfDPPState[k] = NoOfDPP[i][j][k];
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; ++k) {
+ NoOfDPPState[k] = v->NoOfDPP[i][j][k];
}
- MinimumTWait = CalculateTWait(MaxPrefetchMode, FinalDRAMClockChangeLatency, UrgLatency[i], SREnterPlusExitTime);
- NonDPTEBandwidth = TotalVActivePixelBandwidth[i][j] + TotalVActiveCursorBandwidth[i][j] + TotalMetaRowBandwidth[i][j];
- DPTEBandwidth = (HostVMEnable == true || ImmediateFlipRequirement == dm_immediate_flip_required) ?
- TotalMaxPrefetchFlipDPTERowBandwidth[i][j] : TotalDPTERowBandwidth[i][j];
+ MinimumTWait = CalculateTWait(MaxPrefetchMode, v->FinalDRAMClockChangeLatency, v->UrgLatency[i], v->SREnterPlusExitTime);
+ NonDPTEBandwidth = v->TotalVActivePixelBandwidth[i][j] + v->TotalVActiveCursorBandwidth[i][j] + v->TotalMetaRowBandwidth[i][j];
+ DPTEBandwidth = (v->HostVMEnable == true || v->ImmediateFlipRequirement[0] == dm_immediate_flip_required) ?
+ TotalMaxPrefetchFlipDPTERowBandwidth[i][j] : v->TotalDPTERowBandwidth[i][j];
DCFCLKRequiredForAverageBandwidth = dml_max3(
- ProjectedDCFCLKDeepSleep[i][j],
- (NonDPTEBandwidth + TotalDPTERowBandwidth[i][j]) / ReturnBusWidth
- / (MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation / 100),
- (NonDPTEBandwidth + DPTEBandwidth / NormalEfficiency) / NormalEfficiency / ReturnBusWidth);
+ v->ProjectedDCFCLKDeepSleep[i][j],
+ (NonDPTEBandwidth + v->TotalDPTERowBandwidth[i][j]) / v->ReturnBusWidth
+ / (v->MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation / 100),
+ (NonDPTEBandwidth + DPTEBandwidth / NormalEfficiency) / NormalEfficiency / v->ReturnBusWidth);
ExtraLatencyBytes = CalculateExtraLatencyBytes(
ReorderingBytes,
- TotalNumberOfActiveDPP[i][j],
- PixelChunkSizeInKByte,
- TotalNumberOfDCCActiveDPP[i][j],
- MetaChunkSize,
- GPUVMEnable,
- HostVMEnable,
- NumberOfActivePlanes,
+ v->TotalNumberOfActiveDPP[i][j],
+ v->PixelChunkSizeInKByte,
+ v->TotalNumberOfDCCActiveDPP[i][j],
+ v->MetaChunkSize,
+ v->GPUVMEnable,
+ v->HostVMEnable,
+ v->NumberOfActivePlanes,
NoOfDPPState,
- dpte_group_bytes,
+ v->dpte_group_bytes,
1,
- HostVMMinPageSize,
- HostVMMaxNonCachedPageTableLevels);
- ExtraLatencyCycles = RoundTripPingLatencyCycles + __DML_ARB_TO_RET_DELAY__ + ExtraLatencyBytes / NormalEfficiency / ReturnBusWidth;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ v->HostVMMinPageSize,
+ v->HostVMMaxNonCachedPageTableLevels);
+ ExtraLatencyCycles = v->RoundTripPingLatencyCycles + __DML_ARB_TO_RET_DELAY__ + ExtraLatencyBytes / NormalEfficiency / v->ReturnBusWidth;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double DCFCLKCyclesRequiredInPrefetch;
double ExpectedPrefetchBWAcceleration;
double PrefetchTime;
- PixelDCFCLKCyclesRequiredInPrefetch[k] = (PrefetchLinesY[i][j][k] * swath_width_luma_ub_all_states[i][j][k] * BytePerPixelY[k]
- + PrefetchLinesC[i][j][k] * swath_width_chroma_ub_all_states[i][j][k] * BytePerPixelC[k]) / NormalEfficiency / ReturnBusWidth;
+ PixelDCFCLKCyclesRequiredInPrefetch[k] = (v->PrefetchLinesY[i][j][k] * v->swath_width_luma_ub_all_states[i][j][k] * v->BytePerPixelY[k]
+ + v->PrefetchLinesC[i][j][k] * v->swath_width_chroma_ub_all_states[i][j][k] * v->BytePerPixelC[k]) / NormalEfficiency / v->ReturnBusWidth;
DCFCLKCyclesRequiredInPrefetch = 2 * ExtraLatencyCycles / NoOfDPPState[k]
- + PDEAndMetaPTEBytesPerFrame[i][j][k] / NormalEfficiency / NormalEfficiency / ReturnBusWidth * (GPUVMMaxPageTableLevels > 2 ? 1 : 0)
- + 2 * DPTEBytesPerRow[i][j][k] / NormalEfficiency / NormalEfficiency / ReturnBusWidth
- + 2 * MetaRowBytes[i][j][k] / NormalEfficiency / ReturnBusWidth + PixelDCFCLKCyclesRequiredInPrefetch[k];
- PrefetchPixelLinesTime[k] = dml_max(PrefetchLinesY[i][j][k], PrefetchLinesC[i][j][k]) * HTotal[k] / PixelClock[k];
- ExpectedPrefetchBWAcceleration = (VActivePixelBandwidth[i][j][k] + VActiveCursorBandwidth[i][j][k])
- / (ReadBandwidthLuma[k] + ReadBandwidthChroma[k]);
+ + v->PDEAndMetaPTEBytesPerFrame[i][j][k] / NormalEfficiency / NormalEfficiency / v->ReturnBusWidth * (v->GPUVMMaxPageTableLevels > 2 ? 1 : 0)
+ + 2 * v->DPTEBytesPerRow[i][j][k] / NormalEfficiency / NormalEfficiency / v->ReturnBusWidth
+ + 2 * v->MetaRowBytes[i][j][k] / NormalEfficiency / v->ReturnBusWidth + PixelDCFCLKCyclesRequiredInPrefetch[k];
+ PrefetchPixelLinesTime[k] = dml_max(v->PrefetchLinesY[i][j][k], v->PrefetchLinesC[i][j][k]) * v->HTotal[k] / v->PixelClock[k];
+ ExpectedPrefetchBWAcceleration = (v->VActivePixelBandwidth[i][j][k] + v->VActiveCursorBandwidth[i][j][k])
+ / (v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k]);
DynamicMetadataVMExtraLatency[k] =
- (GPUVMEnable == true && DynamicMetadataEnable[k] == true && DynamicMetadataVMEnabled == true) ?
- UrgLatency[i] * GPUVMMaxPageTableLevels * (HostVMEnable == true ? HostVMMaxNonCachedPageTableLevels + 1 : 1) : 0;
- PrefetchTime = (MaximumVStartup[i][j][k] - 1) * HTotal[k] / PixelClock[k] - MinimumTWait
- - UrgLatency[i]
- * ((GPUVMMaxPageTableLevels <= 2 ? GPUVMMaxPageTableLevels : GPUVMMaxPageTableLevels - 2)
- * (HostVMEnable == true ? HostVMMaxNonCachedPageTableLevels + 1 : 1) - 1)
+ (v->GPUVMEnable == true && v->DynamicMetadataEnable[k] == true && v->DynamicMetadataVMEnabled == true) ?
+ v->UrgLatency[i] * v->GPUVMMaxPageTableLevels * (v->HostVMEnable == true ? v->HostVMMaxNonCachedPageTableLevels + 1 : 1) : 0;
+ PrefetchTime = (v->MaximumVStartup[i][j][k] - 1) * v->HTotal[k] / v->PixelClock[k] - MinimumTWait
+ - v->UrgLatency[i]
+ * ((v->GPUVMMaxPageTableLevels <= 2 ? v->GPUVMMaxPageTableLevels : v->GPUVMMaxPageTableLevels - 2)
+ * (v->HostVMEnable == true ? v->HostVMMaxNonCachedPageTableLevels + 1 : 1) - 1)
- DynamicMetadataVMExtraLatency[k];
if (PrefetchTime > 0) {
@@ -7392,14 +7239,14 @@ static void UseMinimumDCFCLK(
/ (PrefetchTime * PixelDCFCLKCyclesRequiredInPrefetch[k] / DCFCLKCyclesRequiredInPrefetch);
DCFCLKRequiredForPeakBandwidthPerPlane[k] = NoOfDPPState[k] * PixelDCFCLKCyclesRequiredInPrefetch[k] / PrefetchPixelLinesTime[k]
* dml_max(1.0, ExpectedVRatioPrefetch) * dml_max(1.0, ExpectedVRatioPrefetch / 4) * ExpectedPrefetchBWAcceleration;
- if (HostVMEnable == true || ImmediateFlipRequirement == dm_immediate_flip_required) {
+ if (v->HostVMEnable == true || v->ImmediateFlipRequirement[0] == dm_immediate_flip_required) {
DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKRequiredForPeakBandwidthPerPlane[k]
- + NoOfDPPState[k] * DPTEBandwidth / NormalEfficiency / NormalEfficiency / ReturnBusWidth;
+ + NoOfDPPState[k] * DPTEBandwidth / NormalEfficiency / NormalEfficiency / v->ReturnBusWidth;
}
} else {
- DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKPerState[i];
+ DCFCLKRequiredForPeakBandwidthPerPlane[k] = v->DCFCLKPerState[i];
}
- if (DynamicMetadataEnable[k] == true) {
+ if (v->DynamicMetadataEnable[k] == true) {
double TSetupPipe;
double TdmbfPipe;
double TdmsksPipe;
@@ -7407,17 +7254,17 @@ static void UseMinimumDCFCLK(
double AllowedTimeForUrgentExtraLatency;
CalculateVupdateAndDynamicMetadataParameters(
- MaxInterDCNTileRepeaters,
- RequiredDPPCLK[i][j][k],
- RequiredDISPCLK[i][j],
- ProjectedDCFCLKDeepSleep[i][j],
- PixelClock[k],
- HTotal[k],
- VTotal[k] - VActive[k],
- DynamicMetadataTransmittedBytes[k],
- DynamicMetadataLinesBeforeActiveRequired[k],
- Interlace[k],
- ProgressiveToInterlaceUnitInOPP,
+ v->MaxInterDCNTileRepeaters,
+ v->RequiredDPPCLK[i][j][k],
+ v->RequiredDISPCLK[i][j],
+ v->ProjectedDCFCLKDeepSleep[i][j],
+ v->PixelClock[k],
+ v->HTotal[k],
+ v->VTotal[k] - v->VActive[k],
+ v->DynamicMetadataTransmittedBytes[k],
+ v->DynamicMetadataLinesBeforeActiveRequired[k],
+ v->Interlace[k],
+ v->ProgressiveToInterlaceUnitInOPP,
&TSetupPipe,
&TdmbfPipe,
&TdmecPipe,
@@ -7425,31 +7272,31 @@ static void UseMinimumDCFCLK(
&dummy1,
&dummy2,
&dummy3);
- AllowedTimeForUrgentExtraLatency = MaximumVStartup[i][j][k] * HTotal[k] / PixelClock[k] - MinimumTWait - TSetupPipe - TdmbfPipe - TdmecPipe
+ AllowedTimeForUrgentExtraLatency = v->MaximumVStartup[i][j][k] * v->HTotal[k] / v->PixelClock[k] - MinimumTWait - TSetupPipe - TdmbfPipe - TdmecPipe
- TdmsksPipe - DynamicMetadataVMExtraLatency[k];
if (AllowedTimeForUrgentExtraLatency > 0) {
DCFCLKRequiredForPeakBandwidthPerPlane[k] = dml_max(
DCFCLKRequiredForPeakBandwidthPerPlane[k],
ExtraLatencyCycles / AllowedTimeForUrgentExtraLatency);
} else {
- DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKPerState[i];
+ DCFCLKRequiredForPeakBandwidthPerPlane[k] = v->DCFCLKPerState[i];
}
}
}
DCFCLKRequiredForPeakBandwidth = 0;
- for (k = 0; k <= NumberOfActivePlanes - 1; ++k) {
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; ++k) {
DCFCLKRequiredForPeakBandwidth = DCFCLKRequiredForPeakBandwidth + DCFCLKRequiredForPeakBandwidthPerPlane[k];
}
- MinimumTvmPlus2Tr0 = UrgLatency[i]
- * (GPUVMEnable == true ?
- (HostVMEnable == true ?
- (GPUVMMaxPageTableLevels + 2) * (HostVMMaxNonCachedPageTableLevels + 1) - 1 : GPUVMMaxPageTableLevels + 1) :
+ MinimumTvmPlus2Tr0 = v->UrgLatency[i]
+ * (v->GPUVMEnable == true ?
+ (v->HostVMEnable == true ?
+ (v->GPUVMMaxPageTableLevels + 2) * (v->HostVMMaxNonCachedPageTableLevels + 1) - 1 : v->GPUVMMaxPageTableLevels + 1) :
0);
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double MaximumTvmPlus2Tr0PlusTsw;
- MaximumTvmPlus2Tr0PlusTsw = (MaximumVStartup[i][j][k] - 2) * HTotal[k] / PixelClock[k] - MinimumTWait - DynamicMetadataVMExtraLatency[k];
+ MaximumTvmPlus2Tr0PlusTsw = (v->MaximumVStartup[i][j][k] - 2) * v->HTotal[k] / v->PixelClock[k] - MinimumTWait - DynamicMetadataVMExtraLatency[k];
if (MaximumTvmPlus2Tr0PlusTsw <= MinimumTvmPlus2Tr0 + PrefetchPixelLinesTime[k] / 4) {
- DCFCLKRequiredForPeakBandwidth = DCFCLKPerState[i];
+ DCFCLKRequiredForPeakBandwidth = v->DCFCLKPerState[i];
} else {
DCFCLKRequiredForPeakBandwidth = dml_max3(
DCFCLKRequiredForPeakBandwidth,
@@ -7457,7 +7304,7 @@ static void UseMinimumDCFCLK(
(2 * ExtraLatencyCycles + PixelDCFCLKCyclesRequiredInPrefetch[k]) / (MaximumTvmPlus2Tr0PlusTsw - MinimumTvmPlus2Tr0));
}
}
- DCFCLKState[i][j] = dml_min(DCFCLKPerState[i], 1.05 * dml_max(DCFCLKRequiredForAverageBandwidth, DCFCLKRequiredForPeakBandwidth));
+ v->DCFCLKState[i][j] = dml_min(v->DCFCLKPerState[i], 1.05 * dml_max(DCFCLKRequiredForAverageBandwidth, DCFCLKRequiredForPeakBandwidth));
}
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index 6905ef1e75a6..d76251fd1566 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -73,6 +73,7 @@ struct display_mode_lib {
struct vba_vars_st vba;
struct dal_logger *logger;
struct dml_funcs funcs;
+ struct _vcs_dpi_display_e2e_pipe_params_st dml_pipe_state[6];
};
void dml_init_instance(struct display_mode_lib *lib,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c
new file mode 100644
index 000000000000..789f7562cdc7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c
@@ -0,0 +1,1889 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dml_wrapper.h"
+#include "resource.h"
+#include "core_types.h"
+#include "dsc.h"
+#include "clk_mgr.h"
+
+#ifndef DC_LOGGER_INIT
+#define DC_LOGGER_INIT
+#undef DC_LOG_WARNING
+#define DC_LOG_WARNING
+#endif
+
+#define DML_WRAPPER_TRANSLATION_
+#include "dml_wrapper_translation.c"
+#undef DML_WRAPPER_TRANSLATION_
+
+static bool is_dual_plane(enum surface_pixel_format format)
+{
+ return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
+}
+
+static void build_clamping_params(struct dc_stream_state *stream)
+{
+ stream->clamping.clamping_level = CLAMPING_FULL_RANGE;
+ stream->clamping.c_depth = stream->timing.display_color_depth;
+ stream->clamping.pixel_encoding = stream->timing.pixel_encoding;
+}
+
+static void get_pixel_clock_parameters(
+ const struct pipe_ctx *pipe_ctx,
+ struct pixel_clk_params *pixel_clk_params)
+{
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+
+ /*TODO: is this halved for YCbCr 420? in that case we might want to move
+ * the pixel clock normalization for hdmi up to here instead of doing it
+ * in pll_adjust_pix_clk
+ */
+ pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
+ pixel_clk_params->encoder_object_id = stream->link->link_enc->id;
+ pixel_clk_params->signal_type = pipe_ctx->stream->signal;
+ pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
+ /* TODO: un-hardcode */
+ pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
+ LINK_RATE_REF_FREQ_IN_KHZ;
+ pixel_clk_params->flags.ENABLE_SS = 0;
+ pixel_clk_params->color_depth =
+ stream->timing.display_color_depth;
+ pixel_clk_params->flags.DISPLAY_BLANKED = 1;
+ pixel_clk_params->flags.SUPPORT_YCBCR420 = (stream->timing.pixel_encoding ==
+ PIXEL_ENCODING_YCBCR420);
+ pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ pixel_clk_params->color_depth = COLOR_DEPTH_888;
+ }
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ pixel_clk_params->requested_pix_clk_100hz = pixel_clk_params->requested_pix_clk_100hz / 2;
+ }
+ if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
+ pixel_clk_params->requested_pix_clk_100hz *= 2;
+
+}
+
+static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
+{
+ get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);
+
+ if (pipe_ctx->clock_source)
+ pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
+ pipe_ctx->clock_source,
+ &pipe_ctx->stream_res.pix_clk_params,
+ &pipe_ctx->pll_settings);
+
+ pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;
+
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream,
+ &pipe_ctx->stream->bit_depth_params);
+ build_clamping_params(pipe_ctx->stream);
+
+ return DC_OK;
+}
+
+static void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
+ struct bit_depth_reduction_params *fmt_bit_depth)
+{
+ enum dc_dither_option option = stream->dither_option;
+ enum dc_pixel_encoding pixel_encoding =
+ stream->timing.pixel_encoding;
+
+ memset(fmt_bit_depth, 0, sizeof(*fmt_bit_depth));
+
+ if (option == DITHER_OPTION_DEFAULT) {
+ switch (stream->timing.display_color_depth) {
+ case COLOR_DEPTH_666:
+ option = DITHER_OPTION_SPATIAL6;
+ break;
+ case COLOR_DEPTH_888:
+ option = DITHER_OPTION_SPATIAL8;
+ break;
+ case COLOR_DEPTH_101010:
+ option = DITHER_OPTION_SPATIAL10;
+ break;
+ default:
+ option = DITHER_OPTION_DISABLE;
+ }
+ }
+
+ if (option == DITHER_OPTION_DISABLE)
+ return;
+
+ if (option == DITHER_OPTION_TRUN6) {
+ fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
+ fmt_bit_depth->flags.TRUNCATE_DEPTH = 0;
+ } else if (option == DITHER_OPTION_TRUN8 ||
+ option == DITHER_OPTION_TRUN8_SPATIAL6 ||
+ option == DITHER_OPTION_TRUN8_FM6) {
+ fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
+ fmt_bit_depth->flags.TRUNCATE_DEPTH = 1;
+ } else if (option == DITHER_OPTION_TRUN10 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8 ||
+ option == DITHER_OPTION_TRUN10_FM8 ||
+ option == DITHER_OPTION_TRUN10_FM6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
+ fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
+ fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
+ }
+
+ /* special case - Formatter can only reduce by 4 bits at most.
+ * When reducing from 12 to 6 bits,
+ * HW recommends we use trunc with round mode
+ * (if we did nothing, trunc to 10 bits would be used)
+ * note that any 12->10 bit reduction is ignored prior to DCE8,
+ * as the input was 10 bits.
+ */
+ if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM ||
+ option == DITHER_OPTION_SPATIAL6 ||
+ option == DITHER_OPTION_FM6) {
+ fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
+ fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
+ fmt_bit_depth->flags.TRUNCATE_MODE = 1;
+ }
+
+ /* spatial dither
+ * note that spatial modes 1-3 are never used
+ */
+ if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM ||
+ option == DITHER_OPTION_SPATIAL6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL6 ||
+ option == DITHER_OPTION_TRUN8_SPATIAL6) {
+ fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
+ fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 0;
+ fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
+ fmt_bit_depth->flags.RGB_RANDOM =
+ (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
+ } else if (option == DITHER_OPTION_SPATIAL8_FRAME_RANDOM ||
+ option == DITHER_OPTION_SPATIAL8 ||
+ option == DITHER_OPTION_SPATIAL8_FM6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
+ fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
+ fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 1;
+ fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
+ fmt_bit_depth->flags.RGB_RANDOM =
+ (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
+ } else if (option == DITHER_OPTION_SPATIAL10_FRAME_RANDOM ||
+ option == DITHER_OPTION_SPATIAL10 ||
+ option == DITHER_OPTION_SPATIAL10_FM8 ||
+ option == DITHER_OPTION_SPATIAL10_FM6) {
+ fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
+ fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 2;
+ fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
+ fmt_bit_depth->flags.RGB_RANDOM =
+ (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
+ }
+
+ if (option == DITHER_OPTION_SPATIAL6 ||
+ option == DITHER_OPTION_SPATIAL8 ||
+ option == DITHER_OPTION_SPATIAL10) {
+ fmt_bit_depth->flags.FRAME_RANDOM = 0;
+ } else {
+ fmt_bit_depth->flags.FRAME_RANDOM = 1;
+ }
+
+ //////////////////////
+ //// temporal dither
+ //////////////////////
+ if (option == DITHER_OPTION_FM6 ||
+ option == DITHER_OPTION_SPATIAL8_FM6 ||
+ option == DITHER_OPTION_SPATIAL10_FM6 ||
+ option == DITHER_OPTION_TRUN10_FM6 ||
+ option == DITHER_OPTION_TRUN8_FM6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
+ fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
+ fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 0;
+ } else if (option == DITHER_OPTION_FM8 ||
+ option == DITHER_OPTION_SPATIAL10_FM8 ||
+ option == DITHER_OPTION_TRUN10_FM8) {
+ fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
+ fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 1;
+ } else if (option == DITHER_OPTION_FM10) {
+ fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
+ fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 2;
+ }
+
+ fmt_bit_depth->pixel_encoding = pixel_encoding;
+}
+
+bool dml_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
+{
+ int i;
+
+ /* Validate DSC config, dsc count validation is already done */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dsc_config dsc_cfg;
+ struct pipe_ctx *odm_pipe;
+ int opp_cnt = 1;
+
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ opp_cnt++;
+
+ /* Only need to validate top pipe */
+ if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe || !stream || !stream->timing.flags.DSC)
+ continue;
+
+ dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left
+ + stream->timing.h_border_right) / opp_cnt;
+ dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top
+ + stream->timing.v_border_bottom;
+ dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
+ dsc_cfg.color_depth = stream->timing.display_color_depth;
+ dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
+ dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+ dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+
+ if (pipe_ctx->stream_res.dsc && !pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg))
+ return false;
+ }
+ return true;
+}
+
+enum dc_status dml_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream)
+{
+ enum dc_status status = DC_OK;
+ struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+
+ if (!pipe_ctx)
+ return DC_ERROR_UNEXPECTED;
+
+
+ status = build_pipe_hw_param(pipe_ctx);
+
+ return status;
+}
+
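+/*
+ * Acquire a DSC instance for the pipe: use 1-to-1 mapping when the pool has
+ * one DSC per OPP, otherwise prefer the previously used DSC, then any free one.
+ */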
+void dml_acquire_dsc(const struct dc *dc,
+ struct resource_context *res_ctx,
+ struct display_stream_compressor **dsc,
+ int pipe_idx)
+{
+ int i;
+ const struct resource_pool *pool = dc->res_pool;
+ struct display_stream_compressor *dsc_old = dc->current_state->res_ctx.pipe_ctx[pipe_idx].stream_res.dsc;
+
+ ASSERT(*dsc == NULL); /* If this ASSERT fails, dsc was not released properly */
+ *dsc = NULL;
+
+ /* Always do 1-to-1 mapping when the number of DSCs equals the number of pipes */
+ if (pool->res_cap->num_dsc == pool->res_cap->num_opp) {
+ *dsc = pool->dscs[pipe_idx];
+ res_ctx->is_dsc_acquired[pipe_idx] = true;
+ return;
+ }
+
+ /* Return the old DSC to avoid the need to redo it */
+ if (dsc_old && !res_ctx->is_dsc_acquired[dsc_old->inst]) {
+ *dsc = dsc_old;
+ res_ctx->is_dsc_acquired[dsc_old->inst] = true;
+ return;
+ }
+
+ /* Find first free DSC */
+ for (i = 0; i < pool->res_cap->num_dsc; i++)
+ if (!res_ctx->is_dsc_acquired[i]) {
+ *dsc = pool->dscs[i];
+ res_ctx->is_dsc_acquired[i] = true;
+ break;
+ }
+}
+
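+/*
+ * Initialize the secondary pipe as a copy of the primary pipe and link it
+ * into either the ODM chain (odm == true) or the MPC tree (odm == false).
+ */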
+static bool dml_split_stream_for_mpc_or_odm(
+ const struct dc *dc,
+ struct resource_context *res_ctx,
+ struct pipe_ctx *pri_pipe,
+ struct pipe_ctx *sec_pipe,
+ bool odm)
+{
+ int pipe_idx = sec_pipe->pipe_idx;
+ const struct resource_pool *pool = dc->res_pool;
+
+ *sec_pipe = *pri_pipe;
+
+ sec_pipe->pipe_idx = pipe_idx;
+ sec_pipe->plane_res.mi = pool->mis[pipe_idx];
+ sec_pipe->plane_res.hubp = pool->hubps[pipe_idx];
+ sec_pipe->plane_res.ipp = pool->ipps[pipe_idx];
+ sec_pipe->plane_res.xfm = pool->transforms[pipe_idx];
+ sec_pipe->plane_res.dpp = pool->dpps[pipe_idx];
+ sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst;
+ sec_pipe->stream_res.dsc = NULL;
+ if (odm) {
+ if (pri_pipe->next_odm_pipe) {
+ ASSERT(pri_pipe->next_odm_pipe != sec_pipe);
+ sec_pipe->next_odm_pipe = pri_pipe->next_odm_pipe;
+ sec_pipe->next_odm_pipe->prev_odm_pipe = sec_pipe;
+ }
+ if (pri_pipe->top_pipe && pri_pipe->top_pipe->next_odm_pipe) {
+ pri_pipe->top_pipe->next_odm_pipe->bottom_pipe = sec_pipe;
+ sec_pipe->top_pipe = pri_pipe->top_pipe->next_odm_pipe;
+ }
+ if (pri_pipe->bottom_pipe && pri_pipe->bottom_pipe->next_odm_pipe) {
+ pri_pipe->bottom_pipe->next_odm_pipe->top_pipe = sec_pipe;
+ sec_pipe->bottom_pipe = pri_pipe->bottom_pipe->next_odm_pipe;
+ }
+ pri_pipe->next_odm_pipe = sec_pipe;
+ sec_pipe->prev_odm_pipe = pri_pipe;
+ ASSERT(sec_pipe->top_pipe == NULL);
+
+ if (!sec_pipe->top_pipe)
+ sec_pipe->stream_res.opp = pool->opps[pipe_idx];
+ else
+ sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp;
+ if (sec_pipe->stream->timing.flags.DSC == 1) {
+ dml_acquire_dsc(dc, res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
+ ASSERT(sec_pipe->stream_res.dsc);
+ if (sec_pipe->stream_res.dsc == NULL)
+ return false;
+ }
+ } else {
+ if (pri_pipe->bottom_pipe) {
+ ASSERT(pri_pipe->bottom_pipe != sec_pipe);
+ sec_pipe->bottom_pipe = pri_pipe->bottom_pipe;
+ sec_pipe->bottom_pipe->top_pipe = sec_pipe;
+ }
+ pri_pipe->bottom_pipe = sec_pipe;
+ sec_pipe->top_pipe = pri_pipe;
+
+ ASSERT(pri_pipe->plane_state);
+ }
+
+ return true;
+}
+
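+/*
+ * Find a free pipe for a split: prefer old_index if it is unused, then a free
+ * pipe whose current-state counterpart is not a secondary pipe, then any free
+ * pipe.
+ */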
+static struct pipe_ctx *dml_find_split_pipe(
+ struct dc *dc,
+ struct dc_state *context,
+ int old_index)
+{
+ struct pipe_ctx *pipe = NULL;
+ int i;
+
+ if (old_index >= 0 && context->res_ctx.pipe_ctx[old_index].stream == NULL) {
+ pipe = &context->res_ctx.pipe_ctx[old_index];
+ pipe->pipe_idx = old_index;
+ }
+
+ if (!pipe)
+ for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].top_pipe == NULL
+ && dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) {
+ if (context->res_ctx.pipe_ctx[i].stream == NULL) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ pipe->pipe_idx = i;
+ break;
+ }
+ }
+ }
+
+ /*
+ * TODO: may need to fix pipes getting tossed from one OPP to another on flip.
+ * Enable the ASSERT below to debug transient underflow during topology updates:
+ * ASSERT(pipe);
+ */
+ if (!pipe)
+ for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+ if (context->res_ctx.pipe_ctx[i].stream == NULL) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ pipe->pipe_idx = i;
+ break;
+ }
+ }
+
+ return pipe;
+}
+
+static void dml_release_dsc(struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct display_stream_compressor **dsc)
+{
+ int i;
+
+ for (i = 0; i < pool->res_cap->num_dsc; i++)
+ if (pool->dscs[i] == *dsc) {
+ res_ctx->is_dsc_acquired[i] = false;
+ *dsc = NULL;
+ break;
+ }
+}
+
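+/* Count how many other pipes blend the same plane as this pipe (MPC splits). */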
+static int dml_get_num_mpc_splits(struct pipe_ctx *pipe)
+{
+ int mpc_split_count = 0;
+ struct pipe_ctx *other_pipe = pipe->bottom_pipe;
+
+ while (other_pipe && other_pipe->plane_state == pipe->plane_state) {
+ mpc_split_count++;
+ other_pipe = other_pipe->bottom_pipe;
+ }
+ other_pipe = pipe->top_pipe;
+ while (other_pipe && other_pipe->plane_state == pipe->plane_state) {
+ mpc_split_count++;
+ other_pipe = other_pipe->top_pipe;
+ }
+
+ return mpc_split_count;
+}
+
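+/*
+ * SubVP needs a phantom pipe per main pipe, so it is only possible when the
+ * pipes carrying both a stream and a plane use at most half of the pool.
+ */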
+static bool dml_enough_pipes_for_subvp(struct dc *dc,
+ struct dc_state *context)
+{
+ int i = 0;
+ int num_pipes = 0;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->plane_state)
+ num_pipes++;
+ }
+
+ // Sub-VP only possible if the number of "real" pipes is
+ // less than or equal to half the number of available pipes
+ if (num_pipes * 2 > dc->res_pool->pipe_count)
+ return false;
+
+ return true;
+}
+
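+/*
+ * Decide per pipe whether it should be 2-way or 4-way split, or merged, based
+ * on DML outputs for the chosen voltage level and on dc debug flags; returns
+ * the (possibly adjusted) voltage level.
+ */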
+static int dml_validate_apply_pipe_split_flags(
+ struct dc *dc,
+ struct dc_state *context,
+ int vlevel,
+ int *split,
+ bool *merge)
+{
+ int i, pipe_idx, vlevel_split;
+ int plane_count = 0;
+ bool force_split = false;
+ bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID;
+ struct vba_vars_st *v = &context->bw_ctx.dml.vba;
+ int max_mpc_comb = v->maxMpcComb;
+
+ if (context->stream_count > 1) {
+ if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP)
+ avoid_split = true;
+ } else if (dc->debug.force_single_disp_pipe_split)
+ force_split = true;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ /**
+ * Workaround for avoiding pipe-split in cases where we'd split
+ * planes that are too small, resulting in splits that aren't
+ * valid for the scaler.
+ */
+ if (pipe->plane_state &&
+ (pipe->plane_state->dst_rect.width <= 16 ||
+ pipe->plane_state->dst_rect.height <= 16 ||
+ pipe->plane_state->src_rect.width <= 16 ||
+ pipe->plane_state->src_rect.height <= 16))
+ avoid_split = true;
+
+ /* TODO: fix dc bugs and remove this split threshold thing */
+ if (pipe->stream && !pipe->prev_odm_pipe &&
+ (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
+ ++plane_count;
+ }
+ if (plane_count > dc->res_pool->pipe_count / 2)
+ avoid_split = true;
+
+ /* W/A: Mode timing with borders may not work well with pipe split, avoid for this corner case */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct dc_crtc_timing timing;
+
+ if (!pipe->stream)
+ continue;
+ else {
+ timing = pipe->stream->timing;
+ if (timing.h_border_left + timing.h_border_right
+ + timing.v_border_top + timing.v_border_bottom > 0) {
+ avoid_split = true;
+ break;
+ }
+ }
+ }
+
+ /* The avoid-split loop looks for the lowest voltage level that leaves as many pipes as possible unsplit */
+ if (avoid_split) {
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++)
+ if (v->NoOfDPP[vlevel][0][pipe_idx] == 1 &&
+ v->ModeSupport[vlevel][0])
+ break;
+ /* Impossible to not split this pipe */
+ if (vlevel > context->bw_ctx.dml.soc.num_states)
+ vlevel = vlevel_split;
+ else
+ max_mpc_comb = 0;
+ pipe_idx++;
+ }
+ v->maxMpcComb = max_mpc_comb;
+ }
+
+ /* Split loop sets which pipe should be split based on dml outputs and dc flags */
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ int pipe_plane = v->pipe_plane[pipe_idx];
+ bool split4mpc = context->stream_count == 1 && plane_count == 1
+ && dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4;
+
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ if (split4mpc || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 4)
+ split[i] = 4;
+ else if (force_split || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 2)
+ split[i] = 2;
+
+ if ((pipe->stream->view_format ==
+ VIEW_3D_FORMAT_SIDE_BY_SIDE ||
+ pipe->stream->view_format ==
+ VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
+ (pipe->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
+ pipe->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_SIDE_BY_SIDE))
+ split[i] = 2;
+ if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
+ split[i] = 2;
+ v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
+ }
+ if (dc->debug.force_odm_combine_4to1 & (1 << pipe->stream_res.tg->inst)) {
+ split[i] = 4;
+ v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_4to1;
+ }
+ /* YCbCr 4:2:0 format workaround */
+ if (pipe->stream->timing.h_addressable > 7680 &&
+ pipe->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ split[i] = 4;
+ }
+
+ v->ODMCombineEnabled[pipe_plane] =
+ v->ODMCombineEnablePerState[vlevel][pipe_plane];
+
+ if (v->ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
+ if (dml_get_num_mpc_splits(pipe) == 1) {
+ /* If an MPC split is needed but the pipe is already 2-way split */
+ if (split[i] == 4)
+ split[i] = 2; /* 2 -> 4 MPC */
+ else if (split[i] == 2)
+ split[i] = 0; /* 2 -> 2 MPC */
+ else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
+ merge[i] = true; /* 2 -> 1 MPC */
+ } else if (dml_get_num_mpc_splits(pipe) == 3) {
+ /* If an MPC split is needed but the pipe is already 4-way split */
+ if (split[i] == 2 && ((pipe->top_pipe && !pipe->top_pipe->top_pipe)
+ || !pipe->bottom_pipe)) {
+ merge[i] = true; /* 4 -> 2 MPC */
+ } else if (split[i] == 0 && pipe->top_pipe &&
+ pipe->top_pipe->plane_state == pipe->plane_state)
+ merge[i] = true; /* 4 -> 1 MPC */
+ split[i] = 0;
+ } else if (dml_get_num_mpc_splits(pipe)) {
+ /* ODM -> MPC transition */
+ if (pipe->prev_odm_pipe) {
+ split[i] = 0;
+ merge[i] = true;
+ }
+ }
+ } else {
+ if (dml_get_num_mpc_splits(pipe) == 1) {
+ /* If an ODM split is needed but the pipe is already 2-way split */
+ if (split[i] == 4)
+ split[i] = 2; /* 2 -> 4 ODM */
+ else if (split[i] == 2)
+ split[i] = 0; /* 2 -> 2 ODM */
+ else if (pipe->prev_odm_pipe) {
+ ASSERT(0); /* NOT expected yet */
+ merge[i] = true; /* exit ODM */
+ }
+ } else if (dml_get_num_mpc_splits(pipe) == 3) {
+ /* If an ODM split is needed but the pipe is already 4-way split */
+ if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe)
+ || !pipe->next_odm_pipe)) {
+ ASSERT(0); /* NOT expected yet */
+ merge[i] = true; /* 4 -> 2 ODM */
+ } else if (split[i] == 0 && pipe->prev_odm_pipe) {
+ ASSERT(0); /* NOT expected yet */
+ merge[i] = true; /* exit ODM */
+ }
+ split[i] = 0;
+ } else if (dml_get_num_mpc_splits(pipe)) {
+ /* MPC -> ODM transition */
+ ASSERT(0); /* NOT expected yet */
+ if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
+ split[i] = 0;
+ merge[i] = true;
+ }
+ }
+ }
+
+ /* Adjust dppclk when split is forced, do not bother with dispclk */
+ if (split[i] != 0 && v->NoOfDPP[vlevel][max_mpc_comb][pipe_idx] == 1)
+ v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] /= 2;
+ pipe_idx++;
+ }
+
+ return vlevel;
+}
+
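+/*
+ * Shrink the phantom stream's vertical timing so its active region just
+ * covers DRAM blackout, firmware processing delay and the p-state allow width.
+ */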
+static void dml_set_phantom_stream_timing(struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *ref_pipe,
+ struct dc_stream_state *phantom_stream)
+{
+ // phantom_vactive = blackout (latency + margin) + fw_processing_delays + pstate allow width
+ uint32_t phantom_vactive_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us + 60 +
+ dc->caps.subvp_fw_processing_delay_us +
+ dc->caps.subvp_pstate_allow_width_us;
+ uint32_t phantom_vactive = ((double)phantom_vactive_us/1000000) *
+ (ref_pipe->stream->timing.pix_clk_100hz * 100) /
+ (double)ref_pipe->stream->timing.h_total;
+ uint32_t phantom_bp = ref_pipe->pipe_dlg_param.vstartup_start;
+
+ phantom_stream->dst.y = 0;
+ phantom_stream->dst.height = phantom_vactive;
+ phantom_stream->src.y = 0;
+ phantom_stream->src.height = phantom_vactive;
+
+ phantom_stream->timing.v_addressable = phantom_vactive;
+ phantom_stream->timing.v_front_porch = 1;
+ phantom_stream->timing.v_total = phantom_stream->timing.v_addressable +
+ phantom_stream->timing.v_front_porch +
+ phantom_stream->timing.v_sync_width +
+ phantom_bp;
+}
+
+static struct dc_stream_state *dml_enable_phantom_stream(struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *ref_pipe)
+{
+ struct dc_stream_state *phantom_stream = NULL;
+
+ phantom_stream = dc_create_stream_for_sink(ref_pipe->stream->sink);
+ phantom_stream->signal = SIGNAL_TYPE_VIRTUAL;
+ phantom_stream->dpms_off = true;
+ phantom_stream->mall_stream_config.type = SUBVP_PHANTOM;
+ phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream;
+ ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN;
+ ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream;
+
+ /* stream has limited viewport and small timing */
+ memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, sizeof(phantom_stream->timing));
+ memcpy(&phantom_stream->src, &ref_pipe->stream->src, sizeof(phantom_stream->src));
+ memcpy(&phantom_stream->dst, &ref_pipe->stream->dst, sizeof(phantom_stream->dst));
+ dml_set_phantom_stream_timing(dc, context, ref_pipe, phantom_stream);
+
+ dc_add_stream_to_ctx(dc, context, phantom_stream);
+ dc->hwss.apply_ctx_to_hw(dc, context);
+ return phantom_stream;
+}
+
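+/*
+ * Copy each plane of the main pipe onto the phantom stream, clipping the
+ * copies to the phantom stream's reduced vertical active region.
+ */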
+static void dml_enable_phantom_plane(struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *phantom_stream,
+ struct pipe_ctx *main_pipe)
+{
+ struct dc_plane_state *phantom_plane = NULL;
+ struct dc_plane_state *prev_phantom_plane = NULL;
+ struct pipe_ctx *curr_pipe = main_pipe;
+
+ while (curr_pipe) {
+ if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state)
+ phantom_plane = prev_phantom_plane;
+ else
+ phantom_plane = dc_create_plane_state(dc);
+
+ memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address));
+ memcpy(&phantom_plane->scaling_quality, &curr_pipe->plane_state->scaling_quality,
+ sizeof(phantom_plane->scaling_quality));
+ memcpy(&phantom_plane->src_rect, &curr_pipe->plane_state->src_rect, sizeof(phantom_plane->src_rect));
+ memcpy(&phantom_plane->dst_rect, &curr_pipe->plane_state->dst_rect, sizeof(phantom_plane->dst_rect));
+ memcpy(&phantom_plane->clip_rect, &curr_pipe->plane_state->clip_rect, sizeof(phantom_plane->clip_rect));
+ memcpy(&phantom_plane->plane_size, &curr_pipe->plane_state->plane_size,
+ sizeof(phantom_plane->plane_size));
+ memcpy(&phantom_plane->tiling_info, &curr_pipe->plane_state->tiling_info,
+ sizeof(phantom_plane->tiling_info));
+ memcpy(&phantom_plane->dcc, &curr_pipe->plane_state->dcc, sizeof(phantom_plane->dcc));
+ /* Currently compat_level is undefined in dc_plane_state
+ * phantom_plane->compat_level = curr_pipe->plane_state->compat_level;
+ */
+ phantom_plane->format = curr_pipe->plane_state->format;
+ phantom_plane->rotation = curr_pipe->plane_state->rotation;
+ phantom_plane->visible = curr_pipe->plane_state->visible;
+
+ /* Shadow pipe has small viewport. */
+ phantom_plane->clip_rect.y = 0;
+ phantom_plane->clip_rect.height = phantom_stream->timing.v_addressable;
+
+ dc_add_plane_to_context(dc, phantom_stream, phantom_plane, context);
+
+ curr_pipe = curr_pipe->bottom_pipe;
+ prev_phantom_plane = phantom_plane;
+ }
+}
+
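+/*
+ * Add a phantom stream and matching phantom planes for every enabled top
+ * pipe that is not already part of a SubVP configuration.
+ */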
+static void dml_add_phantom_pipes(struct dc *dc, struct dc_state *context)
+{
+ int i = 0;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct dc_stream_state *ref_stream = pipe->stream;
+ // Only construct phantom stream for top pipes that have plane enabled
+ if (!pipe->top_pipe && pipe->plane_state && pipe->stream &&
+ pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ struct dc_stream_state *phantom_stream = NULL;
+
+ phantom_stream = dml_enable_phantom_stream(dc, context, pipe);
+ dml_enable_phantom_plane(dc, context, phantom_stream, pipe);
+ }
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state && pipe->stream &&
+ pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ pipe->stream->use_dynamic_meta = false;
+ pipe->plane_state->flip_immediate = false;
+ if (!resource_build_scaling_params(pipe)) {
+ // TODO: log and remove phantom pipes, since building scaling params failed
+ }
+ }
+ }
+}
+
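+/* Remove all phantom planes/streams from the context and clear SubVP state. */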
+static void dml_remove_phantom_pipes(struct dc *dc, struct dc_state *context)
+{
+ int i;
+ bool removed_pipe = false;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ // Remove the planes and stream for phantom pipes
+ if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ dc_rem_all_planes_for_stream(dc, pipe->stream, context);
+ dc_remove_stream_from_ctx(dc, context, pipe->stream);
+ removed_pipe = true;
+ }
+
+ // Clear all phantom stream info
+ if (pipe->stream) {
+ pipe->stream->mall_stream_config.type = SUBVP_NONE;
+ pipe->stream->mall_stream_config.paired_stream = NULL;
+ }
+ }
+ if (removed_pipe)
+ dc->hwss.apply_ctx_to_hw(dc, context);
+}
+
+/*
+ * If the input state contains no upstream planes for a particular pipe (i.e. only timing)
+ * we need to populate some "conservative" plane information as DML cannot handle "no planes"
+ */
+static void populate_default_plane_from_timing(const struct dc_crtc_timing *timing, struct _vcs_dpi_display_pipe_params_st *pipe)
+{
+ pipe->src.is_hsplit = pipe->dest.odm_combine != dm_odm_combine_mode_disabled;
+ pipe->src.source_scan = dm_horz;
+ pipe->src.sw_mode = dm_sw_4kb_s;
+ pipe->src.macro_tile_size = dm_64k_tile;
+ pipe->src.viewport_width = timing->h_addressable;
+ if (pipe->src.viewport_width > 1920)
+ pipe->src.viewport_width = 1920;
+ pipe->src.viewport_height = timing->v_addressable;
+ if (pipe->src.viewport_height > 1080)
+ pipe->src.viewport_height = 1080;
+ pipe->src.surface_height_y = pipe->src.viewport_height;
+ pipe->src.surface_width_y = pipe->src.viewport_width;
+ pipe->src.surface_height_c = pipe->src.viewport_height;
+ pipe->src.surface_width_c = pipe->src.viewport_width;
+ pipe->src.data_pitch = ((pipe->src.viewport_width + 255) / 256) * 256;
+ pipe->src.source_format = dm_444_32;
+ pipe->dest.recout_width = pipe->src.viewport_width;
+ pipe->dest.recout_height = pipe->src.viewport_height;
+ pipe->dest.full_recout_width = pipe->dest.recout_width;
+ pipe->dest.full_recout_height = pipe->dest.recout_height;
+ pipe->scale_ratio_depth.lb_depth = dm_lb_16;
+ pipe->scale_ratio_depth.hscl_ratio = 1.0;
+ pipe->scale_ratio_depth.vscl_ratio = 1.0;
+ pipe->scale_ratio_depth.scl_enable = 0;
+ pipe->scale_taps.htaps = 1;
+ pipe->scale_taps.vtaps = 1;
+ pipe->dest.vtotal_min = timing->v_total;
+ pipe->dest.vtotal_max = timing->v_total;
+
+ if (pipe->dest.odm_combine == dm_odm_combine_mode_2to1) {
+ pipe->src.viewport_width /= 2;
+ pipe->dest.recout_width /= 2;
+ } else if (pipe->dest.odm_combine == dm_odm_combine_mode_4to1) {
+ pipe->src.viewport_width /= 4;
+ pipe->dest.recout_width /= 4;
+ }
+
+ pipe->src.dcc = false;
+ pipe->src.dcc_rate = 1;
+}
+
+/*
+ * If the pipe is not blending (i.e. pipe_ctx->top_pipe == NULL) then its
+ * hsplit group is equal to its own pipe ID.
+ * Otherwise, all pipes that are part of the same blending tree have the same
+ * hsplit group ID as the topmost pipe.
+ *
+ * If the pipe ctx is ODM combined, similar logic follows.
+ */
+static void populate_hsplit_group_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe)
+{
+ e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx;
+
+ if (dc_pipe_ctx->top_pipe && dc_pipe_ctx->top_pipe->plane_state
+ == dc_pipe_ctx->plane_state) {
+ struct pipe_ctx *first_pipe = dc_pipe_ctx->top_pipe;
+ int split_idx = 0;
+
+ while (first_pipe->top_pipe && first_pipe->top_pipe->plane_state
+ == dc_pipe_ctx->plane_state) {
+ first_pipe = first_pipe->top_pipe;
+ split_idx++;
+ }
+
+ /* Treat 4-to-1 MPC combine as an MPO of two 2-to-1 combines */
+ if (split_idx == 0)
+ e2e_pipe->pipe.src.hsplit_grp = first_pipe->pipe_idx;
+ else if (split_idx == 1)
+ e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx;
+ else if (split_idx == 2)
+ e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->top_pipe->pipe_idx;
+
+ } else if (dc_pipe_ctx->prev_odm_pipe) {
+ struct pipe_ctx *first_pipe = dc_pipe_ctx->prev_odm_pipe;
+
+ while (first_pipe->prev_odm_pipe)
+ first_pipe = first_pipe->prev_odm_pipe;
+ e2e_pipe->pipe.src.hsplit_grp = first_pipe->pipe_idx;
+ }
+}
+
+static void populate_dml_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe, int always_scale)
+{
+ const struct dc_plane_state *pln = dc_pipe_ctx->plane_state;
+ const struct scaler_data *scl = &dc_pipe_ctx->plane_res.scl_data;
+
+ e2e_pipe->pipe.src.immediate_flip = pln->flip_immediate;
+ e2e_pipe->pipe.src.is_hsplit = (dc_pipe_ctx->bottom_pipe && dc_pipe_ctx->bottom_pipe->plane_state == pln)
+ || (dc_pipe_ctx->top_pipe && dc_pipe_ctx->top_pipe->plane_state == pln)
+ || e2e_pipe->pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
+
+ /* stereo is not split */
+ if (pln->stereo_format == PLANE_STEREO_FORMAT_SIDE_BY_SIDE ||
+ pln->stereo_format == PLANE_STEREO_FORMAT_TOP_AND_BOTTOM) {
+ e2e_pipe->pipe.src.is_hsplit = false;
+ e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx;
+ }
+
+ e2e_pipe->pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
+ || pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
+ e2e_pipe->pipe.src.viewport_y_y = scl->viewport.y;
+ e2e_pipe->pipe.src.viewport_y_c = scl->viewport_c.y;
+ e2e_pipe->pipe.src.viewport_width = scl->viewport.width;
+ e2e_pipe->pipe.src.viewport_width_c = scl->viewport_c.width;
+ e2e_pipe->pipe.src.viewport_height = scl->viewport.height;
+ e2e_pipe->pipe.src.viewport_height_c = scl->viewport_c.height;
+ e2e_pipe->pipe.src.viewport_width_max = pln->src_rect.width;
+ e2e_pipe->pipe.src.viewport_height_max = pln->src_rect.height;
+ e2e_pipe->pipe.src.surface_width_y = pln->plane_size.surface_size.width;
+ e2e_pipe->pipe.src.surface_height_y = pln->plane_size.surface_size.height;
+ e2e_pipe->pipe.src.surface_width_c = pln->plane_size.chroma_size.width;
+ e2e_pipe->pipe.src.surface_height_c = pln->plane_size.chroma_size.height;
+
+ if (pln->format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA
+ || pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+ e2e_pipe->pipe.src.data_pitch = pln->plane_size.surface_pitch;
+ e2e_pipe->pipe.src.data_pitch_c = pln->plane_size.chroma_pitch;
+ e2e_pipe->pipe.src.meta_pitch = pln->dcc.meta_pitch;
+ e2e_pipe->pipe.src.meta_pitch_c = pln->dcc.meta_pitch_c;
+ } else {
+ e2e_pipe->pipe.src.data_pitch = pln->plane_size.surface_pitch;
+ e2e_pipe->pipe.src.meta_pitch = pln->dcc.meta_pitch;
+ }
+ e2e_pipe->pipe.src.dcc = pln->dcc.enable;
+ e2e_pipe->pipe.src.dcc_rate = 1;
+ e2e_pipe->pipe.dest.recout_width = scl->recout.width;
+ e2e_pipe->pipe.dest.recout_height = scl->recout.height;
+ e2e_pipe->pipe.dest.full_recout_height = scl->recout.height;
+ e2e_pipe->pipe.dest.full_recout_width = scl->recout.width;
+ if (e2e_pipe->pipe.dest.odm_combine == dm_odm_combine_mode_2to1)
+ e2e_pipe->pipe.dest.full_recout_width *= 2;
+ else if (e2e_pipe->pipe.dest.odm_combine == dm_odm_combine_mode_4to1)
+ e2e_pipe->pipe.dest.full_recout_width *= 4;
+ else {
+ struct pipe_ctx *split_pipe = dc_pipe_ctx->bottom_pipe;
+
+ while (split_pipe && split_pipe->plane_state == pln) {
+ e2e_pipe->pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width;
+ split_pipe = split_pipe->bottom_pipe;
+ }
+ split_pipe = dc_pipe_ctx->top_pipe;
+ while (split_pipe && split_pipe->plane_state == pln) {
+ e2e_pipe->pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width;
+ split_pipe = split_pipe->top_pipe;
+ }
+ }
+
+ e2e_pipe->pipe.scale_ratio_depth.lb_depth = dm_lb_16;
+ e2e_pipe->pipe.scale_ratio_depth.hscl_ratio = (double) scl->ratios.horz.value / (1ULL<<32);
+ e2e_pipe->pipe.scale_ratio_depth.hscl_ratio_c = (double) scl->ratios.horz_c.value / (1ULL<<32);
+ e2e_pipe->pipe.scale_ratio_depth.vscl_ratio = (double) scl->ratios.vert.value / (1ULL<<32);
+ e2e_pipe->pipe.scale_ratio_depth.vscl_ratio_c = (double) scl->ratios.vert_c.value / (1ULL<<32);
+ e2e_pipe->pipe.scale_ratio_depth.scl_enable =
+ scl->ratios.vert.value != dc_fixpt_one.value
+ || scl->ratios.horz.value != dc_fixpt_one.value
+ || scl->ratios.vert_c.value != dc_fixpt_one.value
+ || scl->ratios.horz_c.value != dc_fixpt_one.value /*Lb only or Full scl*/
+ || always_scale; /*support always scale*/
+ e2e_pipe->pipe.scale_taps.htaps = scl->taps.h_taps;
+ e2e_pipe->pipe.scale_taps.htaps_c = scl->taps.h_taps_c;
+ e2e_pipe->pipe.scale_taps.vtaps = scl->taps.v_taps;
+ e2e_pipe->pipe.scale_taps.vtaps_c = scl->taps.v_taps_c;
+
+ /* Currently compat_level is not defined. Commenting it out until further resolution
+ * if (pln->compat_level == DC_LEGACY_TILING_ADDR_GEN_TWO) {
+ swizzle_to_dml_params(pln->tiling_info.gfx9.swizzle,
+ &e2e_pipe->pipe.src.sw_mode);
+ e2e_pipe->pipe.src.macro_tile_size =
+ swizzle_mode_to_macro_tile_size(pln->tiling_info.gfx9.swizzle);
+ } else {
+ gfx10array_mode_to_dml_params(pln->tiling_info.gfx10compatible.array_mode,
+ pln->compat_level,
+ &e2e_pipe->pipe.src.sw_mode);
+ e2e_pipe->pipe.src.macro_tile_size = dm_4k_tile;
+ }*/
+
+ e2e_pipe->pipe.src.source_format = dc_source_format_to_dml_source_format(pln->format);
+}
+
+static void populate_dml_cursor_parameters_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe)
+{
+ /*
+ * For a graphics plane the cursor count is 1, for NV12 it is 0;
+ * this affects bandwidth calculations for cursor on/off.
+ */
+ if (dc_pipe_ctx->plane_state &&
+ (dc_pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE ||
+ dc_pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM))
+ e2e_pipe->pipe.src.num_cursors = 0;
+ else
+ e2e_pipe->pipe.src.num_cursors = 1;
+
+ e2e_pipe->pipe.src.cur0_src_width = 256;
+ e2e_pipe->pipe.src.cur0_bpp = dm_cur_32bit;
+}
+
+static int populate_dml_pipes_from_context_base(
+ struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate)
+{
+ int pipe_cnt, i;
+ bool synchronized_vblank = true;
+ struct resource_context *res_ctx = &context->res_ctx;
+
+ for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) {
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+
+ if (pipe_cnt < 0) {
+ pipe_cnt = i;
+ continue;
+ }
+
+ if (res_ctx->pipe_ctx[pipe_cnt].stream == res_ctx->pipe_ctx[i].stream)
+ continue;
+
+ if (dc->debug.disable_timing_sync ||
+ (!resource_are_streams_timing_synchronizable(
+ res_ctx->pipe_ctx[pipe_cnt].stream,
+ res_ctx->pipe_ctx[i].stream) &&
+ !resource_are_vblanks_synchronizable(
+ res_ctx->pipe_ctx[pipe_cnt].stream,
+ res_ctx->pipe_ctx[i].stream))) {
+ synchronized_vblank = false;
+ break;
+ }
+ }
+
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_crtc_timing *timing = &res_ctx->pipe_ctx[i].stream->timing;
+
+ struct audio_check aud_check = {0};
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+
+ /* todo:
+ * pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0;
+ * pipes[pipe_cnt].pipe.src.dcc = 0;
+ * pipes[pipe_cnt].pipe.src.vm = 0;
+ */
+
+ pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
+
+ pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC;
+ /* todo: rotation?*/
+ pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h;
+ if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) {
+ pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true;
+ /* 1/2 vblank */
+ pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active =
+ (timing->v_total - timing->v_addressable
+ - timing->v_border_top - timing->v_border_bottom) / 2;
+ /* 36 bytes dp, 32 hdmi */
+ pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes =
+ dc_is_dp_signal(res_ctx->pipe_ctx[i].stream->signal) ? 36 : 32;
+ }
+ pipes[pipe_cnt].pipe.dest.synchronized_vblank_all_planes = synchronized_vblank;
+
+ dc_timing_to_dml_timing(timing, &pipes[pipe_cnt].pipe.dest);
+ pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
+ pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
+
+ pipes[pipe_cnt].pipe.dest.otg_inst = res_ctx->pipe_ctx[i].stream_res.tg->inst;
+
+ pipes[pipe_cnt].pipe.dest.odm_combine = get_dml_odm_combine(&res_ctx->pipe_ctx[i]);
+
+ populate_hsplit_group_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt]);
+
+ pipes[pipe_cnt].dout.dp_lanes = 4;
+ pipes[pipe_cnt].dout.is_virtual = 0;
+ pipes[pipe_cnt].dout.output_type = get_dml_output_type(res_ctx->pipe_ctx[i].stream->signal);
+ if (pipes[pipe_cnt].dout.output_type < 0) {
+ pipes[pipe_cnt].dout.output_type = dm_dp;
+ pipes[pipe_cnt].dout.is_virtual = 1;
+ }
+
+ populate_color_depth_and_encoding_from_timing(&res_ctx->pipe_ctx[i].stream->timing, &pipes[pipe_cnt].dout);
+
+ if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC)
+ pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0;
+
+ /* todo: default max for now, until there is logic reflecting this in dc*/
+ pipes[pipe_cnt].dout.dsc_input_bpc = 12;
+ /* fill in the audio sample rate (in kHz) */
+ get_audio_check(&res_ctx->pipe_ctx[i].stream->audio_info, &aud_check);
+ pipes[pipe_cnt].dout.max_audio_sample_rate = aud_check.max_audiosample_rate / 1000;
+
+ populate_dml_cursor_parameters_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt]);
+
+ if (!res_ctx->pipe_ctx[i].plane_state) {
+ populate_default_plane_from_timing(timing, &pipes[pipe_cnt].pipe);
+ } else {
+ populate_dml_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt], dc->debug.always_scale);
+ }
+
+ pipe_cnt++;
+ }
+
+ /* populate writeback information */
+ if (dc->res_pool)
+ dc->res_pool->funcs->populate_dml_writeback_from_context(dc, res_ctx, pipes);
+
+ return pipe_cnt;
+}
+
+static int dml_populate_dml_pipes_from_context(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate)
+{
+ int i, pipe_cnt;
+ struct resource_context *res_ctx = &context->res_ctx;
+ struct pipe_ctx *pipe;
+
+ populate_dml_pipes_from_context_base(dc, context, pipes, fast_validate);
+
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_crtc_timing *timing;
+
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+ pipe = &res_ctx->pipe_ctx[i];
+ timing = &pipe->stream->timing;
+
+ pipes[pipe_cnt].pipe.src.gpuvm = true;
+ pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
+ pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
+ pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
+
+ pipes[pipe_cnt].dout.dsc_input_bpc = 0;
+ if (pipes[pipe_cnt].dout.dsc_enable) {
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_888:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 12;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+ pipe_cnt++;
+ }
+ dc->config.enable_4to1MPC = false;
+ if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
+ if (is_dual_plane(pipe->plane_state->format)
+ && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
+ dc->config.enable_4to1MPC = true;
+ } else if (!is_dual_plane(pipe->plane_state->format)) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
+ pipes[0].pipe.src.unbounded_req_mode = true;
+ }
+ }
+
+ return pipe_cnt;
+}
+
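+/*
+ * Run full DML validation: compute the voltage level, apply pipe split/merge
+ * flags, and, when p-state switching is otherwise unsupported (or forced),
+ * try adding SubVP phantom pipes and re-validating the configuration.
+ */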
+static void dml_full_validate_bw_helper(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int *vlevel,
+ int *split,
+ bool *merge,
+ int *pipe_cnt)
+{
+ struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
+
+ /*
+ * DML favors voltage over p-state, but we're more interested in
+ * supporting p-state over voltage. We can't support p-state in
+ * prefetch mode > 0 so try capping the prefetch mode to start.
+ */
+ context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
+ dm_allow_self_refresh_and_mclk_switch;
+ *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
+ /* This may adjust vlevel and maxMpcComb */
+ if (*vlevel < context->bw_ctx.dml.soc.num_states)
+ *vlevel = dml_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
+
+ /* Conditions for setting up phantom pipes for SubVP:
+ * 1. Not force disable SubVP
+ * 2. Full update (i.e. !fast_validate)
+ * 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?)
+ * 4. Display configuration passes validation
+ * 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
+ */
+ if (!dc->debug.force_disable_subvp &&
+ dml_enough_pipes_for_subvp(dc, context) &&
+ *vlevel < context->bw_ctx.dml.soc.num_states &&
+ (vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported ||
+ dc->debug.force_subvp_mclk_switch)) {
+
+ dml_add_phantom_pipes(dc, context);
+
+ /* Create input to DML based on new context which includes phantom pipes
+ * TODO: Input to DML should mark which pipes are phantom
+ */
+ *pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, false);
+ *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
+ if (*vlevel < context->bw_ctx.dml.soc.num_states) {
+ memset(split, 0, MAX_PIPES * sizeof(*split));
+ memset(merge, 0, MAX_PIPES * sizeof(*merge));
+ *vlevel = dml_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
+ }
+
+ // If SubVP pipe config is unsupported (or cannot be used for UCLK switching)
+ // remove phantom pipes and repopulate dml pipes
+ if (*vlevel == context->bw_ctx.dml.soc.num_states ||
+ vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
+ dml_remove_phantom_pipes(dc, context);
+ *pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, false);
+ }
+ }
+}
+
+static void dcn20_adjust_adaptive_sync_v_startup(
+ const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start)
+{
+ struct dc_crtc_timing patched_crtc_timing;
+ uint32_t asic_blank_end = 0;
+ uint32_t asic_blank_start = 0;
+ uint32_t newVstartup = 0;
+
+ patched_crtc_timing = *dc_crtc_timing;
+
+ if (patched_crtc_timing.flags.INTERLACE == 1) {
+ if (patched_crtc_timing.v_front_porch < 2)
+ patched_crtc_timing.v_front_porch = 2;
+ } else {
+ if (patched_crtc_timing.v_front_porch < 1)
+ patched_crtc_timing.v_front_porch = 1;
+ }
+
+ /* blank_start = frame end - front porch */
+ asic_blank_start = patched_crtc_timing.v_total -
+ patched_crtc_timing.v_front_porch;
+
+ /* blank_end = blank_start - active */
+ asic_blank_end = asic_blank_start -
+ patched_crtc_timing.v_border_bottom -
+ patched_crtc_timing.v_addressable -
+ patched_crtc_timing.v_border_top;
+
+ newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start);
+
+ *vstartup_start = ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start);
+}
+
+static bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx)
+{
+ return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
+ pipe_ctx->link_res.hpo_dp_link_enc &&
+ dc_is_dp_signal(pipe_ctx->stream->signal));
+}
+
+static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
+{
+ int i;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+#if defined (CONFIG_DRM_AMD_DC_DP2_0)
+ if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
+ return true;
+#endif
+ }
+ return false;
+}
+
+static void dml_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
+{
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
+ }
+}
+
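+/*
+ * Core validation path: populate DML pipes from the context, run DML to get a
+ * voltage level (retrying with self-refresh only if p-state switching is not
+ * supported), then merge/split pipes as requested by the split flags and
+ * validate DSC before reporting the final pipe count and voltage level.
+ */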
+static bool dml_internal_validate(
+ struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int *pipe_cnt_out,
+ int *vlevel_out,
+ bool fast_validate)
+{
+ bool out = false;
+ bool repopulate_pipes = false;
+ int split[MAX_PIPES] = { 0 };
+ bool merge[MAX_PIPES] = { false };
+ bool newly_split[MAX_PIPES] = { false };
+ int pipe_cnt, i, pipe_idx, vlevel;
+ struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
+
+ ASSERT(pipes);
+ if (!pipes)
+ return false;
+
+ // For each full update, remove all existing phantom pipes first
+ dml_remove_phantom_pipes(dc, context);
+
+ dml_update_soc_for_wm_a(dc, context);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state) {
+ // On initial pass through DML, we intend to use MALL for SS on all
+ // (non-PSR) surfaces with none using MALL for P-State
+ // 'mall_plane_config' is not a member of 'dc_plane_state' - commenting it out until mall_plane_config is supported in dc_plane_state
+ //if (pipe->stream && pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED)
+ // pipe->plane_state->mall_plane_config.use_mall_for_ss = true;
+ }
+ }
+ pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+
+ if (!pipe_cnt) {
+ out = true;
+ goto validate_out;
+ }
+
+ dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
+
+ if (!fast_validate) {
+ dml_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt);
+ }
+
+ if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
+ vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
+ /*
+ * If mode is unsupported or there's still no p-state support then
+ * fall back to favoring voltage.
+ *
+ * We don't actually support prefetch mode 2, so require that we
+ * at least support prefetch mode 1.
+ */
+ context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
+ dm_allow_self_refresh;
+
+ vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
+ if (vlevel < context->bw_ctx.dml.soc.num_states) {
+ memset(split, 0, sizeof(split));
+ memset(merge, 0, sizeof(merge));
+ vlevel = dml_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
+ }
+ }
+
+ dml_log_mode_support_params(&context->bw_ctx.dml);
+
+ if (vlevel == context->bw_ctx.dml.soc.num_states)
+ goto validate_fail;
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;
+
+ if (!pipe->stream)
+ continue;
+
+ /* We only support full screen mpo with ODM */
+ if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
+ && pipe->plane_state && mpo_pipe
+ && memcmp(&mpo_pipe->plane_res.scl_data.recout,
+ &pipe->plane_res.scl_data.recout,
+ sizeof(struct rect)) != 0) {
+ ASSERT(mpo_pipe->plane_state != pipe->plane_state);
+ goto validate_fail;
+ }
+ pipe_idx++;
+ }
+
+ /* merge pipes if necessary */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ /*skip pipes that don't need merging*/
+ if (!merge[i])
+ continue;
+
+ /* if merging an ODM pipe we ignore the MPC tree; MPO pipes will have their own flags */
+ if (pipe->prev_odm_pipe) {
+ /*split off odm pipe*/
+ pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe;
+ if (pipe->next_odm_pipe)
+ pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;
+
+ pipe->bottom_pipe = NULL;
+ pipe->next_odm_pipe = NULL;
+ pipe->plane_state = NULL;
+ pipe->stream = NULL;
+ pipe->top_pipe = NULL;
+ pipe->prev_odm_pipe = NULL;
+ if (pipe->stream_res.dsc)
+ dml_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
+ memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
+ memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
+ repopulate_pipes = true;
+ } else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
+ struct pipe_ctx *top_pipe = pipe->top_pipe;
+ struct pipe_ctx *bottom_pipe = pipe->bottom_pipe;
+
+ top_pipe->bottom_pipe = bottom_pipe;
+ if (bottom_pipe)
+ bottom_pipe->top_pipe = top_pipe;
+
+ pipe->top_pipe = NULL;
+ pipe->bottom_pipe = NULL;
+ pipe->plane_state = NULL;
+ pipe->stream = NULL;
+ memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
+ memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
+ repopulate_pipes = true;
+ } else
+ ASSERT(0); /* Should never try to merge master pipe */
+
+ }
+
+ for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *hsplit_pipe = NULL;
+ bool odm;
+ int old_index = -1;
+
+ if (!pipe->stream || newly_split[i])
+ continue;
+
+ pipe_idx++;
+ odm = vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled;
+
+ if (!pipe->plane_state && !odm)
+ continue;
+
+ if (split[i]) {
+ if (odm) {
+ if (split[i] == 4 && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe)
+ old_index = old_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
+ else if (old_pipe->next_odm_pipe)
+ old_index = old_pipe->next_odm_pipe->pipe_idx;
+ } else {
+ if (split[i] == 4 && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe &&
+ old_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
+ old_index = old_pipe->bottom_pipe->bottom_pipe->pipe_idx;
+ else if (old_pipe->bottom_pipe &&
+ old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
+ old_index = old_pipe->bottom_pipe->pipe_idx;
+ }
+ hsplit_pipe = dml_find_split_pipe(dc, context, old_index);
+ ASSERT(hsplit_pipe);
+ if (!hsplit_pipe)
+ goto validate_fail;
+
+ if (!dml_split_stream_for_mpc_or_odm(
+ dc, &context->res_ctx,
+ pipe, hsplit_pipe, odm))
+ goto validate_fail;
+
+ newly_split[hsplit_pipe->pipe_idx] = true;
+ repopulate_pipes = true;
+ }
+ if (split[i] == 4) {
+ struct pipe_ctx *pipe_4to1;
+
+ if (odm && old_pipe->next_odm_pipe)
+ old_index = old_pipe->next_odm_pipe->pipe_idx;
+ else if (!odm && old_pipe->bottom_pipe &&
+ old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
+ old_index = old_pipe->bottom_pipe->pipe_idx;
+ else
+ old_index = -1;
+ pipe_4to1 = dml_find_split_pipe(dc, context, old_index);
+ ASSERT(pipe_4to1);
+ if (!pipe_4to1)
+ goto validate_fail;
+ if (!dml_split_stream_for_mpc_or_odm(
+ dc, &context->res_ctx,
+ pipe, pipe_4to1, odm))
+ goto validate_fail;
+ newly_split[pipe_4to1->pipe_idx] = true;
+
+ if (odm && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe
+ && old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe)
+ old_index = old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
+ else if (!odm && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe &&
+ old_pipe->bottom_pipe->bottom_pipe->bottom_pipe &&
+ old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
+ old_index = old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx;
+ else
+ old_index = -1;
+ pipe_4to1 = dml_find_split_pipe(dc, context, old_index);
+ ASSERT(pipe_4to1);
+ if (!pipe_4to1)
+ goto validate_fail;
+ if (!dml_split_stream_for_mpc_or_odm(
+ dc, &context->res_ctx,
+ hsplit_pipe, pipe_4to1, odm))
+ goto validate_fail;
+ newly_split[pipe_4to1->pipe_idx] = true;
+ }
+ if (odm)
+ dml_build_mapped_resource(dc, context, pipe->stream);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state) {
+ if (!resource_build_scaling_params(pipe))
+ goto validate_fail;
+ }
+ }
+
+ /* Validate the actual DSC count per stream */
+ if (!dml_validate_dsc(dc, context)) {
+ vba->ValidationStatus[vba->soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE;
+ goto validate_fail;
+ }
+
+ if (repopulate_pipes)
+ pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ *vlevel_out = vlevel;
+ *pipe_cnt_out = pipe_cnt;
+
+ out = true;
+ goto validate_out;
+
+validate_fail:
+ out = false;
+
+validate_out:
+ return out;
+}
+
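+/*
+ * Translate DML/VBA outputs into context clocks and per-pipe DLG, TTU and RQ
+ * register values for the chosen voltage level.
+ */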
+static void dml_calculate_dlg_params(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel)
+{
+ int i, pipe_idx;
+ int plane_count;
+
+ /* Writeback MCIF_WB arbitration parameters */
+ if (dc->res_pool)
+ dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);
+
+ context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000;
+ context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000;
+ context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000;
+ context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
+ context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000;
+ context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000;
+ context->bw_ctx.bw.dcn.clk.p_state_change_support =
+ context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]
+ != dm_dram_clock_change_unsupported;
+
+ context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
+ /* 'z9_support': is not a member of 'dc_clocks' - Commenting out till we have this support in dc_clocks
+ * context->bw_ctx.bw.dcn.clk.z9_support = (context->bw_ctx.dml.vba.StutterPeriod > 5000.0) ?
+ DCN_Z9_SUPPORT_ALLOW : DCN_Z9_SUPPORT_DISALLOW;
+ */
+ plane_count = 0;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].plane_state)
+ plane_count++;
+ }
+
+ /* Commented out as per above error for now.
+ if (plane_count == 0)
+ context->bw_ctx.bw.dcn.clk.z9_support = DCN_Z9_SUPPORT_ALLOW;
+ */
+ context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context);
+ /* TODO : Uncomment the below line and make changes
+ * as per DML nomenclature once it is available.
+ * context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = context->bw_ctx.dml.vba.fclk_pstate_support;
+ */
+
+ if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
+ context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz;
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+ pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+ pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+ pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+ pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+ if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ // Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
+ context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0;
+ context->res_ctx.pipe_ctx[i].unbounded_req = false;
+ } else {
+ context->res_ctx.pipe_ctx[i].det_buffer_size_kb = context->bw_ctx.dml.ip.det_buffer_size_kbytes;
+ context->res_ctx.pipe_ctx[i].unbounded_req = pipes[pipe_idx].pipe.src.unbounded_req_mode;
+ }
+
+ if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
+ context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
+ context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
+ pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
+ context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
+ pipe_idx++;
+ }
+ /* save an original copy of dppclk */
+ context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
+ context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;
+ context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dppclk_mhz * 1000;
+ context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz * 1000;
+ context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes
+ - context->bw_ctx.dml.ip.det_buffer_size_kbytes * pipe_idx;
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ bool cstate_en = context->bw_ctx.dml.vba.PrefetchMode[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != 2;
+
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg(&context->bw_ctx.dml,
+ &context->res_ctx.pipe_ctx[i].dlg_regs,
+ &context->res_ctx.pipe_ctx[i].ttu_regs,
+ pipes,
+ pipe_cnt,
+ pipe_idx,
+ cstate_en,
+ context->bw_ctx.bw.dcn.clk.p_state_change_support,
+ false, false, true);
+
+ context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml,
+ &context->res_ctx.pipe_ctx[i].rq_regs,
+ &pipes[pipe_idx].pipe);
+ pipe_idx++;
+ }
+}
+
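+/*
+ * Compute watermark sets from DML outputs using the clock manager's watermark
+ * table entries: Set B, Set D (all clocks min), and Set C for the dummy
+ * p-state, falling back to conservative clocks when SMU limits are missing.
+ */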
+static void dml_calculate_wm_and_dlg(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel)
+{
+ int i, pipe_idx, vlevel_temp = 0;
+
+ double dcfclk = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
+ double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+ unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
+ bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
+ dm_dram_clock_change_unsupported;
+
+ /* Set B:
+ * For Set B calculations use clocks from clock_limits[2] when available i.e. when SMU is present,
+ * otherwise use arbitrary low value from spreadsheet for DCFCLK as lower is safer for watermark
+ * calculations to cover bootup clocks.
+ * DCFCLK: soc.clock_limits[2] when available
+ * UCLK: soc.clock_limits[2] when available
+ */
+ if (context->bw_ctx.dml.soc.num_states > 2) {
+ vlevel_temp = 2;
+ dcfclk = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz;
+ } else
+ dcfclk = 615; //DCFCLK Vmin_lv
+
+ pipes[0].clks_cfg.voltage = vlevel_temp;
+ pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
+ pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz;
+
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ //context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.fclk_pstate_change_ns = get_wm_fclk_pstate(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ //context->bw_ctx.bw.dcn.watermarks.b.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ /* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.fclk_pstate_change_ns = context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns / 4;
+ context->bw_ctx.bw.dcn.watermarks.b.usr_retraining_ns = context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns / 8;
+
+ /* Set D:
+ * All clocks min.
+ * DCFCLK: Min, as reported by PM FW when available
+ * UCLK : Min, as reported by PM FW when available
+ * sr_enter_exit/sr_exit should be lower than used for DRAM (TBD after bringup or later, use as decided in Clk Mgr)
+ */
+
+ if (context->bw_ctx.dml.soc.num_states > 2) {
+ vlevel_temp = 0;
+ dcfclk = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
+ } else
+ dcfclk = 615; //DCFCLK Vmin_lv
+
+ pipes[0].clks_cfg.voltage = vlevel_temp;
+ pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
+ pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz;
+
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ //context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.fclk_pstate_change_ns = get_wm_fclk_pstate(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ //context->bw_ctx.bw.dcn.watermarks.d.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ /* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.fclk_pstate_change_ns = context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns / 4;
+ context->bw_ctx.bw.dcn.watermarks.d.usr_retraining_ns = context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns / 8;
+
+ /* Set C, for Dummy P-State:
+ * All clocks min.
+ * DCFCLK: Min, as reported by PM FW, when available
+ * UCLK : Min, as reported by PM FW, when available
+ * pstate latency as per UCLK state dummy pstate latency
+ */
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
+ unsigned int min_dram_speed_mts_margin = 160;
+
+ if (!pstate_en)
+ min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16;
+
+ /* find the largest table entry that is lower than dram speed; anything lower than DPM0 still uses DPM0 */
+ for (i = 3; i > 0; i--)
+ if (min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts)
+ break;
+
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
+ context->bw_ctx.dml.soc.dummy_pstate_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ //context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = get_wm_fclk_pstate(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ //context->bw_ctx.bw.dcn.watermarks.c.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ /* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns / 4;
+ context->bw_ctx.bw.dcn.watermarks.c.usr_retraining_ns = context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns / 8;
+
+ if ((!pstate_en) && (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid)) {
+ /* The only difference between A and C is p-state latency; if p-state is not supported
+ * with full p-state latency, we want to calculate DLG based on the dummy p-state latency.
+ * Set A p-state watermark was previously set to 0 when p-state is unsupported; for now,
+ * keep that previous behaviour.
+ */
+ context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0;
+ } else {
+ /* Set A:
+ * All clocks min.
+ * DCFCLK: Min, as reported by PM FW, when available
+ * UCLK: Min, as reported by PM FW, when available
+ */
+ dml_update_soc_for_wm_a(dc, context);
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ }
+
+ pipes[0].clks_cfg.voltage = vlevel;
+ pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_validation;
+ pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+
+ if (dc->config.forced_clocks) {
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
+ }
+ if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
+ if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
+
+ pipe_idx++;
+ }
+
+ context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
+
+ dml_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+
+ if (!pstate_en)
+ /* Restore full p-state latency */
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+ dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+}
+
+bool dml_validate(struct dc *dc,
+ struct dc_state *context,
+ bool fast_validate)
+{
+ bool out = false;
+
+ BW_VAL_TRACE_SETUP();
+
+ int vlevel = 0;
+ int pipe_cnt = 0;
+ display_e2e_pipe_params_st *pipes = context->bw_ctx.dml.dml_pipe_state;
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ BW_VAL_TRACE_COUNT();
+
+ out = dml_internal_validate(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
+
+ if (pipe_cnt == 0)
+ goto validate_out;
+
+ if (!out)
+ goto validate_fail;
+
+ BW_VAL_TRACE_END_VOLTAGE_LEVEL();
+
+ if (fast_validate) {
+ BW_VAL_TRACE_SKIP(fast);
+ goto validate_out;
+ }
+
+ dml_calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
+
+ BW_VAL_TRACE_END_WATERMARKS();
+
+ goto validate_out;
+
+validate_fail:
+ DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
+ dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
+
+ BW_VAL_TRACE_SKIP(fail);
+ out = false;
+
+validate_out:
+ BW_VAL_TRACE_FINISH();
+
+ return out;
+}
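Editor's note: the dummy p-state selection in dml_calculate_wm_and_dlg() above walks dummy_pstate_table from the highest entry down and stops at the largest entry whose DRAM speed is still below the current minimum DRAM speed plus a 160 MT/s margin; anything below DPM0 falls back to entry 0. A minimal standalone sketch of that search follows; the table contents and the minimum speed are made-up illustration values, not driver data.

#include <stdio.h>

struct dummy_pstate_entry {
	unsigned int dram_speed_mts;
	unsigned int dummy_pstate_latency_us;
};

int main(void)
{
	/* Hypothetical table, ordered from lowest to highest DRAM speed */
	struct dummy_pstate_entry table[4] = {
		{ 1600, 38 }, { 8000, 9 }, { 10000, 8 }, { 16000, 5 },
	};
	unsigned int min_dram_speed_mts = 8600;
	unsigned int margin = 160;
	int i;

	/* Same walk as the driver: largest entry below (speed + margin);
	 * anything lower than DPM0 still selects entry 0. */
	for (i = 3; i > 0; i--)
		if (min_dram_speed_mts + margin > table[i].dram_speed_mts)
			break;

	printf("selected entry %d, dummy p-state latency %u us\n",
	       i, table[i].dummy_pstate_latency_us);
	return 0;
}

With the sample numbers above, 8600 + 160 first exceeds the 8000 MT/s entry, so entry 1 (9 us) is chosen.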
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c
new file mode 100644
index 000000000000..4ec5310a2962
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifdef DML_WRAPPER_TRANSLATION_
+
+static void gfx10array_mode_to_dml_params(
+ enum array_mode_values array_mode,
+ enum legacy_tiling_compat_level compat_level,
+ unsigned int *sw_mode)
+{
+ switch (array_mode) {
+ case DC_ARRAY_LINEAR_ALLIGNED:
+ case DC_ARRAY_LINEAR_GENERAL:
+ *sw_mode = dm_sw_linear;
+ break;
+ case DC_ARRAY_2D_TILED_THIN1:
+// DC_LEGACY_TILING_ADDR_GEN_ZERO is undefined in the current code, hence this path is compiled out
+#if 0
+ if (compat_level == DC_LEGACY_TILING_ADDR_GEN_ZERO)
+ *sw_mode = dm_sw_gfx7_2d_thin_l_vp;
+ else
+ *sw_mode = dm_sw_gfx7_2d_thin_gl;
+#endif
+ break;
+ default:
+ ASSERT(0); /* Not supported */
+ break;
+ }
+}
+
+static void swizzle_to_dml_params(
+ enum swizzle_mode_values swizzle,
+ unsigned int *sw_mode)
+{
+ switch (swizzle) {
+ case DC_SW_LINEAR:
+ *sw_mode = dm_sw_linear;
+ break;
+ case DC_SW_4KB_S:
+ *sw_mode = dm_sw_4kb_s;
+ break;
+ case DC_SW_4KB_S_X:
+ *sw_mode = dm_sw_4kb_s_x;
+ break;
+ case DC_SW_4KB_D:
+ *sw_mode = dm_sw_4kb_d;
+ break;
+ case DC_SW_4KB_D_X:
+ *sw_mode = dm_sw_4kb_d_x;
+ break;
+ case DC_SW_64KB_S:
+ *sw_mode = dm_sw_64kb_s;
+ break;
+ case DC_SW_64KB_S_X:
+ *sw_mode = dm_sw_64kb_s_x;
+ break;
+ case DC_SW_64KB_S_T:
+ *sw_mode = dm_sw_64kb_s_t;
+ break;
+ case DC_SW_64KB_D:
+ *sw_mode = dm_sw_64kb_d;
+ break;
+ case DC_SW_64KB_D_X:
+ *sw_mode = dm_sw_64kb_d_x;
+ break;
+ case DC_SW_64KB_D_T:
+ *sw_mode = dm_sw_64kb_d_t;
+ break;
+ case DC_SW_64KB_R_X:
+ *sw_mode = dm_sw_64kb_r_x;
+ break;
+ case DC_SW_VAR_S:
+ *sw_mode = dm_sw_var_s;
+ break;
+ case DC_SW_VAR_S_X:
+ *sw_mode = dm_sw_var_s_x;
+ break;
+ case DC_SW_VAR_D:
+ *sw_mode = dm_sw_var_d;
+ break;
+ case DC_SW_VAR_D_X:
+ *sw_mode = dm_sw_var_d_x;
+ break;
+
+ default:
+ ASSERT(0); /* Not supported */
+ break;
+ }
+}
+
+static void dc_timing_to_dml_timing(const struct dc_crtc_timing *timing, struct _vcs_dpi_display_pipe_dest_params_st *dest)
+{
+ dest->hblank_start = timing->h_total - timing->h_front_porch;
+ dest->hblank_end = dest->hblank_start
+ - timing->h_addressable
+ - timing->h_border_left
+ - timing->h_border_right;
+ dest->vblank_start = timing->v_total - timing->v_front_porch;
+ dest->vblank_end = dest->vblank_start
+ - timing->v_addressable
+ - timing->v_border_top
+ - timing->v_border_bottom;
+ dest->htotal = timing->h_total;
+ dest->vtotal = timing->v_total;
+ dest->hactive = timing->h_addressable;
+ dest->vactive = timing->v_addressable;
+ dest->interlaced = timing->flags.INTERLACE;
+ dest->pixel_rate_mhz = timing->pix_clk_100hz/10000.0;
+ if (timing->timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
+ dest->pixel_rate_mhz *= 2;
+}
+
+static enum odm_combine_mode get_dml_odm_combine(const struct pipe_ctx *pipe)
+{
+ int odm_split_count = 0;
+ enum odm_combine_mode combine_mode = dm_odm_combine_mode_disabled;
+ struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
+
+ // Traverse pipe tree to determine odm split count
+ while (next_pipe) {
+ odm_split_count++;
+ next_pipe = next_pipe->next_odm_pipe;
+ }
+ pipe = pipe->prev_odm_pipe;
+ while (pipe) {
+ odm_split_count++;
+ pipe = pipe->prev_odm_pipe;
+ }
+
+ // Translate split to DML odm combine factor
+ switch (odm_split_count) {
+ case 1:
+ combine_mode = dm_odm_combine_mode_2to1;
+ break;
+ case 3:
+ combine_mode = dm_odm_combine_mode_4to1;
+ break;
+ default:
+ combine_mode = dm_odm_combine_mode_disabled;
+ }
+
+ return combine_mode;
+}
+
+static int get_dml_output_type(enum signal_type dc_signal)
+{
+ int dml_output_type = -1;
+
+ switch (dc_signal) {
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ dml_output_type = dm_dp;
+ break;
+ case SIGNAL_TYPE_EDP:
+ dml_output_type = dm_edp;
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ dml_output_type = dm_hdmi;
+ break;
+ default:
+ break;
+ }
+
+ return dml_output_type;
+}
+
+static void populate_color_depth_and_encoding_from_timing(const struct dc_crtc_timing *timing, struct _vcs_dpi_display_output_params_st *dout)
+{
+ int output_bpc = 0;
+
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ output_bpc = 6;
+ break;
+ case COLOR_DEPTH_888:
+ output_bpc = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ output_bpc = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ output_bpc = 12;
+ break;
+ case COLOR_DEPTH_141414:
+ output_bpc = 14;
+ break;
+ case COLOR_DEPTH_161616:
+ output_bpc = 16;
+ break;
+ case COLOR_DEPTH_999:
+ output_bpc = 9;
+ break;
+ case COLOR_DEPTH_111111:
+ output_bpc = 11;
+ break;
+ default:
+ output_bpc = 8;
+ break;
+ }
+
+ switch (timing->pixel_encoding) {
+ case PIXEL_ENCODING_RGB:
+ case PIXEL_ENCODING_YCBCR444:
+ dout->output_format = dm_444;
+ dout->output_bpp = output_bpc * 3;
+ break;
+ case PIXEL_ENCODING_YCBCR420:
+ dout->output_format = dm_420;
+ dout->output_bpp = (output_bpc * 3.0) / 2;
+ break;
+ case PIXEL_ENCODING_YCBCR422:
+ if (timing->flags.DSC && !timing->dsc_cfg.ycbcr422_simple)
+ dout->output_format = dm_n422;
+ else
+ dout->output_format = dm_s422;
+ dout->output_bpp = output_bpc * 2;
+ break;
+ default:
+ dout->output_format = dm_444;
+ dout->output_bpp = output_bpc * 3;
+ }
+}
+
+static enum source_format_class dc_source_format_to_dml_source_format(enum surface_pixel_format dc_format)
+{
+ enum source_format_class dml_format = dm_444_32;
+
+ switch (dc_format) {
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ dml_format = dm_420_8;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ dml_format = dm_420_10;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ dml_format = dm_444_64;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ dml_format = dm_444_16;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
+ dml_format = dm_444_8;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
+ dml_format = dm_rgbe_alpha;
+ break;
+ default:
+ dml_format = dm_444_32;
+ break;
+ }
+
+ return dml_format;
+}
+
+#endif
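Editor's note: get_dml_odm_combine() above derives the DML combine factor purely from how many ODM siblings are linked to a pipe in both directions (one sibling means 2:1, three siblings mean 4:1, otherwise combine is disabled). A small standalone sketch of the same counting logic; the odm_pipe structure here is a stand-in for the driver's struct pipe_ctx, not the real type.

#include <stdio.h>

struct odm_pipe {
	struct odm_pipe *next_odm_pipe;
	struct odm_pipe *prev_odm_pipe;
};

/* 1 sibling -> "2to1", 3 siblings -> "4to1", otherwise "disabled" */
static const char *odm_combine_from_split(const struct odm_pipe *pipe)
{
	int odm_split_count = 0;
	const struct odm_pipe *p;

	for (p = pipe->next_odm_pipe; p; p = p->next_odm_pipe)
		odm_split_count++;
	for (p = pipe->prev_odm_pipe; p; p = p->prev_odm_pipe)
		odm_split_count++;

	switch (odm_split_count) {
	case 1: return "2to1";
	case 3: return "4to1";
	default: return "disabled";
	}
}

int main(void)
{
	struct odm_pipe a = { 0 }, b = { 0 };

	a.next_odm_pipe = &b;
	b.prev_odm_pipe = &a;
	printf("combine mode: %s\n", odm_combine_from_split(&a)); /* prints "2to1" */
	return 0;
}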
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c
index 3ee858f311d1..ec636d06e18c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c
@@ -61,16 +61,6 @@ static double dsc_roundf(double num)
return (int)(num);
}
-static double dsc_ceil(double num)
-{
- double retval = (int)num;
-
- if (retval != num && num > 0)
- retval = num + 1;
-
- return (int)retval;
-}
-
static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc,
enum max_min max_min, float bpp)
{
@@ -103,7 +93,7 @@ static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc,
TABLE_CASE(420, 12, min);
}
- if (table == 0)
+ if (!table)
return;
index = (bpp - table[0].bpp) * 2;
@@ -268,24 +258,3 @@ void _do_calc_rc_params(struct rc_params *rc,
rc->rc_buf_thresh[13] = 8064;
}
-u32 _do_bytes_per_pixel_calc(int slice_width,
- u16 drm_bpp,
- bool is_navite_422_or_420)
-{
- float bpp;
- u32 bytes_per_pixel;
- double d_bytes_per_pixel;
-
- dc_assert_fp_enabled();
-
- bpp = ((float)drm_bpp / 16.0);
- d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width;
- // TODO: Make sure the formula for calculating this is precise (ceiling
- // vs. floor, and at what point they should be applied)
- if (is_navite_422_or_420)
- d_bytes_per_pixel /= 2;
-
- bytes_per_pixel = (u32)dsc_ceil(d_bytes_per_pixel * 0x10000000);
-
- return bytes_per_pixel;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h
index b93b95409fbe..cad244c023cd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h
@@ -78,10 +78,6 @@ struct qp_entry {
typedef struct qp_entry qp_table[];
-u32 _do_bytes_per_pixel_calc(int slice_width,
- u16 drm_bpp,
- bool is_navite_422_or_420);
-
void _do_calc_rc_params(struct rc_params *rc,
enum colour_mode cm,
enum bits_per_comp bpc,
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 0321b4446e05..9c74564cbd8d 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -455,6 +455,7 @@ static bool intersect_dsc_caps(
if (pixel_encoding == PIXEL_ENCODING_YCBCR422 || pixel_encoding == PIXEL_ENCODING_YCBCR420)
dsc_common_caps->bpp_increment_div = min(dsc_common_caps->bpp_increment_div, (uint32_t)8);
+ dsc_common_caps->edp_sink_max_bits_per_pixel = dsc_sink_caps->edp_max_bits_per_pixel;
dsc_common_caps->is_dp = dsc_sink_caps->is_dp;
return true;
}
@@ -513,6 +514,13 @@ static bool decide_dsc_bandwidth_range(
range->min_target_bpp_x16 = preferred_bpp_x16;
}
}
+ /* TODO - make this value generic to all signal types */
+ else if (dsc_caps->edp_sink_max_bits_per_pixel) {
+ /* apply max bpp limitation from edp sink */
+ range->max_target_bpp_x16 = MIN(dsc_caps->edp_sink_max_bits_per_pixel,
+ max_bpp_x16);
+ range->min_target_bpp_x16 = min_bpp_x16;
+ }
else {
range->max_target_bpp_x16 = max_bpp_x16;
range->min_target_bpp_x16 = min_bpp_x16;
@@ -574,7 +582,7 @@ static bool decide_dsc_target_bpp_x16(
return *target_bpp_x16 != 0;
}
-#define MIN_AVAILABLE_SLICES_SIZE 4
+#define MIN_AVAILABLE_SLICES_SIZE 6
static int get_available_dsc_slices(union dsc_enc_slice_caps slice_caps, int *available_slices)
{
@@ -860,6 +868,10 @@ static bool setup_dsc_config(
min_slices_h = 0; // DSC TODO: Maybe try increasing the number of slices first?
is_dsc_possible = (min_slices_h <= max_slices_h);
+
+ if (min_slices_h == 0 && max_slices_h == 0)
+ is_dsc_possible = false;
+
if (!is_dsc_possible)
goto done;
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
index b19d3aeb5962..e97cf09be9d5 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
@@ -60,31 +60,3 @@ void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps)
pps->dsc_version_minor);
DC_FP_END();
}
-
-/**
- * calc_dsc_bytes_per_pixel - calculate bytes per pixel
- * @pps: DRM struct with all required DSC values
- *
- * Based on the information inside drm_dsc_config, this function calculates the
- * total of bytes per pixel.
- *
- * @note This calculation requires float point operation, most of it executes
- * under kernel_fpu_{begin,end}.
- *
- * Return:
- * Return the number of bytes per pixel
- */
-u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps)
-
-{
- u32 ret;
- u16 drm_bpp = pps->bits_per_pixel;
- int slice_width = pps->slice_width;
- bool is_navite_422_or_420 = pps->native_422 || pps->native_420;
-
- DC_FP_START();
- ret = _do_bytes_per_pixel_calc(slice_width, drm_bpp,
- is_navite_422_or_420);
- DC_FP_END();
- return ret;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
index c2340e001b57..80921c1c0d53 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
@@ -30,7 +30,6 @@
#include "dml/dsc/rc_calc_fpu.h"
void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps);
-u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
index 1e19dd674e5a..7e306aa3e2b9 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
@@ -100,8 +100,7 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_par
int ret;
struct rc_params rc;
struct drm_dsc_config dsc_cfg;
-
- dsc_params->bytes_per_pixel = calc_dsc_bytes_per_pixel(pps);
+ unsigned long long tmp;
calc_rc_params(&rc, pps);
dsc_params->pps = *pps;
@@ -113,6 +112,9 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_par
dsc_cfg.mux_word_size = dsc_params->pps.bits_per_component <= 10 ? 48 : 64;
ret = drm_dsc_compute_rc_parameters(&dsc_cfg);
+ tmp = (unsigned long long)dsc_cfg.slice_chunk_size * 0x10000000 + (dsc_cfg.slice_width - 1);
+ do_div(tmp, (uint32_t)dsc_cfg.slice_width); //ROUND-UP
+ dsc_params->bytes_per_pixel = (uint32_t)tmp;
copy_pps_fields(&dsc_params->pps, &dsc_cfg);
dsc_params->rc_buffer_model_size = dsc_cfg.rc_bits;
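Editor's note: the replacement bytes_per_pixel computation in dscc_compute_dsc_parameters() is a round-up fixed-point division, effectively ceil(slice_chunk_size / slice_width) expressed in u4.28 fixed point (the 0x10000000 scale factor). A standalone worked example with made-up chunk and width values; plain 64-bit division stands in for the kernel's do_div().

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical values for illustration only */
	uint32_t slice_chunk_size = 540;  /* bytes per chunk */
	uint32_t slice_width = 1920;      /* pixels per slice */
	unsigned long long tmp;

	/* Round-up division, scaled by 2^28 (u4.28 fixed point) */
	tmp = (unsigned long long)slice_chunk_size * 0x10000000 + (slice_width - 1);
	tmp /= slice_width;               /* do_div() in the kernel */

	printf("bytes_per_pixel (u4.28) = 0x%08llx (~%.4f bytes/px)\n",
	       tmp, (double)tmp / 0x10000000);
	return 0;
}

For 540 bytes over 1920 pixels this prints roughly 0.2813 bytes per pixel.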
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
index d34b0b0eea65..444182a97e6e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
@@ -53,6 +53,8 @@ enum dc_status {
DC_NOT_SUPPORTED = 24,
DC_UNSUPPORTED_VALUE = 25,
+ DC_NO_LINK_ENC_RESOURCE = 26,
+
DC_ERROR_UNEXPECTED = -1
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 6fc6488c54c0..943240e2809e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -334,6 +334,20 @@ struct plane_resource {
struct dcn_fe_bandwidth bw;
};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+#define LINK_RES_HPO_DP_REC_MAP__MASK 0xFFFF
+#define LINK_RES_HPO_DP_REC_MAP__SHIFT 0
+#endif
+
+/* all mappable hardware resources used to enable a link */
+struct link_resource {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct hpo_dp_link_encoder *hpo_dp_link_enc;
+#else
+ void *dummy;
+#endif
+};
+
union pipe_update_flags {
struct {
uint32_t enable : 1;
@@ -361,12 +375,14 @@ struct pipe_ctx {
struct plane_resource plane_res;
struct stream_resource stream_res;
+ struct link_resource link_res;
struct clock_source *clock_source;
struct pll_settings pll_settings;
uint8_t pipe_idx;
+ uint8_t pipe_idx_syncd;
struct pipe_ctx *top_pipe;
struct pipe_ctx *bottom_pipe;
@@ -411,6 +427,8 @@ struct resource_context {
struct link_enc_cfg_context link_enc_cfg_ctx;
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool is_hpo_dp_stream_enc_acquired[MAX_HPO_DP2_ENCODERS];
+ unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS];
+ int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS];
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool is_mpc_3dlut_acquired[MAX_PIPES];
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index a6d3d859754a..cd52813a8432 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -56,16 +56,19 @@ enum {
bool dp_verify_link_cap(
struct dc_link *link,
+ const struct link_resource *link_res,
struct dc_link_settings *known_limit_link_setting,
int *fail_count);
bool dp_verify_link_cap_with_retries(
struct dc_link *link,
+ const struct link_resource *link_res,
struct dc_link_settings *known_limit_link_setting,
int attempts);
bool dp_verify_mst_link_cap(
- struct dc_link *link);
+ struct dc_link *link,
+ const struct link_resource *link_res);
bool dp_validate_mode_timing(
struct dc_link *link,
@@ -168,8 +171,9 @@ uint8_t dc_dp_initialize_scrambling_data_symbols(
struct dc_link *link,
enum dc_dp_training_pattern pattern);
-enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready);
+enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource *link_res, bool ready);
void dp_set_fec_enable(struct dc_link *link, bool enable);
+struct link_encoder *dp_get_link_enc(struct dc_link *link);
bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update);
void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable);
@@ -210,11 +214,16 @@ bool dpcd_poll_for_allocation_change_trigger(struct dc_link *link);
struct fixed31_32 calculate_sst_avg_time_slots_per_mtp(
const struct dc_stream_state *stream,
const struct dc_link *link);
-void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *link_settings);
-void disable_dp_hpo_output(struct dc_link *link, enum signal_type signal);
+void enable_dp_hpo_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct dc_link_settings *link_settings);
+void disable_dp_hpo_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal);
void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable);
bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx);
void reset_dp_hpo_stream_encoders_for_link(struct dc_link *link);
bool dp_retrieve_lttpr_cap(struct dc_link *link);
+void edp_panel_backlight_power_on(struct dc_link *link);
#endif /* __DC_LINK_DP_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h
index 974d703e3771..74dafd0f9d3d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h
@@ -91,8 +91,9 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link);
* DPIA equivalent of dc_link_dp_perfrorm_link_training.
* Aborts link training upon detection of sink unplug.
*/
-enum link_training_result
-dc_link_dpia_perform_link_training(struct dc_link *link,
+enum link_training_result dc_link_dpia_perform_link_training(
+ struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_setting,
bool skip_video_pattern);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index 806f3041db14..337c0161e72d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -619,7 +619,7 @@ struct dcn_ip_params {
};
extern const struct dcn_ip_params dcn10_ip_defaults;
-bool dcn_validate_bandwidth(
+bool dcn10_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h b/drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h
new file mode 100644
index 000000000000..5dcfbd8e2697
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DML_WRAPPER_H_
+#define DML_WRAPPER_H_
+
+#include "dc.h"
+#include "dml/display_mode_vba.h"
+
+bool dml_validate(struct dc *dc, struct dc_state *context, bool fast_validate);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index a17e5de3b100..c920c4b6077d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -211,6 +211,8 @@ struct dummy_pstate_entry {
struct clk_bw_params {
unsigned int vram_type;
unsigned int num_channels;
+ unsigned int dispclk_vco_khz;
+ unsigned int dc_mode_softmax_memclk;
struct clk_limit_table clk_table;
struct wm_table wm_table;
struct dummy_pstate_entry dummy_pstate_table[4];
@@ -261,6 +263,10 @@ struct clk_mgr_funcs {
/* Send message to PMFW to set hard max memclk frequency to highest DPM */
void (*set_hard_max_memclk)(struct clk_mgr *clk_mgr);
+ /* Custom setters for a memclk frequency range */
+ void (*set_max_memclk)(struct clk_mgr *clk_mgr, unsigned int memclk_mhz);
+ void (*set_min_memclk)(struct clk_mgr *clk_mgr, unsigned int memclk_mhz);
+
/* Get current memclk states from PMFW, update relevant structures */
void (*get_memclk_states_from_smu)(struct clk_mgr *clk_mgr);
@@ -274,6 +280,7 @@ struct clk_mgr {
struct dc_clocks clks;
bool psr_allow_active_cache;
bool force_smu_not_present;
+ bool dc_mode_softmax_enabled;
int dprefclk_khz; // Used by program pixel clock in clock source funcs, need to figureout where this goes
int dentist_vco_freq_khz;
struct clk_state_registers_and_bypass boot_snapshot;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
index f94135c6e3c2..346f0ba73e86 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
@@ -61,6 +61,8 @@ struct dcn_dsc_state {
uint32_t dsc_pic_height;
uint32_t dsc_slice_bpg_offset;
uint32_t dsc_chunk_size;
+ uint32_t dsc_fw_en;
+ uint32_t dsc_opp_source;
};
@@ -88,6 +90,7 @@ struct dsc_enc_caps {
int32_t max_total_throughput_mps; /* Maximum total throughput with all the slices combined */
int32_t max_slice_width;
uint32_t bpp_increment_div; /* bpp increment divisor, e.g. if 16, it's 1/16th of a bit */
+ uint32_t edp_sink_max_bits_per_pixel;
bool is_dp;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 80e1a32bc63d..2c031586f4e6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -139,6 +139,7 @@ struct hubp_funcs {
bool (*hubp_is_flip_pending)(struct hubp *hubp);
void (*set_blank)(struct hubp *hubp, bool blank);
+ void (*set_blank_regs)(struct hubp *hubp, bool blank);
void (*set_hubp_blank_en)(struct hubp *hubp, bool blank);
void (*set_cursor_attributes)(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index bb0e91756ddd..2ce15cd10d80 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -268,7 +268,8 @@ struct hpo_dp_link_encoder_funcs {
void (*enable_link_phy)(struct hpo_dp_link_encoder *enc,
const struct dc_link_settings *link_settings,
- enum transmitter transmitter);
+ enum transmitter transmitter,
+ enum hpd_source_id hpd_source);
void (*disable_link_phy)(struct hpo_dp_link_encoder *link_enc,
enum signal_type signal);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index c88e113b94d1..073f8b667eff 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -164,6 +164,10 @@ struct stream_encoder_funcs {
void (*stop_dp_info_packets)(
struct stream_encoder *enc);
+ void (*reset_fifo)(struct stream_encoder *enc);
+
void (*dp_blank)(
struct dc_link *link,
struct stream_encoder *enc);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 7390baf916b5..c29320b3855d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -290,6 +290,8 @@ struct timing_generator_funcs {
enum optc_dsc_mode dsc_mode,
uint32_t dsc_bytes_per_pixel,
uint32_t dsc_slice_width);
+ void (*get_dsc_status)(struct timing_generator *optc,
+ uint32_t *dsc_mode);
void (*set_odm_bypass)(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing);
void (*set_odm_combine)(struct timing_generator *optc, int *opp_id, int opp_cnt,
struct dc_crtc_timing *timing);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index d50f4bd06b5d..05053f3b4ab7 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -64,6 +64,7 @@ struct hw_sequencer_funcs {
enum dc_status (*apply_ctx_to_hw)(struct dc *dc,
struct dc_state *context);
void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ void (*disable_pixel_data)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank);
void (*apply_ctx_for_surface)(struct dc *dc,
const struct dc_stream_state *stream,
int num_planes, struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
index 10dcf6a5e9b1..a4e43b4826e0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
@@ -36,7 +36,7 @@
* Initialise link encoder resource tracking.
*/
void link_enc_cfg_init(
- struct dc *dc,
+ const struct dc *dc,
struct dc_state *state);
/*
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
index ba664bc49595..69d63763a10e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
@@ -32,6 +32,7 @@ struct gpio *get_hpd_gpio(struct dc_bios *dcb,
void dp_enable_link_phy(
struct dc_link *link,
+ const struct link_resource *link_res,
enum signal_type signal,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings);
@@ -42,22 +43,27 @@ void edp_add_delay_for_T9(struct dc_link *link);
bool edp_receiver_ready_T9(struct dc_link *link);
bool edp_receiver_ready_T7(struct dc_link *link);
-void dp_disable_link_phy(struct dc_link *link, enum signal_type signal);
+void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_res,
+ enum signal_type signal);
-void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal);
+void dp_disable_link_phy_mst(struct dc_link *link, const struct link_resource *link_res,
+ enum signal_type signal);
bool dp_set_hw_training_pattern(
struct dc_link *link,
+ const struct link_resource *link_res,
enum dc_dp_training_pattern pattern,
uint32_t offset);
void dp_set_hw_lane_settings(
struct dc_link *link,
+ const struct link_resource *link_res,
const struct link_training_settings *link_settings,
uint32_t offset);
void dp_set_hw_test_pattern(
struct dc_link *link,
+ const struct link_resource *link_res,
enum dp_test_pattern test_pattern,
uint8_t *custom_pattern,
uint32_t custom_pattern_size);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 372c0898facd..ee4a5df428e3 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -34,6 +34,10 @@
#define MEMORY_TYPE_HBM 2
+#define IS_PIPE_SYNCD_VALID(pipe) ((((pipe)->pipe_idx_syncd) & 0x80)?1:0)
+#define GET_PIPE_SYNCD_FROM_PIPE(pipe) ((pipe)->pipe_idx_syncd & 0x7F)
+#define SET_PIPE_SYNCD_TO_PIPE(pipe, pipe_syncd) ((pipe)->pipe_idx_syncd = (0x80 | pipe_syncd))
+
enum dce_version resource_parse_asic_id(
struct hw_asic_id asic_id);
@@ -202,8 +206,17 @@ int get_num_mpc_splits(struct pipe_ctx *pipe);
int get_num_odm_splits(struct pipe_ctx *pipe);
#if defined(CONFIG_DRM_AMD_DC_DCN)
-struct hpo_dp_link_encoder *resource_get_unused_hpo_dp_link_encoder(
- const struct resource_pool *pool);
+struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt(
+ const struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ const struct dc_link *link);
#endif
+void reset_syncd_pipes_from_disabled_pipes(struct dc *dc,
+ struct dc_state *context);
+
+void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
+ struct dc_state *context,
+ uint8_t disabled_master_pipe_idx);
+
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
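Editor's note: the pipe_idx_syncd byte added to struct pipe_ctx packs a validity flag into bit 7 and the synced master pipe index into the low 7 bits; the three macros above encode and decode that packing. A tiny standalone sketch of the same bit layout; the macros here operate on the raw byte rather than on a pipe pointer, so the names are shortened stand-ins.

#include <stdio.h>
#include <stdint.h>

#define IS_PIPE_SYNCD_VALID(v)   (((v) & 0x80) ? 1 : 0)
#define GET_PIPE_SYNCD(v)        ((v) & 0x7F)
#define SET_PIPE_SYNCD(v, idx)   ((v) = (uint8_t)(0x80 | (idx)))

int main(void)
{
	uint8_t pipe_idx_syncd = 0;

	printf("valid before set: %d\n", IS_PIPE_SYNCD_VALID(pipe_idx_syncd)); /* 0 */
	SET_PIPE_SYNCD(pipe_idx_syncd, 3);
	printf("valid after set: %d, synced to pipe %d\n",
	       IS_PIPE_SYNCD_VALID(pipe_idx_syncd), GET_PIPE_SYNCD(pipe_idx_syncd));
	return 0;
}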
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
index 378cc11aa047..6b5fedd9ace0 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
@@ -185,16 +185,18 @@ bool dal_irq_service_dummy_set(struct irq_service *irq_service,
const struct irq_source_info *info,
bool enable)
{
- DC_LOG_ERROR("%s: called for non-implemented irq source\n",
- __func__);
+ DC_LOG_ERROR("%s: called for non-implemented irq source, src_id=%u, ext_id=%u\n",
+ __func__, info->src_id, info->ext_id);
+
return false;
}
bool dal_irq_service_dummy_ack(struct irq_service *irq_service,
const struct irq_source_info *info)
{
- DC_LOG_ERROR("%s: called for non-implemented irq source\n",
- __func__);
+ DC_LOG_ERROR("%s: called for non-implemented irq source, src_id=%u, ext_id=%u\n",
+ __func__, info->src_id, info->ext_id);
+
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
index 34f43cb650f8..cf072e2347d3 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
@@ -40,10 +40,9 @@
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
-enum dc_irq_source to_dal_irq_source_dcn10(
- struct irq_service *irq_service,
- uint32_t src_id,
- uint32_t ext_id)
+static enum dc_irq_source to_dal_irq_source_dcn10(struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
{
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
index a47f68634fc3..aa708b61142f 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
@@ -39,10 +39,9 @@
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
-enum dc_irq_source to_dal_irq_source_dcn201(
- struct irq_service *irq_service,
- uint32_t src_id,
- uint32_t ext_id)
+static enum dc_irq_source to_dal_irq_source_dcn201(struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
{
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
index 78940cb20e10..235294534c43 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
@@ -40,10 +40,9 @@
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
-enum dc_irq_source to_dal_irq_source_dcn21(
- struct irq_service *irq_service,
- uint32_t src_id,
- uint32_t ext_id)
+static enum dc_irq_source to_dal_irq_source_dcn21(struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
{
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c b/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c
index 38e0ade60c7b..1b88e4e627fd 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c
@@ -36,10 +36,9 @@
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
-enum dc_irq_source to_dal_irq_source_dcn31(
- struct irq_service *irq_service,
- uint32_t src_id,
- uint32_t ext_id)
+static enum dc_irq_source to_dal_irq_source_dcn31(struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
{
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index cd204eef073b..83855b8a32e9 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -360,6 +360,8 @@ struct dmub_srv_hw_funcs {
uint32_t (*get_gpint_dataout)(struct dmub_srv *dmub);
+ void (*clear_inbox0_ack_register)(struct dmub_srv *dmub);
+ uint32_t (*read_inbox0_ack_register)(struct dmub_srv *dmub);
void (*send_inbox0_cmd)(struct dmub_srv *dmub, union dmub_inbox0_data_register data);
uint32_t (*get_current_time)(struct dmub_srv *dmub);
@@ -409,6 +411,7 @@ struct dmub_srv {
struct dmub_srv_base_funcs funcs;
struct dmub_srv_hw_funcs hw_funcs;
struct dmub_rb inbox1_rb;
+ uint32_t inbox1_last_wptr;
/**
* outbox1_rb is accessed without locks (dal & dc)
* and to be used only in dmub_srv_stat_get_notification()
@@ -735,6 +738,45 @@ bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_
bool dmub_srv_should_detect(struct dmub_srv *dmub);
+/**
+ * dmub_srv_send_inbox0_cmd() - Send command to DMUB using INBOX0
+ * @dmub: the dmub service
+ * @data: the data to be sent in the INBOX0 command
+ *
+ * Send command by writing directly to INBOX0 WPTR
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - hw_init false or hw function does not exist
+ */
+enum dmub_status dmub_srv_send_inbox0_cmd(struct dmub_srv *dmub, union dmub_inbox0_data_register data);
+
+/**
+ * dmub_srv_wait_for_inbox0_ack() - wait for DMUB to ACK INBOX0 command
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Wait for DMUB to ACK the INBOX0 message
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - hw_init false or hw function does not exist
+ * DMUB_STATUS_TIMEOUT - wait for ack timed out
+ */
+enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t timeout_us);
+
+/**
+ * dmub_srv_clear_inbox0_ack() - Clear ACK register for INBOX0
+ * @dmub: the dmub service
+ *
+ * Clear ACK register for INBOX0
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - hw_init false or hw function does not exist
+ */
+enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub);
+
#if defined(__cplusplus)
}
#endif
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index c29a67ccef17..873ecd04e01d 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -46,10 +46,10 @@
/* Firmware versioning. */
#ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0x1d82d23e
+#define DMUB_FW_VERSION_GIT_HASH 0xbaf06b95
#define DMUB_FW_VERSION_MAJOR 0
#define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 91
+#define DMUB_FW_VERSION_REVISION 98
#define DMUB_FW_VERSION_TEST 0
#define DMUB_FW_VERSION_VBIOS 0
#define DMUB_FW_VERSION_HOTFIX 0
@@ -173,13 +173,6 @@ extern "C" {
#endif
/**
- * Number of nanoseconds per DMUB tick.
- * DMCUB_TIMER_CURRENT increments in DMUB ticks, which are 10ns by default.
- * If DMCUB_TIMER_WINDOW is non-zero this will no longer be true.
- */
-#define NS_PER_DMUB_TICK 10
-
-/**
* union dmub_addr - DMUB physical/virtual 64-bit address.
*/
union dmub_addr {
@@ -208,10 +201,9 @@ union dmub_psr_debug_flags {
uint32_t use_hw_lock_mgr : 1;
/**
- * Unused.
- * TODO: Remove.
+ * Use TPS3 signal when restoring the main link.
*/
- uint32_t log_line_nums : 1;
+ uint32_t force_wakeup_by_tps3 : 1;
} bitfields;
/**
@@ -416,7 +408,14 @@ enum dmub_cmd_vbios_type {
* Enables or disables power gating.
*/
DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING = 3,
+ /**
+ * Controls embedded panels.
+ */
DMUB_CMD__VBIOS_LVTMA_CONTROL = 15,
+ /**
+ * Query DP alt status on a transmitter.
+ */
+ DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT = 26,
};
//==============================================================================
@@ -1550,10 +1549,14 @@ struct dmub_cmd_psr_copy_settings_data {
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
+ /*
+ * DSC enable status in driver
+ */
+ uint8_t dsc_enable_status;
/**
- * Explicit padding to 4 byte boundary.
+ * Explicit padding to 3 byte boundary.
*/
- uint8_t pad3[4];
+ uint8_t pad3[3];
};
/**
@@ -2398,6 +2401,24 @@ struct dmub_rb_cmd_lvtma_control {
};
/**
+ * Data passed in/out in a DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT command.
+ */
+struct dmub_rb_cmd_transmitter_query_dp_alt_data {
+ uint8_t phy_id; /**< 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4=UNIPHYE, 5=UNIPHYF */
+ uint8_t is_usb; /**< whether the phy is in USB mode */
+ uint8_t is_dp_alt_disable; /**< whether DP alt mode is disabled */
+ uint8_t is_dp4; /**< whether DP alt mode is using 4 lanes */
+};
+
+/**
+ * Definition of a DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT command.
+ */
+struct dmub_rb_cmd_transmitter_query_dp_alt {
+ struct dmub_cmd_header header; /**< header */
+ struct dmub_rb_cmd_transmitter_query_dp_alt_data data; /**< payload */
+};
+
+/**
* Maximum number of bytes a chunk sent to DMUB for parsing
*/
#define DMUB_EDID_CEA_DATA_CHUNK_BYTES 8
@@ -2408,7 +2429,7 @@ struct dmub_rb_cmd_lvtma_control {
struct dmub_cmd_send_edid_cea {
uint16_t offset; /**< offset into the CEA block */
uint8_t length; /**< number of bytes in payload to copy as part of CEA block */
- uint16_t total_length; /**< total length of the CEA block */
+ uint16_t cea_total_length; /**< total length of the CEA block */
uint8_t payload[DMUB_EDID_CEA_DATA_CHUNK_BYTES]; /**< data chunk of the CEA block */
uint8_t pad[3]; /**< padding and for future expansion */
};
@@ -2605,6 +2626,10 @@ union dmub_rb_cmd {
*/
struct dmub_rb_cmd_lvtma_control lvtma_control;
/**
+ * Definition of a DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT command.
+ */
+ struct dmub_rb_cmd_transmitter_query_dp_alt query_dp_alt;
+ /**
* Definition of a DMUB_CMD__DPIA_DIG1_CONTROL command.
*/
struct dmub_rb_cmd_dig1_dpia_control dig1_dpia_control;
@@ -2722,7 +2747,7 @@ static inline bool dmub_rb_full(struct dmub_rb *rb)
static inline bool dmub_rb_push_front(struct dmub_rb *rb,
const union dmub_rb_cmd *cmd)
{
- uint64_t volatile *dst = (uint64_t volatile *)(rb->base_address) + rb->wrpt / sizeof(uint64_t);
+ uint64_t volatile *dst = (uint64_t volatile *)((uint8_t *)(rb->base_address) + rb->wrpt);
const uint64_t *src = (const uint64_t *)cmd;
uint8_t i;
@@ -2840,7 +2865,7 @@ static inline bool dmub_rb_peek_offset(struct dmub_rb *rb,
static inline bool dmub_rb_out_front(struct dmub_rb *rb,
union dmub_rb_out_cmd *cmd)
{
- const uint64_t volatile *src = (const uint64_t volatile *)(rb->base_address) + rb->rptr / sizeof(uint64_t);
+ const uint64_t volatile *src = (const uint64_t volatile *)((uint8_t *)(rb->base_address) + rb->rptr);
uint64_t *dst = (uint64_t *)cmd;
uint8_t i;
@@ -2888,7 +2913,7 @@ static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)
uint32_t wptr = rb->wrpt;
while (rptr != wptr) {
- uint64_t volatile *data = (uint64_t volatile *)rb->base_address + rptr / sizeof(uint64_t);
+ uint64_t volatile *data = (uint64_t volatile *)((uint8_t *)(rb->base_address) + rptr);
//uint64_t volatile *p = (uint64_t volatile *)data;
uint64_t temp;
uint8_t i;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 56d400ffa7ac..9280f2abd973 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -100,24 +100,9 @@ void dmub_flush_buffer_mem(const struct dmub_fb *fb)
}
static const struct dmub_fw_meta_info *
-dmub_get_fw_meta_info(const struct dmub_srv_region_params *params)
+dmub_get_fw_meta_info_from_blob(const uint8_t *blob, uint32_t blob_size, uint32_t meta_offset)
{
const union dmub_fw_meta *meta;
- const uint8_t *blob = NULL;
- uint32_t blob_size = 0;
- uint32_t meta_offset = 0;
-
- if (params->fw_bss_data && params->bss_data_size) {
- /* Legacy metadata region. */
- blob = params->fw_bss_data;
- blob_size = params->bss_data_size;
- meta_offset = DMUB_FW_META_OFFSET;
- } else if (params->fw_inst_const && params->inst_const_size) {
- /* Combined metadata region. */
- blob = params->fw_inst_const;
- blob_size = params->inst_const_size;
- meta_offset = 0;
- }
if (!blob || !blob_size)
return NULL;
@@ -134,6 +119,32 @@ dmub_get_fw_meta_info(const struct dmub_srv_region_params *params)
return &meta->info;
}
+static const struct dmub_fw_meta_info *
+dmub_get_fw_meta_info(const struct dmub_srv_region_params *params)
+{
+ const struct dmub_fw_meta_info *info = NULL;
+
+ if (params->fw_bss_data && params->bss_data_size) {
+ /* Legacy metadata region. */
+ info = dmub_get_fw_meta_info_from_blob(params->fw_bss_data,
+ params->bss_data_size,
+ DMUB_FW_META_OFFSET);
+ } else if (params->fw_inst_const && params->inst_const_size) {
+ /* Combined metadata region - can be aligned to 16-bytes. */
+ uint32_t i;
+
+ for (i = 0; i < 16; ++i) {
+ info = dmub_get_fw_meta_info_from_blob(
+ params->fw_inst_const, params->inst_const_size, i);
+
+ if (info)
+ break;
+ }
+ }
+
+ return info;
+}
+
static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
{
struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs;
@@ -598,6 +609,8 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
{
+ struct dmub_rb flush_rb;
+
if (!dmub->hw_init)
return DMUB_STATUS_INVALID;
@@ -606,9 +619,14 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
* been flushed to framebuffer memory. Otherwise DMCUB might
* read back stale, fully invalid or partially invalid data.
*/
- dmub_rb_flush_pending(&dmub->inbox1_rb);
+ flush_rb = dmub->inbox1_rb;
+ flush_rb.rptr = dmub->inbox1_last_wptr;
+ dmub_rb_flush_pending(&flush_rb);
+
+ dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
+
+ dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
- dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
return DMUB_STATUS_OK;
}
@@ -831,3 +849,38 @@ bool dmub_srv_should_detect(struct dmub_srv *dmub)
return dmub->hw_funcs.should_detect(dmub);
}
+
+enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub)
+{
+ if (!dmub->hw_init || !dmub->hw_funcs.clear_inbox0_ack_register)
+ return DMUB_STATUS_INVALID;
+
+ dmub->hw_funcs.clear_inbox0_ack_register(dmub);
+ return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t timeout_us)
+{
+ uint32_t i = 0;
+ uint32_t ack = 0;
+
+ if (!dmub->hw_init || !dmub->hw_funcs.read_inbox0_ack_register)
+ return DMUB_STATUS_INVALID;
+
+ for (i = 0; i <= timeout_us; i++) {
+ ack = dmub->hw_funcs.read_inbox0_ack_register(dmub);
+ if (ack)
+ return DMUB_STATUS_OK;
+ }
+ return DMUB_STATUS_TIMEOUT;
+}
+
+enum dmub_status dmub_srv_send_inbox0_cmd(struct dmub_srv *dmub,
+ union dmub_inbox0_data_register data)
+{
+ if (!dmub->hw_init || !dmub->hw_funcs.send_inbox0_cmd)
+ return DMUB_STATUS_INVALID;
+
+ dmub->hw_funcs.send_inbox0_cmd(dmub, data);
+ return DMUB_STATUS_OK;
+}
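Editor's note: the three new INBOX0 helpers are intended to be used as a clear/send/wait sequence. A minimal caller-side sketch, assuming an already hardware-initialised struct dmub_srv and a populated union dmub_inbox0_data_register; this is not driver code, only the intended ordering of the calls added above, and the 30 us timeout is an assumed example value.

/* Caller-side sketch; error handling trimmed to the essentials. */
static enum dmub_status send_inbox0_and_wait(struct dmub_srv *dmub,
					     union dmub_inbox0_data_register data)
{
	enum dmub_status status;

	status = dmub_srv_clear_inbox0_ack(dmub);       /* drop any stale ACK */
	if (status != DMUB_STATUS_OK)
		return status;

	status = dmub_srv_send_inbox0_cmd(dmub, data);  /* write the INBOX0 command */
	if (status != DMUB_STATUS_OK)
		return status;

	/* Poll the ACK register for up to 30 us (timeout value is an assumption) */
	return dmub_srv_wait_for_inbox0_ack(dmub, 30);
}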
diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
index 4de59b66bb1a..a2b80514d83e 100644
--- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
@@ -35,6 +35,7 @@
#define DP_BRANCH_DEVICE_ID_00E04C 0x00E04C
#define DP_BRANCH_DEVICE_ID_006037 0x006037
+#define DP_DEVICE_ID_38EC11 0x38EC11
enum ddc_result {
DDC_RESULT_UNKNOWN = 0,
DDC_RESULT_SUCESSFULL,
@@ -117,4 +118,7 @@ struct av_sync_data {
uint8_t aud_del_ins3;/* DPCD 0002Dh */
};
+static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3, 0};
+static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5, 0};
+
#endif /* __DAL_DDC_SERVICE_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index 370fad883e33..f093b49c5e6e 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -72,9 +72,7 @@
#define DC_LOG_DSC(...) DRM_DEBUG_KMS(__VA_ARGS__)
#define DC_LOG_SMU(...) pr_debug("[SMU_MSG]:"__VA_ARGS__)
#define DC_LOG_DWB(...) DRM_DEBUG_KMS(__VA_ARGS__)
-#if defined(CONFIG_DRM_AMD_DC_DCN)
#define DC_LOG_DP2(...) DRM_DEBUG_KMS(__VA_ARGS__)
-#endif
struct dal_logger;
@@ -126,9 +124,7 @@ enum dc_log_type {
LOG_MAX_HW_POINTS,
LOG_ALL_TF_CHANNELS,
LOG_SAMPLE_1DLUT,
-#if defined(CONFIG_DRM_AMD_DC_DCN)
LOG_DP2,
-#endif
LOG_SECTION_TOTAL_COUNT
};
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 4b9e68a79f06..f57a1478f0fe 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -231,6 +231,8 @@ enum DC_FEATURE_MASK {
DC_DISABLE_FRACTIONAL_PWM_MASK = (1 << 2), //0x4, disabled by default
DC_PSR_MASK = (1 << 3), //0x8, disabled by default for dcn < 3.1
DC_EDP_NO_POWER_SEQUENCING = (1 << 4), //0x10, disabled by default
+ DC_DISABLE_LTTPR_DP1_4A = (1 << 5), //0x20, disabled by default
+ DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default
};
enum DC_DEBUG_MASK {
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h
index 6d0052ce6bed..da6d380c948b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h
@@ -354,5 +354,12 @@
#define mmMP1_SMN_EXT_SCRATCH7 0x03c7
#define mmMP1_SMN_EXT_SCRATCH7_BASE_IDX 0
+/*
+ * addressBlock: mp_SmuMp1Pub_MmuDec
+ * base address: 0x0
+ */
+#define smnMP1_PMI_3_START 0x3030204
+#define smnMP1_PMI_3_FIFO 0x3030208
+#define smnMP1_PMI_3 0x3030600
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h
index 136fb5de6a4c..a5ae2a801254 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h
@@ -959,5 +959,17 @@
#define MP1_SMN_EXT_SCRATCH7__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH7__DATA_MASK 0xFFFFFFFFL
+// MP1_PMI_3_START
+#define MP1_PMI_3_START__ENABLE_MASK 0x80000000L
+// MP1_PMI_3_FIFO
+#define MP1_PMI_3_FIFO__DEPTH_MASK 0x00000fffL
+
+// MP1_PMI_3_START
+#define MP1_PMI_3_START__ENABLE__SHIFT 0x0000001f
+// MP1_PMI_3_FIFO
+#define MP1_PMI_3_FIFO__DEPTH__SHIFT 0x00000000
+
+
+
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_offset.h
index 79eae0256dbd..8072b0a6376d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_offset.h
@@ -20753,8 +20753,6 @@
// addressBlock: nbio_nbif0_gdc_GDCDEC
// base address: 0xd0000000
-#define regGDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL 0x2ffc0eda
-#define regGDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL_BASE_IDX 5
#define regGDC1_NGDC_SDP_PORT_CTRL 0x2ffc0ee2
#define regGDC1_NGDC_SDP_PORT_CTRL_BASE_IDX 5
#define regGDC1_SHUB_REGS_IF_CTL 0x2ffc0ee3
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_sh_mask.h
index e27fdc0c643c..54b0e4623971 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_sh_mask.h
@@ -1,4 +1,3 @@
-
/*
* Copyright (C) 2020 Advanced Micro Devices, Inc.
*
@@ -108541,17 +108540,6 @@
// addressBlock: nbio_nbif0_gdc_GDCDEC
-//GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN0_FAST_WRITE_RESPONSE_EN__SHIFT 0x0
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN1_FAST_WRITE_RESPONSE_EN__SHIFT 0x1
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN2_FAST_WRITE_RESPONSE_EN__SHIFT 0x2
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN3_FAST_WRITE_RESPONSE_EN__SHIFT 0x3
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__FWR_NORMAL_ARB_MODE__SHIFT 0x10
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN0_FAST_WRITE_RESPONSE_EN_MASK 0x00000001L
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN1_FAST_WRITE_RESPONSE_EN_MASK 0x00000002L
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN2_FAST_WRITE_RESPONSE_EN_MASK 0x00000004L
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN3_FAST_WRITE_RESPONSE_EN_MASK 0x00000008L
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__FWR_NORMAL_ARB_MODE_MASK 0x00010000L
//GDC1_NGDC_SDP_PORT_CTRL
#define GDC1_NGDC_SDP_PORT_CTRL__SDP_DISCON_HYSTERESIS__SHIFT 0x0
#define GDC1_NGDC_SDP_PORT_CTRL__NGDC_OBFF_HW_URGENT_EARLY_WAKEUP_EN__SHIFT 0xf
diff --git a/drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h b/drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h
index 9cb5f3631c60..ce79e5de8ce3 100644
--- a/drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h
@@ -25,15 +25,15 @@
#define MAX_SEGMENT 5
-struct IP_BASE_INSTANCE
+struct IP_BASE_INSTANCE
{
unsigned int segment[MAX_SEGMENT];
-};
-
-struct IP_BASE
+} __maybe_unused;
+
+struct IP_BASE
{
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
-};
+} __maybe_unused;
static const struct IP_BASE ATHUB_BASE ={ { { { 0x00000C00, 0, 0, 0, 0 } },
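Adding __maybe_unused to these header-only struct definitions is the usual way to keep the compiler quiet when a translation unit includes the header but never references every base-address table; the patch does not state which warning is being silenced, so that rationale is an assumption. In plain C the idiom reduces to:

#define __maybe_unused __attribute__((unused))

struct ip_base_instance_example {
        unsigned int segment[5];
} __maybe_unused;

/* Without the attribute, some compilers warn if this constant is never used. */
static const struct ip_base_instance_example EXAMPLE_BASE __maybe_unused = {
        { 0x00000C00, 0, 0, 0, 0 }
};

int main(void)
{
        return 0;
}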
diff --git a/drivers/gpu/drm/amd/include/discovery.h b/drivers/gpu/drm/amd/include/discovery.h
index 7ec4331e67f2..a486769b66c6 100644
--- a/drivers/gpu/drm/amd/include/discovery.h
+++ b/drivers/gpu/drm/amd/include/discovery.h
@@ -143,6 +143,55 @@ struct gc_info_v1_0 {
uint32_t gc_num_gl2a;
};
+struct gc_info_v1_1 {
+ struct gpu_info_header header;
+
+ uint32_t gc_num_se;
+ uint32_t gc_num_wgp0_per_sa;
+ uint32_t gc_num_wgp1_per_sa;
+ uint32_t gc_num_rb_per_se;
+ uint32_t gc_num_gl2c;
+ uint32_t gc_num_gprs;
+ uint32_t gc_num_max_gs_thds;
+ uint32_t gc_gs_table_depth;
+ uint32_t gc_gsprim_buff_depth;
+ uint32_t gc_parameter_cache_depth;
+ uint32_t gc_double_offchip_lds_buffer;
+ uint32_t gc_wave_size;
+ uint32_t gc_max_waves_per_simd;
+ uint32_t gc_max_scratch_slots_per_cu;
+ uint32_t gc_lds_size;
+ uint32_t gc_num_sc_per_se;
+ uint32_t gc_num_sa_per_se;
+ uint32_t gc_num_packer_per_sc;
+ uint32_t gc_num_gl2a;
+ uint32_t gc_num_tcp_per_sa;
+ uint32_t gc_num_sdp_interface;
+ uint32_t gc_num_tcps;
+};
+
+struct gc_info_v2_0 {
+ struct gpu_info_header header;
+
+ uint32_t gc_num_se;
+ uint32_t gc_num_cu_per_sh;
+ uint32_t gc_num_sh_per_se;
+ uint32_t gc_num_rb_per_se;
+ uint32_t gc_num_tccs;
+ uint32_t gc_num_gprs;
+ uint32_t gc_num_max_gs_thds;
+ uint32_t gc_gs_table_depth;
+ uint32_t gc_gsprim_buff_depth;
+ uint32_t gc_parameter_cache_depth;
+ uint32_t gc_double_offchip_lds_buffer;
+ uint32_t gc_wave_size;
+ uint32_t gc_max_waves_per_simd;
+ uint32_t gc_max_scratch_slots_per_cu;
+ uint32_t gc_lds_size;
+ uint32_t gc_num_sc_per_se;
+ uint32_t gc_num_packer_per_sc;
+};
+
typedef struct harvest_info_header {
uint32_t signature; /* Table Signature */
uint32_t version; /* Table Version */
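Each gc_info variant in the discovery table sits behind a gpu_info_header, so a consumer branches on the header version to decide whether the v1.0, v1.1 or v2.0 layout follows. The header field names below are assumed for illustration; only the three struct names added above come from the patch.

#include <stdint.h>
#include <stdio.h>

/* Assumed header layout, for illustration only. */
struct gpu_info_header_example {
        uint32_t table_id;
        uint32_t version_major;
        uint32_t version_minor;
        uint32_t size_bytes;
};

static const char *gc_info_variant(const struct gpu_info_header_example *hdr)
{
        if (hdr->version_major == 2)
                return "gc_info_v2_0";
        if (hdr->version_major == 1 && hdr->version_minor >= 1)
                return "gc_info_v1_1";
        return "gc_info_v1_0";
}

int main(void)
{
        struct gpu_info_header_example hdr = { 0, 1, 1, 0 };

        printf("parse as %s\n", gc_info_variant(&hdr));
        return 0;
}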
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index c84bd7b2cf59..ac941f62cbed 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -33,12 +33,11 @@
#include <linux/dma-fence.h>
struct pci_dev;
+struct amdgpu_device;
#define KGD_MAX_QUEUES 128
struct kfd_dev;
-struct kgd_dev;
-
struct kgd_mem;
enum kfd_preempt_type {
@@ -228,61 +227,61 @@ struct tile_config {
*/
struct kfd2kgd_calls {
/* Register access functions */
- void (*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid,
+ void (*program_sh_mem_settings)(struct amdgpu_device *adev, uint32_t vmid,
uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
- int (*set_pasid_vmid_mapping)(struct kgd_dev *kgd, u32 pasid,
+ int (*set_pasid_vmid_mapping)(struct amdgpu_device *adev, u32 pasid,
unsigned int vmid);
- int (*init_interrupts)(struct kgd_dev *kgd, uint32_t pipe_id);
+ int (*init_interrupts)(struct amdgpu_device *adev, uint32_t pipe_id);
- int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+ int (*hqd_load)(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr,
uint32_t wptr_shift, uint32_t wptr_mask,
struct mm_struct *mm);
- int (*hiq_mqd_load)(struct kgd_dev *kgd, void *mqd,
+ int (*hiq_mqd_load)(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t doorbell_off);
- int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd,
+ int (*hqd_sdma_load)(struct amdgpu_device *adev, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm);
- int (*hqd_dump)(struct kgd_dev *kgd,
+ int (*hqd_dump)(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs);
- int (*hqd_sdma_dump)(struct kgd_dev *kgd,
+ int (*hqd_sdma_dump)(struct amdgpu_device *adev,
uint32_t engine_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs);
- bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id);
-
- int (*hqd_destroy)(struct kgd_dev *kgd, void *mqd, uint32_t reset_type,
- unsigned int timeout, uint32_t pipe_id,
+ bool (*hqd_is_occupied)(struct amdgpu_device *adev,
+ uint64_t queue_address, uint32_t pipe_id,
uint32_t queue_id);
- bool (*hqd_sdma_is_occupied)(struct kgd_dev *kgd, void *mqd);
+ int (*hqd_destroy)(struct amdgpu_device *adev, void *mqd,
+ uint32_t reset_type, unsigned int timeout,
+ uint32_t pipe_id, uint32_t queue_id);
+
+ bool (*hqd_sdma_is_occupied)(struct amdgpu_device *adev, void *mqd);
- int (*hqd_sdma_destroy)(struct kgd_dev *kgd, void *mqd,
+ int (*hqd_sdma_destroy)(struct amdgpu_device *adev, void *mqd,
unsigned int timeout);
- int (*address_watch_disable)(struct kgd_dev *kgd);
- int (*address_watch_execute)(struct kgd_dev *kgd,
+ int (*address_watch_disable)(struct amdgpu_device *adev);
+ int (*address_watch_execute)(struct amdgpu_device *adev,
unsigned int watch_point_id,
uint32_t cntl_val,
uint32_t addr_hi,
uint32_t addr_lo);
- int (*wave_control_execute)(struct kgd_dev *kgd,
+ int (*wave_control_execute)(struct amdgpu_device *adev,
uint32_t gfx_index_val,
uint32_t sq_cmd);
- uint32_t (*address_watch_get_offset)(struct kgd_dev *kgd,
+ uint32_t (*address_watch_get_offset)(struct amdgpu_device *adev,
unsigned int watch_point_id,
unsigned int reg_offset);
- bool (*get_atc_vmid_pasid_mapping_info)(
- struct kgd_dev *kgd,
+ bool (*get_atc_vmid_pasid_mapping_info)(struct amdgpu_device *adev,
uint8_t vmid,
uint16_t *p_pasid);
@@ -290,16 +289,16 @@ struct kfd2kgd_calls {
* passed to the shader by the CP. It's the user mode driver's
* responsibility.
*/
- void (*set_scratch_backing_va)(struct kgd_dev *kgd,
+ void (*set_scratch_backing_va)(struct amdgpu_device *adev,
uint64_t va, uint32_t vmid);
- void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
+ void (*set_vm_context_page_table_base)(struct amdgpu_device *adev,
uint32_t vmid, uint64_t page_table_base);
- uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);
+ uint32_t (*read_vmid_from_vmfault_reg)(struct amdgpu_device *adev);
- void (*get_cu_occupancy)(struct kgd_dev *kgd, int pasid, int *wave_cnt,
- int *max_waves_per_cu);
- void (*program_trap_handler_settings)(struct kgd_dev *kgd,
+ void (*get_cu_occupancy)(struct amdgpu_device *adev, int pasid,
+ int *wave_cnt, int *max_waves_per_cu);
+ void (*program_trap_handler_settings)(struct amdgpu_device *adev,
uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr);
};
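The interface change replaces the opaque struct kgd_dev handle with the real struct amdgpu_device pointer, so every implementation can drop the per-call cast. A before/after sketch, with program_pipe() standing in for the actual register programming (an assumption, not a real driver function):

#include <stdint.h>

struct amdgpu_device;

/* Illustrative stand-in for the real hardware programming. */
static int program_pipe(struct amdgpu_device *adev, uint32_t pipe_id)
{
        (void)adev;
        (void)pipe_id;
        return 0;
}

/* Before: an opaque handle, cast at the top of every callback. */
static int init_interrupts_old(void *kgd, uint32_t pipe_id)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        return program_pipe(adev, pipe_id);
}

/* After: the typed device pointer arrives directly from the caller. */
static int init_interrupts_new(struct amdgpu_device *adev, uint32_t pipe_id)
{
        return program_pipe(adev, pipe_id);
}

int main(void)
{
        return init_interrupts_old(0, 0) | init_interrupts_new(0, 0);
}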
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index bac15c466733..5c0867ebcfce 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -153,6 +153,10 @@ enum PP_SMC_POWER_PROFILE {
PP_SMC_POWER_PROFILE_COUNT,
};
+extern const char * const amdgpu_pp_profile_name[PP_SMC_POWER_PROFILE_COUNT];
+
+
+
enum {
PP_GROUP_UNKNOWN = 0,
PP_GROUP_GFX = 1,
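The extern declaration above pairs with the single definition added in amdgpu_pm.c below; the table is sized and indexed by the PP_SMC_POWER_PROFILE_* values, which is why each per-ASIC copy of the same strings can be deleted later in this series (see the standalone sketch after the amdgpu_pm.c hunks).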
diff --git a/drivers/gpu/drm/amd/include/yellow_carp_offset.h b/drivers/gpu/drm/amd/include/yellow_carp_offset.h
index 76b9eb3f441d..28a56b56bcaf 100644
--- a/drivers/gpu/drm/amd/include/yellow_carp_offset.h
+++ b/drivers/gpu/drm/amd/include/yellow_carp_offset.h
@@ -9,12 +9,12 @@
struct IP_BASE_INSTANCE
{
unsigned int segment[MAX_SEGMENT];
-};
+} __maybe_unused;
struct IP_BASE
{
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
-};
+} __maybe_unused;
static const struct IP_BASE ACP_BASE = { { { { 0x02403800, 0x00480000, 0, 0, 0, 0 } },
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 41472ed99253..e2cae97f4ff1 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -82,6 +82,16 @@ static const struct hwmon_temp_label {
{PP_TEMP_MEM, "mem"},
};
+const char * const amdgpu_pp_profile_name[] = {
+ "BOOTUP_DEFAULT",
+ "3D_FULL_SCREEN",
+ "POWER_SAVING",
+ "VIDEO",
+ "VR",
+ "COMPUTE",
+ "CUSTOM"
+};
+
/**
* DOC: power_dpm_state
*
@@ -2080,7 +2090,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
} else if (DEVICE_ATTR_IS(unique_id)) {
if (asic_type != CHIP_VEGA10 &&
asic_type != CHIP_VEGA20 &&
- asic_type != CHIP_ARCTURUS)
+ asic_type != CHIP_ARCTURUS &&
+ asic_type != CHIP_ALDEBARAN)
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_features)) {
if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
@@ -2123,6 +2134,12 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
}
}
+ /* setting should not be allowed from VF */
+ if (amdgpu_sriov_vf(adev)) {
+ dev_attr->attr.mode &= ~S_IWUGO;
+ dev_attr->store = NULL;
+ }
+
#undef DEVICE_ATTR_IS
return 0;
@@ -3759,5 +3776,7 @@ void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
adev,
&amdgpu_debugfs_pm_prv_buffer_fops,
adev->pm.smu_prv_buffer_size);
+
+ amdgpu_smu_stb_debug_fs_init(adev);
#endif
}
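With amdgpu_pp_profile_name[] defined once here and exported through kgd_pp_interface.h, the per-ASIC *_get_power_profile_mode() copies of the same strings become redundant. The table is indexed by the PP_SMC_POWER_PROFILE_* values; the standalone enum below merely mirrors that ordering for illustration.

#include <stdio.h>

enum pp_profile_example {
        PROFILE_BOOTUP_DEFAULT = 0,
        PROFILE_FULLSCREEN_3D,
        PROFILE_POWERSAVING,
        PROFILE_VIDEO,
        PROFILE_VR,
        PROFILE_COMPUTE,
        PROFILE_CUSTOM,
        PROFILE_COUNT,
};

static const char * const profile_name[PROFILE_COUNT] = {
        "BOOTUP_DEFAULT", "3D_FULL_SCREEN", "POWER_SAVING",
        "VIDEO", "VR", "COMPUTE", "CUSTOM",
};

int main(void)
{
        for (int i = 0; i < PROFILE_COUNT; i++)
                printf("%2d %s\n", i, profile_name[i]);
        return 0;
}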
diff --git a/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h
index 35fa0d8e92dd..ab66a4b9e438 100644
--- a/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h
@@ -102,7 +102,9 @@
#define PPSMC_MSG_GfxDriverResetRecovery 0x42
#define PPSMC_MSG_BoardPowerCalibration 0x43
-#define PPSMC_Message_Count 0x44
+#define PPSMC_MSG_HeavySBR 0x45
+#define PPSMC_Message_Count 0x46
+
//PPSMC Reset Types
#define PPSMC_RESET_TYPE_WARM_RESET 0x00
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 16e3f72d31b9..c464a045000d 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -423,6 +423,9 @@ enum ip_power_state {
POWER_STATE_OFF,
};
+/* Used to mask smu debug modes */
+#define SMU_DEBUG_HALT_ON_ERROR 0x1
+
struct amdgpu_pm {
struct mutex mutex;
u32 current_sclk;
@@ -460,6 +463,11 @@ struct amdgpu_pm {
struct list_head pm_attr_list;
atomic_t pwr_state[AMD_IP_BLOCK_TYPE_NUM];
+
+ /*
+	 * 0 = disabled (default); otherwise, the corresponding debug mode is enabled
+ */
+ uint32_t smu_debug_mask;
};
#define R600_SSTU_DFLT 0
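smu_debug_mask is a plain bit mask with SMU_DEBUG_HALT_ON_ERROR as the first defined bit; zero keeps every debug mode off. A minimal sketch of how such a bit would be consulted; the helper name is an assumption:

#include <stdbool.h>
#include <stdint.h>

#define SMU_DEBUG_HALT_ON_ERROR 0x1

/* Illustrative: decide whether an SMU failure should stop further processing. */
static bool should_halt_on_smu_error(uint32_t smu_debug_mask)
{
        return (smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) != 0;
}

int main(void)
{
        return should_halt_on_smu_error(0) ? 1 : 0;
}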
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
index 3557f4e7fc30..ba7565bc8104 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
@@ -324,6 +324,7 @@ enum smu_table_id
SMU_TABLE_OVERDRIVE,
SMU_TABLE_I2C_COMMANDS,
SMU_TABLE_PACE,
+ SMU_TABLE_ECCINFO,
SMU_TABLE_COUNT,
};
@@ -340,6 +341,7 @@ struct smu_table_context
void *max_sustainable_clocks;
struct smu_bios_boot_up_values boot_values;
void *driver_pptable;
+ void *ecc_table;
struct smu_table tables[SMU_TABLE_COUNT];
/*
* The driver table is just a staging buffer for
@@ -472,7 +474,14 @@ struct cmn2asic_mapping {
int map_to;
};
+struct stb_context {
+ uint32_t stb_buf_size;
+ bool enabled;
+ spinlock_t lock;
+};
+
#define WORKLOAD_POLICY_MAX 7
+
struct smu_context
{
struct amdgpu_device *adev;
@@ -559,6 +568,8 @@ struct smu_context
uint16_t cpu_core_num;
struct smu_user_dpm_profile user_dpm_profile;
+
+ struct stb_context stb_context;
};
struct i2c_adapter;
@@ -1246,9 +1257,9 @@ struct pptable_funcs {
int (*set_fine_grain_gfx_freq_parameters)(struct smu_context *smu);
/**
- * @set_light_sbr: Set light sbr mode for the SMU.
+ * @smu_handle_passthrough_sbr: Send message to SMU about special handling for SBR.
*/
- int (*set_light_sbr)(struct smu_context *smu, bool enable);
+ int (*smu_handle_passthrough_sbr)(struct smu_context *smu, bool enable);
/**
* @wait_for_event: Wait for events from SMU.
@@ -1261,6 +1272,17 @@ struct pptable_funcs {
* of SMUBUS table.
*/
int (*send_hbm_bad_pages_num)(struct smu_context *smu, uint32_t size);
+
+ /**
+	 * @get_ecc_info: message the SMU to get the ECC INFO table.
+ */
+ ssize_t (*get_ecc_info)(struct smu_context *smu, void *table);
+
+
+ /**
+ * @stb_collect_info: Collects Smart Trace Buffers data.
+ */
+ int (*stb_collect_info)(struct smu_context *smu, void *buf, uint32_t size);
};
typedef enum {
@@ -1393,10 +1415,13 @@ int smu_allow_xgmi_power_down(struct smu_context *smu, bool en);
int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value);
-int smu_set_light_sbr(struct smu_context *smu, bool enable);
+int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable);
int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event,
uint64_t event_arg);
+int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc);
+int smu_stb_collect_info(struct smu_context *smu, void *buff, uint32_t size);
+void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev);
#endif
#endif
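The new get_ecc_info and stb_collect_info members follow the pptable_funcs convention of optional callbacks: a NULL pointer means the ASIC does not implement the feature and the common wrapper reports -EOPNOTSUPP instead of dereferencing it. A reduced sketch of that dispatch pattern, using illustrative stand-in types rather than the real driver structures:

#include <errno.h>
#include <stddef.h>

struct example_ctx;

struct example_funcs {
        int (*get_ecc_info)(struct example_ctx *ctx, void *table);
};

static int example_get_ecc_info(const struct example_funcs *funcs,
                                struct example_ctx *ctx, void *table)
{
        /* Unimplemented on this ASIC: report "not supported", don't crash. */
        if (!funcs || !funcs->get_ecc_info)
                return -EOPNOTSUPP;

        return funcs->get_ecc_info(ctx, table);
}

int main(void)
{
        struct example_funcs funcs = { .get_ecc_info = NULL };

        return example_get_ecc_info(&funcs, NULL, NULL) == -EOPNOTSUPP ? 0 : 1;
}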
diff --git a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h b/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h
index a017983ff1fa..0f67c56c2863 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h
@@ -140,6 +140,8 @@
#define MAX_SW_I2C_COMMANDS 24
+#define ALDEBARAN_UMC_CHANNEL_NUM 32
+
typedef enum {
I2C_CONTROLLER_PORT_0, //CKSVII2C0
I2C_CONTROLLER_PORT_1, //CKSVII2C1
@@ -507,6 +509,19 @@ typedef struct {
uint32_t MmHubPadding[8]; // SMU internal use
} AvfsDebugTable_t;
+typedef struct {
+ uint64_t mca_umc_status;
+ uint64_t mca_umc_addr;
+ uint16_t ce_count_lo_chip;
+ uint16_t ce_count_hi_chip;
+
+ uint32_t eccPadding;
+} EccInfo_t;
+
+typedef struct {
+ EccInfo_t EccInfo[ALDEBARAN_UMC_CHANNEL_NUM];
+} EccInfoTable_t;
+
// These defines are used with the following messages:
// SMC_MSG_TransferTableDram2Smu
// SMC_MSG_TransferTableSmu2Dram
@@ -517,6 +532,7 @@ typedef struct {
#define TABLE_SMU_METRICS 4
#define TABLE_DRIVER_SMU_CONFIG 5
#define TABLE_I2C_COMMANDS 6
-#define TABLE_COUNT 7
+#define TABLE_ECCINFO 7
+#define TABLE_COUNT 8
#endif
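EccInfoTable_t is simply ALDEBARAN_UMC_CHANNEL_NUM per-channel records, so a consumer walks the array and folds the per-channel counters together. The lo/hi combination below is an assumption made for illustration; the real decode lives in the RAS code, not in this header.

#include <stdint.h>
#include <stdio.h>

#define ALDEBARAN_UMC_CHANNEL_NUM 32

typedef struct {
        uint64_t mca_umc_status;
        uint64_t mca_umc_addr;
        uint16_t ce_count_lo_chip;
        uint16_t ce_count_hi_chip;
        uint32_t eccPadding;
} ecc_info_example_t;

static uint64_t total_ce_count(const ecc_info_example_t *info)
{
        uint64_t total = 0;

        for (int i = 0; i < ALDEBARAN_UMC_CHANNEL_NUM; i++)
                total += ((uint64_t)info[i].ce_count_hi_chip << 16) |
                         info[i].ce_count_lo_chip;

        return total;
}

int main(void)
{
        ecc_info_example_t table[ALDEBARAN_UMC_CHANNEL_NUM] = { 0 };

        table[0].ce_count_lo_chip = 3;
        printf("total CE count: %llu\n",
               (unsigned long long)total_ce_count(table));
        return 0;
}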
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_types.h b/drivers/gpu/drm/amd/pm/inc/smu_types.h
index 18b862a90fbe..ff8a0bcbd290 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_types.h
@@ -229,7 +229,8 @@
__SMU_DUMMY_MAP(BoardPowerCalibration), \
__SMU_DUMMY_MAP(RequestGfxclk), \
__SMU_DUMMY_MAP(ForceGfxVid), \
- __SMU_DUMMY_MAP(UnforceGfxVid),
+ __SMU_DUMMY_MAP(UnforceGfxVid), \
+ __SMU_DUMMY_MAP(HeavySBR),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
index 2d422e6a9feb..acb3be292096 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
@@ -312,7 +312,7 @@ int smu_v11_0_deep_sleep_control(struct smu_context *smu,
void smu_v11_0_interrupt_work(struct smu_context *smu);
-int smu_v11_0_set_light_sbr(struct smu_context *smu, bool enable);
+int smu_v11_0_handle_passthrough_sbr(struct smu_context *smu, bool enable);
int smu_v11_0_restore_user_od_settings(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
index e5d3b0d1a032..44af23ae059e 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
@@ -27,7 +27,9 @@
#define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
-#define SMU13_DRIVER_IF_VERSION_ALDE 0x07
+#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
+
+#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
/* MP Apertures */
#define MP0_Public 0x03800000
@@ -216,7 +218,6 @@ int smu_v13_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
int smu_v13_0_baco_enter(struct smu_context *smu);
int smu_v13_0_baco_exit(struct smu_context *smu);
-int smu_v13_0_mode1_reset(struct smu_context *smu);
int smu_v13_0_mode2_reset(struct smu_context *smu);
int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 8d796ed3b7d1..3ab67b232cd4 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -1328,7 +1328,12 @@ static int pp_set_powergating_by_smu(void *handle,
pp_dpm_powergate_vce(handle, gate);
break;
case AMD_IP_BLOCK_TYPE_GMC:
- pp_dpm_powergate_mmhub(handle);
+ /*
+		 * For now this is only used on PICASSO,
+		 * and only the "gate" operation is supported.
+ */
+ if (gate)
+ pp_dpm_powergate_mmhub(handle);
break;
case AMD_IP_BLOCK_TYPE_GFX:
ret = pp_dpm_powergate_gfx(handle, gate);
@@ -1551,7 +1556,7 @@ static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
static int pp_asic_reset_mode_2(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
+ int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index 258c573acc97..9ddd8491ff00 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -1024,8 +1024,6 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
uint32_t min_freq, max_freq = 0;
uint32_t ret = 0;
- phm_get_sysfs_buf(&buf, &size);
-
switch (type) {
case PP_SCLK:
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
@@ -1038,13 +1036,13 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
else
i = 1;
- size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
+ size += sprintf(buf + size, "0: %uMhz %s\n",
data->gfx_min_freq_limit/100,
i == 0 ? "*" : "");
- size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
+ size += sprintf(buf + size, "1: %uMhz %s\n",
i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK,
i == 1 ? "*" : "");
- size += sysfs_emit_at(buf, size, "2: %uMhz %s\n",
+ size += sprintf(buf + size, "2: %uMhz %s\n",
data->gfx_max_freq_limit/100,
i == 2 ? "*" : "");
break;
@@ -1052,7 +1050,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);
for (i = 0; i < mclk_table->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i,
mclk_table->entries[i].clk / 100,
((mclk_table->entries[i].clk / 100)
@@ -1067,10 +1065,10 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (ret)
return ret;
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
- size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
+ size += sprintf(buf + size, "%s:\n", "OD_SCLK");
+ size += sprintf(buf + size, "0: %10uMhz\n",
(data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq);
- size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
+ size += sprintf(buf + size, "1: %10uMhz\n",
(data->gfx_actual_soft_max_freq > 0) ? data->gfx_actual_soft_max_freq : max_freq);
}
break;
@@ -1083,8 +1081,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (ret)
return ret;
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
- size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
+ size += sprintf(buf + size, "%s:\n", "OD_RANGE");
+ size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
min_freq, max_freq);
}
break;
@@ -1441,13 +1439,6 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{70, 90, 0, 0,},
{30, 60, 0, 6,},
};
- static const char *profile_name[6] = {
- "BOOTUP_DEFAULT",
- "3D_FULL_SCREEN",
- "POWER_SAVING",
- "VIDEO",
- "VR",
- "COMPUTE"};
static const char *title[6] = {"NUM",
"MODE_NAME",
"BUSY_SET_POINT",
@@ -1465,7 +1456,7 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
for (i = 0; i <= PP_SMC_POWER_PROFILE_COMPUTE; i++)
size += sysfs_emit_at(buf, size, "%3d %14s%s: %14d %3d %10d %14d\n",
- i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
+ i, amdgpu_pp_profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
profile_mode_setting[i][0], profile_mode_setting[i][1],
profile_mode_setting[i][2], profile_mode_setting[i][3]);
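The switch from sysfs_emit_at() back to sprintf() in these print helpers is presumably tied to sysfs_emit_at()'s requirement that the base buffer be the start of a sysfs page: once a caller concatenates output by passing buf plus an offset, that alignment no longer holds and sysfs_emit_at() refuses to write. The snippet below only mirrors the alignment constraint; the caller shape is an assumption.

#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_PAGE_SIZE 4096

/* sysfs_emit_at()-style precondition: buf must sit at a page boundary. */
static bool base_is_page_aligned(const char *buf)
{
        return ((uintptr_t)buf & (EXAMPLE_PAGE_SIZE - 1)) == 0;
}

int main(void)
{
        static char page[EXAMPLE_PAGE_SIZE]
                __attribute__((aligned(EXAMPLE_PAGE_SIZE)));

        /* The page itself qualifies; page + offset, as seen after a first
         * print helper has already written into it, does not. */
        return (base_is_page_aligned(page) &&
                !base_is_page_aligned(page + 64)) ? 0 : 1;
}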
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index aceebf584225..cd99db0dc2be 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -4914,8 +4914,6 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
int size = 0;
uint32_t i, now, clock, pcie_speed;
- phm_get_sysfs_buf(&buf, &size);
-
switch (type) {
case PP_SCLK:
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);
@@ -4928,7 +4926,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
now = i;
for (i = 0; i < sclk_table->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, sclk_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
@@ -4943,7 +4941,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
now = i;
for (i = 0; i < mclk_table->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, mclk_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
@@ -4957,7 +4955,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
now = i;
for (i = 0; i < pcie_table->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %s %s\n", i,
+ size += sprintf(buf + size, "%d: %s %s\n", i,
(pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
(pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
(pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
@@ -4965,32 +4963,32 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case OD_SCLK:
if (hwmgr->od_enabled) {
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+ size += sprintf(buf + size, "%s:\n", "OD_SCLK");
for (i = 0; i < odn_sclk_table->num_of_pl; i++)
- size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
+ size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
i, odn_sclk_table->entries[i].clock/100,
odn_sclk_table->entries[i].vddc);
}
break;
case OD_MCLK:
if (hwmgr->od_enabled) {
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
+ size += sprintf(buf + size, "%s:\n", "OD_MCLK");
for (i = 0; i < odn_mclk_table->num_of_pl; i++)
- size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
+ size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
i, odn_mclk_table->entries[i].clock/100,
odn_mclk_table->entries[i].vddc);
}
break;
case OD_RANGE:
if (hwmgr->od_enabled) {
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
- size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
+ size += sprintf(buf + size, "%s:\n", "OD_RANGE");
+ size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
- size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n",
+ size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
- size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n",
+ size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
data->odn_dpm_table.min_vddc,
data->odn_dpm_table.max_vddc);
}
@@ -5500,14 +5498,6 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
uint32_t i, size = 0;
uint32_t len;
- static const char *profile_name[7] = {"BOOTUP_DEFAULT",
- "3D_FULL_SCREEN",
- "POWER_SAVING",
- "VIDEO",
- "VR",
- "COMPUTE",
- "CUSTOM"};
-
static const char *title[8] = {"NUM",
"MODE_NAME",
"SCLK_UP_HYST",
@@ -5531,7 +5521,7 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
for (i = 0; i < len; i++) {
if (i == hwmgr->power_profile_mode) {
size += sysfs_emit_at(buf, size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
- i, profile_name[i], "*",
+ i, amdgpu_pp_profile_name[i], "*",
data->current_profile_setting.sclk_up_hyst,
data->current_profile_setting.sclk_down_hyst,
data->current_profile_setting.sclk_activity,
@@ -5542,12 +5532,12 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
}
if (smu7_profiling[i].bupdate_sclk)
size += sysfs_emit_at(buf, size, "%3d %16s: %8d %16d %16d ",
- i, profile_name[i], smu7_profiling[i].sclk_up_hyst,
+ i, amdgpu_pp_profile_name[i], smu7_profiling[i].sclk_up_hyst,
smu7_profiling[i].sclk_down_hyst,
smu7_profiling[i].sclk_activity);
else
size += sysfs_emit_at(buf, size, "%3d %16s: %8s %16s %16s ",
- i, profile_name[i], "-", "-", "-");
+ i, amdgpu_pp_profile_name[i], "-", "-", "-");
if (smu7_profiling[i].bupdate_mclk)
size += sysfs_emit_at(buf, size, "%16d %16d %16d\n",
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
index 8e28a8eecefc..03bf8f069222 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
@@ -1550,8 +1550,6 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
uint32_t i, now;
int size = 0;
- phm_get_sysfs_buf(&buf, &size);
-
switch (type) {
case PP_SCLK:
now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
@@ -1561,7 +1559,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
CURR_SCLK_INDEX);
for (i = 0; i < sclk_table->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, sclk_table->entries[i].clk / 100,
(i == now) ? "*" : "");
break;
@@ -1573,7 +1571,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
CURR_MCLK_INDEX);
for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100,
(SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : "");
break;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index c981fc2882f0..3f040be0d158 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -4639,8 +4639,6 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
int i, now, size = 0, count = 0;
- phm_get_sysfs_buf(&buf, &size);
-
switch (type) {
case PP_SCLK:
if (data->registry_data.sclk_dpm_key_disabled)
@@ -4654,7 +4652,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
else
count = sclk_table->count;
for (i = 0; i < count; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, sclk_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
@@ -4665,7 +4663,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
for (i = 0; i < mclk_table->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, mclk_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
@@ -4676,7 +4674,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
for (i = 0; i < soc_table->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, soc_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
@@ -4688,7 +4686,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now);
for (i = 0; i < dcef_table->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, dcef_table->dpm_levels[i].value / 100,
(dcef_table->dpm_levels[i].value / 100 == now) ?
"*" : "");
@@ -4702,7 +4700,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
gen_speed = pptable->PcieGenSpeed[i];
lane_width = pptable->PcieLaneCount[i];
- size += sysfs_emit_at(buf, size, "%d: %s %s %s\n", i,
+ size += sprintf(buf + size, "%d: %s %s %s\n", i,
(gen_speed == 0) ? "2.5GT/s," :
(gen_speed == 1) ? "5.0GT/s," :
(gen_speed == 2) ? "8.0GT/s," :
@@ -4721,34 +4719,34 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
case OD_SCLK:
if (hwmgr->od_enabled) {
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+ size += sprintf(buf + size, "%s:\n", "OD_SCLK");
podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
for (i = 0; i < podn_vdd_dep->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n",
+ size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
i, podn_vdd_dep->entries[i].clk / 100,
podn_vdd_dep->entries[i].vddc);
}
break;
case OD_MCLK:
if (hwmgr->od_enabled) {
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
+ size += sprintf(buf + size, "%s:\n", "OD_MCLK");
podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
for (i = 0; i < podn_vdd_dep->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n",
+ size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
i, podn_vdd_dep->entries[i].clk/100,
podn_vdd_dep->entries[i].vddc);
}
break;
case OD_RANGE:
if (hwmgr->od_enabled) {
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
- size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
+ size += sprintf(buf + size, "%s:\n", "OD_RANGE");
+ size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
data->golden_dpm_table.gfx_table.dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
- size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n",
+ size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
data->golden_dpm_table.mem_table.dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
- size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n",
+ size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
data->odn_dpm_table.min_vddc,
data->odn_dpm_table.max_vddc);
}
@@ -5099,13 +5097,6 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{70, 90, 0, 0,},
{30, 60, 0, 6,},
};
- static const char *profile_name[7] = {"BOOTUP_DEFAULT",
- "3D_FULL_SCREEN",
- "POWER_SAVING",
- "VIDEO",
- "VR",
- "COMPUTE",
- "CUSTOM"};
static const char *title[6] = {"NUM",
"MODE_NAME",
"BUSY_SET_POINT",
@@ -5123,11 +5114,12 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
for (i = 0; i < PP_SMC_POWER_PROFILE_CUSTOM; i++)
size += sysfs_emit_at(buf, size, "%3d %14s%s: %14d %3d %10d %14d\n",
- i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
+ i, amdgpu_pp_profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
profile_mode_setting[i][0], profile_mode_setting[i][1],
profile_mode_setting[i][2], profile_mode_setting[i][3]);
+
size += sysfs_emit_at(buf, size, "%3d %14s%s: %14d %3d %10d %14d\n", i,
- profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
+ amdgpu_pp_profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
data->custom_profile_mode[0], data->custom_profile_mode[1],
data->custom_profile_mode[2], data->custom_profile_mode[3]);
return size;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
index f7e783e1c888..a2f4d6773d45 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
@@ -2246,8 +2246,6 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
int i, now, size = 0;
struct pp_clock_levels_with_latency clocks;
- phm_get_sysfs_buf(&buf, &size);
-
switch (type) {
case PP_SCLK:
PP_ASSERT_WITH_CODE(
@@ -2260,7 +2258,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
"Attempt to get gfx clk levels Failed!",
return -1);
for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
break;
@@ -2276,7 +2274,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
"Attempt to get memory clk levels Failed!",
return -1);
for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
break;
@@ -2294,7 +2292,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
"Attempt to get soc clk levels Failed!",
return -1);
for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
break;
@@ -2312,7 +2310,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
"Attempt to get dcef clk levels Failed!",
return -1);
for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
break;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
index 03e63be4ee27..97b3ad369046 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
@@ -3366,8 +3366,6 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
int ret = 0;
uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
- phm_get_sysfs_buf(&buf, &size);
-
switch (type) {
case PP_SCLK:
ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now);
@@ -3376,13 +3374,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
return ret);
if (vega20_get_sclks(hwmgr, &clocks)) {
- size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
+ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
now / 100);
break;
}
for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
break;
@@ -3394,13 +3392,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
return ret);
if (vega20_get_memclocks(hwmgr, &clocks)) {
- size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
+ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
now / 100);
break;
}
for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
break;
@@ -3412,13 +3410,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
return ret);
if (vega20_get_socclocks(hwmgr, &clocks)) {
- size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
+ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
now / 100);
break;
}
for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
break;
@@ -3430,7 +3428,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
return ret);
for (i = 0; i < fclk_dpm_table->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, fclk_dpm_table->dpm_levels[i].value,
fclk_dpm_table->dpm_levels[i].value == (now / 100) ? "*" : "");
break;
@@ -3442,13 +3440,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
return ret);
if (vega20_get_dcefclocks(hwmgr, &clocks)) {
- size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
+ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
now / 100);
break;
}
for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
break;
@@ -3462,7 +3460,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
gen_speed = pptable->PcieGenSpeed[i];
lane_width = pptable->PcieLaneCount[i];
- size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
+ size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
(gen_speed == 0) ? "2.5GT/s," :
(gen_speed == 1) ? "5.0GT/s," :
(gen_speed == 2) ? "8.0GT/s," :
@@ -3483,18 +3481,18 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
case OD_SCLK:
if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
- size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
+ size += sprintf(buf + size, "%s:\n", "OD_SCLK");
+ size += sprintf(buf + size, "0: %10uMhz\n",
od_table->GfxclkFmin);
- size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
+ size += sprintf(buf + size, "1: %10uMhz\n",
od_table->GfxclkFmax);
}
break;
case OD_MCLK:
if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
- size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
+ size += sprintf(buf + size, "%s:\n", "OD_MCLK");
+ size += sprintf(buf + size, "1: %10uMhz\n",
od_table->UclkFmax);
}
@@ -3507,14 +3505,14 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_VDDC_CURVE");
- size += sysfs_emit_at(buf, size, "0: %10uMhz %10dmV\n",
+ size += sprintf(buf + size, "%s:\n", "OD_VDDC_CURVE");
+ size += sprintf(buf + size, "0: %10uMhz %10dmV\n",
od_table->GfxclkFreq1,
od_table->GfxclkVolt1 / VOLTAGE_SCALE);
- size += sysfs_emit_at(buf, size, "1: %10uMhz %10dmV\n",
+ size += sprintf(buf + size, "1: %10uMhz %10dmV\n",
od_table->GfxclkFreq2,
od_table->GfxclkVolt2 / VOLTAGE_SCALE);
- size += sysfs_emit_at(buf, size, "2: %10uMhz %10dmV\n",
+ size += sprintf(buf + size, "2: %10uMhz %10dmV\n",
od_table->GfxclkFreq3,
od_table->GfxclkVolt3 / VOLTAGE_SCALE);
}
@@ -3522,17 +3520,17 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case OD_RANGE:
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ size += sprintf(buf + size, "%s:\n", "OD_RANGE");
if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
- size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
+ size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value,
od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value);
}
if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
- size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
+ size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
}
@@ -3543,22 +3541,22 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
- size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
+ size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value,
od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value);
- size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
+ size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value,
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value);
- size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
+ size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value,
od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value);
- size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
+ size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value,
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value);
- size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
+ size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value,
od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value);
- size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
+ size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value,
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value);
}
@@ -3982,14 +3980,6 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
DpmActivityMonitorCoeffInt_t activity_monitor;
uint32_t i, size = 0;
uint16_t workload_type = 0;
- static const char *profile_name[] = {
- "BOOTUP_DEFAULT",
- "3D_FULL_SCREEN",
- "POWER_SAVING",
- "VIDEO",
- "VR",
- "COMPUTE",
- "CUSTOM"};
static const char *title[] = {
"PROFILE_INDEX(NAME)",
"CLOCK_TYPE(NAME)",
@@ -4023,7 +4013,7 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
return result);
size += sysfs_emit_at(buf, size, "%2d %14s%s:\n",
- i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ");
+ i, amdgpu_pp_profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ");
size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 01168b8955bf..76f95e8ada4c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -277,8 +277,12 @@ static int smu_dpm_set_power_gate(void *handle,
struct smu_context *smu = handle;
int ret = 0;
- if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
+ dev_WARN(smu->adev->dev,
+ "SMU uninitialized but power %s requested for %u!\n",
+ gate ? "gate" : "ungate", block_type);
return -EOPNOTSUPP;
+ }
switch (block_type) {
/*
@@ -1153,6 +1157,8 @@ static int smu_smc_hw_setup(struct smu_context *smu)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 0, 12):
ret = smu_system_features_control(smu, true);
+ if (ret)
+ dev_err(adev->dev, "Failed system features control!\n");
break;
default:
break;
@@ -1277,8 +1283,10 @@ static int smu_smc_hw_setup(struct smu_context *smu)
}
ret = smu_notify_display_change(smu);
- if (ret)
+ if (ret) {
+ dev_err(adev->dev, "Failed to notify display change!\n");
return ret;
+ }
/*
* Set min deep sleep dce fclk with bootup value from vbios via
@@ -1286,8 +1294,6 @@ static int smu_smc_hw_setup(struct smu_context *smu)
*/
ret = smu_set_min_dcef_deep_sleep(smu,
smu->smu_table.boot_values.dcefclk / 100);
- if (ret)
- return ret;
return ret;
}
@@ -1344,7 +1350,6 @@ static int smu_hw_init(void *handle)
}
if (smu->is_apu) {
- smu_powergate_sdma(&adev->smu, false);
smu_dpm_set_vcn_enable(smu, true);
smu_dpm_set_jpeg_enable(smu, true);
smu_set_gfx_cgpg(&adev->smu, true);
@@ -1468,7 +1473,7 @@ static int smu_disable_dpms(struct smu_context *smu)
dev_err(adev->dev, "Failed to disable smu features.\n");
}
- if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0) &&
+ if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) &&
adev->gfx.rlc.funcs->stop)
adev->gfx.rlc.funcs->stop(adev);
@@ -1506,10 +1511,6 @@ static int smu_hw_fini(void *handle)
if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
- if (smu->is_apu) {
- smu_powergate_sdma(&adev->smu, true);
- }
-
smu_dpm_set_vcn_enable(smu, false);
smu_dpm_set_jpeg_enable(smu, false);
@@ -1568,9 +1569,7 @@ static int smu_suspend(void *handle)
smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
- /* skip CGPG when in S0ix */
- if (smu->is_apu && !adev->in_s0ix)
- smu_set_gfx_cgpg(&adev->smu, false);
+ smu_set_gfx_cgpg(&adev->smu, false);
return 0;
}
@@ -1601,8 +1600,7 @@ static int smu_resume(void *handle)
return ret;
}
- if (smu->is_apu)
- smu_set_gfx_cgpg(&adev->smu, true);
+ smu_set_gfx_cgpg(&adev->smu, true);
smu->disable_uclk_switch = 0;
@@ -3060,18 +3058,32 @@ static int smu_gfx_state_change_set(void *handle,
return ret;
}
-int smu_set_light_sbr(struct smu_context *smu, bool enable)
+int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
{
int ret = 0;
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs->set_light_sbr)
- ret = smu->ppt_funcs->set_light_sbr(smu, enable);
+ if (smu->ppt_funcs->smu_handle_passthrough_sbr)
+ ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);
mutex_unlock(&smu->mutex);
return ret;
}
+int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
+{
+ int ret = -EOPNOTSUPP;
+
+ mutex_lock(&smu->mutex);
+ if (smu->ppt_funcs &&
+ smu->ppt_funcs->get_ecc_info)
+ ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+
+}
+
static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
{
struct smu_context *smu = handle;
@@ -3161,3 +3173,107 @@ int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event,
return ret;
}
+
+int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
+{
+
+ if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled)
+ return -EOPNOTSUPP;
+
+	/* Confirm that the allocated buffer is of the correct size */
+ if (size != smu->stb_context.stb_buf_size)
+ return -EINVAL;
+
+ /*
+ * No need to lock smu mutex as we access STB directly through MMIO
+ * and not going through SMU messaging route (for now at least).
+ * For registers access rely on implementation internal locking.
+ */
+ return smu->ppt_funcs->stb_collect_info(smu, buf, size);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
+{
+ struct amdgpu_device *adev = filp->f_inode->i_private;
+ struct smu_context *smu = &adev->smu;
+ unsigned char *buf;
+ int r;
+
+ buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size);
+ if (r)
+ goto out;
+
+ filp->private_data = buf;
+
+ return 0;
+
+out:
+ kvfree(buf);
+ return r;
+}
+
+static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
+ loff_t *pos)
+{
+ struct amdgpu_device *adev = filp->f_inode->i_private;
+ struct smu_context *smu = &adev->smu;
+
+
+ if (!filp->private_data)
+ return -EINVAL;
+
+ return simple_read_from_buffer(buf,
+ size,
+ pos, filp->private_data,
+ smu->stb_context.stb_buf_size);
+}
+
+static int smu_stb_debugfs_release(struct inode *inode, struct file *filp)
+{
+ kvfree(filp->private_data);
+ filp->private_data = NULL;
+
+ return 0;
+}
+
+/*
+ * We have to define not only the read method but also
+ * open and release, because .read returns at most PAGE_SIZE
+ * of data per call and is therefore invoked multiple times.
+ * We allocate the STB buffer in .open and release it
+ * in .release.
+ */
+static const struct file_operations smu_stb_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = smu_stb_debugfs_open,
+ .read = smu_stb_debugfs_read,
+ .release = smu_stb_debugfs_release,
+ .llseek = default_llseek,
+};
+
+#endif
+
+void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+
+ struct smu_context *smu = &adev->smu;
+
+ if (!smu->stb_context.stb_buf_size)
+ return;
+
+ debugfs_create_file_size("amdgpu_smu_stb_dump",
+ S_IRUSR,
+ adev_to_drm(adev)->primary->debugfs_root,
+ adev,
+ &smu_stb_debugfs_fops,
+ smu->stb_context.stb_buf_size);
+#endif
+
+}
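Once amdgpu_smu_stb_debug_fs_init() has registered the node, the whole Smart Trace Buffer can be pulled from user space by reading the new debugfs file; the path below assumes the default debugfs mount and DRM card index 0.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        /* Path is an assumption; the real location depends on the card index. */
        FILE *f = fopen("/sys/kernel/debug/dri/0/amdgpu_smu_stb_dump", "rb");
        unsigned char byte;
        size_t n = 0;

        if (!f) {
                perror("amdgpu_smu_stb_dump");
                return EXIT_FAILURE;
        }

        while (fread(&byte, 1, 1, f) == 1)
                n++;

        fclose(f);
        printf("STB dump: %zu bytes\n", n);
        return EXIT_SUCCESS;
}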
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index fd1d30a93db5..505d2fb94fd9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -295,16 +295,6 @@ static int arcturus_allocate_dpm_context(struct smu_context *smu)
return -ENOMEM;
smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context);
- smu_dpm->dpm_current_power_state = kzalloc(sizeof(struct smu_power_state),
- GFP_KERNEL);
- if (!smu_dpm->dpm_current_power_state)
- return -ENOMEM;
-
- smu_dpm->dpm_request_power_state = kzalloc(sizeof(struct smu_power_state),
- GFP_KERNEL);
- if (!smu_dpm->dpm_request_power_state)
- return -ENOMEM;
-
return 0;
}
@@ -1389,14 +1379,6 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
char *buf)
{
DpmActivityMonitorCoeffInt_t activity_monitor;
- static const char *profile_name[] = {
- "BOOTUP_DEFAULT",
- "3D_FULL_SCREEN",
- "POWER_SAVING",
- "VIDEO",
- "VR",
- "COMPUTE",
- "CUSTOM"};
static const char *title[] = {
"PROFILE_INDEX(NAME)",
"CLOCK_TYPE(NAME)",
@@ -1453,7 +1435,7 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
}
size += sysfs_emit_at(buf, size, "%2d %14s%s\n",
- i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+ i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
if (smu_version >= 0x360d00) {
size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
@@ -2490,7 +2472,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.deep_sleep_control = smu_v11_0_deep_sleep_control,
.get_fan_parameters = arcturus_get_fan_parameters,
.interrupt_work = smu_v11_0_interrupt_work,
- .set_light_sbr = smu_v11_0_set_light_sbr,
+ .smu_handle_passthrough_sbr = smu_v11_0_handle_passthrough_sbr,
.set_mp1_state = smu_cmn_set_mp1_state,
};
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 60a557068ea4..2bb7816b245a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -1713,14 +1713,6 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
DpmActivityMonitorCoeffInt_t activity_monitor;
uint32_t i, size = 0;
int16_t workload_type = 0;
- static const char *profile_name[] = {
- "BOOTUP_DEFAULT",
- "3D_FULL_SCREEN",
- "POWER_SAVING",
- "VIDEO",
- "VR",
- "COMPUTE",
- "CUSTOM"};
static const char *title[] = {
"PROFILE_INDEX(NAME)",
"CLOCK_TYPE(NAME)",
@@ -1759,7 +1751,7 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
}
size += sysfs_emit_at(buf, size, "%2d %14s%s:\n",
- i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+ i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index a4108025fe29..777f717c37ae 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -80,6 +80,9 @@
(*member) = (smu->smu_table.driver_pptable + offsetof(PPTable_t, field));\
} while(0)
+/* STB FIFO depth is in 64-bit units */
+#define SIENNA_CICHLID_STB_DEPTH_UNIT_BYTES 8
+
static int get_table_size(struct smu_context *smu)
{
if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))
@@ -650,6 +653,8 @@ static int sienna_cichlid_allocate_dpm_context(struct smu_context *smu)
return 0;
}
+static void sienna_cichlid_stb_init(struct smu_context *smu);
+
static int sienna_cichlid_init_smc_tables(struct smu_context *smu)
{
int ret = 0;
@@ -662,6 +667,8 @@ static int sienna_cichlid_init_smc_tables(struct smu_context *smu)
if (ret)
return ret;
+ sienna_cichlid_stb_init(smu);
+
return smu_v11_0_init_smc_tables(smu);
}
@@ -1171,7 +1178,7 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, uint32_t mask)
{
struct amdgpu_device *adev = smu->adev;
- int ret = 0, size = 0;
+ int ret = 0;
uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0;
soft_min_level = mask ? (ffs(mask) - 1) : 0;
@@ -1216,7 +1223,7 @@ forec_level_out:
if ((clk_type == SMU_GFXCLK) || (clk_type == SMU_SCLK))
amdgpu_gfx_off_ctrl(adev, true);
- return size;
+ return 0;
}
static int sienna_cichlid_populate_umd_state_clk(struct smu_context *smu)
@@ -1342,14 +1349,6 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
uint32_t i, size = 0;
int16_t workload_type = 0;
- static const char *profile_name[] = {
- "BOOTUP_DEFAULT",
- "3D_FULL_SCREEN",
- "POWER_SAVING",
- "VIDEO",
- "VR",
- "COMPUTE",
- "CUSTOM"};
static const char *title[] = {
"PROFILE_INDEX(NAME)",
"CLOCK_TYPE(NAME)",
@@ -1388,7 +1387,7 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
}
size += sysfs_emit_at(buf, size, "%2d %14s%s:\n",
- i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+ i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
@@ -2135,7 +2134,13 @@ static int sienna_cichlid_od_edit_dpm_table(struct smu_context *smu,
static int sienna_cichlid_run_btc(struct smu_context *smu)
{
- return smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
+ int res;
+
+ res = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
+ if (res)
+ dev_err(smu->adev->dev, "RunDcBtc failed!\n");
+
+ return res;
}
static int sienna_cichlid_baco_enter(struct smu_context *smu)
@@ -3619,6 +3624,16 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->energy_accumulator =
use_metrics_v2 ? metrics_v2->EnergyAccumulator : metrics->EnergyAccumulator;
+ if (metrics->CurrGfxVoltageOffset)
+ gpu_metrics->voltage_gfx =
+ (155000 - 625 * metrics->CurrGfxVoltageOffset) / 100;
+ if (metrics->CurrMemVidOffset)
+ gpu_metrics->voltage_mem =
+ (155000 - 625 * metrics->CurrMemVidOffset) / 100;
+ if (metrics->CurrSocVoltageOffset)
+ gpu_metrics->voltage_soc =
+ (155000 - 625 * metrics->CurrSocVoltageOffset) / 100;
+
average_gfx_activity = use_metrics_v2 ? metrics_v2->AverageGfxActivity : metrics->AverageGfxActivity;
if (average_gfx_activity <= SMU_11_0_7_GFX_BUSY_THRESHOLD)
gpu_metrics->average_gfxclk_frequency =
@@ -3793,6 +3808,53 @@ static int sienna_cichlid_set_mp1_state(struct smu_context *smu,
return ret;
}
+static void sienna_cichlid_stb_init(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t reg;
+
+ reg = RREG32_PCIE(MP1_Public | smnMP1_PMI_3_START);
+ smu->stb_context.enabled = REG_GET_FIELD(reg, MP1_PMI_3_START, ENABLE);
+
+ /* STB is disabled */
+ if (!smu->stb_context.enabled)
+ return;
+
+ spin_lock_init(&smu->stb_context.lock);
+
+	/* STB buffer size in bytes as a function of FIFO depth */
+ reg = RREG32_PCIE(MP1_Public | smnMP1_PMI_3_FIFO);
+ smu->stb_context.stb_buf_size = 1 << REG_GET_FIELD(reg, MP1_PMI_3_FIFO, DEPTH);
+ smu->stb_context.stb_buf_size *= SIENNA_CICHLID_STB_DEPTH_UNIT_BYTES;
+
+ dev_info(smu->adev->dev, "STB initialized to %d entries",
+ smu->stb_context.stb_buf_size / SIENNA_CICHLID_STB_DEPTH_UNIT_BYTES);
+
+}
+
+int sienna_cichlid_stb_get_data_direct(struct smu_context *smu,
+ void *buf,
+ uint32_t size)
+{
+ uint32_t *p = buf;
+ struct amdgpu_device *adev = smu->adev;
+
+	/* No need to disable interrupts for now as we don't take this lock from an ISR yet */
+ spin_lock(&smu->stb_context.lock);
+
+ /*
+	 * Read the STB FIFO in 32-bit units since that is the widest
+	 * accessor window (register width) we have.
+ */
+ buf = ((char *) buf) + size;
+ while ((void *)p < buf)
+ *p++ = cpu_to_le32(RREG32_PCIE(MP1_Public | smnMP1_PMI_3));
+
+ spin_unlock(&smu->stb_context.lock);
+
+ return 0;
+}
+
static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.get_allowed_feature_mask = sienna_cichlid_get_allowed_feature_mask,
.set_default_dpm_table = sienna_cichlid_set_default_dpm_table,
@@ -3882,6 +3944,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.interrupt_work = smu_v11_0_interrupt_work,
.gpo_control = sienna_cichlid_gpo_control,
.set_mp1_state = sienna_cichlid_set_mp1_state,
+ .stb_collect_info = sienna_cichlid_stb_get_data_direct,
};
void sienna_cichlid_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 28b7c0562b99..4e9e2cf39859 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -1724,7 +1724,7 @@ int smu_v11_0_mode1_reset(struct smu_context *smu)
return ret;
}
-int smu_v11_0_set_light_sbr(struct smu_context *smu, bool enable)
+int smu_v11_0_handle_passthrough_sbr(struct smu_context *smu, bool enable)
{
int ret = 0;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index c02ed65ffa38..5cb07ed227fb 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -1039,14 +1039,6 @@ failed:
static int vangogh_get_power_profile_mode(struct smu_context *smu,
char *buf)
{
- static const char *profile_name[] = {
- "BOOTUP_DEFAULT",
- "3D_FULL_SCREEN",
- "POWER_SAVING",
- "VIDEO",
- "VR",
- "COMPUTE",
- "CUSTOM"};
uint32_t i, size = 0;
int16_t workload_type = 0;
@@ -1066,7 +1058,7 @@ static int vangogh_get_power_profile_mode(struct smu_context *smu,
continue;
size += sysfs_emit_at(buf, size, "%2d %14s%s\n",
- i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+ i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
}
return size;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 145f13b8c977..25c4b135f830 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -1095,14 +1095,6 @@ static int renoir_set_watermarks_table(
static int renoir_get_power_profile_mode(struct smu_context *smu,
char *buf)
{
- static const char *profile_name[] = {
- "BOOTUP_DEFAULT",
- "3D_FULL_SCREEN",
- "POWER_SAVING",
- "VIDEO",
- "VR",
- "COMPUTE",
- "CUSTOM"};
uint32_t i, size = 0;
int16_t workload_type = 0;
@@ -1121,7 +1113,7 @@ static int renoir_get_power_profile_mode(struct smu_context *smu,
continue;
size += sysfs_emit_at(buf, size, "%2d %14s%s\n",
- i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+ i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
}
return size;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
index d60b8c5e8715..9c91e79c955f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
@@ -120,7 +120,8 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
{
- if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
+ /* So far SMU12 is only implemented for the Renoir series, so no APU check is needed here. */
+ if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) || smu->adev->in_s0ix)
return 0;
return smu_cmn_send_smc_msg_with_param(smu,
@@ -191,6 +192,9 @@ int smu_v12_0_fini_smc_tables(struct smu_context *smu)
kfree(smu_table->watermarks_table);
smu_table->watermarks_table = NULL;
+ kfree(smu_table->gpu_metrics_table);
+ smu_table->gpu_metrics_table = NULL;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 59a7d276541d..380811b91350 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -78,6 +78,12 @@
#define smnPCIE_ESM_CTRL 0x111003D0
+/*
+ * The SMU supports ECCTABLE since version 68.42.0,
+ * use this to check whether the ECCTABLE feature is supported.
+ */
+#define SUPPORT_ECCTABLE_SMU_VERSION 0x00442a00
+
static const struct smu_temperature_range smu13_thermal_policy[] =
{
{-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
@@ -135,6 +141,7 @@ static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT
MSG_MAP(SetUclkDpmMode, PPSMC_MSG_SetUclkDpmMode, 0),
MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0),
MSG_MAP(BoardPowerCalibration, PPSMC_MSG_BoardPowerCalibration, 0),
+ MSG_MAP(HeavySBR, PPSMC_MSG_HeavySBR, 0),
};
static const struct cmn2asic_mapping aldebaran_clk_map[SMU_CLK_COUNT] = {
@@ -190,6 +197,7 @@ static const struct cmn2asic_mapping aldebaran_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(SMU_METRICS),
TAB_MAP(DRIVER_SMU_CONFIG),
TAB_MAP(I2C_COMMANDS),
+ TAB_MAP(ECCINFO),
};
static const uint8_t aldebaran_throttler_map[] = {
@@ -223,6 +231,9 @@ static int aldebaran_tables_init(struct smu_context *smu)
SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
if (!smu_table->metrics_table)
return -ENOMEM;
@@ -235,6 +246,10 @@ static int aldebaran_tables_init(struct smu_context *smu)
return -ENOMEM;
}
+ smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
+ if (!smu_table->ecc_table)
+ return -ENOMEM;
+
return 0;
}
@@ -248,16 +263,6 @@ static int aldebaran_allocate_dpm_context(struct smu_context *smu)
return -ENOMEM;
smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context);
- smu_dpm->dpm_current_power_state = kzalloc(sizeof(struct smu_power_state),
- GFP_KERNEL);
- if (!smu_dpm->dpm_current_power_state)
- return -ENOMEM;
-
- smu_dpm->dpm_request_power_state = kzalloc(sizeof(struct smu_power_state),
- GFP_KERNEL);
- if (!smu_dpm->dpm_request_power_state)
- return -ENOMEM;
-
return 0;
}
@@ -1601,7 +1606,8 @@ out_unlock:
mutex_unlock(&smu->metrics_lock);
adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
- sprintf(adev->serial, "%016llx", adev->unique_id);
+ if (adev->serial[0] == '\0')
+ sprintf(adev->serial, "%016llx", adev->unique_id);
}
static bool aldebaran_is_baco_supported(struct smu_context *smu)
@@ -1621,7 +1627,7 @@ static int aldebaran_allow_xgmi_power_down(struct smu_context *smu, bool en)
{
return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GmiPwrDnControl,
- en ? 1 : 0,
+ en ? 0 : 1,
NULL);
}
@@ -1765,6 +1771,98 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
return sizeof(struct gpu_metrics_v1_3);
}
+static int aldebaran_check_ecc_table_support(struct smu_context *smu)
+{
+ uint32_t if_version = 0xff, smu_version = 0xff;
+ int ret = 0;
+
+ ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
+ if (ret) {
+ /* return not supported if we failed to get the smu_version */
+ ret = -EOPNOTSUPP;
+ }
+
+ if (smu_version < SUPPORT_ECCTABLE_SMU_VERSION)
+ ret = -EOPNOTSUPP;
+
+ return ret;
+}
+
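The SUPPORT_ECCTABLE_SMU_VERSION threshold checked above uses the same 16/8/8 major/minor/debug split that smu_v13_0_check_fw_version() decodes elsewhere in this patch; a minimal sketch of that encoding (the helper name is illustrative):

	/* Sketch: pack a PMFW version the way the comparisons in this file expect. */
	static inline uint32_t smu_pack_version_example(uint16_t major, uint8_t minor, uint8_t debug)
	{
		return ((uint32_t)major << 16) | ((uint32_t)minor << 8) | debug;
	}

	/* 68.42.0 -> (0x44 << 16) | (0x2a << 8) | 0x00 == 0x00442a00 */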
+static ssize_t aldebaran_get_ecc_info(struct smu_context *smu,
+ void *table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ EccInfoTable_t *ecc_table = NULL;
+ struct ecc_info_per_ch *ecc_info_per_channel = NULL;
+ int i, ret = 0;
+ struct umc_ecc_info *eccinfo = (struct umc_ecc_info *)table;
+
+ ret = aldebaran_check_ecc_table_support(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ECCINFO,
+ 0,
+ smu_table->ecc_table,
+ false);
+ if (ret) {
+ dev_info(smu->adev->dev, "Failed to export SMU ecc table!\n");
+ return ret;
+ }
+
+ ecc_table = (EccInfoTable_t *)smu_table->ecc_table;
+
+ for (i = 0; i < ALDEBARAN_UMC_CHANNEL_NUM; i++) {
+ ecc_info_per_channel = &(eccinfo->ecc[i]);
+ ecc_info_per_channel->ce_count_lo_chip =
+ ecc_table->EccInfo[i].ce_count_lo_chip;
+ ecc_info_per_channel->ce_count_hi_chip =
+ ecc_table->EccInfo[i].ce_count_hi_chip;
+ ecc_info_per_channel->mca_umc_status =
+ ecc_table->EccInfo[i].mca_umc_status;
+ ecc_info_per_channel->mca_umc_addr =
+ ecc_table->EccInfo[i].mca_umc_addr;
+ }
+
+ return ret;
+}
+
+static int aldebaran_mode1_reset(struct smu_context *smu)
+{
+ u32 smu_version, fatal_err, param;
+ int ret = 0;
+ struct amdgpu_device *adev = smu->adev;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ fatal_err = 0;
+ param = SMU_RESET_MODE_1;
+
+ /*
+ * PM FW supports SMU_MSG_GfxDeviceDriverReset from 68.07
+ */
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if (smu_version < 0x00440700) {
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
+ }
+ else {
+ /* fatal error triggered by RAS, PMFW supports the flag
+ * from 68.44.0 */
+ if ((smu_version >= 0x00442c00) && ras &&
+ atomic_read(&ras->in_recovery))
+ fatal_err = 1;
+
+ param |= (fatal_err << 16);
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GfxDeviceDriverReset, param, NULL);
+ }
+
+ if (!ret)
+ msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
+
+ return ret;
+}
+
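As a reading aid for the reset path above, a sketch of how the message argument is assembled (only SMU_RESET_MODE_1 in the low bits and the fatal-error flag at bit 16 are taken from the hunk; the helper name is hypothetical):

	/* Sketch: build the GfxDeviceDriverReset argument used above. */
	static inline u32 mode1_reset_param_example(bool fatal_err)
	{
		u32 param = SMU_RESET_MODE_1;	/* reset mode in the low bits */

		if (fatal_err)
			param |= 1u << 16;	/* fatal-error flag, PMFW >= 68.44.0 */

		return param;
	}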
static int aldebaran_mode2_reset(struct smu_context *smu)
{
u32 smu_version;
@@ -1816,6 +1914,14 @@ out:
return ret;
}
+static int aldebaran_smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
+{
+ int ret = 0;
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_HeavySBR, enable ? 1 : 0, NULL);
+
+ return ret;
+}
+
static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu)
{
#if 0
@@ -1925,13 +2031,15 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
.get_gpu_metrics = aldebaran_get_gpu_metrics,
.mode1_reset_is_support = aldebaran_is_mode1_reset_supported,
.mode2_reset_is_support = aldebaran_is_mode2_reset_supported,
- .mode1_reset = smu_v13_0_mode1_reset,
+ .smu_handle_passthrough_sbr = aldebaran_smu_handle_passthrough_sbr,
+ .mode1_reset = aldebaran_mode1_reset,
.set_mp1_state = aldebaran_set_mp1_state,
.mode2_reset = aldebaran_mode2_reset,
.wait_for_event = smu_v13_0_wait_for_event,
.i2c_init = aldebaran_i2c_control_init,
.i2c_fini = aldebaran_i2c_control_fini,
.send_hbm_bad_pages_num = aldebaran_smu_send_hbm_bad_page_num,
+ .get_ecc_info = aldebaran_get_ecc_info,
};
void aldebaran_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 35145db6eedf..b54790d3483e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -60,8 +60,6 @@ MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin");
#define SMU13_VOLTAGE_SCALE 4
-#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
-
#define LINK_WIDTH_MAX 6
#define LINK_SPEED_MAX 3
@@ -198,6 +196,7 @@ int smu_v13_0_check_fw_status(struct smu_context *smu)
int smu_v13_0_check_fw_version(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
uint32_t if_version = 0xff, smu_version = 0xff;
uint16_t smu_major;
uint8_t smu_minor, smu_debug;
@@ -210,8 +209,10 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
smu_major = (smu_version >> 16) & 0xffff;
smu_minor = (smu_version >> 8) & 0xff;
smu_debug = (smu_version >> 0) & 0xff;
+ if (smu->is_apu)
+ adev->pm.fw_version = smu_version;
- switch (smu->adev->ip_versions[MP1_HWIP][0]) {
+ switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(13, 0, 2):
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE;
break;
@@ -220,13 +221,15 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_YELLOW_CARP;
break;
default:
- dev_err(smu->adev->dev, "smu unsupported IP version: 0x%x.\n",
- smu->adev->ip_versions[MP1_HWIP][0]);
+ dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
+ adev->ip_versions[MP1_HWIP][0]);
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_INV;
break;
}
- dev_info(smu->adev->dev, "smu fw reported version = 0x%08x (%d.%d.%d)\n",
+ /* only for dGPU w/ SMU13 */
+ if (adev->pm.fw)
+ dev_dbg(adev->dev, "smu fw reported version = 0x%08x (%d.%d.%d)\n",
smu_version, smu_major, smu_minor, smu_debug);
/*
@@ -238,11 +241,11 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
* of halt driver loading.
*/
if (if_version != smu->smc_driver_if_version) {
- dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
+ dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
"smu fw version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
smu_version, smu_major, smu_minor, smu_debug);
- dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
+ dev_warn(adev->dev, "SMU driver if version not matched\n");
}
return ret;
@@ -430,8 +433,10 @@ int smu_v13_0_fini_smc_tables(struct smu_context *smu)
kfree(smu_table->hardcode_pptable);
smu_table->hardcode_pptable = NULL;
+ kfree(smu_table->ecc_table);
kfree(smu_table->metrics_table);
kfree(smu_table->watermarks_table);
+ smu_table->ecc_table = NULL;
smu_table->metrics_table = NULL;
smu_table->watermarks_table = NULL;
smu_table->metrics_time = 0;
@@ -1424,25 +1429,6 @@ int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu)
return ret;
}
-int smu_v13_0_mode1_reset(struct smu_context *smu)
-{
- u32 smu_version;
- int ret = 0;
- /*
- * PM FW support SMU_MSG_GfxDeviceDriverReset from 68.07
- */
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if (smu_version < 0x00440700)
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
- else
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_1, NULL);
-
- if (!ret)
- msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
-
- return ret;
-}
-
static int smu_v13_0_wait_for_reset_complete(struct smu_context *smu,
uint64_t event_arg)
{
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index ea6f50c08c5f..ee1a312fd497 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -94,10 +94,10 @@ static void smu_cmn_read_arg(struct smu_context *smu,
/**
* __smu_cmn_poll_stat -- poll for a status from the SMU
- * smu: a pointer to SMU context
+ * @smu: a pointer to SMU context
*
* Returns the status of the SMU, which could be,
- * 0, the SMU is busy with your previous command;
+ * 0, the SMU is busy with your command;
* 1, execution status: success, execution result: success;
* 0xFF, execution status: success, execution result: failure;
* 0xFE, unknown command;
@@ -257,10 +257,11 @@ int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
uint16_t msg_index,
uint32_t param)
{
+ struct amdgpu_device *adev = smu->adev;
u32 reg;
int res;
- if (smu->adev->no_hw_access)
+ if (adev->no_hw_access)
return 0;
reg = __smu_cmn_poll_stat(smu);
@@ -272,6 +273,12 @@ int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
__smu_cmn_send_msg(smu, msg_index, param);
res = 0;
Out:
+ if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
+ res && (res != -ETIME)) {
+ amdgpu_device_halt(adev);
+ WARN_ON(1);
+ }
+
return res;
}
@@ -288,9 +295,18 @@ Out:
int smu_cmn_wait_for_response(struct smu_context *smu)
{
u32 reg;
+ int res;
reg = __smu_cmn_poll_stat(smu);
- return __smu_cmn_reg2errno(smu, reg);
+ res = __smu_cmn_reg2errno(smu, reg);
+
+ if (unlikely(smu->adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
+ res && (res != -ETIME)) {
+ amdgpu_device_halt(smu->adev);
+ WARN_ON(1);
+ }
+
+ return res;
}
/**
@@ -328,10 +344,11 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
uint32_t param,
uint32_t *read_arg)
{
+ struct amdgpu_device *adev = smu->adev;
int res, index;
u32 reg;
- if (smu->adev->no_hw_access)
+ if (adev->no_hw_access)
return 0;
index = smu_cmn_to_asic_specific_index(smu,
@@ -352,11 +369,16 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
__smu_cmn_send_msg(smu, (uint16_t) index, param);
reg = __smu_cmn_poll_stat(smu);
res = __smu_cmn_reg2errno(smu, reg);
- if (res == -EREMOTEIO)
+ if (res != 0)
__smu_cmn_reg_print_error(smu, reg, index, param, msg);
if (read_arg)
smu_cmn_read_arg(smu, read_arg);
Out:
+ if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res) {
+ amdgpu_device_halt(adev);
+ WARN_ON(1);
+ }
+
mutex_unlock(&smu->message_lock);
return res;
}
diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig
index 3a9e966e0e78..58a242871b28 100644
--- a/drivers/gpu/drm/arm/Kconfig
+++ b/drivers/gpu/drm/arm/Kconfig
@@ -6,7 +6,6 @@ config DRM_HDLCD
depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST)
depends on COMMON_CLK
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
help
Choose this option if you have an ARM High Definition Colour LCD
controller.
@@ -27,7 +26,6 @@ config DRM_MALI_DISPLAY
depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST)
depends on COMMON_CLK
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select VIDEOMODE_HELPERS
help
diff --git a/drivers/gpu/drm/arm/display/Kconfig b/drivers/gpu/drm/arm/display/Kconfig
index cec0639e3aa1..e91598b60781 100644
--- a/drivers/gpu/drm/arm/display/Kconfig
+++ b/drivers/gpu/drm/arm/display/Kconfig
@@ -4,7 +4,6 @@ config DRM_KOMEDA
depends on DRM && OF
depends on COMMON_CLK
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select VIDEOMODE_HELPERS
help
diff --git a/drivers/gpu/drm/aspeed/Kconfig b/drivers/gpu/drm/aspeed/Kconfig
index 5e95bcea43e9..024ccab14f88 100644
--- a/drivers/gpu/drm/aspeed/Kconfig
+++ b/drivers/gpu/drm/aspeed/Kconfig
@@ -5,7 +5,7 @@ config DRM_ASPEED_GFX
depends on (COMPILE_TEST || ARCH_ASPEED)
depends on MMU
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
select DMA_CMA if HAVE_DMA_CONTIGUOUS
select CMA if HAVE_DMA_CONTIGUOUS
select MFD_SYSCON
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index b53fee6f1c17..65f172807a0d 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -291,7 +291,7 @@ vga_pw_show(struct device *dev, struct device_attribute *attr, char *buf)
if (rc)
return rc;
- return sprintf(buf, "%u\n", reg & 1);
+ return sprintf(buf, "%u\n", reg);
}
static DEVICE_ATTR_RO(vga_pw);
diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile
index 438a2d05b115..21f71160bc3e 100644
--- a/drivers/gpu/drm/ast/Makefile
+++ b/drivers/gpu/drm/ast/Makefile
@@ -3,6 +3,6 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-ast-y := ast_drv.o ast_main.o ast_mm.o ast_mode.o ast_post.o ast_dp501.o
+ast-y := ast_drv.o ast_i2c.o ast_main.o ast_mm.o ast_mode.o ast_post.o ast_dp501.o
obj-$(CONFIG_DRM_AST) := ast.o
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 2cfce7dc95af..00bfa41ff7cb 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -357,4 +357,7 @@ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata);
u8 ast_get_dp501_max_clk(struct drm_device *dev);
void ast_init_3rdtx(struct drm_device *dev);
+/* ast_i2c.c */
+struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev);
+
#endif
diff --git a/drivers/gpu/drm/ast/ast_i2c.c b/drivers/gpu/drm/ast/ast_i2c.c
new file mode 100644
index 000000000000..93e91c36d649
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_i2c.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+
+#include "ast_drv.h"
+
+static void ast_i2c_setsda(void *i2c_priv, int data)
+{
+ struct ast_i2c_chan *i2c = i2c_priv;
+ struct ast_private *ast = to_ast_private(i2c->dev);
+ int i;
+ u8 ujcrb7, jtemp;
+
+ for (i = 0; i < 0x10000; i++) {
+ ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf1, ujcrb7);
+ jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04);
+ if (ujcrb7 == jtemp)
+ break;
+ }
+}
+
+static void ast_i2c_setscl(void *i2c_priv, int clock)
+{
+ struct ast_i2c_chan *i2c = i2c_priv;
+ struct ast_private *ast = to_ast_private(i2c->dev);
+ int i;
+ u8 ujcrb7, jtemp;
+
+ for (i = 0; i < 0x10000; i++) {
+ ujcrb7 = ((clock & 0x01) ? 0 : 1);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf4, ujcrb7);
+ jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01);
+ if (ujcrb7 == jtemp)
+ break;
+ }
+}
+
+static int ast_i2c_getsda(void *i2c_priv)
+{
+ struct ast_i2c_chan *i2c = i2c_priv;
+ struct ast_private *ast = to_ast_private(i2c->dev);
+ uint32_t val, val2, count, pass;
+
+ count = 0;
+ pass = 0;
+ val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
+ do {
+ val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
+ if (val == val2) {
+ pass++;
+ } else {
+ pass = 0;
+ val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
+ }
+ } while ((pass < 5) && (count++ < 0x10000));
+
+ return val & 1 ? 1 : 0;
+}
+
+static int ast_i2c_getscl(void *i2c_priv)
+{
+ struct ast_i2c_chan *i2c = i2c_priv;
+ struct ast_private *ast = to_ast_private(i2c->dev);
+ uint32_t val, val2, count, pass;
+
+ count = 0;
+ pass = 0;
+ val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
+ do {
+ val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
+ if (val == val2) {
+ pass++;
+ } else {
+ pass = 0;
+ val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
+ }
+ } while ((pass < 5) && (count++ < 0x10000));
+
+ return val & 1 ? 1 : 0;
+}
+
+static void ast_i2c_release(struct drm_device *dev, void *res)
+{
+ struct ast_i2c_chan *i2c = res;
+
+ i2c_del_adapter(&i2c->adapter);
+ kfree(i2c);
+}
+
+struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev)
+{
+ struct ast_i2c_chan *i2c;
+ int ret;
+
+ i2c = kzalloc(sizeof(struct ast_i2c_chan), GFP_KERNEL);
+ if (!i2c)
+ return NULL;
+
+ i2c->adapter.owner = THIS_MODULE;
+ i2c->adapter.class = I2C_CLASS_DDC;
+ i2c->adapter.dev.parent = dev->dev;
+ i2c->dev = dev;
+ i2c_set_adapdata(&i2c->adapter, i2c);
+ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+ "AST i2c bit bus");
+ i2c->adapter.algo_data = &i2c->bit;
+
+ i2c->bit.udelay = 20;
+ i2c->bit.timeout = 2;
+ i2c->bit.data = i2c;
+ i2c->bit.setsda = ast_i2c_setsda;
+ i2c->bit.setscl = ast_i2c_setscl;
+ i2c->bit.getsda = ast_i2c_getsda;
+ i2c->bit.getscl = ast_i2c_getscl;
+ ret = i2c_bit_add_bus(&i2c->adapter);
+ if (ret) {
+ drm_err(dev, "Failed to register bit i2c\n");
+ goto out_kfree;
+ }
+
+ ret = drmm_add_action_or_reset(dev, ast_i2c_release, i2c);
+ if (ret)
+ return NULL;
+ return i2c;
+
+out_kfree:
+ kfree(i2c);
+ return NULL;
+}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 1e30eaeb0e1b..956c8982192b 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -47,9 +47,6 @@
#include "ast_drv.h"
#include "ast_tables.h"
-static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev);
-static void ast_i2c_destroy(struct ast_i2c_chan *i2c);
-
static inline void ast_load_palette_index(struct ast_private *ast,
u8 index, u8 red, u8 green,
u8 blue)
@@ -1121,7 +1118,10 @@ static void ast_crtc_reset(struct drm_crtc *crtc)
if (crtc->state)
crtc->funcs->atomic_destroy_state(crtc, crtc->state);
- __drm_atomic_helper_crtc_reset(crtc, &ast_state->base);
+ if (ast_state)
+ __drm_atomic_helper_crtc_reset(crtc, &ast_state->base);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
}
static struct drm_crtc_state *
@@ -1210,9 +1210,9 @@ static int ast_get_modes(struct drm_connector *connector)
{
struct ast_connector *ast_connector = to_ast_connector(connector);
struct ast_private *ast = to_ast_private(connector->dev);
- struct edid *edid;
- int ret;
+ struct edid *edid = NULL;
bool flags = false;
+ int ret;
if (ast->tx_chip_type == AST_TX_DP501) {
ast->dp501_maxclk = 0xff;
@@ -1226,7 +1226,7 @@ static int ast_get_modes(struct drm_connector *connector)
else
kfree(edid);
}
- if (!flags)
+ if (!flags && ast_connector->i2c)
edid = drm_get_edid(connector, &ast_connector->i2c->adapter);
if (edid) {
drm_connector_update_edid_property(&ast_connector->base, edid);
@@ -1300,14 +1300,6 @@ static enum drm_mode_status ast_mode_valid(struct drm_connector *connector,
return flags;
}
-static void ast_connector_destroy(struct drm_connector *connector)
-{
- struct ast_connector *ast_connector = to_ast_connector(connector);
-
- ast_i2c_destroy(ast_connector->i2c);
- drm_connector_cleanup(connector);
-}
-
static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
.get_modes = ast_get_modes,
.mode_valid = ast_mode_valid,
@@ -1316,7 +1308,7 @@ static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
static const struct drm_connector_funcs ast_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = ast_connector_destroy,
+ .destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
@@ -1332,10 +1324,13 @@ static int ast_connector_init(struct drm_device *dev)
if (!ast_connector->i2c)
drm_err(dev, "failed to add ddc bus for connector\n");
- drm_connector_init_with_ddc(dev, connector,
- &ast_connector_funcs,
- DRM_MODE_CONNECTOR_VGA,
- &ast_connector->i2c->adapter);
+ if (ast_connector->i2c)
+ drm_connector_init_with_ddc(dev, connector, &ast_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &ast_connector->i2c->adapter);
+ else
+ drm_connector_init(dev, connector, &ast_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA);
drm_connector_helper_add(connector, &ast_connector_helper_funcs);
@@ -1413,124 +1408,3 @@ int ast_mode_config_init(struct ast_private *ast)
return 0;
}
-
-static int get_clock(void *i2c_priv)
-{
- struct ast_i2c_chan *i2c = i2c_priv;
- struct ast_private *ast = to_ast_private(i2c->dev);
- uint32_t val, val2, count, pass;
-
- count = 0;
- pass = 0;
- val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
- do {
- val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
- if (val == val2) {
- pass++;
- } else {
- pass = 0;
- val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
- }
- } while ((pass < 5) && (count++ < 0x10000));
-
- return val & 1 ? 1 : 0;
-}
-
-static int get_data(void *i2c_priv)
-{
- struct ast_i2c_chan *i2c = i2c_priv;
- struct ast_private *ast = to_ast_private(i2c->dev);
- uint32_t val, val2, count, pass;
-
- count = 0;
- pass = 0;
- val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
- do {
- val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
- if (val == val2) {
- pass++;
- } else {
- pass = 0;
- val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
- }
- } while ((pass < 5) && (count++ < 0x10000));
-
- return val & 1 ? 1 : 0;
-}
-
-static void set_clock(void *i2c_priv, int clock)
-{
- struct ast_i2c_chan *i2c = i2c_priv;
- struct ast_private *ast = to_ast_private(i2c->dev);
- int i;
- u8 ujcrb7, jtemp;
-
- for (i = 0; i < 0x10000; i++) {
- ujcrb7 = ((clock & 0x01) ? 0 : 1);
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf4, ujcrb7);
- jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01);
- if (ujcrb7 == jtemp)
- break;
- }
-}
-
-static void set_data(void *i2c_priv, int data)
-{
- struct ast_i2c_chan *i2c = i2c_priv;
- struct ast_private *ast = to_ast_private(i2c->dev);
- int i;
- u8 ujcrb7, jtemp;
-
- for (i = 0; i < 0x10000; i++) {
- ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf1, ujcrb7);
- jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04);
- if (ujcrb7 == jtemp)
- break;
- }
-}
-
-static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev)
-{
- struct ast_i2c_chan *i2c;
- int ret;
-
- i2c = kzalloc(sizeof(struct ast_i2c_chan), GFP_KERNEL);
- if (!i2c)
- return NULL;
-
- i2c->adapter.owner = THIS_MODULE;
- i2c->adapter.class = I2C_CLASS_DDC;
- i2c->adapter.dev.parent = dev->dev;
- i2c->dev = dev;
- i2c_set_adapdata(&i2c->adapter, i2c);
- snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
- "AST i2c bit bus");
- i2c->adapter.algo_data = &i2c->bit;
-
- i2c->bit.udelay = 20;
- i2c->bit.timeout = 2;
- i2c->bit.data = i2c;
- i2c->bit.setsda = set_data;
- i2c->bit.setscl = set_clock;
- i2c->bit.getsda = get_data;
- i2c->bit.getscl = get_clock;
- ret = i2c_bit_add_bus(&i2c->adapter);
- if (ret) {
- drm_err(dev, "Failed to register bit i2c\n");
- goto out_free;
- }
-
- return i2c;
-out_free:
- kfree(i2c);
- return NULL;
-}
-
-static void ast_i2c_destroy(struct ast_i2c_chan *i2c)
-{
- if (!i2c)
- return;
- i2c_del_adapter(&i2c->adapter);
- kfree(i2c);
-}
diff --git a/drivers/gpu/drm/atmel-hlcdc/Kconfig b/drivers/gpu/drm/atmel-hlcdc/Kconfig
index 5f67f001553b..8ae679f1a518 100644
--- a/drivers/gpu/drm/atmel-hlcdc/Kconfig
+++ b/drivers/gpu/drm/atmel-hlcdc/Kconfig
@@ -4,7 +4,6 @@ config DRM_ATMEL_HLCDC
depends on DRM && OF && COMMON_CLK && MFD_ATMEL_HLCDC && ARM
select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_PANEL
help
Choose this option if you have an ATMEL SoC with an HLCDC display
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 2346dbcc505f..7b24213f7b13 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -1098,9 +1098,18 @@ static void anx7625_init_gpio(struct anx7625_data *platform)
/* Gpio for chip power enable */
platform->pdata.gpio_p_on =
devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR_OR_NULL(platform->pdata.gpio_p_on)) {
+ DRM_DEV_DEBUG_DRIVER(dev, "no enable gpio found\n");
+ platform->pdata.gpio_p_on = NULL;
+ }
+
/* Gpio for chip reset */
platform->pdata.gpio_reset =
devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR_OR_NULL(platform->pdata.gpio_reset)) {
+ DRM_DEV_DEBUG_DRIVER(dev, "no reset gpio found\n");
+ platform->pdata.gpio_reset = NULL;
+ }
if (platform->pdata.gpio_p_on && platform->pdata.gpio_reset) {
platform->pdata.low_power_mode = 1;
@@ -1962,40 +1971,54 @@ static const struct drm_bridge_funcs anx7625_bridge_funcs = {
static int anx7625_register_i2c_dummy_clients(struct anx7625_data *ctx,
struct i2c_client *client)
{
+ int err = 0;
+
ctx->i2c.tx_p0_client = i2c_new_dummy_device(client->adapter,
TX_P0_ADDR >> 1);
- if (!ctx->i2c.tx_p0_client)
- return -ENOMEM;
+ if (IS_ERR(ctx->i2c.tx_p0_client))
+ return PTR_ERR(ctx->i2c.tx_p0_client);
ctx->i2c.tx_p1_client = i2c_new_dummy_device(client->adapter,
TX_P1_ADDR >> 1);
- if (!ctx->i2c.tx_p1_client)
+ if (IS_ERR(ctx->i2c.tx_p1_client)) {
+ err = PTR_ERR(ctx->i2c.tx_p1_client);
goto free_tx_p0;
+ }
ctx->i2c.tx_p2_client = i2c_new_dummy_device(client->adapter,
TX_P2_ADDR >> 1);
- if (!ctx->i2c.tx_p2_client)
+ if (IS_ERR(ctx->i2c.tx_p2_client)) {
+ err = PTR_ERR(ctx->i2c.tx_p2_client);
goto free_tx_p1;
+ }
ctx->i2c.rx_p0_client = i2c_new_dummy_device(client->adapter,
RX_P0_ADDR >> 1);
- if (!ctx->i2c.rx_p0_client)
+ if (IS_ERR(ctx->i2c.rx_p0_client)) {
+ err = PTR_ERR(ctx->i2c.rx_p0_client);
goto free_tx_p2;
+ }
ctx->i2c.rx_p1_client = i2c_new_dummy_device(client->adapter,
RX_P1_ADDR >> 1);
- if (!ctx->i2c.rx_p1_client)
+ if (IS_ERR(ctx->i2c.rx_p1_client)) {
+ err = PTR_ERR(ctx->i2c.rx_p1_client);
goto free_rx_p0;
+ }
ctx->i2c.rx_p2_client = i2c_new_dummy_device(client->adapter,
RX_P2_ADDR >> 1);
- if (!ctx->i2c.rx_p2_client)
+ if (IS_ERR(ctx->i2c.rx_p2_client)) {
+ err = PTR_ERR(ctx->i2c.rx_p2_client);
goto free_rx_p1;
+ }
ctx->i2c.tcpc_client = i2c_new_dummy_device(client->adapter,
TCPC_INTERFACE_ADDR >> 1);
- if (!ctx->i2c.tcpc_client)
+ if (IS_ERR(ctx->i2c.tcpc_client)) {
+ err = PTR_ERR(ctx->i2c.tcpc_client);
goto free_rx_p2;
+ }
return 0;
@@ -2012,7 +2035,7 @@ free_tx_p1:
free_tx_p0:
i2c_unregister_device(ctx->i2c.tx_p0_client);
- return -ENOMEM;
+ return err;
}
static void anx7625_unregister_i2c_dummy_clients(struct anx7625_data *ctx)
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index a6151db95586..e8f36dca56b3 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -4,6 +4,7 @@
* Author: Jagan Teki <jagan@amarulasolutions.com>
*/
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_mipi_dsi.h>
@@ -30,6 +31,7 @@
struct chipone {
struct device *dev;
struct drm_bridge bridge;
+ struct drm_display_mode mode;
struct drm_bridge *panel_bridge;
struct gpio_desc *enable_gpio;
struct regulator *vdd1;
@@ -42,11 +44,6 @@ static inline struct chipone *bridge_to_chipone(struct drm_bridge *bridge)
return container_of(bridge, struct chipone, bridge);
}
-static struct drm_display_mode *bridge_to_mode(struct drm_bridge *bridge)
-{
- return &bridge->encoder->crtc->state->adjusted_mode;
-}
-
static inline int chipone_dsi_write(struct chipone *icn, const void *seq,
size_t len)
{
@@ -61,10 +58,11 @@ static inline int chipone_dsi_write(struct chipone *icn, const void *seq,
chipone_dsi_write(icn, d, ARRAY_SIZE(d)); \
}
-static void chipone_enable(struct drm_bridge *bridge)
+static void chipone_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct chipone *icn = bridge_to_chipone(bridge);
- struct drm_display_mode *mode = bridge_to_mode(bridge);
+ struct drm_display_mode *mode = &icn->mode;
ICN6211_DSI(icn, 0x7a, 0xc1);
@@ -114,7 +112,8 @@ static void chipone_enable(struct drm_bridge *bridge)
usleep_range(10000, 11000);
}
-static void chipone_pre_enable(struct drm_bridge *bridge)
+static void chipone_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct chipone *icn = bridge_to_chipone(bridge);
int ret;
@@ -145,7 +144,8 @@ static void chipone_pre_enable(struct drm_bridge *bridge)
usleep_range(10000, 11000);
}
-static void chipone_post_disable(struct drm_bridge *bridge)
+static void chipone_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct chipone *icn = bridge_to_chipone(bridge);
@@ -161,6 +161,15 @@ static void chipone_post_disable(struct drm_bridge *bridge)
gpiod_set_value(icn->enable_gpio, 0);
}
+static void chipone_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct chipone *icn = bridge_to_chipone(bridge);
+
+ drm_mode_copy(&icn->mode, adjusted_mode);
+}
+
static int chipone_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags)
{
struct chipone *icn = bridge_to_chipone(bridge);
@@ -169,10 +178,14 @@ static int chipone_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flag
}
static const struct drm_bridge_funcs chipone_bridge_funcs = {
- .attach = chipone_attach,
- .post_disable = chipone_post_disable,
- .pre_enable = chipone_pre_enable,
- .enable = chipone_enable,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_pre_enable = chipone_atomic_pre_enable,
+ .atomic_enable = chipone_atomic_enable,
+ .atomic_post_disable = chipone_atomic_post_disable,
+ .mode_set = chipone_mode_set,
+ .attach = chipone_attach,
};
static int chipone_parse_dt(struct chipone *icn)
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index d2f45a0f79c8..dafb1b47c15f 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -586,7 +586,7 @@ lt9611_connector_detect(struct drm_connector *connector, bool force)
int connected = 0;
regmap_read(lt9611->regmap, 0x825e, &reg_val);
- connected = (reg_val & BIT(2));
+ connected = (reg_val & BIT(0));
lt9611->status = connected ? connector_status_connected :
connector_status_disconnected;
@@ -892,7 +892,7 @@ static enum drm_connector_status lt9611_bridge_detect(struct drm_bridge *bridge)
int connected;
regmap_read(lt9611->regmap, 0x825e, &reg_val);
- connected = reg_val & BIT(2);
+ connected = reg_val & BIT(0);
lt9611->status = connected ? connector_status_connected :
connector_status_disconnected;
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
index f991842a161f..702ea803a743 100644
--- a/drivers/gpu/drm/bridge/lvds-codec.c
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -21,6 +21,7 @@ struct lvds_codec {
struct device *dev;
struct drm_bridge bridge;
struct drm_bridge *panel_bridge;
+ struct drm_bridge_timings timings;
struct regulator *vcc;
struct gpio_desc *powerdown_gpio;
u32 connector_type;
@@ -119,6 +120,7 @@ static int lvds_codec_probe(struct platform_device *pdev)
struct device_node *bus_node;
struct drm_panel *panel;
struct lvds_codec *lvds_codec;
+ u32 val;
int ret;
lvds_codec = devm_kzalloc(dev, sizeof(*lvds_codec), GFP_KERNEL);
@@ -188,11 +190,24 @@ static int lvds_codec_probe(struct platform_device *pdev)
}
/*
+ * The encoder might sample data on a different clock edge than the display;
+ * for example, the OnSemi FIN3385 has a dedicated strapping pin to select
+ * the sampling edge.
+ */
+ if (lvds_codec->connector_type == DRM_MODE_CONNECTOR_LVDS &&
+ !of_property_read_u32(dev->of_node, "pclk-sample", &val)) {
+ lvds_codec->timings.input_bus_flags = val ?
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE :
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE;
+ }
+
+ /*
* The panel_bridge bridge is attached to the panel's of_node,
* but we need a bridge attached to our of_node for our user
* to look up.
*/
lvds_codec->bridge.of_node = dev->of_node;
+ lvds_codec->bridge.timings = &lvds_codec->timings;
drm_bridge_add(&lvds_codec->bridge);
platform_set_drvdata(pdev, lvds_codec);
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index a7389a0facfb..fc3ad9fab867 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -1206,6 +1206,7 @@ static int nwl_dsi_probe(struct platform_device *pdev)
ret = nwl_dsi_select_input(dsi);
if (ret < 0) {
+ pm_runtime_disable(dev);
mipi_dsi_host_unregister(&dsi->dsi_host);
return ret;
}
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index 26898042ba3d..54723f068884 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -102,6 +102,7 @@ struct ps8640 {
struct regulator_bulk_data supplies[2];
struct gpio_desc *gpio_reset;
struct gpio_desc *gpio_powerdown;
+ struct device_link *link;
bool pre_enabled;
};
@@ -449,20 +450,43 @@ static int ps8640_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
+ ps_bridge->aux.drm_dev = bridge->dev;
ret = drm_dp_aux_register(&ps_bridge->aux);
if (ret) {
dev_err(dev, "failed to register DP AUX channel: %d\n", ret);
return ret;
}
+ ps_bridge->link = device_link_add(bridge->dev->dev, dev, DL_FLAG_STATELESS);
+ if (!ps_bridge->link) {
+ dev_err(dev, "failed to create device link");
+ ret = -EINVAL;
+ goto err_devlink;
+ }
+
/* Attach the panel-bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, ps_bridge->panel_bridge,
- &ps_bridge->bridge, flags);
+ ret = drm_bridge_attach(bridge->encoder, ps_bridge->panel_bridge,
+ &ps_bridge->bridge, flags);
+ if (ret)
+ goto err_bridge_attach;
+
+ return 0;
+
+err_bridge_attach:
+ device_link_del(ps_bridge->link);
+err_devlink:
+ drm_dp_aux_unregister(&ps_bridge->aux);
+
+ return ret;
}
static void ps8640_bridge_detach(struct drm_bridge *bridge)
{
- drm_dp_aux_unregister(&bridge_to_ps8640(bridge)->aux);
+ struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+
+ drm_dp_aux_unregister(&ps_bridge->aux);
+ if (ps_bridge->link)
+ device_link_del(ps_bridge->link);
}
static struct edid *ps8640_bridge_get_edid(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 843265d7f1b1..ec7745c31da0 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -2120,7 +2120,7 @@ static void sii8620_init_rcp_input_dev(struct sii8620 *ctx)
if (ret) {
dev_err(ctx->dev, "Failed to register RC device\n");
ctx->error = ret;
- rc_free_device(ctx->rc_dev);
+ rc_free_device(rc_dev);
return;
}
ctx->rc_dev = rc_dev;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index e44e18a0112a..2a58b0b7ace5 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -998,7 +998,10 @@ dw_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge,
enum drm_mode_status mode_status = MODE_OK;
if (pdata->mode_valid)
- mode_status = pdata->mode_valid(pdata->priv_data, mode);
+ mode_status = pdata->mode_valid(pdata->priv_data, mode,
+ dsi->mode_flags,
+ dw_mipi_dsi_get_lanes(dsi),
+ dsi->format);
return mode_status;
}
@@ -1199,6 +1202,7 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
ret = mipi_dsi_host_register(&dsi->dsi_host);
if (ret) {
dev_err(dev, "Failed to register MIPI host: %d\n", ret);
+ pm_runtime_disable(dev);
dw_mipi_dsi_debugfs_remove(dsi);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index 945f08de45f1..19daaddd29a4 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -33,6 +33,7 @@
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
@@ -143,6 +144,7 @@ struct sn65dsi83 {
struct mipi_dsi_device *dsi;
struct drm_bridge *panel_bridge;
struct gpio_desc *enable_gpio;
+ struct regulator *vcc;
int dsi_lanes;
bool lvds_dual_link;
bool lvds_dual_link_even_odd_swap;
@@ -337,6 +339,12 @@ static void sn65dsi83_atomic_enable(struct drm_bridge *bridge,
u16 val;
int ret;
+ ret = regulator_enable(ctx->vcc);
+ if (ret) {
+ dev_err(ctx->dev, "Failed to enable vcc: %d\n", ret);
+ return;
+ }
+
/* Deassert reset */
gpiod_set_value(ctx->enable_gpio, 1);
usleep_range(1000, 1100);
@@ -486,11 +494,16 @@ static void sn65dsi83_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+ int ret;
/* Put the chip in reset, pull EN line low, and assure 10ms reset low timing. */
gpiod_set_value(ctx->enable_gpio, 0);
usleep_range(10000, 11000);
+ ret = regulator_disable(ctx->vcc);
+ if (ret)
+ dev_err(ctx->dev, "Failed to disable vcc: %d\n", ret);
+
regcache_mark_dirty(ctx->regmap);
}
@@ -560,10 +573,14 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
ctx->host_node = of_graph_get_remote_port_parent(endpoint);
of_node_put(endpoint);
- if (ctx->dsi_lanes < 0 || ctx->dsi_lanes > 4)
- return -EINVAL;
- if (!ctx->host_node)
- return -ENODEV;
+ if (ctx->dsi_lanes < 0 || ctx->dsi_lanes > 4) {
+ ret = -EINVAL;
+ goto err_put_node;
+ }
+ if (!ctx->host_node) {
+ ret = -ENODEV;
+ goto err_put_node;
+ }
ctx->lvds_dual_link = false;
ctx->lvds_dual_link_even_odd_swap = false;
@@ -590,16 +607,27 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &panel, &panel_bridge);
if (ret < 0)
- return ret;
+ goto err_put_node;
if (panel) {
panel_bridge = devm_drm_panel_bridge_add(dev, panel);
- if (IS_ERR(panel_bridge))
- return PTR_ERR(panel_bridge);
+ if (IS_ERR(panel_bridge)) {
+ ret = PTR_ERR(panel_bridge);
+ goto err_put_node;
+ }
}
ctx->panel_bridge = panel_bridge;
+ ctx->vcc = devm_regulator_get(dev, "vcc");
+ if (IS_ERR(ctx->vcc))
+ return dev_err_probe(dev, PTR_ERR(ctx->vcc),
+ "Failed to get supply 'vcc'\n");
+
return 0;
+
+err_put_node:
+ of_node_put(ctx->host_node);
+ return ret;
}
static int sn65dsi83_host_attach(struct sn65dsi83 *ctx)
@@ -662,7 +690,8 @@ static int sn65dsi83_probe(struct i2c_client *client,
}
/* Put the chip in reset, pull EN line low, and assure 10ms reset low timing. */
- ctx->enable_gpio = devm_gpiod_get(ctx->dev, "enable", GPIOD_OUT_LOW);
+ ctx->enable_gpio = devm_gpiod_get_optional(ctx->dev, "enable",
+ GPIOD_OUT_LOW);
if (IS_ERR(ctx->enable_gpio))
return PTR_ERR(ctx->enable_gpio);
@@ -673,8 +702,10 @@ static int sn65dsi83_probe(struct i2c_client *client,
return ret;
ctx->regmap = devm_regmap_init_i2c(client, &sn65dsi83_regmap_config);
- if (IS_ERR(ctx->regmap))
- return PTR_ERR(ctx->regmap);
+ if (IS_ERR(ctx->regmap)) {
+ ret = PTR_ERR(ctx->regmap);
+ goto err_put_node;
+ }
dev_set_drvdata(dev, ctx);
i2c_set_clientdata(client, ctx);
@@ -691,6 +722,8 @@ static int sn65dsi83_probe(struct i2c_client *client,
err_remove_bridge:
drm_bridge_remove(&ctx->bridge);
+err_put_node:
+ of_node_put(ctx->host_node);
return ret;
}
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 02b490671f8f..dab8f76618f3 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -213,6 +213,7 @@ static const struct regmap_config ti_sn65dsi86_regmap_config = {
.val_bits = 8,
.volatile_table = &ti_sn_bridge_volatile_table,
.cache_type = REGCACHE_NONE,
+ .max_register = 0xFF,
};
static int __maybe_unused ti_sn65dsi86_read_u16(struct ti_sn65dsi86 *pdata,
@@ -704,7 +705,7 @@ static struct ti_sn65dsi86 *bridge_to_ti_sn65dsi86(struct drm_bridge *bridge)
static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)
{
- int ret, val;
+ int val;
struct mipi_dsi_host *host;
struct mipi_dsi_device *dsi;
struct device *dev = pdata->dev;
@@ -714,16 +715,12 @@ static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)
};
host = of_find_mipi_dsi_host_by_node(pdata->host_node);
- if (!host) {
- DRM_ERROR("failed to find dsi host\n");
- return -ENODEV;
- }
+ if (!host)
+ return -EPROBE_DEFER;
dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
- if (IS_ERR(dsi)) {
- DRM_ERROR("failed to create dsi device\n");
+ if (IS_ERR(dsi))
return PTR_ERR(dsi);
- }
/* TODO: setting to 4 MIPI lanes always for now */
dsi->lanes = 4;
@@ -739,13 +736,7 @@ static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)
pdata->dsi = dsi;
- ret = devm_mipi_dsi_attach(dev, dsi);
- if (ret < 0) {
- DRM_ERROR("failed to attach dsi to host\n");
- return ret;
- }
-
- return 0;
+ return devm_mipi_dsi_attach(dev, dsi);
}
static int ti_sn_bridge_attach(struct drm_bridge *bridge,
@@ -1267,8 +1258,10 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
drm_bridge_add(&pdata->bridge);
ret = ti_sn_attach_host(pdata);
- if (ret)
+ if (ret) {
+ dev_err_probe(pdata->dev, ret, "failed to attach dsi host\n");
goto err_remove_bridge;
+ }
return 0;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index aef2fbd676e5..a7a05e1e26bb 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -828,8 +828,8 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
}
if (!crtc_state->enable && !can_update_disabled) {
- drm_dbg_kms(plane_state->crtc->dev,
- "Cannot update plane of a disabled CRTC.\n");
+ drm_dbg_kms(plane_state->plane->dev,
+ "Cannot update plane of a disabled CRTC.\n");
return -EINVAL;
}
@@ -839,8 +839,8 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
if (hscale < 0 || vscale < 0) {
- drm_dbg_kms(plane_state->crtc->dev,
- "Invalid scaling of plane\n");
+ drm_dbg_kms(plane_state->plane->dev,
+ "Invalid scaling of plane\n");
drm_rect_debug_print("src: ", &plane_state->src, true);
drm_rect_debug_print("dst: ", &plane_state->dst, false);
return -ERANGE;
@@ -864,8 +864,8 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
return 0;
if (!can_position && !drm_rect_equals(dst, &clip)) {
- drm_dbg_kms(plane_state->crtc->dev,
- "Plane must cover entire CRTC\n");
+ drm_dbg_kms(plane_state->plane->dev,
+ "Plane must cover entire CRTC\n");
drm_rect_debug_print("dst: ", dst, false);
drm_rect_debug_print("clip: ", &clip, false);
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 9727a59d35fd..ed43b987d306 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1743,7 +1743,13 @@ void drm_fb_helper_fill_info(struct fb_info *info,
sizes->fb_width, sizes->fb_height);
info->par = fb_helper;
- snprintf(info->fix.id, sizeof(info->fix.id), "%s",
+ /*
+ * The DRM drivers fbdev emulation device name can be confusing if the
+ * driver name also has a "drm" suffix on it, leading to names such as
+ * "simpledrmdrmfb" in /proc/fb. Unfortunately, it's an uAPI and can't
+ * be changed due to user-space tools (e.g. pm-utils) matching against it.
+ */
+ snprintf(info->fix.id, sizeof(info->fix.id), "%sdrmfb",
fb_helper->dev->driver->name);
}
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index dbe3e830096e..0f28dd2bdd72 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -409,6 +409,61 @@ void drm_fb_xrgb8888_to_rgb888_toio(void __iomem *dst, unsigned int dst_pitch,
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888_toio);
+static void drm_fb_xrgb8888_to_xrgb2101010_line(u32 *dbuf, const u32 *sbuf,
+ unsigned int pixels)
+{
+ unsigned int x;
+ u32 val32;
+
+ for (x = 0; x < pixels; x++) {
+ val32 = ((sbuf[x] & 0x000000FF) << 2) |
+ ((sbuf[x] & 0x0000FF00) << 4) |
+ ((sbuf[x] & 0x00FF0000) << 6);
+ *dbuf++ = val32 | ((val32 >> 8) & 0x00300C03);
+ }
+}
+
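A worked example of the per-pixel expansion above: each 8-bit channel is shifted to the top of its 10-bit slot and its two most significant bits are replicated into the two new low bits (the 0x00300C03 mask selects exactly those replica positions). A sketch assuming the usual little-endian XRGB8888/XRGB2101010 layouts:

	/* Sketch: expand one pixel; opaque white 0x00ffffff becomes 0x3fffffff. */
	static u32 xrgb8888_to_xrgb2101010_pixel_example(u32 src)
	{
		u32 val = ((src & 0x000000FF) << 2) |	/* B -> bits 9:2 */
			  ((src & 0x0000FF00) << 4) |	/* G -> bits 19:12 */
			  ((src & 0x00FF0000) << 6);	/* R -> bits 29:22 */

		return val | ((val >> 8) & 0x00300C03);	/* replicate each channel's top 2 bits */
	}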
+/**
+ * drm_fb_xrgb8888_to_xrgb2101010_toio - Convert XRGB8888 to XRGB2101010 clip
+ * buffer
+ * @dst: XRGB2101010 destination buffer (iomem)
+ * @dst_pitch: Number of bytes between two consecutive scanlines within dst
+ * @vaddr: XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * Drivers can use this function for XRGB2101010 devices that don't natively
+ * support XRGB8888.
+ */
+void drm_fb_xrgb8888_to_xrgb2101010_toio(void __iomem *dst,
+ unsigned int dst_pitch, const void *vaddr,
+ const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
+{
+ size_t linepixels = clip->x2 - clip->x1;
+ size_t dst_len = linepixels * sizeof(u32);
+ unsigned int y, lines = clip->y2 - clip->y1;
+ void *dbuf;
+
+ if (!dst_pitch)
+ dst_pitch = dst_len;
+
+ dbuf = kmalloc(dst_len, GFP_KERNEL);
+ if (!dbuf)
+ return;
+
+ vaddr += clip_offset(clip, fb->pitches[0], sizeof(u32));
+ for (y = 0; y < lines; y++) {
+ drm_fb_xrgb8888_to_xrgb2101010_line(dbuf, vaddr, linepixels);
+ memcpy_toio(dst, dbuf, dst_len);
+ vaddr += fb->pitches[0];
+ dst += dst_pitch;
+ }
+
+ kfree(dbuf);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010_toio);
+
/**
* drm_fb_xrgb8888_to_gray8 - Convert XRGB8888 to grayscale
* @dst: 8-bit grayscale destination buffer
@@ -500,6 +555,10 @@ int drm_fb_blit_toio(void __iomem *dst, unsigned int dst_pitch, uint32_t dst_for
fb_format = DRM_FORMAT_XRGB8888;
if (dst_format == DRM_FORMAT_ARGB8888)
dst_format = DRM_FORMAT_XRGB8888;
+ if (fb_format == DRM_FORMAT_ARGB2101010)
+ fb_format = DRM_FORMAT_XRGB2101010;
+ if (dst_format == DRM_FORMAT_ARGB2101010)
+ dst_format = DRM_FORMAT_XRGB2101010;
if (dst_format == fb_format) {
drm_fb_memcpy_toio(dst, dst_pitch, vmap, fb, clip);
@@ -515,6 +574,11 @@ int drm_fb_blit_toio(void __iomem *dst, unsigned int dst_pitch, uint32_t dst_for
drm_fb_xrgb8888_to_rgb888_toio(dst, dst_pitch, vmap, fb, clip);
return 0;
}
+ } else if (dst_format == DRM_FORMAT_XRGB2101010) {
+ if (fb_format == DRM_FORMAT_XRGB8888) {
+ drm_fb_xrgb8888_to_xrgb2101010_toio(dst, dst_pitch, vmap, fb, clip);
+ return 0;
+ }
}
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 25837b1d6639..07741b678798 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -269,6 +269,9 @@ const struct drm_format_info *__drm_format_info(u32 format)
.num_planes = 3, .char_per_block = { 2, 2, 2 },
.block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 0,
.vsub = 0, .is_yuv = true },
+ { .format = DRM_FORMAT_P030, .depth = 0, .num_planes = 2,
+ .char_per_block = { 4, 8, 0 }, .block_w = { 3, 3, 0 }, .block_h = { 1, 1, 0 },
+ .hsub = 2, .vsub = 2, .is_yuv = true},
};
unsigned int i;
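The new P030 entry describes a two-plane format in which each block of 3 horizontal samples occupies 4 bytes in the luma plane and 8 bytes in the subsampled CbCr plane; a small sketch of the resulting minimum luma pitch (hypothetical helper, not part of the patch):

	/* Sketch: minimum luma-plane pitch for a P030 framebuffer of a given width. */
	static unsigned int p030_luma_pitch_example(unsigned int width)
	{
		/* 3 samples per 4-byte block, per char_per_block/block_w above */
		return DIV_ROUND_UP(width, 3) * 4;
	}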
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 09e2cb80de08..cefd0cbf9deb 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -32,14 +32,18 @@
* The DRM GEM/CMA helpers use this allocator as a means to provide buffer
* objects that are physically contiguous in memory. This is useful for
* display drivers that are unable to map scattered buffers via an IOMMU.
+ *
+ * For GEM callback helpers in struct &drm_gem_object functions, see likewise
+ * named functions with an _object_ infix (e.g., drm_gem_cma_object_vmap() wraps
+ * drm_gem_cma_vmap()). These helpers perform the necessary type conversion.
*/
static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = {
- .free = drm_gem_cma_free_object,
- .print_info = drm_gem_cma_print_info,
- .get_sg_table = drm_gem_cma_get_sg_table,
- .vmap = drm_gem_cma_vmap,
- .mmap = drm_gem_cma_mmap,
+ .free = drm_gem_cma_object_free,
+ .print_info = drm_gem_cma_object_print_info,
+ .get_sg_table = drm_gem_cma_object_get_sg_table,
+ .vmap = drm_gem_cma_object_vmap,
+ .mmap = drm_gem_cma_object_mmap,
.vm_ops = &drm_gem_cma_vm_ops,
};
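The _object_-infixed callbacks referenced in the comment and funcs table above follow a simple type-converting pattern; a minimal, illustrative sketch (not the actual helper definitions):

	/* Sketch: resolve the CMA object from the GEM object and forward to the plain helper. */
	static int drm_gem_cma_object_vmap_example(struct drm_gem_object *obj,
						   struct dma_buf_map *map)
	{
		struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);

		return drm_gem_cma_vmap(cma_obj, map);
	}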
@@ -63,18 +67,21 @@ __drm_gem_cma_create(struct drm_device *drm, size_t size, bool private)
struct drm_gem_object *gem_obj;
int ret = 0;
- if (drm->driver->gem_create_object)
+ if (drm->driver->gem_create_object) {
gem_obj = drm->driver->gem_create_object(drm, size);
- else
- gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
- if (!gem_obj)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(gem_obj))
+ return ERR_CAST(gem_obj);
+ cma_obj = to_drm_gem_cma_obj(gem_obj);
+ } else {
+ cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
+ if (!cma_obj)
+ return ERR_PTR(-ENOMEM);
+ gem_obj = &cma_obj->base;
+ }
if (!gem_obj->funcs)
gem_obj->funcs = &drm_gem_cma_default_funcs;
- cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base);
-
if (private) {
drm_gem_private_object_init(drm, gem_obj, size);
@@ -192,18 +199,16 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
}
/**
- * drm_gem_cma_free_object - free resources associated with a CMA GEM object
- * @gem_obj: GEM object to free
+ * drm_gem_cma_free - free resources associated with a CMA GEM object
+ * @cma_obj: CMA GEM object to free
*
* This function frees the backing memory of the CMA GEM object, cleans up the
* GEM object state and frees the memory used to store the object itself.
* If the buffer is imported and the virtual address is set, it is released.
- * Drivers using the CMA helpers should set this as their
- * &drm_gem_object_funcs.free callback.
*/
-void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
+void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj)
{
- struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem_obj);
+ struct drm_gem_object *gem_obj = &cma_obj->base;
struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(cma_obj->vaddr);
if (gem_obj->import_attach) {
@@ -224,7 +229,7 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
kfree(cma_obj);
}
-EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
+EXPORT_SYMBOL_GPL(drm_gem_cma_free);
/**
* drm_gem_cma_dumb_create_internal - create a dumb buffer object
@@ -371,18 +376,15 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_get_unmapped_area);
/**
* drm_gem_cma_print_info() - Print &drm_gem_cma_object info for debugfs
+ * @cma_obj: CMA GEM object
* @p: DRM printer
* @indent: Tab indentation level
- * @obj: GEM object
*
- * This function can be used as the &drm_driver->gem_print_info callback.
- * It prints paddr and vaddr for use in e.g. debugfs output.
+ * This function prints paddr and vaddr for use in e.g. debugfs output.
*/
-void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent,
- const struct drm_gem_object *obj)
+void drm_gem_cma_print_info(const struct drm_gem_cma_object *cma_obj,
+ struct drm_printer *p, unsigned int indent)
{
- const struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
-
drm_printf_indent(p, indent, "paddr=%pad\n", &cma_obj->paddr);
drm_printf_indent(p, indent, "vaddr=%p\n", cma_obj->vaddr);
}
@@ -391,18 +393,17 @@ EXPORT_SYMBOL(drm_gem_cma_print_info);
/**
* drm_gem_cma_get_sg_table - provide a scatter/gather table of pinned
* pages for a CMA GEM object
- * @obj: GEM object
+ * @cma_obj: CMA GEM object
*
- * This function exports a scatter/gather table by
- * calling the standard DMA mapping API. Drivers using the CMA helpers should
- * set this as their &drm_gem_object_funcs.get_sg_table callback.
+ * This function exports a scatter/gather table by calling the standard
+ * DMA mapping API.
*
* Returns:
* A pointer to the scatter/gather table of pinned pages or NULL on failure.
*/
-struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_object *obj)
+struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_cma_object *cma_obj)
{
- struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+ struct drm_gem_object *obj = &cma_obj->base;
struct sg_table *sgt;
int ret;
@@ -468,23 +469,19 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
/**
* drm_gem_cma_vmap - map a CMA GEM object into the kernel's virtual
* address space
- * @obj: GEM object
+ * @cma_obj: CMA GEM object
* @map: Returns the kernel virtual address of the CMA GEM object's backing
* store.
*
- * This function maps a buffer into the kernel's
- * virtual address space. Since the CMA buffers are already mapped into the
- * kernel virtual address space this simply returns the cached virtual
- * address. Drivers using the CMA helpers should set this as their DRM
- * driver's &drm_gem_object_funcs.vmap callback.
+ * This function maps a buffer into the kernel's virtual address space.
+ * Since the CMA buffers are already mapped into the kernel virtual address
+ * space this simply returns the cached virtual address.
*
* Returns:
* 0 on success, or a negative error code otherwise.
*/
-int drm_gem_cma_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
+int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj, struct dma_buf_map *map)
{
- struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
-
dma_buf_map_set_vaddr(map, cma_obj->vaddr);
return 0;
@@ -493,20 +490,19 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_vmap);
/**
* drm_gem_cma_mmap - memory-map an exported CMA GEM object
- * @obj: GEM object
+ * @cma_obj: CMA GEM object
* @vma: VMA for the area to be mapped
*
* This function maps a buffer into a userspace process's address space.
* In addition to the usual GEM VMA setup it immediately faults in the entire
- * object instead of using on-demand faulting. Drivers that use the CMA
- * helpers should set this as their &drm_gem_object_funcs.mmap callback.
+ * object instead of using on-demand faulting.
*
* Returns:
* 0 on success or a negative error code on failure.
*/
-int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *vma)
{
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_object *obj = &cma_obj->base;
int ret;
/*
@@ -517,8 +513,6 @@ int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
vma->vm_flags &= ~VM_PFNMAP;
- cma_obj = to_drm_gem_cma_obj(obj);
-
if (cma_obj->map_noncoherent) {
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 0eeda1012364..621924116eb4 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -10,6 +10,7 @@
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <linux/module.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
@@ -56,14 +57,17 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
size = PAGE_ALIGN(size);
- if (dev->driver->gem_create_object)
+ if (dev->driver->gem_create_object) {
obj = dev->driver->gem_create_object(dev, size);
- else
- obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
- if (!obj)
- return ERR_PTR(-ENOMEM);
-
- shmem = to_drm_gem_shmem_obj(obj);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+ shmem = to_drm_gem_shmem_obj(obj);
+ } else {
+ shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
+ if (!shmem)
+ return ERR_PTR(-ENOMEM);
+ obj = &shmem->base;
+ }
if (!obj->funcs)
obj->funcs = &drm_gem_shmem_funcs;
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index bfa386b98134..3f00192215d1 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -197,8 +197,8 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
if (dev->driver->gem_create_object) {
gem = dev->driver->gem_create_object(dev, size);
- if (!gem)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(gem))
+ return ERR_CAST(gem);
gbo = drm_gem_vram_of_gem(gem);
} else {
gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index c50fa6f0709f..60afa1865559 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -32,16 +32,16 @@
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
-#include <linux/export.h>
#include <linux/hash.h>
#include <linux/mm.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <drm/drm_hashtab.h>
#include <drm/drm_print.h>
+#include "drm_legacy.h"
+
int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
{
unsigned int size = 1 << order;
@@ -58,7 +58,6 @@ int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
}
return 0;
}
-EXPORT_SYMBOL(drm_ht_create);
void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
{
@@ -135,7 +134,6 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
}
return 0;
}
-EXPORT_SYMBOL(drm_ht_insert_item);
/*
* Just insert an item and return any "bits" bit key that hasn't been
@@ -164,7 +162,6 @@ int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *it
}
return 0;
}
-EXPORT_SYMBOL(drm_ht_just_insert_please);
int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
struct drm_hash_item **item)
@@ -178,7 +175,6 @@ int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
*item = hlist_entry(list, struct drm_hash_item, head);
return 0;
}
-EXPORT_SYMBOL(drm_ht_find_item);
int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
{
@@ -197,7 +193,6 @@ int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
hlist_del_init_rcu(&item->head);
return 0;
}
-EXPORT_SYMBOL(drm_ht_remove_item);
void drm_ht_remove(struct drm_open_hash *ht)
{
@@ -206,4 +201,3 @@ void drm_ht_remove(struct drm_open_hash *ht)
ht->table = NULL;
}
}
-EXPORT_SYMBOL(drm_ht_remove);
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index c9206840c87f..70c9dba114a6 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -35,9 +35,47 @@
#include <drm/drm_legacy.h>
struct agp_memory;
+struct drm_buf_desc;
struct drm_device;
struct drm_file;
-struct drm_buf_desc;
+struct drm_hash_item;
+struct drm_open_hash;
+
+/*
+ * Hash-table Support
+ */
+
+#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
+
+/* drm_hashtab.c */
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
+int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
+int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
+ unsigned long seed, int bits, int shift,
+ unsigned long add);
+int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
+
+void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
+int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
+int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
+void drm_ht_remove(struct drm_open_hash *ht);
+#endif
+
+/*
+ * RCU-safe interface
+ *
+ * The user of this API needs to make sure that two or more instances of the
+ * hash table manipulation functions are never run simultaneously.
+ * The lookup function drm_ht_find_item_rcu may, however, run simultaneously
+ * with any of the manipulation functions as long as it's called from within
+ * an RCU read-locked section.
+ */
+#define drm_ht_insert_item_rcu drm_ht_insert_item
+#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please
+#define drm_ht_remove_key_rcu drm_ht_remove_key
+#define drm_ht_remove_item_rcu drm_ht_remove_item
+#define drm_ht_find_item_rcu drm_ht_find_item
/*
* Generic DRM Contexts
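A minimal usage sketch for the RCU-safe lookup described above, assuming insertions and removals are serialized by the caller and that my_entry/use_item are hypothetical driver-side names:

struct drm_hash_item *item;

rcu_read_lock();
if (!drm_ht_find_item_rcu(&ht, key, &item)) {
	/* item may only be dereferenced inside the read-side section */
	use_item(drm_hash_entry(item, struct my_entry, hash));
}
rcu_read_unlock();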
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index b75403f3251a..ded8968b3e8a 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -15,9 +15,10 @@
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_file.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modes.h>
@@ -200,13 +201,19 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
struct drm_rect *clip, bool swap)
{
struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0);
- struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem);
- void *src = cma_obj->vaddr;
+ struct dma_buf_map map[DRM_FORMAT_MAX_PLANES];
+ struct dma_buf_map data[DRM_FORMAT_MAX_PLANES];
+ void *src;
int ret;
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
if (ret)
return ret;
+ ret = drm_gem_fb_vmap(fb, map, data);
+ if (ret)
+ goto out_drm_gem_fb_end_cpu_access;
+
+ src = data[0].vaddr; /* TODO: Use mapping abstraction properly */
switch (fb->format->format) {
case DRM_FORMAT_RGB565:
@@ -221,9 +228,11 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
default:
drm_err_once(fb->dev, "Format is not supported: %p4cc\n",
&fb->format->format);
- return -EINVAL;
+ ret = -EINVAL;
}
+ drm_gem_fb_vunmap(fb, map);
+out_drm_gem_fb_end_cpu_access:
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
return ret;
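The conversion follows a vmap-before-access pattern with a matching vunmap on every exit path. Roughly, with error handling trimmed:

struct dma_buf_map map[DRM_FORMAT_MAX_PLANES];
struct dma_buf_map data[DRM_FORMAT_MAX_PLANES];
int ret;

ret = drm_gem_fb_vmap(fb, map, data);
if (ret)
	return ret;

/* access data[0].vaddr ... */

drm_gem_fb_vunmap(fb, map);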
@@ -249,8 +258,8 @@ static void mipi_dbi_set_window_address(struct mipi_dbi_dev *dbidev,
static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{
- struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0);
- struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem);
+ struct dma_buf_map map[DRM_FORMAT_MAX_PLANES];
+ struct dma_buf_map data[DRM_FORMAT_MAX_PLANES];
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev);
unsigned int height = rect->y2 - rect->y1;
unsigned int width = rect->x2 - rect->x1;
@@ -266,6 +275,10 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
if (!drm_dev_enter(fb->dev, &idx))
return;
+ ret = drm_gem_fb_vmap(fb, map, data);
+ if (ret)
+ goto err_drm_dev_exit;
+
full = width == fb->width && height == fb->height;
DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
@@ -277,7 +290,7 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
if (ret)
goto err_msg;
} else {
- tr = cma_obj->vaddr;
+ tr = data[0].vaddr; /* TODO: Use mapping abstraction properly */
}
mipi_dbi_set_window_address(dbidev, rect->x1, rect->x2 - 1, rect->y1,
@@ -289,6 +302,9 @@ err_msg:
if (ret)
drm_err_once(fb->dev, "Failed to update display %d\n", ret);
+ drm_gem_fb_vunmap(fb, map);
+
+err_drm_dev_exit:
drm_dev_exit(idx);
}
@@ -1117,8 +1133,8 @@ int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *dbi,
/*
* Even though it's not the SPI device that does DMA (the master does),
- * the dma mask is necessary for the dma_alloc_wc() in
- * drm_gem_cma_create(). The dma_addr returned will be a physical
+ * the dma mask is necessary for the dma_alloc_wc() in the GEM code
+ * (e.g., drm_gem_cma_create()). The dma_addr returned will be a physical
* address which might be different from the bus address, but this is
* not a problem since the address will not be used.
* The virtual address is used in the transfer and the SPI core
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 82afb854141b..deeec60a3315 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -202,17 +202,13 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
memcpy(formats_ptr(blob_data), plane->format_types, formats_size);
- /* If we can't determine support, just bail */
- if (!plane->funcs->format_mod_supported)
- goto done;
-
mod = modifiers_ptr(blob_data);
for (i = 0; i < plane->modifier_count; i++) {
for (j = 0; j < plane->format_count; j++) {
- if (plane->funcs->format_mod_supported(plane,
+ if (!plane->funcs->format_mod_supported ||
+ plane->funcs->format_mod_supported(plane,
plane->format_types[j],
plane->modifiers[i])) {
-
mod->formats |= 1ULL << j;
}
}
@@ -223,7 +219,6 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
mod++;
}
-done:
drm_object_attach_property(&plane->base, config->modifiers_property,
blob->base.id);
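After this change, a missing ->format_mod_supported callback means every advertised format is reported as supporting every advertised modifier. A driver that needs finer control still provides the callback; a hypothetical sketch:

static bool foo_plane_format_mod_supported(struct drm_plane *plane,
					   u32 format, u64 modifier)
{
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return true;

	/* only the primary format supports the vendor modifier here */
	return format == DRM_FORMAT_XRGB8888;
}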
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index c9a9d74f338c..c313a5b4549c 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -404,8 +404,17 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
if (*fence) {
ret = dma_fence_chain_find_seqno(fence, point);
- if (!ret)
+ if (!ret) {
+ /* If the requested seqno is already signaled
+ * drm_syncobj_find_fence may return a NULL
+ * fence. To make sure the recipient gets
+ * signalled, use a new fence instead.
+ */
+ if (!*fence)
+ *fence = dma_fence_get_stub();
+
goto out;
+ }
dma_fence_put(*fence);
} else {
ret = -EINVAL;
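For callers this means *fence is guaranteed to be non-NULL on success; an already-signalled point simply yields the stub fence. A sketch of the caller side:

struct dma_fence *fence = NULL;
int ret;

ret = drm_syncobj_find_fence(file_priv, handle, point, 0, &fence);
if (!ret) {
	dma_fence_wait(fence, true);	/* returns immediately for the stub */
	dma_fence_put(fence);
}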
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 7dcc6392792d..0b756ecb1bc2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -589,6 +589,7 @@ static int compare_str(struct device *dev, void *data)
static int etnaviv_pdev_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct device_node *first_node = NULL;
struct component_match *match = NULL;
if (!dev->platform_data) {
@@ -598,6 +599,9 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
if (!of_device_is_available(core_node))
continue;
+ if (!first_node)
+ first_node = core_node;
+
drm_of_component_match_add(&pdev->dev, &match,
compare_of, core_node);
}
@@ -609,6 +613,32 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
component_match_add(dev, &match, compare_str, names[i]);
}
+ /*
+ * PTA and MTLB can have 40 bit base addresses, but
+ * unfortunately, an entry in the MTLB can only point to a
+ * 32 bit base address of a STLB. Moreover, to initialize the
+ * MMU we need a command buffer with a 32 bit address because
+ * without an MMU there is only an identity mapping between
+ * the internal 32 bit addresses and the bus addresses.
+ *
+ * To make things easy, we set the dma_coherent_mask to 32
+ * bit to make sure we are allocating the command buffers and
+ * TLBs in the lower 4 GiB address space.
+ */
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)) ||
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+ dev_dbg(&pdev->dev, "No suitable DMA available\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Apply the same DMA configuration to the virtual etnaviv
+ * device as the GPU we found. This assumes that all Vivante
+ * GPUs in the system share the same DMA constraints.
+ */
+ if (first_node)
+ of_dma_configure(&pdev->dev, first_node, true);
+
return component_master_add_with_match(dev, &etnaviv_master_ops, match);
}
@@ -653,21 +683,12 @@ static int __init etnaviv_init(void)
if (!of_device_is_available(np))
continue;
- pdev = platform_device_alloc("etnaviv", -1);
+ pdev = platform_device_alloc("etnaviv", PLATFORM_DEVID_NONE);
if (!pdev) {
ret = -ENOMEM;
of_node_put(np);
goto unregister_platform_driver;
}
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-
- /*
- * Apply the same DMA configuration to the virtual etnaviv
- * device as the GPU we found. This assumes that all Vivante
- * GPUs in the system share the same DMA constraints.
- */
- of_dma_configure(&pdev->dev, np, true);
ret = platform_device_add(pdev);
if (ret) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index b5e8ce86dbe7..b03c20c14ca1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -469,6 +469,12 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
return -EINVAL;
}
+ if (args->stream_size > SZ_64K || args->nr_relocs > SZ_64K ||
+ args->nr_bos > SZ_64K || args->nr_pmrs > 128) {
+ DRM_ERROR("submit arguments out of size limits\n");
+ return -EINVAL;
+ }
+
/*
* Copy the command submission and bo array to kernel space in
* one go, and do this outside of any locks.
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 242a5fd8b932..ba5fd012a40a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1658,7 +1658,7 @@ etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
return 0;
}
-static struct thermal_cooling_device_ops cooling_ops = {
+static const struct thermal_cooling_device_ops cooling_ops = {
.get_max_state = etnaviv_gpu_cooling_get_max_state,
.get_cur_state = etnaviv_gpu_cooling_get_cur_state,
.set_cur_state = etnaviv_gpu_cooling_set_cur_state,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 1c75c8ed5bce..85eddd492774 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -130,6 +130,7 @@ struct etnaviv_gpu {
/* hang detection */
u32 hangcheck_dma_addr;
+ u32 hangcheck_fence;
void __iomem *mmio;
int irq;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 180bb633d5c5..58f593b278c1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -107,8 +107,10 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
*/
dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
change = dma_addr - gpu->hangcheck_dma_addr;
- if (change < 0 || change > 16) {
+ if (gpu->completed_fence != gpu->hangcheck_fence ||
+ change < 0 || change > 16) {
gpu->hangcheck_dma_addr = dma_addr;
+ gpu->hangcheck_fence = gpu->completed_fence;
goto out_no_timeout;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index d8f1cf4d6b69..9743b6b17447 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -102,16 +102,7 @@ static const struct drm_ioctl_desc exynos_ioctls[] = {
DRM_RENDER_ALLOW),
};
-static const struct file_operations exynos_drm_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .mmap = exynos_drm_gem_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .unlocked_ioctl = drm_ioctl,
- .compat_ioctl = drm_compat_ioctl,
- .release = drm_release,
-};
+DEFINE_DRM_GEM_FOPS(exynos_drm_driver_fops);
static const struct drm_driver exynos_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM
@@ -124,7 +115,7 @@ static const struct drm_driver exynos_drm_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = exynos_drm_gem_prime_import,
.gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
- .gem_prime_mmap = exynos_drm_gem_prime_mmap,
+ .gem_prime_mmap = drm_gem_prime_mmap,
.ioctls = exynos_ioctls,
.num_ioctls = ARRAY_SIZE(exynos_ioctls),
.fops = &exynos_drm_driver_fops,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 8d137857818c..fe2afd85b1fa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -13,7 +13,6 @@
#include <linux/gpio/consumer.h>
#include <linux/irq.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/of_graph.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
@@ -257,15 +256,17 @@ struct exynos_dsi {
struct drm_connector connector;
struct drm_panel *panel;
struct list_head bridge_chain;
+ struct drm_bridge bridge;
struct drm_bridge *out_bridge;
struct device *dev;
+ struct drm_display_mode mode;
void __iomem *reg_base;
struct phy *phy;
struct clk **clks;
struct regulator_bulk_data supplies[2];
int irq;
- int te_gpio;
+ struct gpio_desc *te_gpio;
u32 pll_clk_rate;
u32 burst_clk_rate;
@@ -287,9 +288,9 @@ struct exynos_dsi {
#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
#define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector)
-static inline struct exynos_dsi *encoder_to_dsi(struct drm_encoder *e)
+static inline struct exynos_dsi *bridge_to_dsi(struct drm_bridge *b)
{
- return container_of(e, struct exynos_dsi, encoder);
+ return container_of(b, struct exynos_dsi, bridge);
}
enum reg_idx {
@@ -882,7 +883,7 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
static void exynos_dsi_set_display_mode(struct exynos_dsi *dsi)
{
- struct drm_display_mode *m = &dsi->encoder.crtc->state->adjusted_mode;
+ struct drm_display_mode *m = &dsi->mode;
unsigned int num_bits_resol = dsi->driver_data->num_bits_resol;
u32 reg;
@@ -1298,14 +1299,14 @@ static void exynos_dsi_enable_irq(struct exynos_dsi *dsi)
{
enable_irq(dsi->irq);
- if (gpio_is_valid(dsi->te_gpio))
- enable_irq(gpio_to_irq(dsi->te_gpio));
+ if (dsi->te_gpio)
+ enable_irq(gpiod_to_irq(dsi->te_gpio));
}
static void exynos_dsi_disable_irq(struct exynos_dsi *dsi)
{
- if (gpio_is_valid(dsi->te_gpio))
- disable_irq(gpio_to_irq(dsi->te_gpio));
+ if (dsi->te_gpio)
+ disable_irq(gpiod_to_irq(dsi->te_gpio));
disable_irq(dsi->irq);
}
@@ -1335,48 +1336,38 @@ static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi,
int ret;
int te_gpio_irq;
- dsi->te_gpio = of_get_named_gpio(panel->of_node, "te-gpios", 0);
- if (dsi->te_gpio == -ENOENT)
- return 0;
-
- if (!gpio_is_valid(dsi->te_gpio)) {
- ret = dsi->te_gpio;
- dev_err(dsi->dev, "cannot get te-gpios, %d\n", ret);
- goto out;
- }
-
- ret = gpio_request(dsi->te_gpio, "te_gpio");
- if (ret) {
- dev_err(dsi->dev, "gpio request failed with %d\n", ret);
- goto out;
+ dsi->te_gpio = devm_gpiod_get_optional(dsi->dev, "te", GPIOD_IN);
+ if (IS_ERR(dsi->te_gpio)) {
+ dev_err(dsi->dev, "gpio request failed with %ld\n",
+ PTR_ERR(dsi->te_gpio));
+ return PTR_ERR(dsi->te_gpio);
}
- te_gpio_irq = gpio_to_irq(dsi->te_gpio);
+ te_gpio_irq = gpiod_to_irq(dsi->te_gpio);
ret = request_threaded_irq(te_gpio_irq, exynos_dsi_te_irq_handler, NULL,
IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN, "TE", dsi);
if (ret) {
dev_err(dsi->dev, "request interrupt failed with %d\n", ret);
- gpio_free(dsi->te_gpio);
- goto out;
+ gpiod_put(dsi->te_gpio);
+ return ret;
}
-out:
- return ret;
+ return 0;
}
static void exynos_dsi_unregister_te_irq(struct exynos_dsi *dsi)
{
- if (gpio_is_valid(dsi->te_gpio)) {
- free_irq(gpio_to_irq(dsi->te_gpio), dsi);
- gpio_free(dsi->te_gpio);
- dsi->te_gpio = -ENOENT;
+ if (dsi->te_gpio) {
+ free_irq(gpiod_to_irq(dsi->te_gpio), dsi);
+ gpiod_put(dsi->te_gpio);
}
}
-static void exynos_dsi_enable(struct drm_encoder *encoder)
+static void exynos_dsi_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
- struct exynos_dsi *dsi = encoder_to_dsi(encoder);
+ struct exynos_dsi *dsi = bridge_to_dsi(bridge);
struct drm_bridge *iter;
int ret;
@@ -1399,7 +1390,8 @@ static void exynos_dsi_enable(struct drm_encoder *encoder)
list_for_each_entry_reverse(iter, &dsi->bridge_chain,
chain_node) {
if (iter->funcs->pre_enable)
- iter->funcs->pre_enable(iter);
+ iter->funcs->atomic_pre_enable(iter,
+ old_bridge_state);
}
}
@@ -1413,7 +1405,7 @@ static void exynos_dsi_enable(struct drm_encoder *encoder)
} else {
list_for_each_entry(iter, &dsi->bridge_chain, chain_node) {
if (iter->funcs->enable)
- iter->funcs->enable(iter);
+ iter->funcs->atomic_enable(iter, old_bridge_state);
}
}
@@ -1429,9 +1421,10 @@ err_put_sync:
pm_runtime_put(dsi->dev);
}
-static void exynos_dsi_disable(struct drm_encoder *encoder)
+static void exynos_dsi_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
- struct exynos_dsi *dsi = encoder_to_dsi(encoder);
+ struct exynos_dsi *dsi = bridge_to_dsi(bridge);
struct drm_bridge *iter;
if (!(dsi->state & DSIM_STATE_ENABLED))
@@ -1443,7 +1436,7 @@ static void exynos_dsi_disable(struct drm_encoder *encoder)
list_for_each_entry_reverse(iter, &dsi->bridge_chain, chain_node) {
if (iter->funcs->disable)
- iter->funcs->disable(iter);
+ iter->funcs->atomic_disable(iter, old_bridge_state);
}
exynos_dsi_set_display_enable(dsi, false);
@@ -1451,7 +1444,7 @@ static void exynos_dsi_disable(struct drm_encoder *encoder)
list_for_each_entry(iter, &dsi->bridge_chain, chain_node) {
if (iter->funcs->post_disable)
- iter->funcs->post_disable(iter);
+ iter->funcs->atomic_post_disable(iter, old_bridge_state);
}
dsi->state &= ~DSIM_STATE_ENABLED;
@@ -1494,9 +1487,9 @@ static const struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs
.get_modes = exynos_dsi_get_modes,
};
-static int exynos_dsi_create_connector(struct drm_encoder *encoder)
+static int exynos_dsi_create_connector(struct exynos_dsi *dsi)
{
- struct exynos_dsi *dsi = encoder_to_dsi(encoder);
+ struct drm_encoder *encoder = &dsi->encoder;
struct drm_connector *connector = &dsi->connector;
struct drm_device *drm = encoder->dev;
int ret;
@@ -1522,9 +1515,31 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
return 0;
}
-static const struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = {
- .enable = exynos_dsi_enable,
- .disable = exynos_dsi_disable,
+static void exynos_dsi_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct exynos_dsi *dsi = bridge_to_dsi(bridge);
+
+ drm_mode_copy(&dsi->mode, adjusted_mode);
+}
+
+static int exynos_dsi_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct exynos_dsi *dsi = bridge_to_dsi(bridge);
+
+ return drm_bridge_attach(bridge->encoder, dsi->out_bridge, NULL, 0);
+}
+
+static const struct drm_bridge_funcs exynos_dsi_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_enable = exynos_dsi_atomic_enable,
+ .atomic_disable = exynos_dsi_atomic_disable,
+ .mode_set = exynos_dsi_mode_set,
+ .attach = exynos_dsi_attach,
};
MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
@@ -1543,7 +1558,7 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
dsi->out_bridge = out_bridge;
list_splice_init(&encoder->bridge_chain, &dsi->bridge_chain);
} else {
- int ret = exynos_dsi_create_connector(encoder);
+ int ret = exynos_dsi_create_connector(dsi);
if (ret) {
DRM_DEV_ERROR(dsi->dev,
@@ -1596,7 +1611,7 @@ static int exynos_dsi_host_detach(struct mipi_dsi_host *host,
if (dsi->panel) {
mutex_lock(&drm->mode_config.mutex);
- exynos_dsi_disable(&dsi->encoder);
+ exynos_dsi_atomic_disable(&dsi->bridge, NULL);
dsi->panel = NULL;
dsi->connector.status = connector_status_disconnected;
mutex_unlock(&drm->mode_config.mutex);
@@ -1702,12 +1717,16 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(encoder, &exynos_dsi_encoder_helper_funcs);
-
ret = exynos_drm_set_possible_crtcs(encoder, EXYNOS_DISPLAY_TYPE_LCD);
if (ret < 0)
return ret;
+ ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL, 0);
+ if (ret) {
+ drm_encoder_cleanup(&dsi->encoder);
+ return ret;
+ }
+
in_bridge_node = of_graph_get_remote_node(dev->of_node, DSI_PORT_IN, 0);
if (in_bridge_node) {
in_bridge = of_drm_find_bridge(in_bridge_node);
@@ -1723,10 +1742,9 @@ static void exynos_dsi_unbind(struct device *dev, struct device *master,
void *data)
{
struct exynos_dsi *dsi = dev_get_drvdata(dev);
- struct drm_encoder *encoder = &dsi->encoder;
-
- exynos_dsi_disable(encoder);
+ exynos_dsi_atomic_disable(&dsi->bridge, NULL);
+ drm_encoder_cleanup(&dsi->encoder);
mipi_dsi_host_unregister(&dsi->dsi_host);
}
@@ -1745,9 +1763,6 @@ static int exynos_dsi_probe(struct platform_device *pdev)
if (!dsi)
return -ENOMEM;
- /* To be checked as invalid one */
- dsi->te_gpio = -ENOENT;
-
init_completion(&dsi->completed);
spin_lock_init(&dsi->transfer_lock);
INIT_LIST_HEAD(&dsi->transfer_list);
@@ -1819,6 +1834,12 @@ static int exynos_dsi_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
+ dsi->bridge.funcs = &exynos_dsi_bridge_funcs;
+ dsi->bridge.of_node = dev->of_node;
+ dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
+
+ drm_bridge_add(&dsi->bridge);
+
ret = component_add(dev, &exynos_dsi_component_ops);
if (ret)
goto err_disable_runtime;
@@ -1833,6 +1854,10 @@ err_disable_runtime:
static int exynos_dsi_remove(struct platform_device *pdev)
{
+ struct exynos_dsi *dsi = platform_get_drvdata(pdev);
+
+ drm_bridge_remove(&dsi->bridge);
+
pm_runtime_disable(&pdev->dev);
component_del(&pdev->dev, &exynos_dsi_component_ops);
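The conversion ties the bridge to the driver's life cycle roughly as follows (a summary of the hunks above):

/*
 * probe:   drm_bridge_add(&dsi->bridge);
 * bind:    drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL, 0);
 * unbind:  drm_encoder_cleanup(&dsi->encoder);
 * remove:  drm_bridge_remove(&dsi->bridge);
 */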
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 5147f5929be7..02c97b9ca926 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -15,6 +15,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>
#include <drm/exynos_drm.h>
@@ -39,25 +40,8 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
struct drm_fb_helper *helper = info->par;
struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
struct exynos_drm_gem *exynos_gem = exynos_fbd->exynos_gem;
- unsigned long vm_size;
- int ret;
-
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
-
- vm_size = vma->vm_end - vma->vm_start;
-
- if (vm_size > exynos_gem->size)
- return -EINVAL;
- ret = dma_mmap_attrs(to_dma_dev(helper->dev), vma, exynos_gem->cookie,
- exynos_gem->dma_addr, exynos_gem->size,
- exynos_gem->dma_attrs);
- if (ret < 0) {
- DRM_DEV_ERROR(to_dma_dev(helper->dev), "failed to mmap.\n");
- return ret;
- }
-
- return 0;
+ return drm_gem_prime_mmap(&exynos_gem->base, vma);
}
static const struct fb_ops exynos_drm_fb_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index ecfd82d0afb7..023f54ee61a8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -782,8 +782,8 @@ static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
sc->hratio = (src_w << 14) / (dst_w << hfactor);
sc->vratio = (src_h << 14) / (dst_h << vfactor);
- sc->up_h = (dst_w >= src_w) ? true : false;
- sc->up_v = (dst_h >= src_h) ? true : false;
+ sc->up_h = (dst_w >= src_w);
+ sc->up_v = (dst_h >= src_h);
DRM_DEV_DEBUG_KMS(ctx->dev, "hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n",
sc->hratio, sc->vratio, sc->up_h, sc->up_v);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 0a0c042a3155..3e493f48e0d4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -20,6 +20,8 @@
MODULE_IMPORT_NS(DMA_BUF);
+static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap)
{
struct drm_device *dev = exynos_gem->base.dev;
@@ -138,6 +140,7 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
static const struct drm_gem_object_funcs exynos_drm_gem_object_funcs = {
.free = exynos_drm_gem_free_object,
.get_sg_table = exynos_drm_gem_prime_get_sg_table,
+ .mmap = exynos_drm_gem_mmap,
.vm_ops = &exynos_drm_gem_vm_ops,
};
@@ -357,12 +360,16 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
return 0;
}
-static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
+static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
int ret;
+ if (obj->import_attach)
+ return dma_buf_mmap(obj->dma_buf, vma, 0);
+
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+
DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
exynos_gem->flags);
@@ -388,26 +395,6 @@ err_close_vm:
return ret;
}
-int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_gem_object *obj;
- int ret;
-
- /* set vm_area_struct. */
- ret = drm_gem_mmap(filp, vma);
- if (ret < 0) {
- DRM_ERROR("failed to mmap.\n");
- return ret;
- }
-
- obj = vma->vm_private_data;
-
- if (obj->import_attach)
- return dma_buf_mmap(obj->dma_buf, vma, 0);
-
- return exynos_drm_gem_mmap_obj(obj, vma);
-}
-
/* low-level interface prime helpers */
struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
@@ -469,15 +456,3 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
exynos_gem->sgt = sgt;
return &exynos_gem->base;
}
-
-int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
-{
- int ret;
-
- ret = drm_gem_mmap_obj(obj, obj->size, vma);
- if (ret < 0)
- return ret;
-
- return exynos_drm_gem_mmap_obj(obj, vma);
-}
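This removes the last driver-private mmap path: the file_operations come from DEFINE_DRM_GEM_FOPS() and the per-object work moves into &drm_gem_object_funcs.mmap, which drm_gem_mmap() invokes. A hypothetical driver wiring (foo_* names are placeholders):

DEFINE_DRM_GEM_FOPS(foo_drm_driver_fops);

static const struct drm_gem_object_funcs foo_gem_object_funcs = {
	.free = foo_gem_free_object,
	.mmap = foo_gem_mmap,		/* invoked via drm_gem_mmap() */
	.vm_ops = &foo_gem_vm_ops,
};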
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index a23272fb96fb..79d7e1a87419 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -96,9 +96,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-/* set vm_flags and we can change the vm attribute to other one at here. */
-int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-
/* low-level interface prime helpers */
struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
@@ -107,7 +104,5 @@ struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
-int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
#endif
diff --git a/drivers/gpu/drm/fsl-dcu/Kconfig b/drivers/gpu/drm/fsl-dcu/Kconfig
index d7dd8ba90e3a..e95e96c565ba 100644
--- a/drivers/gpu/drm/fsl-dcu/Kconfig
+++ b/drivers/gpu/drm/fsl-dcu/Kconfig
@@ -3,8 +3,8 @@ config DRM_FSL_DCU
tristate "DRM Support for Freescale DCU"
depends on DRM && OF && ARM && COMMON_CLK
select BACKLIGHT_CLASS_DEVICE
+ select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_PANEL
select REGMAP_MMIO
select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/hisilicon/kirin/Kconfig b/drivers/gpu/drm/hisilicon/kirin/Kconfig
index 290553e2f6b4..b770f7662830 100644
--- a/drivers/gpu/drm/hisilicon/kirin/Kconfig
+++ b/drivers/gpu/drm/hisilicon/kirin/Kconfig
@@ -4,7 +4,6 @@ config DRM_HISI_KIRIN
depends on DRM && OF && ARM64
select DRM_KMS_HELPER
select DRM_GEM_CMA_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_MIPI_DSI
help
Choose this option if you have a HiSilicon Kirin chipset (hi6220).
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index cd818a629183..00e53de4812b 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -225,12 +225,29 @@ static int hyperv_vmbus_remove(struct hv_device *hdev)
{
struct drm_device *dev = hv_get_drvdata(hdev);
struct hyperv_drm_device *hv = to_hv(dev);
+ struct pci_dev *pdev;
drm_dev_unplug(dev);
drm_atomic_helper_shutdown(dev);
vmbus_close(hdev->channel);
hv_set_drvdata(hdev, NULL);
- vmbus_free_mmio(hv->mem->start, hv->fb_size);
+
+ /*
+ * Free allocated MMIO memory only on Gen2 VMs.
+ * On Gen1 VMs, release the PCI device
+ */
+ if (efi_enabled(EFI_BOOT)) {
+ vmbus_free_mmio(hv->mem->start, hv->fb_size);
+ } else {
+ pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
+ PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
+ if (!pdev) {
+ drm_err(dev, "Unable to find PCI Hyper-V video\n");
+ return -ENODEV;
+ }
+ pci_release_region(pdev, 0);
+ pci_dev_put(pdev);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 213c5f9fae32..1b62b9f65196 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -60,7 +60,6 @@ i915-y += i915_driver.o \
# core library code
i915-y += \
- dma_resv_utils.o \
i915_memcpy.o \
i915_mm.o \
i915_sw_fence.o \
@@ -154,6 +153,7 @@ gem-y += \
gem/i915_gem_throttle.o \
gem/i915_gem_tiling.o \
gem/i915_gem_ttm.o \
+ gem/i915_gem_ttm_move.o \
gem/i915_gem_ttm_pm.o \
gem/i915_gem_userptr.o \
gem/i915_gem_wait.o \
@@ -163,6 +163,7 @@ i915-y += \
i915_active.o \
i915_buddy.o \
i915_cmd_parser.o \
+ i915_deps.o \
i915_gem_evict.o \
i915_gem_gtt.o \
i915_gem_ww.o \
@@ -173,6 +174,7 @@ i915-y += \
i915_trace_points.o \
i915_ttm_buddy_manager.o \
i915_vma.o \
+ i915_vma_snapshot.o \
intel_wopcm.o
# general-purpose microcontroller (GuC) support
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 89005628cc3a..c2c512cd8ec0 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -819,7 +819,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
* maximum clocks following a vblank miss (see do_rps_boost()).
*/
if (!state->rps_interactive) {
- intel_rps_mark_interactive(&dev_priv->gt.rps, true);
+ intel_rps_mark_interactive(&to_gt(dev_priv)->rps, true);
state->rps_interactive = true;
}
@@ -853,7 +853,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
return;
if (state->rps_interactive) {
- intel_rps_mark_interactive(&dev_priv->gt.rps, false);
+ intel_rps_mark_interactive(&to_gt(dev_priv)->rps, false);
state->rps_interactive = false;
}
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index abec394f6869..2da4aacc956b 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -634,7 +634,7 @@ static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe)
data_rate += bw_state->data_rate[pipe];
- if (DISPLAY_VER(dev_priv) >= 13 && intel_vtd_active())
+ if (DISPLAY_VER(dev_priv) >= 13 && intel_vtd_active(dev_priv))
data_rate = data_rate * 105 / 100;
return data_rate;
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index e278a662b247..bf7ce684dd8e 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -843,7 +843,7 @@ __intel_display_resume(struct drm_device *dev,
static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
{
return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
- intel_has_gpu_reset(&dev_priv->gt));
+ intel_has_gpu_reset(to_gt(dev_priv)));
}
void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
@@ -862,14 +862,14 @@ void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
return;
/* We have a modeset vs reset deadlock, defensively unbreak it. */
- set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
+ set_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
smp_mb__after_atomic();
- wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);
+ wake_up_bit(&to_gt(dev_priv)->reset.flags, I915_RESET_MODESET);
if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
drm_dbg_kms(&dev_priv->drm,
"Modeset potentially stuck, unbreaking through wedging\n");
- intel_gt_set_wedged(&dev_priv->gt);
+ intel_gt_set_wedged(to_gt(dev_priv));
}
/*
@@ -920,7 +920,7 @@ void intel_display_finish_reset(struct drm_i915_private *dev_priv)
return;
/* reset doesn't touch the display */
- if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
+ if (!test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
return;
state = fetch_and_zero(&dev_priv->modeset_restore_state);
@@ -958,7 +958,7 @@ unlock:
drm_modeset_acquire_fini(ctx);
mutex_unlock(&dev->mode_config.mutex);
- clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
+ clear_bit_unlock(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
}
static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
@@ -1299,7 +1299,7 @@ static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- return crtc_state->uapi.async_flip && intel_vtd_active() &&
+ return crtc_state->uapi.async_flip && intel_vtd_active(i915) &&
(DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
}
@@ -8513,19 +8513,19 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat
for (;;) {
prepare_to_wait(&intel_state->commit_ready.wait,
&wait_fence, TASK_UNINTERRUPTIBLE);
- prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
+ prepare_to_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
I915_RESET_MODESET),
&wait_reset, TASK_UNINTERRUPTIBLE);
if (i915_sw_fence_done(&intel_state->commit_ready) ||
- test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
+ test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
break;
schedule();
}
finish_wait(&intel_state->commit_ready.wait, &wait_fence);
- finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
+ finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
I915_RESET_MODESET),
&wait_reset);
}
@@ -8775,7 +8775,7 @@ static void intel_atomic_commit_work(struct work_struct *work)
intel_atomic_commit_tail(state);
}
-static int __i915_sw_fence_call
+static int
intel_atomic_commit_ready(struct i915_sw_fence *fence,
enum i915_sw_fence_notify notify)
{
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index 963ca7155b06..8f674745e7e0 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -264,7 +264,7 @@ intel_dpt_create(struct intel_framebuffer *fb)
vm = &dpt->vm;
- vm->gt = &i915->gt;
+ vm->gt = to_gt(i915);
vm->i915 = i915;
vm->dma = i915->drm.dev;
vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
@@ -279,8 +279,6 @@ intel_dpt_create(struct intel_framebuffer *fb)
vm->vma_ops.bind_vma = dpt_bind_vma;
vm->vma_ops.unbind_vma = dpt_unbind_vma;
- vm->vma_ops.set_pages = ggtt_set_pages;
- vm->vma_ops.clear_pages = clear_pages;
vm->pte_encode = gen8_ggtt_pte_encode;
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 7fd11d735ca4..465dc4e97ea8 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -1639,7 +1639,7 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *i915)
static bool need_fbc_vtd_wa(struct drm_i915_private *i915)
{
/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
- if (intel_vtd_active() &&
+ if (intel_vtd_active(i915) &&
(IS_SKYLAKE(i915) || IS_BROXTON(i915))) {
drm_info(&i915->drm,
"Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 7e3f5c6ca484..1a376e9a1ff3 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -1382,7 +1382,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
if (!HAS_OVERLAY(dev_priv))
return;
- engine = dev_priv->gt.engine[RCS0];
+ engine = to_gt(dev_priv)->engine[RCS0];
if (!engine || !engine->kernel_context)
return;
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 158d89b8d490..b3162f49f341 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -1737,7 +1737,7 @@ static bool bo_has_valid_encryption(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- return intel_pxp_key_check(&i915->gt.pxp, obj, false) == 0;
+ return intel_pxp_key_check(&to_gt(i915)->pxp, obj, false) == 0;
}
static bool pxp_is_borked(struct drm_i915_gem_object *obj)
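The recurring substitution of &dev_priv->gt with to_gt(dev_priv) throughout the i915 hunks prepares for devices with more than one GT. At this point the helper is assumed to be a thin accessor for the root GT, roughly:

/* assumed shape of the helper; it simply returns the device's root GT */
static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
{
	return &i915->gt;
}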
diff --git a/drivers/gpu/drm/i915/dma_resv_utils.c b/drivers/gpu/drm/i915/dma_resv_utils.c
deleted file mode 100644
index 7df91b7e4ca8..000000000000
--- a/drivers/gpu/drm/i915/dma_resv_utils.c
+++ /dev/null
@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2020 Intel Corporation
- */
-
-#include <linux/dma-resv.h>
-
-#include "dma_resv_utils.h"
-
-void dma_resv_prune(struct dma_resv *resv)
-{
- if (dma_resv_trylock(resv)) {
- if (dma_resv_test_signaled(resv, true))
- dma_resv_add_excl_fence(resv, NULL);
- dma_resv_unlock(resv);
- }
-}
diff --git a/drivers/gpu/drm/i915/dma_resv_utils.h b/drivers/gpu/drm/i915/dma_resv_utils.h
deleted file mode 100644
index b9d8fb5f8367..000000000000
--- a/drivers/gpu/drm/i915/dma_resv_utils.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2020 Intel Corporation
- */
-
-#ifndef DMA_RESV_UTILS_H
-#define DMA_RESV_UTILS_H
-
-struct dma_resv;
-
-void dma_resv_prune(struct dma_resv *resv);
-
-#endif /* DMA_RESV_UTILS_H */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index f0435c6feb68..8a248003dfae 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -69,10 +69,16 @@ static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj)
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
unsigned int flags)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct clflush *clflush;
assert_object_held(obj);
+ if (IS_DGFX(i915)) {
+ WARN_ON_ONCE(obj->cache_dirty);
+ return false;
+ }
+
/*
* Stolen memory is always coherent with the GPU as it is explicitly
* marked as wc by the system, or the system is cache-coherent.
@@ -105,16 +111,24 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
if (clflush) {
i915_sw_fence_await_reservation(&clflush->base.chain,
obj->base.resv, NULL, true,
- i915_fence_timeout(to_i915(obj->base.dev)),
+ i915_fence_timeout(i915),
I915_FENCE_GFP);
dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
dma_fence_work_commit(&clflush->base);
+ /*
+ * We must have successfully populated the pages (since we are
+ * holding a pin on the pages as per the flush worker) to reach
+ * this point, which must mean we have already done the required
+ * flush-on-acquire, hence resetting cache_dirty here should be
+ * safe.
+ */
+ obj->cache_dirty = false;
} else if (obj->mm.pages) {
__do_clflush(obj);
+ obj->cache_dirty = false;
} else {
GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
}
- obj->cache_dirty = false;
return true;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index ebd775cb1661..00327b750fbb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -237,7 +237,7 @@ static int proto_context_set_persistence(struct drm_i915_private *i915,
* collateral damage, and we should not pretend we can by
* exposing the interface.
*/
- if (!intel_has_reset_engine(&i915->gt))
+ if (!intel_has_reset_engine(to_gt(i915)))
return -ENODEV;
pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE);
@@ -254,7 +254,7 @@ static int proto_context_set_protected(struct drm_i915_private *i915,
if (!protected) {
pc->uses_protected_content = false;
- } else if (!intel_pxp_is_enabled(&i915->gt.pxp)) {
+ } else if (!intel_pxp_is_enabled(&to_gt(i915)->pxp)) {
ret = -ENODEV;
} else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) ||
!(pc->user_flags & BIT(UCONTEXT_BANNABLE))) {
@@ -268,8 +268,8 @@ static int proto_context_set_protected(struct drm_i915_private *i915,
*/
pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- if (!intel_pxp_is_active(&i915->gt.pxp))
- ret = intel_pxp_start(&i915->gt.pxp);
+ if (!intel_pxp_is_active(&to_gt(i915)->pxp))
+ ret = intel_pxp_start(&to_gt(i915)->pxp);
}
return ret;
@@ -564,6 +564,7 @@ set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
container_of_user(base, typeof(*ext), base);
const struct set_proto_ctx_engines *set = data;
struct drm_i915_private *i915 = set->i915;
+ struct i915_engine_class_instance prev_engine;
u64 flags;
int err = 0, n, i, j;
u16 slot, width, num_siblings;
@@ -571,7 +572,7 @@ set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
intel_engine_mask_t prev_mask;
/* FIXME: This is NIY for execlists */
- if (!(intel_uc_uses_guc_submission(&i915->gt.uc)))
+ if (!(intel_uc_uses_guc_submission(&to_gt(i915)->uc)))
return -ENODEV;
if (get_user(slot, &ext->engine_index))
@@ -629,7 +630,6 @@ set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
/* Create contexts / engines */
for (i = 0; i < width; ++i) {
intel_engine_mask_t current_mask = 0;
- struct i915_engine_class_instance prev_engine;
for (j = 0; j < num_siblings; ++j) {
struct i915_engine_class_instance ci;
@@ -833,7 +833,7 @@ static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
sseu = &pc->legacy_rcs_sseu;
}
- ret = i915_gem_user_to_context_sseu(&i915->gt, &user_sseu, sseu);
+ ret = i915_gem_user_to_context_sseu(to_gt(i915), &user_sseu, sseu);
if (ret)
return ret;
@@ -1001,7 +1001,7 @@ static void free_engines_rcu(struct rcu_head *rcu)
free_engines(engines);
}
-static int __i915_sw_fence_call
+static int
engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
struct i915_gem_engines *engines =
@@ -1044,7 +1044,7 @@ static struct i915_gem_engines *alloc_engines(unsigned int count)
static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
struct intel_sseu rcs_sseu)
{
- const struct intel_gt *gt = &ctx->i915->gt;
+ const struct intel_gt *gt = to_gt(ctx->i915);
struct intel_engine_cs *engine;
struct i915_gem_engines *e, *err;
enum intel_engine_id id;
@@ -1521,7 +1521,7 @@ static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
* collateral damage, and we should not pretend we can by
* exposing the interface.
*/
- if (!intel_has_reset_engine(&ctx->i915->gt))
+ if (!intel_has_reset_engine(to_gt(ctx->i915)))
return -ENODEV;
i915_gem_context_clear_persistence(ctx);
@@ -1559,7 +1559,7 @@ i915_gem_create_context(struct drm_i915_private *i915,
} else if (HAS_FULL_PPGTT(i915)) {
struct i915_ppgtt *ppgtt;
- ppgtt = i915_ppgtt_create(&i915->gt, 0);
+ ppgtt = i915_ppgtt_create(to_gt(i915), 0);
if (IS_ERR(ppgtt)) {
drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
@@ -1742,7 +1742,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
if (args->flags)
return -EINVAL;
- ppgtt = i915_ppgtt_create(&i915->gt, 0);
+ ppgtt = i915_ppgtt_create(to_gt(i915), 0);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
@@ -2194,7 +2194,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
return -EINVAL;
- ret = intel_gt_terminally_wedged(&i915->gt);
+ ret = intel_gt_terminally_wedged(to_gt(i915));
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
index 8955d6abcef1..9402d4bf4ffc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_create.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -379,7 +379,7 @@ static int ext_set_protected(struct i915_user_extension __user *base, void *data
if (ext.flags)
return -EINVAL;
- if (!intel_pxp_is_enabled(&ext_data->i915->gt.pxp))
+ if (!intel_pxp_is_enabled(&to_gt(ext_data->i915)->pxp))
return -ENODEV;
ext_data->flags |= I915_BO_PROTECTED;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index e8a58c997170..1b526039a60d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -248,8 +248,19 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
if (IS_ERR(pages))
return PTR_ERR(pages);
- /* XXX: consider doing a vmap flush or something */
- if (!HAS_LLC(i915) || i915_gem_object_can_bypass_llc(obj))
+ /*
+ * DG1 is special here since it still snoops transactions even with
+ * CACHE_NONE. This is not the case with other HAS_SNOOP platforms. We
+ * might need to revisit this as we add new discrete platforms.
+ *
+ * XXX: Consider doing a vmap flush or something, where possible.
+ * Currently we just do a heavy handed wbinvd_on_all_cpus() here since
+ * the underlying sg_table might not even point to struct pages, so we
+ * can't just call drm_clflush_sg or similar, like we do elsewhere in
+ * the driver.
+ */
+ if (i915_gem_object_can_bypass_llc(obj) ||
+ (!HAS_LLC(i915) && !IS_DG1(i915)))
wbinvd_on_all_cpus();
sg_page_sizes = i915_sg_dma_sizes(pages->sgl);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index b684a62bf3b0..26532c07d467 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -18,10 +18,32 @@
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+ if (IS_DGFX(i915))
+ return false;
+
return !(obj->cache_level == I915_CACHE_NONE ||
obj->cache_level == I915_CACHE_WT);
}
+bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+ if (obj->cache_dirty)
+ return false;
+
+ if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
+ return true;
+
+ if (IS_DGFX(i915))
+ return false;
+
+ /* Currently in use by HW (display engine)? Keep flushed. */
+ return i915_gem_object_is_framebuffer(obj);
+}
+
static void
flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
{
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 4d7da07442f2..3a5b247be738 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -29,6 +29,7 @@
#include "i915_gem_ioctls.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
+#include "i915_vma_snapshot.h"
struct eb_vma {
struct i915_vma *vma;
@@ -307,11 +308,15 @@ struct i915_execbuffer {
struct eb_fence *fences;
unsigned long num_fences;
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+ struct i915_capture_list *capture_lists[MAX_ENGINE_INSTANCE + 1];
+#endif
};
static int eb_parse(struct i915_execbuffer *eb);
static int eb_pin_engine(struct i915_execbuffer *eb, bool throttle);
static void eb_unpin_engine(struct i915_execbuffer *eb);
+static void eb_capture_release(struct i915_execbuffer *eb);
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
@@ -990,7 +995,7 @@ static int eb_validate_vmas(struct i915_execbuffer *eb)
}
if (!(ev->flags & EXEC_OBJECT_WRITE)) {
- err = dma_resv_reserve_shared(vma->resv, 1);
+ err = dma_resv_reserve_shared(vma->obj->base.resv, 1);
if (err)
return err;
}
@@ -1043,6 +1048,7 @@ static void eb_release_vmas(struct i915_execbuffer *eb, bool final)
i915_vma_put(vma);
}
+ eb_capture_release(eb);
eb_unpin_engine(eb);
}
@@ -1092,6 +1098,47 @@ static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
return &i915->ggtt;
}
+static void reloc_cache_unmap(struct reloc_cache *cache)
+{
+ void *vaddr;
+
+ if (!cache->vaddr)
+ return;
+
+ vaddr = unmask_page(cache->vaddr);
+ if (cache->vaddr & KMAP)
+ kunmap_atomic(vaddr);
+ else
+ io_mapping_unmap_atomic((void __iomem *)vaddr);
+}
+
+static void reloc_cache_remap(struct reloc_cache *cache,
+ struct drm_i915_gem_object *obj)
+{
+ void *vaddr;
+
+ if (!cache->vaddr)
+ return;
+
+ if (cache->vaddr & KMAP) {
+ struct page *page = i915_gem_object_get_page(obj, cache->page);
+
+ vaddr = kmap_atomic(page);
+ cache->vaddr = unmask_flags(cache->vaddr) |
+ (unsigned long)vaddr;
+ } else {
+ struct i915_ggtt *ggtt = cache_to_ggtt(cache);
+ unsigned long offset;
+
+ offset = cache->node.start;
+ if (!drm_mm_node_allocated(&cache->node))
+ offset += cache->page << PAGE_SHIFT;
+
+ cache->vaddr = (unsigned long)
+ io_mapping_map_atomic_wc(&ggtt->iomap, offset);
+ }
+}
+
static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer *eb)
{
void *vaddr;
@@ -1356,10 +1403,17 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* batchbuffers.
*/
if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
- GRAPHICS_VER(eb->i915) == 6) {
+ GRAPHICS_VER(eb->i915) == 6 &&
+ !i915_vma_is_bound(target->vma, I915_VMA_GLOBAL_BIND)) {
+ struct i915_vma *vma = target->vma;
+
+ reloc_cache_unmap(&eb->reloc_cache);
+ mutex_lock(&vma->vm->mutex);
err = i915_vma_bind(target->vma,
target->vma->obj->cache_level,
PIN_GLOBAL, NULL);
+ mutex_unlock(&vma->vm->mutex);
+ reloc_cache_remap(&eb->reloc_cache, ev->vma->obj);
if (err)
return err;
}
@@ -1880,36 +1934,113 @@ eb_find_first_request_added(struct i915_execbuffer *eb)
return NULL;
}
-static int eb_move_to_gpu(struct i915_execbuffer *eb)
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
+/* Stage with GFP_KERNEL allocations before we enter the signaling critical path */
+static void eb_capture_stage(struct i915_execbuffer *eb)
{
const unsigned int count = eb->buffer_count;
- unsigned int i = count;
- int err = 0, j;
+ unsigned int i = count, j;
+ struct i915_vma_snapshot *vsnap;
while (i--) {
struct eb_vma *ev = &eb->vma[i];
struct i915_vma *vma = ev->vma;
unsigned int flags = ev->flags;
- struct drm_i915_gem_object *obj = vma->obj;
- assert_vma_held(vma);
+ if (!(flags & EXEC_OBJECT_CAPTURE))
+ continue;
- if (flags & EXEC_OBJECT_CAPTURE) {
+ vsnap = i915_vma_snapshot_alloc(GFP_KERNEL);
+ if (!vsnap)
+ continue;
+
+ i915_vma_snapshot_init(vsnap, vma, "user");
+ for_each_batch_create_order(eb, j) {
struct i915_capture_list *capture;
- for_each_batch_create_order(eb, j) {
- if (!eb->requests[j])
- break;
+ capture = kmalloc(sizeof(*capture), GFP_KERNEL);
+ if (!capture)
+ continue;
- capture = kmalloc(sizeof(*capture), GFP_KERNEL);
- if (capture) {
- capture->next =
- eb->requests[j]->capture_list;
- capture->vma = vma;
- eb->requests[j]->capture_list = capture;
- }
- }
+ capture->next = eb->capture_lists[j];
+ capture->vma_snapshot = i915_vma_snapshot_get(vsnap);
+ eb->capture_lists[j] = capture;
+ }
+ i915_vma_snapshot_put(vsnap);
+ }
+}
+
+/* Commit once we're in the critical path */
+static void eb_capture_commit(struct i915_execbuffer *eb)
+{
+ unsigned int j;
+
+ for_each_batch_create_order(eb, j) {
+ struct i915_request *rq = eb->requests[j];
+
+ if (!rq)
+ break;
+
+ rq->capture_list = eb->capture_lists[j];
+ eb->capture_lists[j] = NULL;
+ }
+}
+
+/*
+ * Release anything that didn't get committed due to errors.
+ * The capture_list will otherwise be freed at request retire.
+ */
+static void eb_capture_release(struct i915_execbuffer *eb)
+{
+ unsigned int j;
+
+ for_each_batch_create_order(eb, j) {
+ if (eb->capture_lists[j]) {
+ i915_request_free_capture_list(eb->capture_lists[j]);
+ eb->capture_lists[j] = NULL;
}
+ }
+}
+
+static void eb_capture_list_clear(struct i915_execbuffer *eb)
+{
+ memset(eb->capture_lists, 0, sizeof(eb->capture_lists));
+}
+
+#else
+
+static void eb_capture_stage(struct i915_execbuffer *eb)
+{
+}
+
+static void eb_capture_commit(struct i915_execbuffer *eb)
+{
+}
+
+static void eb_capture_release(struct i915_execbuffer *eb)
+{
+}
+
+static void eb_capture_list_clear(struct i915_execbuffer *eb)
+{
+}
+
+#endif
+
+static int eb_move_to_gpu(struct i915_execbuffer *eb)
+{
+ const unsigned int count = eb->buffer_count;
+ unsigned int i = count;
+ int err = 0, j;
+
+ while (i--) {
+ struct eb_vma *ev = &eb->vma[i];
+ struct i915_vma *vma = ev->vma;
+ unsigned int flags = ev->flags;
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ assert_vma_held(vma);
/*
* If the GPU is not _reading_ through the CPU cache, we need
@@ -1990,6 +2121,8 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
/* Unconditionally flush any chipset caches (for streaming writes). */
intel_gt_chipset_flush(eb->gt);
+ eb_capture_commit(eb);
+
return 0;
err_skip:
@@ -2164,7 +2297,7 @@ static int eb_parse(struct i915_execbuffer *eb)
goto err_trampoline;
}
- err = dma_resv_reserve_shared(shadow->resv, 1);
+ err = dma_resv_reserve_shared(shadow->obj->base.resv, 1);
if (err)
goto err_trampoline;
@@ -2276,9 +2409,9 @@ static int eb_submit(struct i915_execbuffer *eb)
return err;
}
-static int num_vcs_engines(const struct drm_i915_private *i915)
+static int num_vcs_engines(struct drm_i915_private *i915)
{
- return hweight_long(VDBOX_MASK(&i915->gt));
+ return hweight_long(VDBOX_MASK(to_gt(i915)));
}
/*
@@ -3017,7 +3150,7 @@ eb_composite_fence_create(struct i915_execbuffer *eb, int out_fence_fd)
fence_array = dma_fence_array_create(eb->num_batches,
fences,
eb->context->parallel.fence_context,
- eb->context->parallel.seqno,
+ eb->context->parallel.seqno++,
false);
if (!fence_array) {
kfree(fences);
@@ -3114,7 +3247,7 @@ eb_requests_create(struct i915_execbuffer *eb, struct dma_fence *in_fence,
/* Allocate a request for this batch buffer nice and early. */
eb->requests[i] = i915_request_create(eb_find_context(eb, i));
if (IS_ERR(eb->requests[i])) {
- out_fence = ERR_PTR(PTR_ERR(eb->requests[i]));
+ out_fence = ERR_CAST(eb->requests[i]);
eb->requests[i] = NULL;
return out_fence;
}
@@ -3132,13 +3265,14 @@ eb_requests_create(struct i915_execbuffer *eb, struct dma_fence *in_fence,
}
/*
- * Whilst this request exists, batch_obj will be on the
- * active_list, and so will hold the active reference. Only when
- * this request is retired will the batch_obj be moved onto
- * the inactive_list and lose its active reference. Hence we do
- * not need to explicitly hold another reference here.
+ * Not really on stack, but we don't want to call
+ * kfree on the batch_snapshot when we put it, so use the
+ * _onstack interface.
*/
- eb->requests[i]->batch = eb->batches[i]->vma;
+ if (eb->batches[i]->vma)
+ i915_vma_snapshot_init_onstack(&eb->requests[i]->batch_snapshot,
+ eb->batches[i]->vma,
+ "batch");
if (eb->batch_pool) {
GEM_BUG_ON(intel_context_is_parallel(eb->context));
intel_gt_buffer_pool_mark_active(eb->batch_pool,
@@ -3187,6 +3321,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.fences = NULL;
eb.num_fences = 0;
+ eb_capture_list_clear(&eb);
+
memset(eb.requests, 0, sizeof(struct i915_request *) *
ARRAY_SIZE(eb.requests));
eb.composite_fence = NULL;
@@ -3273,10 +3409,12 @@ i915_gem_do_execbuffer(struct drm_device *dev,
}
ww_acquire_done(&eb.ww.ctx);
+ eb_capture_stage(&eb);
out_fence = eb_requests_create(&eb, in_fence, out_fence_fd);
if (IS_ERR(out_fence)) {
err = PTR_ERR(out_fence);
+ out_fence = NULL;
if (eb.requests[0])
goto err_request;
else
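
For orientation, a condensed sketch (not code from the patch) of how the new
capture-list phases are ordered in the execbuf path: staging uses GFP_KERNEL
and may sleep, committing is a plain pointer hand-over inside the signaling
critical path, and releasing covers error paths where nothing was committed.

static int capture_flow_sketch(struct i915_execbuffer *eb)
{
        eb_capture_list_clear(eb);      /* empty per-batch staging slots */

        /* ... validate and lock vmas, ww_acquire_done() ... */

        eb_capture_stage(eb);           /* GFP_KERNEL allocations, may sleep */

        /* ... requests created; eb_move_to_gpu() calls eb_capture_commit() ... */

        /* on error before commit, eb_release_vmas() ends in eb_capture_release() */
        return 0;
}
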
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index a57a6b7013c2..c5150a1ee3d2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -145,24 +145,10 @@ static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
.put_pages = i915_gem_object_put_pages_internal,
};
-/**
- * i915_gem_object_create_internal: create an object with volatile pages
- * @i915: the i915 device
- * @size: the size in bytes of backing storage to allocate for the object
- *
- * Creates a new object that wraps some internal memory for private use.
- * This object is not backed by swappable storage, and as such its contents
- * are volatile and only valid whilst pinned. If the object is reaped by the
- * shrinker, its pages and data will be discarded. Equally, it is not a full
- * GEM object and so not valid for access from userspace. This makes it useful
- * for hardware interfaces like ringbuffers (which are pinned from the time
- * the request is written to the time the hardware stops accessing it), but
- * not for contexts (which need to be preserved when not active for later
- * reuse). Note that it is not cleared upon allocation.
- */
struct drm_i915_gem_object *
-i915_gem_object_create_internal(struct drm_i915_private *i915,
- phys_addr_t size)
+__i915_gem_object_create_internal(struct drm_i915_private *i915,
+ const struct drm_i915_gem_object_ops *ops,
+ phys_addr_t size)
{
static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
@@ -179,7 +165,7 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class, 0);
+ i915_gem_object_init(obj, ops, &lock_class, 0);
obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
/*
@@ -199,3 +185,25 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
return obj;
}
+
+/**
+ * i915_gem_object_create_internal: create an object with volatile pages
+ * @i915: the i915 device
+ * @size: the size in bytes of backing storage to allocate for the object
+ *
+ * Creates a new object that wraps some internal memory for private use.
+ * This object is not backed by swappable storage, and as such its contents
+ * are volatile and only valid whilst pinned. If the object is reaped by the
+ * shrinker, its pages and data will be discarded. Equally, it is not a full
+ * GEM object and so not valid for access from userspace. This makes it useful
+ * for hardware interfaces like ringbuffers (which are pinned from the time
+ * the request is written to the time the hardware stops accessing it), but
+ * not for contexts (which need to be preserved when not active for later
+ * reuse). Note that it is not cleared upon allocation.
+ */
+struct drm_i915_gem_object *
+i915_gem_object_create_internal(struct drm_i915_private *i915,
+ phys_addr_t size)
+{
+ return __i915_gem_object_create_internal(i915, &i915_gem_object_internal_ops, size);
+}
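
A hedged sketch of what the __i915_gem_object_create_internal() split enables:
a backend can reuse the internal-object plumbing with its own page provider.
The example_* ops below are hypothetical placeholders, not functions from the
patch.

static int example_get_pages(struct drm_i915_gem_object *obj);        /* hypothetical */
static void example_put_pages(struct drm_i915_gem_object *obj,
                              struct sg_table *pages);                /* hypothetical */

static const struct drm_i915_gem_object_ops example_internal_ops = {
        .get_pages = example_get_pages,
        .put_pages = example_put_pages,
};

static struct drm_i915_gem_object *
example_create(struct drm_i915_private *i915, phys_addr_t size)
{
        return __i915_gem_object_create_internal(i915, &example_internal_ops,
                                                 size);
}
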
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 39bb15eafc07..aaf970c37aa2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -73,7 +73,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
if (args->flags & ~(I915_MMAP_WC))
return -EINVAL;
- if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
+ if (args->flags & I915_MMAP_WC && !pat_enabled())
return -ENODEV;
obj = i915_gem_object_lookup(file, args->handle);
@@ -646,7 +646,7 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
goto insert;
/* Attempt to reap some mmap space from dead objects */
- err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT,
+ err = intel_gt_retire_requests_timeout(to_gt(i915), MAX_SCHEDULE_TIMEOUT,
NULL);
if (err)
goto err;
@@ -737,7 +737,7 @@ i915_gem_dumb_mmap_offset(struct drm_file *file,
if (HAS_LMEM(to_i915(dev)))
mmap_type = I915_MMAP_TYPE_FIXED;
- else if (boot_cpu_has(X86_FEATURE_PAT))
+ else if (pat_enabled())
mmap_type = I915_MMAP_TYPE_WC;
else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt))
return -ENODEV;
@@ -793,7 +793,7 @@ i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
break;
case I915_MMAP_OFFSET_WC:
- if (!boot_cpu_has(X86_FEATURE_PAT))
+ if (!pat_enabled())
return -ENODEV;
type = I915_MMAP_TYPE_WC;
break;
@@ -803,7 +803,7 @@ i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
break;
case I915_MMAP_OFFSET_UC:
- if (!boot_cpu_has(X86_FEATURE_PAT))
+ if (!pat_enabled())
return -ENODEV;
type = I915_MMAP_TYPE_UC;
break;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 1e426a42a36c..d87b508b59b1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -31,6 +31,7 @@
#include "i915_gem_context.h"
#include "i915_gem_mman.h"
#include "i915_gem_object.h"
+#include "i915_gem_ttm.h"
#include "i915_memcpy.h"
#include "i915_trace.h"
@@ -91,7 +92,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
}
/**
- * i915_gem_object_fini - Clean up a GEM object initialization
+ * __i915_gem_object_fini - Clean up a GEM object initialization
* @obj: The gem object to cleanup
*
* This function cleans up gem object fields that are set up by
@@ -107,25 +108,29 @@ void __i915_gem_object_fini(struct drm_i915_gem_object *obj)
}
/**
- * Mark up the object's coherency levels for a given cache_level
+ * i915_gem_object_set_cache_coherency - Mark up the object's coherency levels
+ * for a given cache_level
* @obj: #drm_i915_gem_object
* @cache_level: cache level
*/
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
unsigned int cache_level)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
obj->cache_level = cache_level;
if (cache_level != I915_CACHE_NONE)
obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
I915_BO_CACHE_COHERENT_FOR_WRITE);
- else if (HAS_LLC(to_i915(obj->base.dev)))
+ else if (HAS_LLC(i915))
obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
else
obj->cache_coherent = 0;
obj->cache_dirty =
- !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
+ !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) &&
+ !IS_DGFX(i915);
}
bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj)
@@ -257,6 +262,8 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
*/
void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
{
+ assert_object_held(obj);
+
if (!list_empty(&obj->vma.list)) {
struct i915_vma *vma;
@@ -323,7 +330,16 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
obj->ops->delayed_free(obj);
continue;
}
+
+ if (!i915_gem_object_trylock(obj, NULL)) {
+ /* busy, toss it back to the pile */
+ if (llist_add(&obj->freed, &i915->mm.free_list))
+ queue_delayed_work(i915->wq, &i915->mm.free_work, msecs_to_jiffies(10));
+ continue;
+ }
+
__i915_gem_object_pages_fini(obj);
+ i915_gem_object_unlock(obj);
__i915_gem_free_object(obj);
/* But keep the pointer alive for RCU-protected lookups */
@@ -343,7 +359,7 @@ void i915_gem_flush_free_objects(struct drm_i915_private *i915)
static void __i915_gem_free_work(struct work_struct *work)
{
struct drm_i915_private *i915 =
- container_of(work, struct drm_i915_private, mm.free_work);
+ container_of(work, struct drm_i915_private, mm.free_work.work);
i915_gem_flush_free_objects(i915);
}
@@ -364,15 +380,6 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
atomic_inc(&i915->mm.free_count);
/*
- * This serializes freeing with the shrinker. Since the free
- * is delayed, first by RCU then by the workqueue, we want the
- * shrinker to be able to free pages of unreferenced objects,
- * or else we may oom whilst there are plenty of deferred
- * freed objects.
- */
- i915_gem_object_make_unshrinkable(obj);
-
- /*
* Since we require blocking on struct_mutex to unbind the freed
* object from the GPU before releasing resources back to the
* system, we can not do that directly from the RCU callback (which may
@@ -384,7 +391,7 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
*/
if (llist_add(&obj->freed, &i915->mm.free_list))
- queue_work(i915->wq, &i915->mm.free_work);
+ queue_delayed_work(i915->wq, &i915->mm.free_work, 0);
}
void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
@@ -456,7 +463,7 @@ i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset
* from can't cross a page boundary. The caller must ensure that @obj pages
* are pinned and that @obj is synced wrt. any related writes.
*
- * Returns 0 on success or -ENODEV if the type of @obj's backing store is
+ * Return: %0 on success or -ENODEV if the type of @obj's backing store is
* unsupported.
*/
int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
@@ -709,7 +716,7 @@ bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
void i915_gem_init__objects(struct drm_i915_private *i915)
{
- INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
+ INIT_DELAYED_WORK(&i915->mm.free_work, __i915_gem_free_work);
}
void i915_objects_module_exit(void)
@@ -732,6 +739,57 @@ static const struct drm_gem_object_funcs i915_gem_object_funcs = {
.export = i915_gem_prime_export,
};
+/**
+ * i915_gem_object_get_moving_fence - Get the object's moving fence if any
+ * @obj: The object whose moving fence to get.
+ *
+ * A non-signaled moving fence means that there is an async operation
+ * pending on the object that needs to be waited on before setting up
+ * any GPU or CPU PTEs to the object's pages.
+ *
+ * Return: A refcounted pointer to the object's moving fence if any,
+ * NULL otherwise.
+ */
+struct dma_fence *
+i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj)
+{
+ return dma_fence_get(i915_gem_to_ttm(obj)->moving);
+}
+
+/**
+ * i915_gem_object_wait_moving_fence - Wait for the object's moving fence if any
+ * @obj: The object whose moving fence to wait for.
+ * @intr: Whether to wait interruptible.
+ *
+ * If the moving fence signaled without an error, it is detached from the
+ * object and put.
+ *
+ * Return: 0 if successful, -ERESTARTSYS if the wait was interrupted,
+ * negative error code if the async operation represented by the
+ * moving fence failed.
+ */
+int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
+ bool intr)
+{
+ struct dma_fence *fence = i915_gem_to_ttm(obj)->moving;
+ int ret;
+
+ assert_object_held(obj);
+ if (!fence)
+ return 0;
+
+ ret = dma_fence_wait(fence, intr);
+ if (ret)
+ return ret;
+
+ if (fence->error)
+ return fence->error;
+
+ i915_gem_to_ttm(obj)->moving = NULL;
+ dma_fence_put(fence);
+ return 0;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/huge_gem_object.c"
#include "selftests/huge_pages.c"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 59201801cec5..f66d46882ea7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -93,7 +93,6 @@ void i915_gem_flush_free_objects(struct drm_i915_private *i915);
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
-void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
/**
* i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
@@ -211,9 +210,13 @@ static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object
return __i915_gem_object_lock(obj, ww, true);
}
-static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
+static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj,
+ struct i915_gem_ww_ctx *ww)
{
- return dma_resv_trylock(obj->base.resv);
+ if (!ww)
+ return dma_resv_trylock(obj->base.resv);
+ else
+ return ww_mutex_trylock(&obj->base.resv->lock, &ww->ctx);
}
static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
@@ -296,6 +299,12 @@ i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
}
static inline bool
+i915_gem_object_has_self_managed_shrink_list(const struct drm_i915_gem_object *obj)
+{
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST);
+}
+
+static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
@@ -449,7 +458,7 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
}
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
-void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
+int i915_gem_object_truncate(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
/**
@@ -512,11 +521,18 @@ i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
i915_gem_object_unpin_pages(obj);
}
+struct dma_fence *
+i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj);
+
+int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
+ bool intr);
+
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
unsigned int cache_level);
bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);
+bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj);
int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
@@ -533,25 +549,15 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
+void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
+void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
-static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
-{
- if (obj->cache_dirty)
- return false;
-
- if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
- return true;
-
- /* Currently in use by HW (display engine)? Keep flushed. */
- return i915_gem_object_is_framebuffer(obj);
-}
-
static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
- if (cpu_write_needs_clflush(obj))
+ if (i915_gem_cpu_write_needs_clflush(obj))
obj->cache_dirty = true;
}
@@ -613,6 +619,14 @@ int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj,
bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
enum intel_memory_type type);
+int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
+ size_t size, struct intel_memory_region *mr,
+ struct address_space *mapping,
+ unsigned int max_segment);
+void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
+ bool dirty, bool backup);
+void __shmem_writeback(size_t size, struct address_space *mapping);
+
#ifdef CONFIG_MMU_NOTIFIER
static inline bool
i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
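
Sketch of the expected calling convention for the ww-aware trylock (the caller
and its back-off policy are assumptions): with a ww context the trylock
participates in wound/wait ordering, without one it falls back to a plain
reservation trylock.

static bool try_access_sketch(struct drm_i915_gem_object *obj,
                              struct i915_gem_ww_ctx *ww)
{
        if (!i915_gem_object_trylock(obj, ww))
                return false;   /* busy; caller should back off or retry */

        /* ... short, non-blocking work on obj ... */

        i915_gem_object_unlock(obj);
        return true;
}
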
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index da85169006d4..f9f7e44099fe 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -34,9 +34,11 @@ struct i915_lut_handle {
struct drm_i915_gem_object_ops {
unsigned int flags;
-#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
-#define I915_GEM_OBJECT_IS_PROXY BIT(2)
-#define I915_GEM_OBJECT_NO_MMAP BIT(3)
+#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
+/* Skip the shrinker management in set_pages/unset_pages */
+#define I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST BIT(2)
+#define I915_GEM_OBJECT_IS_PROXY BIT(3)
+#define I915_GEM_OBJECT_NO_MMAP BIT(4)
/* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set
@@ -54,8 +56,11 @@ struct drm_i915_gem_object_ops {
int (*get_pages)(struct drm_i915_gem_object *obj);
void (*put_pages)(struct drm_i915_gem_object *obj,
struct sg_table *pages);
- void (*truncate)(struct drm_i915_gem_object *obj);
+ int (*truncate)(struct drm_i915_gem_object *obj);
void (*writeback)(struct drm_i915_gem_object *obj);
+ int (*shrinker_release_pages)(struct drm_i915_gem_object *obj,
+ bool no_gpu_wait,
+ bool should_writeback);
int (*pread)(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pread *arg);
@@ -486,9 +491,37 @@ struct drm_i915_gem_object {
* instead go through the pin/unpin interfaces.
*/
atomic_t pages_pin_count;
+
+ /**
+ * @shrink_pin: Prevents the pages from being made visible to
+ * the shrinker, while the shrink_pin is non-zero. Most users
+ * should pretty much never have to care about this, outside of
+ * some special use cases.
+ *
+ * By default most objects will start out as visible to the
+ * shrinker (if I915_GEM_OBJECT_IS_SHRINKABLE) as soon as the
+ * backing pages are attached to the object, like in
+ * __i915_gem_object_set_pages(). They will then be removed from the
+ * shrinker list once the pages are released.
+ *
+ * The @shrink_pin is incremented by calling
+ * i915_gem_object_make_unshrinkable(), which will also remove
+ * the object from the shrinker list, if the pin count was zero.
+ *
+ * Callers will then typically call
+ * i915_gem_object_make_shrinkable() or
+ * i915_gem_object_make_purgeable() to decrement the pin count,
+ * and make the pages visible again.
+ */
atomic_t shrink_pin;
/**
+ * @ttm_shrinkable: True when the object is using shmem pages
+ * underneath. Protected by the object lock.
+ */
+ bool ttm_shrinkable;
+
+ /**
* Priority list of potential placements for this object.
*/
struct intel_memory_region **placements;
@@ -512,6 +545,7 @@ struct drm_i915_gem_object {
*/
struct list_head region_link;
+ struct i915_refct_sgt *rsgt;
struct sg_table *pages;
void *mapping;
@@ -547,7 +581,7 @@ struct drm_i915_gem_object {
struct i915_gem_object_page_iter get_dma_page;
/**
- * Element within i915->mm.unbound_list or i915->mm.bound_list,
+ * Element within i915->mm.shrink_list or i915->mm.purge_list,
* locked by i915->mm.obj_lock.
*/
struct list_head link;
@@ -565,7 +599,7 @@ struct drm_i915_gem_object {
} mm;
struct {
- struct sg_table *cached_io_st;
+ struct i915_refct_sgt *cached_io_rsgt;
struct i915_gem_object_page_iter get_io_page;
struct drm_i915_gem_object *backup;
bool created:1;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 8eb1c3a6fc9c..89b70f5cde7a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -26,6 +26,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
/* Make the pages coherent with the GPU (flushing any swapin). */
if (obj->cache_dirty) {
+ WARN_ON_ONCE(IS_DGFX(i915));
obj->write_domain = 0;
if (i915_gem_object_has_struct_page(obj))
drm_clflush_sg(pages);
@@ -68,7 +69,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
shrinkable = false;
}
- if (shrinkable) {
+ if (shrinkable && !i915_gem_object_has_self_managed_shrink_list(obj)) {
struct list_head *list;
unsigned long flags;
@@ -158,11 +159,13 @@ retry:
}
/* Immediately discard the backing storage */
-void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
+int i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
drm_gem_free_mmap_offset(&obj->base);
if (obj->ops->truncate)
- obj->ops->truncate(obj);
+ return obj->ops->truncate(obj);
+
+ return 0;
}
/* Try to discard unwanted pages */
@@ -208,7 +211,8 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
if (i915_gem_object_is_volatile(obj))
obj->mm.madv = I915_MADV_WILLNEED;
- i915_gem_object_make_unshrinkable(obj);
+ if (!i915_gem_object_has_self_managed_shrink_list(obj))
+ i915_gem_object_make_unshrinkable(obj);
if (obj->mm.mapping) {
unmap_object(obj, page_mask_bits(obj->mm.mapping));
@@ -414,8 +418,13 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
}
if (!ptr) {
- if (GEM_WARN_ON(type == I915_MAP_WC &&
- !static_cpu_has(X86_FEATURE_PAT)))
+ err = i915_gem_object_wait_moving_fence(obj, true);
+ if (err) {
+ ptr = ERR_PTR(err);
+ goto err_unpin;
+ }
+
+ if (GEM_WARN_ON(type == I915_MAP_WC && !pat_enabled()))
ptr = ERR_PTR(-ENODEV);
else if (i915_gem_object_has_struct_page(obj))
ptr = i915_gem_object_map_page(obj, type);
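
With ->truncate() now returning an int, callers can propagate backend failures
instead of assuming success. A small sketch (the caller is hypothetical):

static int discard_backing_sketch(struct drm_i915_gem_object *obj)
{
        int err;

        /* May now fail, e.g. if a backend cannot drop its pages right away. */
        err = i915_gem_object_truncate(obj);
        if (err)
                return err;

        return 0;
}
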
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 7986612f48fa..ca6faffcc496 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -19,6 +19,7 @@
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
struct address_space *mapping = obj->base.filp->f_mapping;
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct scatterlist *sg;
struct sg_table *st;
dma_addr_t dma;
@@ -73,7 +74,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
dst += PAGE_SIZE;
}
- intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
+ intel_gt_chipset_flush(to_gt(i915));
/* We're no longer struct page backed */
obj->mem_flags &= ~I915_BO_FLAG_STRUCT_PAGE;
@@ -140,6 +141,7 @@ int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
{
void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
char __user *user_data = u64_to_user_ptr(args->data_ptr);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
int err;
err = i915_gem_object_wait(obj,
@@ -159,7 +161,7 @@ int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
return -EFAULT;
drm_clflush_virt_range(vaddr, args->size);
- intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
+ intel_gt_chipset_flush(to_gt(i915));
i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 726b40e1fbb0..ac56124760e1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -35,7 +35,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
* state. Fortunately, the kernel_context is disposable and we do
* not rely on its state.
*/
- intel_gt_suspend_prepare(&i915->gt);
+ intel_gt_suspend_prepare(to_gt(i915));
i915_gem_drain_freed_objects(i915);
}
@@ -153,7 +153,7 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
* machine in an unusable condition.
*/
- intel_gt_suspend_late(&i915->gt);
+ intel_gt_suspend_late(to_gt(i915));
spin_lock_irqsave(&i915->mm.obj_lock, flags);
for (phase = phases; *phase; phase++) {
@@ -223,7 +223,7 @@ void i915_gem_resume(struct drm_i915_private *i915)
* guarantee that the context image is complete. So let's just reset
* it and start again.
*/
- intel_gt_resume(&i915->gt);
+ intel_gt_resume(to_gt(i915));
ret = lmem_restore(i915, I915_TTM_BACKUP_ALLOW_GPU);
GEM_WARN_ON(ret);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index a016ccec36f3..a4350227e9ae 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -11,7 +11,7 @@
void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
struct intel_memory_region *mem)
{
- obj->mm.region = intel_memory_region_get(mem);
+ obj->mm.region = mem;
mutex_lock(&mem->objects.lock);
list_add(&obj->mm.region_link, &mem->objects.list);
@@ -25,8 +25,6 @@ void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
mutex_lock(&mem->objects.lock);
list_del(&obj->mm.region_link);
mutex_unlock(&mem->objects.lock);
-
- intel_memory_region_put(mem);
}
struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index d77da59fae04..cc9fe258fba7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -25,62 +25,67 @@ static void check_release_pagevec(struct pagevec *pvec)
cond_resched();
}
-static int shmem_get_pages(struct drm_i915_gem_object *obj)
+void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
+ bool dirty, bool backup)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct intel_memory_region *mem = obj->mm.region;
- const unsigned long page_count = obj->base.size / PAGE_SIZE;
+ struct sgt_iter sgt_iter;
+ struct pagevec pvec;
+ struct page *page;
+
+ mapping_clear_unevictable(mapping);
+
+ pagevec_init(&pvec);
+ for_each_sgt_page(page, sgt_iter, st) {
+ if (dirty)
+ set_page_dirty(page);
+
+ if (backup)
+ mark_page_accessed(page);
+
+ if (!pagevec_add(&pvec, page))
+ check_release_pagevec(&pvec);
+ }
+ if (pagevec_count(&pvec))
+ check_release_pagevec(&pvec);
+
+ sg_free_table(st);
+}
+
+int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
+ size_t size, struct intel_memory_region *mr,
+ struct address_space *mapping,
+ unsigned int max_segment)
+{
+ const unsigned long page_count = size / PAGE_SIZE;
unsigned long i;
- struct address_space *mapping;
- struct sg_table *st;
struct scatterlist *sg;
- struct sgt_iter sgt_iter;
struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */
- unsigned int max_segment = i915_sg_segment_size();
- unsigned int sg_page_sizes;
gfp_t noreclaim;
int ret;
/*
- * Assert that the object is not currently in any GPU domain. As it
- * wasn't in the GTT, there shouldn't be any way it could have been in
- * a GPU cache
- */
- GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
- GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
-
- /*
* If there's no chance of allocating enough pages for the whole
* object, bail early.
*/
- if (obj->base.size > resource_size(&mem->region))
+ if (size > resource_size(&mr->region))
return -ENOMEM;
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
+ if (sg_alloc_table(st, page_count, GFP_KERNEL))
return -ENOMEM;
-rebuild_st:
- if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
- kfree(st);
- return -ENOMEM;
- }
-
/*
* Get the list of pages out of our struct file. They'll be pinned
* at this point until we release them.
*
* Fail silently without starting the shrinker
*/
- mapping = obj->base.filp->f_mapping;
mapping_set_unevictable(mapping);
noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
sg = st->sgl;
st->nents = 0;
- sg_page_sizes = 0;
for (i = 0; i < page_count; i++) {
const unsigned int shrink[] = {
I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
@@ -135,10 +140,9 @@ rebuild_st:
if (!i ||
sg->length >= max_segment ||
page_to_pfn(page) != last_pfn + 1) {
- if (i) {
- sg_page_sizes |= sg->length;
+ if (i)
sg = sg_next(sg);
- }
+
st->nents++;
sg_set_page(sg, page, PAGE_SIZE, 0);
} else {
@@ -149,14 +153,67 @@ rebuild_st:
/* Check that the i965g/gm workaround works. */
GEM_BUG_ON(gfp & __GFP_DMA32 && last_pfn >= 0x00100000UL);
}
- if (sg) { /* loop terminated early; short sg table */
- sg_page_sizes |= sg->length;
+ if (sg) /* loop terminated early; short sg table */
sg_mark_end(sg);
- }
/* Trim unused sg entries to avoid wasting memory. */
i915_sg_trim(st);
+ return 0;
+err_sg:
+ sg_mark_end(sg);
+ if (sg != st->sgl) {
+ shmem_sg_free_table(st, mapping, false, false);
+ } else {
+ mapping_clear_unevictable(mapping);
+ sg_free_table(st);
+ }
+
+ /*
+ * shmemfs first checks if there is enough memory to allocate the page
+ * and reports ENOSPC should there be insufficient, along with the usual
+ * ENOMEM for a genuine allocation failure.
+ *
+ * We use ENOSPC in our driver to mean that we have run out of aperture
+ * space and so want to translate the error from shmemfs back to our
+ * usual understanding of ENOMEM.
+ */
+ if (ret == -ENOSPC)
+ ret = -ENOMEM;
+
+ return ret;
+}
+
+static int shmem_get_pages(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_memory_region *mem = obj->mm.region;
+ struct address_space *mapping = obj->base.filp->f_mapping;
+ const unsigned long page_count = obj->base.size / PAGE_SIZE;
+ unsigned int max_segment = i915_sg_segment_size();
+ struct sg_table *st;
+ struct sgt_iter sgt_iter;
+ struct page *page;
+ int ret;
+
+ /*
+ * Assert that the object is not currently in any GPU domain. As it
+ * wasn't in the GTT, there shouldn't be any way it could have been in
+ * a GPU cache
+ */
+ GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
+ GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
+
+rebuild_st:
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ ret = shmem_sg_alloc_table(i915, st, obj->base.size, mem, mapping,
+ max_segment);
+ if (ret)
+ goto err_st;
+
ret = i915_gem_gtt_prepare_pages(obj, st);
if (ret) {
/*
@@ -168,6 +225,7 @@ rebuild_st:
for_each_sgt_page(page, sgt_iter, st)
put_page(page);
sg_free_table(st);
+ kfree(st);
max_segment = PAGE_SIZE;
goto rebuild_st;
@@ -185,28 +243,12 @@ rebuild_st:
if (i915_gem_object_can_bypass_llc(obj))
obj->cache_dirty = true;
- __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+ __i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));
return 0;
-err_sg:
- sg_mark_end(sg);
err_pages:
- mapping_clear_unevictable(mapping);
- if (sg != st->sgl) {
- struct pagevec pvec;
-
- pagevec_init(&pvec);
- for_each_sgt_page(page, sgt_iter, st) {
- if (!pagevec_add(&pvec, page))
- check_release_pagevec(&pvec);
- }
- if (pagevec_count(&pvec))
- check_release_pagevec(&pvec);
- }
- sg_free_table(st);
- kfree(st);
-
+ shmem_sg_free_table(st, mapping, false, false);
/*
* shmemfs first checks if there is enough memory to allocate the page
* and reports ENOSPC should there be insufficient, along with the usual
@@ -216,13 +258,16 @@ err_pages:
* space and so want to translate the error from shmemfs back to our
* usual understanding of ENOMEM.
*/
+err_st:
if (ret == -ENOSPC)
ret = -ENOMEM;
+ kfree(st);
+
return ret;
}
-static void
+static int
shmem_truncate(struct drm_i915_gem_object *obj)
{
/*
@@ -234,12 +279,12 @@ shmem_truncate(struct drm_i915_gem_object *obj)
shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
obj->mm.madv = __I915_MADV_PURGED;
obj->mm.pages = ERR_PTR(-EFAULT);
+
+ return 0;
}
-static void
-shmem_writeback(struct drm_i915_gem_object *obj)
+void __shmem_writeback(size_t size, struct address_space *mapping)
{
- struct address_space *mapping;
struct writeback_control wbc = {
.sync_mode = WB_SYNC_NONE,
.nr_to_write = SWAP_CLUSTER_MAX,
@@ -255,10 +300,9 @@ shmem_writeback(struct drm_i915_gem_object *obj)
* instead of invoking writeback so they are aged and paged out
* as normal.
*/
- mapping = obj->base.filp->f_mapping;
/* Begin writeback on each dirty page */
- for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
+ for (i = 0; i < size >> PAGE_SHIFT; i++) {
struct page *page;
page = find_lock_page(mapping, i);
@@ -281,6 +325,12 @@ put:
}
}
+static void
+shmem_writeback(struct drm_i915_gem_object *obj)
+{
+ __shmem_writeback(obj->base.size, obj->base.filp->f_mapping);
+}
+
void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
struct sg_table *pages,
@@ -313,11 +363,6 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
- struct sgt_iter sgt_iter;
- struct pagevec pvec;
- struct page *page;
-
- GEM_WARN_ON(IS_DGFX(to_i915(obj->base.dev)));
__i915_gem_object_release_shmem(obj, pages, true);
i915_gem_gtt_finish_pages(obj, pages);
@@ -325,25 +370,10 @@ void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_save_bit_17_swizzle(obj, pages);
- mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);
-
- pagevec_init(&pvec);
- for_each_sgt_page(page, sgt_iter, pages) {
- if (obj->mm.dirty)
- set_page_dirty(page);
-
- if (obj->mm.madv == I915_MADV_WILLNEED)
- mark_page_accessed(page);
-
- if (!pagevec_add(&pvec, page))
- check_release_pagevec(&pvec);
- }
- if (pagevec_count(&pvec))
- check_release_pagevec(&pvec);
- obj->mm.dirty = false;
-
- sg_free_table(pages);
+ shmem_sg_free_table(pages, file_inode(obj->base.filp)->i_mapping,
+ obj->mm.dirty, obj->mm.madv == I915_MADV_WILLNEED);
kfree(pages);
+ obj->mm.dirty = false;
}
static void
@@ -634,9 +664,10 @@ static int init_shmem(struct intel_memory_region *mem)
return 0; /* Don't error, we can simply fallback to the kernel mnt */
}
-static void release_shmem(struct intel_memory_region *mem)
+static int release_shmem(struct intel_memory_region *mem)
{
i915_gemfs_fini(mem->i915);
+ return 0;
}
static const struct intel_memory_region_ops shmem_region_ops = {
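
A minimal sketch (assumed caller, not part of the patch) of the newly exported
shmem sg-table helpers: allocate backing pages into a caller-owned table, use
them, then free them again.

static int shmem_backing_sketch(struct drm_i915_private *i915,
                                struct intel_memory_region *mr,
                                struct address_space *mapping,
                                size_t size)
{
        struct sg_table st;
        int err;

        err = shmem_sg_alloc_table(i915, &st, size, mr, mapping,
                                   i915_sg_segment_size());
        if (err)
                return err;

        /* ... dma-map and use the pages ... */

        shmem_sg_free_table(&st, mapping, false, false);
        return 0;
}
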
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 5ab136ffdeb2..cc927e49d21f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -15,7 +15,6 @@
#include "gt/intel_gt_requests.h"
-#include "dma_resv_utils.h"
#include "i915_trace.h"
static bool swap_available(void)
@@ -37,8 +36,8 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}
-static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
- unsigned long shrink, bool trylock_vm)
+static int drop_pages(struct drm_i915_gem_object *obj,
+ unsigned long shrink, bool trylock_vm)
{
unsigned long flags;
@@ -56,19 +55,25 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
return false;
}
-static void try_to_writeback(struct drm_i915_gem_object *obj,
- unsigned int flags)
+static int try_to_writeback(struct drm_i915_gem_object *obj, unsigned int flags)
{
+ if (obj->ops->shrinker_release_pages)
+ return obj->ops->shrinker_release_pages(obj,
+ !(flags & I915_SHRINK_ACTIVE),
+ flags & I915_SHRINK_WRITEBACK);
+
switch (obj->mm.madv) {
case I915_MADV_DONTNEED:
i915_gem_object_truncate(obj);
- return;
+ return 0;
case __I915_MADV_PURGED:
- return;
+ return 0;
}
if (flags & I915_SHRINK_WRITEBACK)
i915_gem_object_writeback(obj);
+
+ return 0;
}
/**
@@ -148,7 +153,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
*/
if (shrink & I915_SHRINK_ACTIVE)
/* Retire requests to unpin all idle contexts */
- intel_gt_retire_requests(&i915->gt);
+ intel_gt_retire_requests(to_gt(i915));
/*
* As we may completely rewrite the (un)bound list whilst unbinding
@@ -209,27 +214,23 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
- err = 0;
- if (unsafe_drop_pages(obj, shrink, trylock_vm)) {
- /* May arrive from get_pages on another bo */
- if (!ww) {
- if (!i915_gem_object_trylock(obj))
- goto skip;
- } else {
- err = i915_gem_object_lock(obj, ww);
- if (err)
- goto skip;
- }
-
- if (!__i915_gem_object_put_pages(obj)) {
- try_to_writeback(obj, shrink);
- count += obj->base.size >> PAGE_SHIFT;
- }
- if (!ww)
- i915_gem_object_unlock(obj);
+ /* May arrive from get_pages on another bo */
+ if (!ww) {
+ if (!i915_gem_object_trylock(obj, NULL))
+ goto skip;
+ } else {
+ err = i915_gem_object_lock(obj, ww);
+ if (err)
+ goto skip;
}
- dma_resv_prune(obj->base.resv);
+ if (drop_pages(obj, shrink, trylock_vm) &&
+ !__i915_gem_object_put_pages(obj) &&
+ !try_to_writeback(obj, shrink))
+ count += obj->base.size >> PAGE_SHIFT;
+
+ if (!ww)
+ i915_gem_object_unlock(obj);
scanned += obj->base.size >> PAGE_SHIFT;
skip:
@@ -404,12 +405,18 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
list_for_each_entry_safe(vma, next,
&i915->ggtt.vm.bound_list, vm_link) {
unsigned long count = vma->node.size >> PAGE_SHIFT;
+ struct drm_i915_gem_object *obj = vma->obj;
if (!vma->iomap || i915_vma_is_active(vma))
continue;
+ if (!i915_gem_object_trylock(obj, NULL))
+ continue;
+
if (__i915_vma_unbind(vma) == 0)
freed_pages += count;
+
+ i915_gem_object_unlock(obj);
}
mutex_unlock(&i915->ggtt.vm.mutex);
@@ -458,6 +465,16 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
#define obj_to_i915(obj__) to_i915((obj__)->base.dev)
+/**
+ * i915_gem_object_make_unshrinkable - Hide the object from the shrinker. By
+ * default all object types that support shrinking (see IS_SHRINKABLE) will also
+ * make the object visible to the shrinker after allocating the system memory
+ * pages.
+ * @obj: The GEM object.
+ *
+ * This is typically used for special kernel internal objects that can't be
+ * easily processed by the shrinker, like if they are perma-pinned.
+ */
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = obj_to_i915(obj);
@@ -482,13 +499,12 @@ void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
-static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
- struct list_head *head)
+static void ___i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
+ struct list_head *head)
{
struct drm_i915_private *i915 = obj_to_i915(obj);
unsigned long flags;
- GEM_BUG_ON(!i915_gem_object_has_pages(obj));
if (!i915_gem_object_is_shrinkable(obj))
return;
@@ -508,14 +524,67 @@ static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
+/**
+ * __i915_gem_object_make_shrinkable - Move the object to the tail of the
+ * shrinkable list. Objects on this list might be swapped out. Used with
+ * WILLNEED objects.
+ * @obj: The GEM object.
+ *
+ * DO NOT USE. This is intended to be called on very special objects that don't
+ * yet have mm.pages, but are guaranteed to have potentially reclaimable pages
+ * underneath.
+ */
+void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
+{
+ ___i915_gem_object_make_shrinkable(obj,
+ &obj_to_i915(obj)->mm.shrink_list);
+}
+
+/**
+ * __i915_gem_object_make_purgeable - Move the object to the tail of the
+ * purgeable list. Objects on this list might be swapped out. Used with
+ * DONTNEED objects.
+ * @obj: The GEM object.
+ *
+ * DO NOT USE. This is intended to be called on very special objects that don't
+ * yet have mm.pages, but are guaranteed to have potentially reclaimable pages
+ * underneath.
+ */
+void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
+{
+ ___i915_gem_object_make_shrinkable(obj,
+ &obj_to_i915(obj)->mm.purge_list);
+}
+
+/**
+ * i915_gem_object_make_shrinkable - Move the object to the tail of the
+ * shrinkable list. Objects on this list might be swapped out. Used with
+ * WILLNEED objects.
+ * @obj: The GEM object.
+ *
+ * MUST only be called on objects which have backing pages.
+ *
+ * MUST be balanced with previous call to i915_gem_object_make_unshrinkable().
+ */
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
{
- __i915_gem_object_make_shrinkable(obj,
- &obj_to_i915(obj)->mm.shrink_list);
+ GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+ __i915_gem_object_make_shrinkable(obj);
}
+/**
+ * i915_gem_object_make_purgeable - Move the object to the tail of the purgeable
+ * list. Used with DONTNEED objects. Unlike with shrinkable objects, the
+ * shrinker will attempt to discard the backing pages, instead of trying to swap
+ * them out.
+ * @obj: The GEM object.
+ *
+ * MUST only be called on objects which have backing pages.
+ *
+ * MUST be balanced with previous call to i915_gem_object_make_unshrinkable().
+ */
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
{
- __i915_gem_object_make_shrinkable(obj,
- &obj_to_i915(obj)->mm.purge_list);
+ GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+ __i915_gem_object_make_purgeable(obj);
}
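
The balanced pattern described by the kernel-doc above, as a sketch (the caller
is hypothetical): hide the object from the shrinker while it is in internal
use, then return it either as swappable (WILLNEED) or discardable (DONTNEED).

static void internal_pin_sketch(struct drm_i915_gem_object *obj,
                                bool still_needed)
{
        i915_gem_object_make_unshrinkable(obj);

        /* ... use the pages without the shrinker reaping them ... */

        if (still_needed)
                i915_gem_object_make_shrinkable(obj);
        else
                i915_gem_object_make_purgeable(obj);
}
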
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index ddd37ccb1362..7df50fd6cc7b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -399,7 +399,7 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)
return 0;
}
- if (intel_vtd_active() && GRAPHICS_VER(i915) < 8) {
+ if (intel_vtd_active(i915) && GRAPHICS_VER(i915) < 8) {
drm_notice(&i915->drm,
"%s, disabling use of stolen memory\n",
"DMAR active");
@@ -488,6 +488,9 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)
return 0;
}
+ /* Exclude the reserved region from driver use */
+ mem->region.end = reserved_base - 1;
+
/* It is possible for the reserved area to end before the end of stolen
* memory, so just consider the start. */
reserved_total = stolen_top - reserved_base;
@@ -653,7 +656,7 @@ static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
i915_gem_object_set_cache_coherency(obj, cache_level);
- if (WARN_ON(!i915_gem_object_trylock(obj)))
+ if (WARN_ON(!i915_gem_object_trylock(obj, NULL)))
return -EBUSY;
i915_gem_object_init_memory_region(obj, mem);
@@ -720,9 +723,10 @@ static int init_stolen_smem(struct intel_memory_region *mem)
return i915_gem_init_stolen(mem);
}
-static void release_stolen_smem(struct intel_memory_region *mem)
+static int release_stolen_smem(struct intel_memory_region *mem)
{
i915_gem_cleanup_stolen(mem->i915);
+ return 0;
}
static const struct intel_memory_region_ops i915_region_stolen_smem_ops = {
@@ -759,10 +763,11 @@ err_fini:
return err;
}
-static void release_stolen_lmem(struct intel_memory_region *mem)
+static int release_stolen_lmem(struct intel_memory_region *mem)
{
io_mapping_fini(&mem->iomap);
i915_gem_cleanup_stolen(mem->i915);
+ return 0;
}
static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = {
@@ -778,6 +783,7 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
struct intel_uncore *uncore = &i915->uncore;
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct intel_memory_region *mem;
+ resource_size_t min_page_size;
resource_size_t io_start;
resource_size_t lmem_size;
u64 lmem_base;
@@ -789,8 +795,11 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
lmem_size = pci_resource_len(pdev, 2) - lmem_base;
io_start = pci_resource_start(pdev, 2) + lmem_base;
+ min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
+ I915_GTT_PAGE_SIZE_4K;
+
mem = intel_memory_region_create(i915, lmem_base, lmem_size,
- I915_GTT_PAGE_SIZE_4K, io_start,
+ min_page_size, io_start,
type, instance,
&i915_region_stolen_lmem_ops);
if (IS_ERR(mem))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
index 1929d6cf4150..75501db71041 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
@@ -38,12 +38,13 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
{
const unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_private *i915 = to_i915(dev);
struct i915_gem_context *ctx;
unsigned long idx;
long ret;
/* ABI: return -EIO if already wedged */
- ret = intel_gt_terminally_wedged(&to_i915(dev)->gt);
+ ret = intel_gt_terminally_wedged(to_gt(i915));
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 74a1ffd0d7dd..923cc7ad8d70 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -14,13 +14,9 @@
#include "gem/i915_gem_object.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
+#include "gem/i915_gem_ttm_move.h"
#include "gem/i915_gem_ttm_pm.h"
-
-#include "gt/intel_engine_pm.h"
-#include "gt/intel_gt.h"
-#include "gt/intel_migrate.h"
-
#define I915_TTM_PRIO_PURGE 0
#define I915_TTM_PRIO_NO_PAGES 1
#define I915_TTM_PRIO_HAS_PAGES 2
@@ -34,7 +30,9 @@
* struct i915_ttm_tt - TTM page vector with additional private information
* @ttm: The base TTM page vector.
* @dev: The struct device used for dma mapping and unmapping.
- * @cached_st: The cached scatter-gather table.
+ * @cached_rsgt: The cached scatter-gather table.
+ * @is_shmem: Set if using shmem.
+ * @filp: The shmem file, if using shmem backend.
*
* Note that DMA may be going on right up to the point where the page-
* vector is unpopulated in delayed destroy. Hence keep the
@@ -45,7 +43,10 @@
struct i915_ttm_tt {
struct ttm_tt ttm;
struct device *dev;
- struct sg_table *cached_st;
+ struct i915_refct_sgt cached_rsgt;
+
+ bool is_shmem;
+ struct file *filp;
};
static const struct ttm_place sys_placement_flags = {
@@ -103,37 +104,15 @@ static int i915_ttm_err_to_gem(int err)
return err;
}
-static bool gpu_binds_iomem(struct ttm_resource *mem)
-{
- return mem->mem_type != TTM_PL_SYSTEM;
-}
-
-static bool cpu_maps_iomem(struct ttm_resource *mem)
-{
- /* Once / if we support GGTT, this is also false for cached ttm_tts */
- return mem->mem_type != TTM_PL_SYSTEM;
-}
-
-static enum i915_cache_level
-i915_ttm_cache_level(struct drm_i915_private *i915, struct ttm_resource *res,
- struct ttm_tt *ttm)
-{
- return ((HAS_LLC(i915) || HAS_SNOOP(i915)) && !gpu_binds_iomem(res) &&
- ttm->caching == ttm_cached) ? I915_CACHE_LLC :
- I915_CACHE_NONE;
-}
-
-static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj);
-
static enum ttm_caching
i915_ttm_select_tt_caching(const struct drm_i915_gem_object *obj)
{
/*
- * Objects only allowed in system get cached cpu-mappings.
- * Other objects get WC mapping for now. Even if in system.
+ * Objects only allowed in system memory get cached cpu-mappings, as do
+ * lmem-only buffers being evicted to system for swapping. Other objects
+ * get WC mappings for now, even if currently in system memory.
*/
- if (obj->mm.region->type == INTEL_MEMORY_SYSTEM &&
- obj->mm.n_placements <= 1)
+ if (obj->mm.n_placements <= 1)
return ttm_cached;
return ttm_write_combined;
@@ -179,15 +158,103 @@ i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
placement->busy_placement = busy;
}
+static int i915_ttm_tt_shmem_populate(struct ttm_device *bdev,
+ struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
+{
+ struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);
+ struct intel_memory_region *mr = i915->mm.regions[INTEL_MEMORY_SYSTEM];
+ struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
+ const unsigned int max_segment = i915_sg_segment_size();
+ const size_t size = (size_t)ttm->num_pages << PAGE_SHIFT;
+ struct file *filp = i915_tt->filp;
+ struct sgt_iter sgt_iter;
+ struct sg_table *st;
+ struct page *page;
+ unsigned long i;
+ int err;
+
+ if (!filp) {
+ struct address_space *mapping;
+ gfp_t mask;
+
+ filp = shmem_file_setup("i915-shmem-tt", size, VM_NORESERVE);
+ if (IS_ERR(filp))
+ return PTR_ERR(filp);
+
+ mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
+
+ mapping = filp->f_mapping;
+ mapping_set_gfp_mask(mapping, mask);
+ GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
+
+ i915_tt->filp = filp;
+ }
+
+ st = &i915_tt->cached_rsgt.table;
+ err = shmem_sg_alloc_table(i915, st, size, mr, filp->f_mapping,
+ max_segment);
+ if (err)
+ return err;
+
+ err = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (err)
+ goto err_free_st;
+
+ i = 0;
+ for_each_sgt_page(page, sgt_iter, st)
+ ttm->pages[i++] = page;
+
+ if (ttm->page_flags & TTM_TT_FLAG_SWAPPED)
+ ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;
+
+ return 0;
+
+err_free_st:
+ shmem_sg_free_table(st, filp->f_mapping, false, false);
+
+ return err;
+}
+
+static void i915_ttm_tt_shmem_unpopulate(struct ttm_tt *ttm)
+{
+ struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
+ bool backup = ttm->page_flags & TTM_TT_FLAG_SWAPPED;
+ struct sg_table *st = &i915_tt->cached_rsgt.table;
+
+ shmem_sg_free_table(st, file_inode(i915_tt->filp)->i_mapping,
+ backup, backup);
+}
+
+static void i915_ttm_tt_release(struct kref *ref)
+{
+ struct i915_ttm_tt *i915_tt =
+ container_of(ref, typeof(*i915_tt), cached_rsgt.kref);
+ struct sg_table *st = &i915_tt->cached_rsgt.table;
+
+ GEM_WARN_ON(st->sgl);
+
+ kfree(i915_tt);
+}
+
+static const struct i915_refct_sgt_ops tt_rsgt_ops = {
+ .release = i915_ttm_tt_release
+};
+
static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
uint32_t page_flags)
{
struct ttm_resource_manager *man =
ttm_manager_type(bo->bdev, bo->resource->mem_type);
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ enum ttm_caching caching;
struct i915_ttm_tt *i915_tt;
int ret;
+ if (!obj)
+ return NULL;
+
i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL);
if (!i915_tt)
return NULL;
@@ -196,38 +263,66 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
man->use_tt)
page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
- ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags,
- i915_ttm_select_tt_caching(obj));
- if (ret) {
- kfree(i915_tt);
- return NULL;
+ caching = i915_ttm_select_tt_caching(obj);
+ if (i915_gem_object_is_shrinkable(obj) && caching == ttm_cached) {
+ page_flags |= TTM_TT_FLAG_EXTERNAL |
+ TTM_TT_FLAG_EXTERNAL_MAPPABLE;
+ i915_tt->is_shmem = true;
}
+ ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, caching);
+ if (ret)
+ goto err_free;
+
+ __i915_refct_sgt_init(&i915_tt->cached_rsgt, bo->base.size,
+ &tt_rsgt_ops);
+
i915_tt->dev = obj->base.dev->dev;
return &i915_tt->ttm;
+
+err_free:
+ kfree(i915_tt);
+ return NULL;
+}
+
+static int i915_ttm_tt_populate(struct ttm_device *bdev,
+ struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
+{
+ struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
+
+ if (i915_tt->is_shmem)
+ return i915_ttm_tt_shmem_populate(bdev, ttm, ctx);
+
+ return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}
static void i915_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
+ struct sg_table *st = &i915_tt->cached_rsgt.table;
- if (i915_tt->cached_st) {
- dma_unmap_sgtable(i915_tt->dev, i915_tt->cached_st,
- DMA_BIDIRECTIONAL, 0);
- sg_free_table(i915_tt->cached_st);
- kfree(i915_tt->cached_st);
- i915_tt->cached_st = NULL;
+ if (st->sgl)
+ dma_unmap_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);
+
+ if (i915_tt->is_shmem) {
+ i915_ttm_tt_shmem_unpopulate(ttm);
+ } else {
+ sg_free_table(st);
+ ttm_pool_free(&bdev->pool, ttm);
}
- ttm_pool_free(&bdev->pool, ttm);
}
static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
+ if (i915_tt->filp)
+ fput(i915_tt->filp);
+
ttm_tt_fini(ttm);
- kfree(i915_tt);
+ i915_refct_sgt_put(&i915_tt->cached_rsgt);
}
static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
@@ -235,6 +330,17 @@ static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ if (!obj)
+ return false;
+
+ /*
+ * EXTERNAL objects should never be swapped out by TTM; instead we need
+ * to handle that ourselves. TTM will already skip such objects for us,
+ * but we would like to avoid grabbing locks for no good reason.
+ */
+ if (bo->ttm && bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
+ return false;
+
/* Will do for now. Our pinned objects are still on TTM's LRU lists */
return i915_gem_object_evictable(obj);
}
@@ -245,28 +351,19 @@ static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
*placement = i915_sys_placement;
}
-static int i915_ttm_move_notify(struct ttm_buffer_object *bo)
-{
- struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- int ret;
-
- ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
- if (ret)
- return ret;
-
- ret = __i915_gem_object_put_pages(obj);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static void i915_ttm_free_cached_io_st(struct drm_i915_gem_object *obj)
+/**
+ * i915_ttm_free_cached_io_rsgt - Free object cached LMEM information
+ * @obj: The GEM object
+ * This function frees any LMEM-related information that is cached on
+ * the object, for example the radix tree for fast page lookup and the
+ * cached refcounted sg-table.
+ */
+void i915_ttm_free_cached_io_rsgt(struct drm_i915_gem_object *obj)
{
struct radix_tree_iter iter;
void __rcu **slot;
- if (!obj->ttm.cached_io_st)
+ if (!obj->ttm.cached_io_rsgt)
return;
rcu_read_lock();
@@ -274,93 +371,106 @@ static void i915_ttm_free_cached_io_st(struct drm_i915_gem_object *obj)
radix_tree_delete(&obj->ttm.get_io_page.radix, iter.index);
rcu_read_unlock();
- sg_free_table(obj->ttm.cached_io_st);
- kfree(obj->ttm.cached_io_st);
- obj->ttm.cached_io_st = NULL;
+ i915_refct_sgt_put(obj->ttm.cached_io_rsgt);
+ obj->ttm.cached_io_rsgt = NULL;
}
-static void
-i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj)
+/**
+ * i915_ttm_purge - Clear an object of its memory
+ * @obj: The object
+ *
+ * This function is called to clear an object of its memory when it is
+ * marked as not needed anymore.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int i915_ttm_purge(struct drm_i915_gem_object *obj)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+ struct i915_ttm_tt *i915_tt =
+ container_of(bo->ttm, typeof(*i915_tt), ttm);
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false,
+ };
+ struct ttm_placement place = {};
+ int ret;
- if (cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) {
- obj->write_domain = I915_GEM_DOMAIN_WC;
- obj->read_domains = I915_GEM_DOMAIN_WC;
- } else {
- obj->write_domain = I915_GEM_DOMAIN_CPU;
- obj->read_domains = I915_GEM_DOMAIN_CPU;
- }
-}
+ if (obj->mm.madv == __I915_MADV_PURGED)
+ return 0;
-static void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
-{
- struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
- unsigned int cache_level;
- unsigned int i;
+ ret = ttm_bo_validate(bo, &place, &ctx);
+ if (ret)
+ return ret;
- /*
- * If object was moved to an allowable region, update the object
- * region to consider it migrated. Note that if it's currently not
- * in an allowable region, it's evicted and we don't update the
- * object region.
- */
- if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) {
- for (i = 0; i < obj->mm.n_placements; ++i) {
- struct intel_memory_region *mr = obj->mm.placements[i];
-
- if (intel_region_to_ttm_type(mr) == bo->resource->mem_type &&
- mr != obj->mm.region) {
- i915_gem_object_release_memory_region(obj);
- i915_gem_object_init_memory_region(obj, mr);
- break;
- }
- }
+ if (bo->ttm && i915_tt->filp) {
+ /*
+ * The below fput (which eventually calls shmem_truncate) might
+ * be delayed by a worker, so when directly called to purge the
+ * pages (like by the shrinker) we should try to be more
+ * aggressive and release the pages immediately.
+ */
+ shmem_truncate_range(file_inode(i915_tt->filp),
+ 0, (loff_t)-1);
+ fput(fetch_and_zero(&i915_tt->filp));
}
- obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM);
-
- obj->mem_flags |= cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
- I915_BO_FLAG_STRUCT_PAGE;
+ obj->write_domain = 0;
+ obj->read_domains = 0;
+ i915_ttm_adjust_gem_after_move(obj);
+ i915_ttm_free_cached_io_rsgt(obj);
+ obj->mm.madv = __I915_MADV_PURGED;
- cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
- bo->ttm);
- i915_gem_object_set_cache_coherency(obj, cache_level);
+ return 0;
}
-static void i915_ttm_purge(struct drm_i915_gem_object *obj)
+static int i915_ttm_shrinker_release_pages(struct drm_i915_gem_object *obj,
+ bool no_wait_gpu,
+ bool should_writeback)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+ struct i915_ttm_tt *i915_tt =
+ container_of(bo->ttm, typeof(*i915_tt), ttm);
struct ttm_operation_ctx ctx = {
.interruptible = true,
- .no_wait_gpu = false,
+ .no_wait_gpu = no_wait_gpu,
};
struct ttm_placement place = {};
int ret;
- if (obj->mm.madv == __I915_MADV_PURGED)
- return;
+ if (!bo->ttm || bo->resource->mem_type != TTM_PL_SYSTEM)
+ return 0;
+
+ GEM_BUG_ON(!i915_tt->is_shmem);
+
+ if (!i915_tt->filp)
+ return 0;
+
+ ret = ttm_bo_wait_ctx(bo, &ctx);
+ if (ret)
+ return ret;
- /* TTM's purge interface. Note that we might be reentering. */
+ switch (obj->mm.madv) {
+ case I915_MADV_DONTNEED:
+ return i915_ttm_purge(obj);
+ case __I915_MADV_PURGED:
+ return 0;
+ }
+
+ if (bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED)
+ return 0;
+
+ bo->ttm->page_flags |= TTM_TT_FLAG_SWAPPED;
ret = ttm_bo_validate(bo, &place, &ctx);
- if (!ret) {
- obj->write_domain = 0;
- obj->read_domains = 0;
- i915_ttm_adjust_gem_after_move(obj);
- i915_ttm_free_cached_io_st(obj);
- obj->mm.madv = __I915_MADV_PURGED;
+ if (ret) {
+ bo->ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;
+ return ret;
}
-}
-static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
-{
- struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- int ret = i915_ttm_move_notify(bo);
+ if (should_writeback)
+ __shmem_writeback(obj->base.size, i915_tt->filp->f_mapping);
- GEM_WARN_ON(ret);
- GEM_WARN_ON(obj->ttm.cached_io_st);
- if (!ret && obj->mm.madv != I915_MADV_WILLNEED)
- i915_ttm_purge(obj);
+ return 0;
}
static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
@@ -369,232 +479,101 @@ static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
if (likely(obj)) {
__i915_gem_object_pages_fini(obj);
- i915_ttm_free_cached_io_st(obj);
+ i915_ttm_free_cached_io_rsgt(obj);
}
}
-static struct intel_memory_region *
-i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type)
-{
- struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);
-
- /* There's some room for optimization here... */
- GEM_BUG_ON(ttm_mem_type != I915_PL_SYSTEM &&
- ttm_mem_type < I915_PL_LMEM0);
- if (ttm_mem_type == I915_PL_SYSTEM)
- return intel_memory_region_lookup(i915, INTEL_MEMORY_SYSTEM,
- 0);
-
- return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL,
- ttm_mem_type - I915_PL_LMEM0);
-}
-
-static struct sg_table *i915_ttm_tt_get_st(struct ttm_tt *ttm)
+static struct i915_refct_sgt *i915_ttm_tt_get_st(struct ttm_tt *ttm)
{
struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
struct sg_table *st;
int ret;
- if (i915_tt->cached_st)
- return i915_tt->cached_st;
-
- st = kzalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- return ERR_PTR(-ENOMEM);
+ if (i915_tt->cached_rsgt.table.sgl)
+ return i915_refct_sgt_get(&i915_tt->cached_rsgt);
+ st = &i915_tt->cached_rsgt.table;
ret = sg_alloc_table_from_pages_segment(st,
ttm->pages, ttm->num_pages,
0, (unsigned long)ttm->num_pages << PAGE_SHIFT,
i915_sg_segment_size(), GFP_KERNEL);
if (ret) {
- kfree(st);
+ st->sgl = NULL;
return ERR_PTR(ret);
}
ret = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);
if (ret) {
sg_free_table(st);
- kfree(st);
return ERR_PTR(ret);
}
- i915_tt->cached_st = st;
- return st;
+ return i915_refct_sgt_get(&i915_tt->cached_rsgt);
}
-static struct sg_table *
+/**
+ * i915_ttm_resource_get_st - Get a refcounted sg-table pointing to the
+ * resource memory
+ * @obj: The GEM object used for sg-table caching
+ * @res: The struct ttm_resource for which an sg-table is requested.
+ *
+ * This function returns a refcounted sg-table representing the memory
+ * pointed to by @res. If @res is the object's current resource, it may also
+ * cache the sg_table on the object or attempt to access an already cached
+ * sg-table. The refcounted sg-table needs to be put when no longer in use.
+ *
+ * Return: A valid pointer to a struct i915_refct_sgt or error pointer on
+ * failure.
+ */
+struct i915_refct_sgt *
i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
struct ttm_resource *res)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
- if (!gpu_binds_iomem(res))
+ if (!i915_ttm_gtt_binds_lmem(res))
return i915_ttm_tt_get_st(bo->ttm);
/*
* If CPU mapping differs, we need to add the ttm_tt pages to
* the resulting st. Might make sense for GGTT.
*/
- GEM_WARN_ON(!cpu_maps_iomem(res));
- return intel_region_ttm_resource_to_st(obj->mm.region, res);
-}
-
-static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
- bool clear,
- struct ttm_resource *dst_mem,
- struct ttm_tt *dst_ttm,
- struct sg_table *dst_st)
-{
- struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
- bdev);
- struct ttm_resource_manager *src_man =
- ttm_manager_type(bo->bdev, bo->resource->mem_type);
- struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- struct sg_table *src_st;
- struct i915_request *rq;
- struct ttm_tt *src_ttm = bo->ttm;
- enum i915_cache_level src_level, dst_level;
- int ret;
-
- if (!i915->gt.migrate.context || intel_gt_is_wedged(&i915->gt))
- return -EINVAL;
+ GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(res));
+ if (bo->resource == res) {
+ if (!obj->ttm.cached_io_rsgt) {
+ struct i915_refct_sgt *rsgt;
- dst_level = i915_ttm_cache_level(i915, dst_mem, dst_ttm);
- if (clear) {
- if (bo->type == ttm_bo_type_kernel)
- return -EINVAL;
+ rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region,
+ res);
+ if (IS_ERR(rsgt))
+ return rsgt;
- intel_engine_pm_get(i915->gt.migrate.context->engine);
- ret = intel_context_migrate_clear(i915->gt.migrate.context, NULL,
- dst_st->sgl, dst_level,
- gpu_binds_iomem(dst_mem),
- 0, &rq);
-
- if (!ret && rq) {
- i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
- i915_request_put(rq);
- }
- intel_engine_pm_put(i915->gt.migrate.context->engine);
- } else {
- src_st = src_man->use_tt ? i915_ttm_tt_get_st(src_ttm) :
- obj->ttm.cached_io_st;
-
- src_level = i915_ttm_cache_level(i915, bo->resource, src_ttm);
- intel_engine_pm_get(i915->gt.migrate.context->engine);
- ret = intel_context_migrate_copy(i915->gt.migrate.context,
- NULL, src_st->sgl, src_level,
- gpu_binds_iomem(bo->resource),
- dst_st->sgl, dst_level,
- gpu_binds_iomem(dst_mem),
- &rq);
- if (!ret && rq) {
- i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
- i915_request_put(rq);
+ obj->ttm.cached_io_rsgt = rsgt;
}
- intel_engine_pm_put(i915->gt.migrate.context->engine);
+ return i915_refct_sgt_get(obj->ttm.cached_io_rsgt);
}
- return ret;
-}
-
-static void __i915_ttm_move(struct ttm_buffer_object *bo, bool clear,
- struct ttm_resource *dst_mem,
- struct ttm_tt *dst_ttm,
- struct sg_table *dst_st,
- bool allow_accel)
-{
- int ret = -EINVAL;
-
- if (allow_accel)
- ret = i915_ttm_accel_move(bo, clear, dst_mem, dst_ttm, dst_st);
- if (ret) {
- struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- struct intel_memory_region *dst_reg, *src_reg;
- union {
- struct ttm_kmap_iter_tt tt;
- struct ttm_kmap_iter_iomap io;
- } _dst_iter, _src_iter;
- struct ttm_kmap_iter *dst_iter, *src_iter;
-
- dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type);
- src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type);
- GEM_BUG_ON(!dst_reg || !src_reg);
-
- dst_iter = !cpu_maps_iomem(dst_mem) ?
- ttm_kmap_iter_tt_init(&_dst_iter.tt, dst_ttm) :
- ttm_kmap_iter_iomap_init(&_dst_iter.io, &dst_reg->iomap,
- dst_st, dst_reg->region.start);
-
- src_iter = !cpu_maps_iomem(bo->resource) ?
- ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm) :
- ttm_kmap_iter_iomap_init(&_src_iter.io, &src_reg->iomap,
- obj->ttm.cached_io_st,
- src_reg->region.start);
-
- ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter);
- }
+ return intel_region_ttm_resource_to_rsgt(obj->mm.region, res);
}
-static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
- struct ttm_operation_ctx *ctx,
- struct ttm_resource *dst_mem,
- struct ttm_place *hop)
+static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- struct ttm_resource_manager *dst_man =
- ttm_manager_type(bo->bdev, dst_mem->mem_type);
- struct ttm_tt *ttm = bo->ttm;
- struct sg_table *dst_st;
- bool clear;
int ret;
- /* Sync for now. We could do the actual copy async. */
- ret = ttm_bo_wait_ctx(bo, ctx);
- if (ret)
- return ret;
+ if (!obj)
+ return;
ret = i915_ttm_move_notify(bo);
- if (ret)
- return ret;
-
- if (obj->mm.madv != I915_MADV_WILLNEED) {
+ GEM_WARN_ON(ret);
+ GEM_WARN_ON(obj->ttm.cached_io_rsgt);
+ if (!ret && obj->mm.madv != I915_MADV_WILLNEED)
i915_ttm_purge(obj);
- ttm_resource_free(bo, &dst_mem);
- return 0;
- }
-
- /* Populate ttm with pages if needed. Typically system memory. */
- if (ttm && (dst_man->use_tt || (ttm->page_flags & TTM_TT_FLAG_SWAPPED))) {
- ret = ttm_tt_populate(bo->bdev, ttm, ctx);
- if (ret)
- return ret;
- }
-
- dst_st = i915_ttm_resource_get_st(obj, dst_mem);
- if (IS_ERR(dst_st))
- return PTR_ERR(dst_st);
-
- clear = !cpu_maps_iomem(bo->resource) && (!ttm || !ttm_tt_is_populated(ttm));
- if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
- __i915_ttm_move(bo, clear, dst_mem, bo->ttm, dst_st, true);
-
- ttm_bo_move_sync_cleanup(bo, dst_mem);
- i915_ttm_adjust_domains_after_move(obj);
- i915_ttm_free_cached_io_st(obj);
-
- if (gpu_binds_iomem(dst_mem) || cpu_maps_iomem(dst_mem)) {
- obj->ttm.cached_io_st = dst_st;
- obj->ttm.get_io_page.sg_pos = dst_st->sgl;
- obj->ttm.get_io_page.sg_idx = 0;
- }
-
- i915_ttm_adjust_gem_after_move(obj);
- return 0;
}
static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
- if (!cpu_maps_iomem(mem))
+ if (!i915_ttm_cpu_maps_iomem(mem))
return 0;
mem->bus.caching = ttm_write_combined;
@@ -607,19 +586,26 @@ static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
unsigned long page_offset)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- unsigned long base = obj->mm.region->iomap.base - obj->mm.region->region.start;
struct scatterlist *sg;
+ unsigned long base;
unsigned int ofs;
+ GEM_BUG_ON(!obj);
GEM_WARN_ON(bo->ttm);
+ base = obj->mm.region->iomap.base - obj->mm.region->region.start;
sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true);
return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
}
+/*
+ * All callbacks need to take care not to downcast a struct ttm_buffer_object
+ * without checking its subclass, since it might be a TTM ghost object.
+ */
static struct ttm_device_funcs i915_ttm_bo_driver = {
.ttm_tt_create = i915_ttm_tt_create,
+ .ttm_tt_populate = i915_ttm_tt_populate,
.ttm_tt_unpopulate = i915_ttm_tt_unpopulate,
.ttm_tt_destroy = i915_ttm_tt_destroy,
.eviction_valuable = i915_ttm_eviction_valuable,
@@ -649,7 +635,6 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
.interruptible = true,
.no_wait_gpu = false,
};
- struct sg_table *st;
int real_num_busy;
int ret;
@@ -676,7 +661,6 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
return i915_ttm_err_to_gem(ret);
}
- i915_ttm_adjust_lru(obj);
if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) {
ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
if (ret)
@@ -687,14 +671,19 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
}
if (!i915_gem_object_has_pages(obj)) {
- /* Object either has a page vector or is an iomem object */
- st = bo->ttm ? i915_ttm_tt_get_st(bo->ttm) : obj->ttm.cached_io_st;
- if (IS_ERR(st))
- return PTR_ERR(st);
+ struct i915_refct_sgt *rsgt =
+ i915_ttm_resource_get_st(obj, bo->resource);
+
+ if (IS_ERR(rsgt))
+ return PTR_ERR(rsgt);
- __i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));
+ GEM_BUG_ON(obj->mm.rsgt);
+ obj->mm.rsgt = rsgt;
+ __i915_gem_object_set_pages(obj, &rsgt->table,
+ i915_sg_dma_sizes(rsgt->table.sgl));
}
+ i915_ttm_adjust_lru(obj);
return ret;
}
@@ -766,12 +755,21 @@ static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
* and shrinkers will move it out if needed.
*/
- i915_ttm_adjust_lru(obj);
+ if (obj->mm.rsgt)
+ i915_refct_sgt_put(fetch_and_zero(&obj->mm.rsgt));
}
-static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
+/**
+ * i915_ttm_adjust_lru - Adjust an object's position on relevant LRU lists.
+ * @obj: The object
+ */
+void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+ struct i915_ttm_tt *i915_tt =
+ container_of(bo->ttm, typeof(*i915_tt), ttm);
+ bool shrinkable =
+ bo->ttm && i915_tt->filp && ttm_tt_is_populated(bo->ttm);
/*
* Don't manipulate the TTM LRUs while in TTM bo destruction.
@@ -781,10 +779,53 @@ static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
return;
/*
+ * We skip managing the shrinker LRU in set_pages() and just manage
+ * everything here. This does at least solve the issue with having
+ * temporary shmem mappings (like with evicted lmem) not being visible to
+ * the shrinker. Only our shmem objects are shrinkable, everything else
+ * we keep as unshrinkable.
+ *
+ * To make sure everything plays nice we keep an extra shrink pin in TTM
+ * if the underlying pages are not currently shrinkable. Once we release
+ * our pin, like when the pages are moved to shmem, the pages will then
+ * be added to the shrinker LRU, assuming the caller isn't also holding
+ * a pin.
+ *
+ * TODO: consider maybe also bumping the shrinker list here when we have
+ * already unpinned it, which should give us something more like an LRU.
+ *
+ * TODO: There is a small window of opportunity for this function to
+ * get called from eviction after we've dropped the last GEM refcount,
+ * but before the TTM deleted flag is set on the object. Avoid
+ * adjusting the shrinker list in such cases, since the object is
+ * not available to the shrinker anyway due to its zero refcount.
+ * To fix this properly we should move to a TTM shrinker LRU list for
+ * these objects.
+ */
+ if (kref_get_unless_zero(&obj->base.refcount)) {
+ if (shrinkable != obj->mm.ttm_shrinkable) {
+ if (shrinkable) {
+ if (obj->mm.madv == I915_MADV_WILLNEED)
+ __i915_gem_object_make_shrinkable(obj);
+ else
+ __i915_gem_object_make_purgeable(obj);
+ } else {
+ i915_gem_object_make_unshrinkable(obj);
+ }
+
+ obj->mm.ttm_shrinkable = shrinkable;
+ }
+ i915_gem_object_put(obj);
+ }
+
+ /*
* Put on the correct LRU list depending on the MADV status
*/
spin_lock(&bo->bdev->lru_lock);
- if (obj->mm.madv != I915_MADV_WILLNEED) {
+ if (shrinkable) {
+ /* Try to keep shmem_tt from being considered for shrinking. */
+ bo->priority = TTM_MAX_BO_PRIORITY - 1;
+ } else if (obj->mm.madv != I915_MADV_WILLNEED) {
bo->priority = I915_TTM_PRIO_PURGE;
} else if (!i915_gem_object_has_pages(obj)) {
if (bo->priority < I915_TTM_PRIO_HAS_PAGES)
@@ -823,15 +864,39 @@ static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
{
struct vm_area_struct *area = vmf->vma;
- struct drm_i915_gem_object *obj =
- i915_ttm_to_gem(area->vm_private_data);
+ struct ttm_buffer_object *bo = area->vm_private_data;
+ struct drm_device *dev = bo->base.dev;
+ struct drm_i915_gem_object *obj;
+ vm_fault_t ret;
+ int idx;
+
+ obj = i915_ttm_to_gem(bo);
+ if (!obj)
+ return VM_FAULT_SIGBUS;
/* Sanity check that we allow writing into this object */
if (unlikely(i915_gem_object_is_readonly(obj) &&
area->vm_flags & VM_WRITE))
return VM_FAULT_SIGBUS;
- return ttm_bo_vm_fault(vmf);
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ if (ret)
+ return ret;
+
+ if (drm_dev_enter(dev, &idx)) {
+ ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+ TTM_BO_VM_NUM_PREFAULT);
+ drm_dev_exit(idx);
+ } else {
+ ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
+ }
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ return ret;
+
+ i915_ttm_adjust_lru(obj);
+
+ dma_resv_unlock(bo->base.resv);
+ return ret;
}
static int
@@ -882,13 +947,18 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
.name = "i915_gem_object_ttm",
+ .flags = I915_GEM_OBJECT_IS_SHRINKABLE |
+ I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST,
.get_pages = i915_ttm_get_pages,
.put_pages = i915_ttm_put_pages,
.truncate = i915_ttm_purge,
+ .shrinker_release_pages = i915_ttm_shrinker_release_pages,
+
.adjust_lru = i915_ttm_adjust_lru,
.delayed_free = i915_ttm_delayed_free,
.migrate = i915_ttm_migrate,
+
.mmap_offset = i915_ttm_mmap_offset,
.mmap_ops = &vm_ops_ttm,
};
@@ -901,6 +971,18 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
mutex_destroy(&obj->ttm.get_io_page.lock);
if (obj->ttm.created) {
+ /*
+ * We freely manage the shrinker LRU outside of the mm.pages life
+ * cycle. As a result, when destroying the object we should be
+ * extra paranoid and ensure we remove it from the LRU before
+ * we free the object.
+ *
+ * Touching the ttm_shrinkable outside of the object lock here
+ * should be safe now that the last GEM object ref was dropped.
+ */
+ if (obj->mm.ttm_shrinkable)
+ i915_gem_object_make_unshrinkable(obj);
+
i915_ttm_backup_free(obj);
/* This releases all gem object bindings to the backend. */
@@ -940,10 +1022,9 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
/* Don't put on a region list until we're either locked or fully initialized. */
- obj->mm.region = intel_memory_region_get(mem);
+ obj->mm.region = mem;
INIT_LIST_HEAD(&obj->mm.region_link);
- i915_gem_object_make_unshrinkable(obj);
INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
mutex_init(&obj->ttm.get_io_page.lock);
bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
@@ -955,6 +1036,14 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
GEM_BUG_ON(page_size && obj->mm.n_placements);
/*
+ * Keep an extra shrink pin to prevent the object from being made
+ * shrinkable too early. If the ttm_tt is ever allocated in shmem, we
+ * drop the pin. The TTM backend manages the shrinker LRU itself,
+ * outside of the normal mm.pages life cycle.
+ */
+ i915_gem_object_make_unshrinkable(obj);
+
+ /*
* If this function fails, it will call the destructor, but
* our caller still owns the object. So no freeing in the
* destructor until obj->ttm.created is true.
@@ -980,6 +1069,7 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
static const struct intel_memory_region_ops ttm_system_region_ops = {
.init_object = __i915_gem_ttm_object_init,
+ .release = intel_region_ttm_fini,
};
struct intel_memory_region *
@@ -999,50 +1089,3 @@ i915_gem_ttm_system_setup(struct drm_i915_private *i915,
intel_memory_region_set_name(mr, "system-ttm");
return mr;
}
-
-/**
- * i915_gem_obj_copy_ttm - Copy the contents of one ttm-based gem object to
- * another
- * @dst: The destination object
- * @src: The source object
- * @allow_accel: Allow using the blitter. Otherwise TTM memcpy is used.
- * @intr: Whether to perform waits interruptible:
- *
- * Note: The caller is responsible for assuring that the underlying
- * TTM objects are populated if needed and locked.
- *
- * Return: Zero on success. Negative error code on error. If @intr == true,
- * then it may return -ERESTARTSYS or -EINTR.
- */
-int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
- struct drm_i915_gem_object *src,
- bool allow_accel, bool intr)
-{
- struct ttm_buffer_object *dst_bo = i915_gem_to_ttm(dst);
- struct ttm_buffer_object *src_bo = i915_gem_to_ttm(src);
- struct ttm_operation_ctx ctx = {
- .interruptible = intr,
- };
- struct sg_table *dst_st;
- int ret;
-
- assert_object_held(dst);
- assert_object_held(src);
-
- /*
- * Sync for now. This will change with async moves.
- */
- ret = ttm_bo_wait_ctx(dst_bo, &ctx);
- if (!ret)
- ret = ttm_bo_wait_ctx(src_bo, &ctx);
- if (ret)
- return ret;
-
- dst_st = gpu_binds_iomem(dst_bo->resource) ?
- dst->ttm.cached_io_st : i915_ttm_tt_get_st(dst_bo->ttm);
-
- __i915_ttm_move(src_bo, false, dst_bo->resource, dst_bo->ttm,
- dst_st, allow_accel);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
index 0b7291dd897c..9d698ad00853 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
@@ -5,6 +5,8 @@
#ifndef _I915_GEM_TTM_H_
#define _I915_GEM_TTM_H_
+#include <drm/ttm/ttm_placement.h>
+
#include "gem/i915_gem_object_types.h"
/**
@@ -35,7 +37,7 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo);
static inline struct drm_i915_gem_object *
i915_ttm_to_gem(struct ttm_buffer_object *bo)
{
- if (GEM_WARN_ON(bo->destroy != i915_ttm_bo_destroy))
+ if (bo->destroy != i915_ttm_bo_destroy)
return NULL;
return container_of(bo, struct drm_i915_gem_object, __do_not_access);
@@ -47,10 +49,6 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
resource_size_t page_size,
unsigned int flags);
-int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
- struct drm_i915_gem_object *src,
- bool allow_accel, bool intr);
-
/* Internal I915 TTM declarations and definitions below. */
#define I915_PL_LMEM0 TTM_PL_PRIV
@@ -60,4 +58,37 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
struct ttm_placement *i915_ttm_sys_placement(void);
+void i915_ttm_free_cached_io_rsgt(struct drm_i915_gem_object *obj);
+
+struct i915_refct_sgt *
+i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
+ struct ttm_resource *res);
+
+void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj);
+
+int i915_ttm_purge(struct drm_i915_gem_object *obj);
+
+/**
+ * i915_ttm_gtt_binds_lmem - Should the memory be viewed as LMEM by the GTT?
+ * @mem: struct ttm_resource representing the memory.
+ *
+ * Return: true if memory should be viewed as LMEM for GTT binding purposes,
+ * false otherwise.
+ */
+static inline bool i915_ttm_gtt_binds_lmem(struct ttm_resource *mem)
+{
+ return mem->mem_type != I915_PL_SYSTEM;
+}
+
+/**
+ * i915_ttm_cpu_maps_iomem - Should the memory be viewed as IOMEM by the CPU?
+ * @mem: struct ttm_resource representing the memory.
+ *
+ * Return: true if memory should be viewed as IOMEM for CPU mapping purposes.
+ */
+static inline bool i915_ttm_cpu_maps_iomem(struct ttm_resource *mem)
+{
+ /* Once / if we support GGTT, this is also false for cached ttm_tts */
+ return mem->mem_type != I915_PL_SYSTEM;
+}
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
new file mode 100644
index 000000000000..ee9612a3ee5e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -0,0 +1,627 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <drm/ttm/ttm_bo_driver.h>
+
+#include "i915_deps.h"
+#include "i915_drv.h"
+#include "intel_memory_region.h"
+#include "intel_region_ttm.h"
+
+#include "gem/i915_gem_object.h"
+#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_ttm.h"
+#include "gem/i915_gem_ttm_move.h"
+
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_migrate.h"
+
+/**
+ * DOC: Selftest failure modes for failsafe migration:
+ *
+ * For fail_gpu_migration, the gpu blit scheduled is always a clear blit
+ * rather than a copy blit, and then we force the failure paths as if
+ * the blit fence returned an error.
+ *
+ * For fail_work_allocation we fail the kmalloc of the async worker and
+ * sync the gpu blit. If it then fails, or fail_gpu_migration is set to
+ * true, then a memcpy operation is performed synchronously.
+ */
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+static bool fail_gpu_migration;
+static bool fail_work_allocation;
+
+void i915_ttm_migrate_set_failure_modes(bool gpu_migration,
+ bool work_allocation)
+{
+ fail_gpu_migration = gpu_migration;
+ fail_work_allocation = work_allocation;
+}
+#endif
+
+static enum i915_cache_level
+i915_ttm_cache_level(struct drm_i915_private *i915, struct ttm_resource *res,
+ struct ttm_tt *ttm)
+{
+ return ((HAS_LLC(i915) || HAS_SNOOP(i915)) &&
+ !i915_ttm_gtt_binds_lmem(res) &&
+ ttm->caching == ttm_cached) ? I915_CACHE_LLC :
+ I915_CACHE_NONE;
+}
+
+static struct intel_memory_region *
+i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type)
+{
+ struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);
+
+ /* There's some room for optimization here... */
+ GEM_BUG_ON(ttm_mem_type != I915_PL_SYSTEM &&
+ ttm_mem_type < I915_PL_LMEM0);
+ if (ttm_mem_type == I915_PL_SYSTEM)
+ return intel_memory_region_lookup(i915, INTEL_MEMORY_SYSTEM,
+ 0);
+
+ return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL,
+ ttm_mem_type - I915_PL_LMEM0);
+}
+
+/**
+ * i915_ttm_adjust_domains_after_move - Adjust the GEM domains after a
+ * TTM move
+ * @obj: The gem object
+ */
+void i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj)
+{
+ struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+
+ if (i915_ttm_cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) {
+ obj->write_domain = I915_GEM_DOMAIN_WC;
+ obj->read_domains = I915_GEM_DOMAIN_WC;
+ } else {
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+ }
+}
+
+/**
+ * i915_ttm_adjust_gem_after_move - Adjust the GEM state after a TTM move
+ * @obj: The gem object
+ *
+ * Adjusts the GEM object's region, mem_flags and cache coherency after a
+ * TTM move.
+ */
+void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
+{
+ struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+ unsigned int cache_level;
+ unsigned int i;
+
+ /*
+ * If object was moved to an allowable region, update the object
+ * region to consider it migrated. Note that if it's currently not
+ * in an allowable region, it's evicted and we don't update the
+ * object region.
+ */
+ if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) {
+ for (i = 0; i < obj->mm.n_placements; ++i) {
+ struct intel_memory_region *mr = obj->mm.placements[i];
+
+ if (intel_region_to_ttm_type(mr) == bo->resource->mem_type &&
+ mr != obj->mm.region) {
+ i915_gem_object_release_memory_region(obj);
+ i915_gem_object_init_memory_region(obj, mr);
+ break;
+ }
+ }
+ }
+
+ obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM);
+
+ obj->mem_flags |= i915_ttm_cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
+ I915_BO_FLAG_STRUCT_PAGE;
+
+ cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
+ bo->ttm);
+ i915_gem_object_set_cache_coherency(obj, cache_level);
+}
+
+/**
+ * i915_ttm_move_notify - Prepare an object for move
+ * @bo: The ttm buffer object.
+ *
+ * This function prepares an object for move by removing all GPU bindings,
+ * removing all CPU mappings and finally releasing the pages sg-table.
+ *
+ * Return: 0 if successful, negative error code on error.
+ */
+int i915_ttm_move_notify(struct ttm_buffer_object *bo)
+{
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ int ret;
+
+ ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
+ if (ret)
+ return ret;
+
+ ret = __i915_gem_object_put_pages(obj);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo,
+ bool clear,
+ struct ttm_resource *dst_mem,
+ struct ttm_tt *dst_ttm,
+ struct sg_table *dst_st,
+ const struct i915_deps *deps)
+{
+ struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
+ bdev);
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ struct i915_request *rq;
+ struct ttm_tt *src_ttm = bo->ttm;
+ enum i915_cache_level src_level, dst_level;
+ int ret;
+
+ if (!to_gt(i915)->migrate.context || intel_gt_is_wedged(to_gt(i915)))
+ return ERR_PTR(-EINVAL);
+
+ /* With fail_gpu_migration, we always perform a GPU clear. */
+ if (I915_SELFTEST_ONLY(fail_gpu_migration))
+ clear = true;
+
+ dst_level = i915_ttm_cache_level(i915, dst_mem, dst_ttm);
+ if (clear) {
+ if (bo->type == ttm_bo_type_kernel &&
+ !I915_SELFTEST_ONLY(fail_gpu_migration))
+ return ERR_PTR(-EINVAL);
+
+ intel_engine_pm_get(to_gt(i915)->migrate.context->engine);
+ ret = intel_context_migrate_clear(to_gt(i915)->migrate.context, deps,
+ dst_st->sgl, dst_level,
+ i915_ttm_gtt_binds_lmem(dst_mem),
+ 0, &rq);
+ } else {
+ struct i915_refct_sgt *src_rsgt =
+ i915_ttm_resource_get_st(obj, bo->resource);
+
+ if (IS_ERR(src_rsgt))
+ return ERR_CAST(src_rsgt);
+
+ src_level = i915_ttm_cache_level(i915, bo->resource, src_ttm);
+ intel_engine_pm_get(to_gt(i915)->migrate.context->engine);
+ ret = intel_context_migrate_copy(to_gt(i915)->migrate.context,
+ deps, src_rsgt->table.sgl,
+ src_level,
+ i915_ttm_gtt_binds_lmem(bo->resource),
+ dst_st->sgl, dst_level,
+ i915_ttm_gtt_binds_lmem(dst_mem),
+ &rq);
+
+ i915_refct_sgt_put(src_rsgt);
+ }
+
+ intel_engine_pm_put(to_gt(i915)->migrate.context->engine);
+
+ if (ret && rq) {
+ i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
+ i915_request_put(rq);
+ }
+
+ return ret ? ERR_PTR(ret) : &rq->fence;
+}
+
+/**
+ * struct i915_ttm_memcpy_arg - argument for the bo memcpy functionality.
+ * @_dst_iter: Storage space for the destination kmap iterator.
+ * @_src_iter: Storage space for the source kmap iterator.
+ * @dst_iter: Pointer to the destination kmap iterator.
+ * @src_iter: Pointer to the source kmap iterator.
+ * @clear: Whether to clear instead of copy.
+ * @src_rsgt: Refcounted scatter-gather list of source memory.
+ * @dst_rsgt: Refcounted scatter-gather list of destination memory.
+ */
+struct i915_ttm_memcpy_arg {
+ union {
+ struct ttm_kmap_iter_tt tt;
+ struct ttm_kmap_iter_iomap io;
+ } _dst_iter,
+ _src_iter;
+ struct ttm_kmap_iter *dst_iter;
+ struct ttm_kmap_iter *src_iter;
+ unsigned long num_pages;
+ bool clear;
+ struct i915_refct_sgt *src_rsgt;
+ struct i915_refct_sgt *dst_rsgt;
+};
+
+/**
+ * struct i915_ttm_memcpy_work - Async memcpy worker under a dma-fence.
+ * @fence: The dma-fence.
+ * @work: The work struct used for the memcpy work.
+ * @lock: The fence lock. Not used to protect anything else ATM.
+ * @irq_work: Low latency worker to signal the fence since it can't be done
+ * from the callback for lockdep reasons.
+ * @cb: Callback for the accelerated migration fence.
+ * @arg: The argument for the memcpy functionality.
+ */
+struct i915_ttm_memcpy_work {
+ struct dma_fence fence;
+ struct work_struct work;
+ /* The fence lock */
+ spinlock_t lock;
+ struct irq_work irq_work;
+ struct dma_fence_cb cb;
+ struct i915_ttm_memcpy_arg arg;
+};
+
+static void i915_ttm_move_memcpy(struct i915_ttm_memcpy_arg *arg)
+{
+ ttm_move_memcpy(arg->clear, arg->num_pages,
+ arg->dst_iter, arg->src_iter);
+}
+
+static void i915_ttm_memcpy_init(struct i915_ttm_memcpy_arg *arg,
+ struct ttm_buffer_object *bo, bool clear,
+ struct ttm_resource *dst_mem,
+ struct ttm_tt *dst_ttm,
+ struct i915_refct_sgt *dst_rsgt)
+{
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ struct intel_memory_region *dst_reg, *src_reg;
+
+ dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type);
+ src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type);
+ GEM_BUG_ON(!dst_reg || !src_reg);
+
+ arg->dst_iter = !i915_ttm_cpu_maps_iomem(dst_mem) ?
+ ttm_kmap_iter_tt_init(&arg->_dst_iter.tt, dst_ttm) :
+ ttm_kmap_iter_iomap_init(&arg->_dst_iter.io, &dst_reg->iomap,
+ &dst_rsgt->table, dst_reg->region.start);
+
+ arg->src_iter = !i915_ttm_cpu_maps_iomem(bo->resource) ?
+ ttm_kmap_iter_tt_init(&arg->_src_iter.tt, bo->ttm) :
+ ttm_kmap_iter_iomap_init(&arg->_src_iter.io, &src_reg->iomap,
+ &obj->ttm.cached_io_rsgt->table,
+ src_reg->region.start);
+ arg->clear = clear;
+ arg->num_pages = bo->base.size >> PAGE_SHIFT;
+
+ arg->dst_rsgt = i915_refct_sgt_get(dst_rsgt);
+ arg->src_rsgt = clear ? NULL :
+ i915_ttm_resource_get_st(obj, bo->resource);
+}
+
+static void i915_ttm_memcpy_release(struct i915_ttm_memcpy_arg *arg)
+{
+ i915_refct_sgt_put(arg->src_rsgt);
+ i915_refct_sgt_put(arg->dst_rsgt);
+}
+
+static void __memcpy_work(struct work_struct *work)
+{
+ struct i915_ttm_memcpy_work *copy_work =
+ container_of(work, typeof(*copy_work), work);
+ struct i915_ttm_memcpy_arg *arg = &copy_work->arg;
+ bool cookie = dma_fence_begin_signalling();
+
+ i915_ttm_move_memcpy(arg);
+ dma_fence_end_signalling(cookie);
+
+ dma_fence_signal(&copy_work->fence);
+
+ i915_ttm_memcpy_release(arg);
+ dma_fence_put(&copy_work->fence);
+}
+
+static void __memcpy_irq_work(struct irq_work *irq_work)
+{
+ struct i915_ttm_memcpy_work *copy_work =
+ container_of(irq_work, typeof(*copy_work), irq_work);
+ struct i915_ttm_memcpy_arg *arg = &copy_work->arg;
+
+ dma_fence_signal(&copy_work->fence);
+ i915_ttm_memcpy_release(arg);
+ dma_fence_put(&copy_work->fence);
+}
+
+static void __memcpy_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+ struct i915_ttm_memcpy_work *copy_work =
+ container_of(cb, typeof(*copy_work), cb);
+
+ if (unlikely(fence->error || I915_SELFTEST_ONLY(fail_gpu_migration))) {
+ INIT_WORK(&copy_work->work, __memcpy_work);
+ queue_work(system_unbound_wq, &copy_work->work);
+ } else {
+ init_irq_work(&copy_work->irq_work, __memcpy_irq_work);
+ irq_work_queue(&copy_work->irq_work);
+ }
+}
+
+static const char *get_driver_name(struct dma_fence *fence)
+{
+ return "i915_ttm_memcpy_work";
+}
+
+static const char *get_timeline_name(struct dma_fence *fence)
+{
+ return "unbound";
+}
+
+static const struct dma_fence_ops dma_fence_memcpy_ops = {
+ .get_driver_name = get_driver_name,
+ .get_timeline_name = get_timeline_name,
+};
+
+static struct dma_fence *
+i915_ttm_memcpy_work_arm(struct i915_ttm_memcpy_work *work,
+ struct dma_fence *dep)
+{
+ int ret;
+
+ spin_lock_init(&work->lock);
+ dma_fence_init(&work->fence, &dma_fence_memcpy_ops, &work->lock, 0, 0);
+ dma_fence_get(&work->fence);
+ ret = dma_fence_add_callback(dep, &work->cb, __memcpy_cb);
+ if (ret) {
+ if (ret != -ENOENT)
+ dma_fence_wait(dep, false);
+
+ return ERR_PTR(I915_SELFTEST_ONLY(fail_gpu_migration) ? -EINVAL :
+ dep->error);
+ }
+
+ return &work->fence;
+}
+
+static struct dma_fence *
+__i915_ttm_move(struct ttm_buffer_object *bo,
+ const struct ttm_operation_ctx *ctx, bool clear,
+ struct ttm_resource *dst_mem, struct ttm_tt *dst_ttm,
+ struct i915_refct_sgt *dst_rsgt, bool allow_accel,
+ const struct i915_deps *move_deps)
+{
+ struct i915_ttm_memcpy_work *copy_work = NULL;
+ struct i915_ttm_memcpy_arg _arg, *arg = &_arg;
+ struct dma_fence *fence = ERR_PTR(-EINVAL);
+
+ if (allow_accel) {
+ fence = i915_ttm_accel_move(bo, clear, dst_mem, dst_ttm,
+ &dst_rsgt->table, move_deps);
+
+ /*
+ * We only need to intercept the error when moving to lmem.
+ * When moving to system, TTM or shmem will provide us with
+ * cleared pages.
+ */
+ if (!IS_ERR(fence) && !i915_ttm_gtt_binds_lmem(dst_mem) &&
+ !I915_SELFTEST_ONLY(fail_gpu_migration ||
+ fail_work_allocation))
+ goto out;
+ }
+
+ /* If we've scheduled gpu migration, try to arm the error intercept. */
+ if (!IS_ERR(fence)) {
+ struct dma_fence *dep = fence;
+
+ if (!I915_SELFTEST_ONLY(fail_work_allocation))
+ copy_work = kzalloc(sizeof(*copy_work), GFP_KERNEL);
+
+ if (copy_work) {
+ arg = &copy_work->arg;
+ i915_ttm_memcpy_init(arg, bo, clear, dst_mem, dst_ttm,
+ dst_rsgt);
+ fence = i915_ttm_memcpy_work_arm(copy_work, dep);
+ } else {
+ dma_fence_wait(dep, false);
+ fence = ERR_PTR(I915_SELFTEST_ONLY(fail_gpu_migration) ?
+ -EINVAL : fence->error);
+ }
+ dma_fence_put(dep);
+
+ if (!IS_ERR(fence))
+ goto out;
+ } else if (move_deps) {
+ int err = i915_deps_sync(move_deps, ctx);
+
+ if (err)
+ return ERR_PTR(err);
+ }
+
+ /* Error intercept failed or no accelerated migration to start with */
+ if (!copy_work)
+ i915_ttm_memcpy_init(arg, bo, clear, dst_mem, dst_ttm,
+ dst_rsgt);
+ i915_ttm_move_memcpy(arg);
+ i915_ttm_memcpy_release(arg);
+ kfree(copy_work);
+
+ return NULL;
+out:
+ if (!fence && copy_work) {
+ i915_ttm_memcpy_release(arg);
+ kfree(copy_work);
+ }
+
+ return fence;
+}
+
+static int
+prev_deps(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
+ struct i915_deps *deps)
+{
+ int ret;
+
+ ret = i915_deps_add_dependency(deps, bo->moving, ctx);
+ if (!ret)
+ ret = i915_deps_add_resv(deps, bo->base.resv, ctx);
+
+ return ret;
+}
+
+/**
+ * i915_ttm_move - The TTM move callback used by i915.
+ * @bo: The buffer object.
+ * @evict: Whether this is an eviction.
+ * @dst_mem: The destination ttm resource.
+ * @hop: If we need multihop, what temporary memory type to move to.
+ *
+ * Return: 0 if successful, negative error code otherwise.
+ */
+int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *dst_mem,
+ struct ttm_place *hop)
+{
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ struct ttm_resource_manager *dst_man =
+ ttm_manager_type(bo->bdev, dst_mem->mem_type);
+ struct dma_fence *migration_fence = NULL;
+ struct ttm_tt *ttm = bo->ttm;
+ struct i915_refct_sgt *dst_rsgt;
+ bool clear;
+ int ret;
+
+ if (GEM_WARN_ON(!obj)) {
+ ttm_bo_move_null(bo, dst_mem);
+ return 0;
+ }
+
+ ret = i915_ttm_move_notify(bo);
+ if (ret)
+ return ret;
+
+ if (obj->mm.madv != I915_MADV_WILLNEED) {
+ i915_ttm_purge(obj);
+ ttm_resource_free(bo, &dst_mem);
+ return 0;
+ }
+
+ /* Populate ttm with pages if needed. Typically system memory. */
+ if (ttm && (dst_man->use_tt || (ttm->page_flags & TTM_TT_FLAG_SWAPPED))) {
+ ret = ttm_tt_populate(bo->bdev, ttm, ctx);
+ if (ret)
+ return ret;
+ }
+
+ dst_rsgt = i915_ttm_resource_get_st(obj, dst_mem);
+ if (IS_ERR(dst_rsgt))
+ return PTR_ERR(dst_rsgt);
+
+ clear = !i915_ttm_cpu_maps_iomem(bo->resource) && (!ttm || !ttm_tt_is_populated(ttm));
+ if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC))) {
+ struct i915_deps deps;
+
+ i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
+ ret = prev_deps(bo, ctx, &deps);
+ if (ret) {
+ i915_refct_sgt_put(dst_rsgt);
+ return ret;
+ }
+
+ migration_fence = __i915_ttm_move(bo, ctx, clear, dst_mem, bo->ttm,
+ dst_rsgt, true, &deps);
+ i915_deps_fini(&deps);
+ }
+
+ /* We can possibly get an -ERESTARTSYS here */
+ if (IS_ERR(migration_fence)) {
+ i915_refct_sgt_put(dst_rsgt);
+ return PTR_ERR(migration_fence);
+ }
+
+ if (migration_fence) {
+ ret = ttm_bo_move_accel_cleanup(bo, migration_fence, evict,
+ true, dst_mem);
+ if (ret) {
+ dma_fence_wait(migration_fence, false);
+ ttm_bo_move_sync_cleanup(bo, dst_mem);
+ }
+ dma_fence_put(migration_fence);
+ } else {
+ ttm_bo_move_sync_cleanup(bo, dst_mem);
+ }
+
+ i915_ttm_adjust_domains_after_move(obj);
+ i915_ttm_free_cached_io_rsgt(obj);
+
+ if (i915_ttm_gtt_binds_lmem(dst_mem) || i915_ttm_cpu_maps_iomem(dst_mem)) {
+ obj->ttm.cached_io_rsgt = dst_rsgt;
+ obj->ttm.get_io_page.sg_pos = dst_rsgt->table.sgl;
+ obj->ttm.get_io_page.sg_idx = 0;
+ } else {
+ i915_refct_sgt_put(dst_rsgt);
+ }
+
+ i915_ttm_adjust_lru(obj);
+ i915_ttm_adjust_gem_after_move(obj);
+ return 0;
+}
+
+/**
+ * i915_gem_obj_copy_ttm - Copy the contents of one ttm-based gem object to
+ * another
+ * @dst: The destination object
+ * @src: The source object
+ * @allow_accel: Allow using the blitter. Otherwise TTM memcpy is used.
+ * @intr: Whether to perform waits interruptibly.
+ *
+ * Note: The caller is responsible for assuring that the underlying
+ * TTM objects are populated if needed and locked.
+ *
+ * Return: Zero on success. Negative error code on error. If @intr == true,
+ * then it may return -ERESTARTSYS or -EINTR.
+ */
+int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
+ struct drm_i915_gem_object *src,
+ bool allow_accel, bool intr)
+{
+ struct ttm_buffer_object *dst_bo = i915_gem_to_ttm(dst);
+ struct ttm_buffer_object *src_bo = i915_gem_to_ttm(src);
+ struct ttm_operation_ctx ctx = {
+ .interruptible = intr,
+ };
+ struct i915_refct_sgt *dst_rsgt;
+ struct dma_fence *copy_fence;
+ struct i915_deps deps;
+ int ret;
+
+ assert_object_held(dst);
+ assert_object_held(src);
+ i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
+
+ ret = dma_resv_reserve_shared(src_bo->base.resv, 1);
+ if (ret)
+ return ret;
+
+ ret = i915_deps_add_resv(&deps, dst_bo->base.resv, &ctx);
+ if (ret)
+ return ret;
+
+ ret = i915_deps_add_resv(&deps, src_bo->base.resv, &ctx);
+ if (ret)
+ return ret;
+
+ dst_rsgt = i915_ttm_resource_get_st(dst, dst_bo->resource);
+ copy_fence = __i915_ttm_move(src_bo, &ctx, false, dst_bo->resource,
+ dst_bo->ttm, dst_rsgt, allow_accel,
+ &deps);
+
+ i915_deps_fini(&deps);
+ i915_refct_sgt_put(dst_rsgt);
+ if (IS_ERR_OR_NULL(copy_fence))
+ return PTR_ERR_OR_ZERO(copy_fence);
+
+ dma_resv_add_excl_fence(dst_bo->base.resv, copy_fence);
+ dma_resv_add_shared_fence(src_bo->base.resv, copy_fence);
+
+ dma_fence_put(copy_fence);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.h
new file mode 100644
index 000000000000..d2e7f149e05c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+#ifndef _I915_GEM_TTM_MOVE_H_
+#define _I915_GEM_TTM_MOVE_H_
+
+#include <linux/types.h>
+
+#include "i915_selftest.h"
+
+struct ttm_buffer_object;
+struct ttm_operation_ctx;
+struct ttm_place;
+struct ttm_resource;
+struct ttm_tt;
+
+struct drm_i915_gem_object;
+struct i915_refct_sgt;
+
+int i915_ttm_move_notify(struct ttm_buffer_object *bo);
+
+I915_SELFTEST_DECLARE(void i915_ttm_migrate_set_failure_modes(bool gpu_migration,
+ bool work_allocation));
+
+int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
+ struct drm_i915_gem_object *src,
+ bool allow_accel, bool intr);
+
+/* Internal I915 TTM declarations and definitions below. */
+
+int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *dst_mem,
+ struct ttm_place *hop);
+
+void i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj);
+
+void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
index 3b6d14b5c604..9aad84059d56 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
@@ -12,6 +12,7 @@
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
+#include "gem/i915_gem_ttm_move.h"
#include "gem/i915_gem_ttm_pm.h"
/**
@@ -79,6 +80,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
err = i915_gem_obj_copy_ttm(backup, obj, pm_apply->allow_gpu, false);
GEM_WARN_ON(err);
+ ttm_bo_wait_ctx(backup_bo, &ctx);
obj->ttm.backup = backup;
return 0;
@@ -169,6 +171,7 @@ static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
false);
GEM_WARN_ON(err);
+ ttm_bo_wait_ctx(backup_bo, &ctx);
obj->ttm.backup = NULL;
err = 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 3173c9f9a040..3cc01c30dd62 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -529,7 +529,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
* On almost all of the older hw, we cannot tell the GPU that
* a page is readonly.
*/
- if (!dev_priv->gt.vm->has_read_only)
+ if (!to_gt(dev_priv)->vm->has_read_only)
return -ENODEV;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index f11325484110..dab3d30c09a0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -10,7 +10,6 @@
#include "gt/intel_engine.h"
-#include "dma_resv_utils.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
@@ -52,13 +51,6 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
}
dma_resv_iter_end(&cursor);
- /*
- * Opportunistically prune the fences iff we know they have *all* been
- * signaled.
- */
- if (timeout > 0)
- dma_resv_prune(resv);
-
return ret;
}
@@ -262,6 +254,6 @@ int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj,
unsigned int flags)
{
might_sleep();
- /* NOP for now. */
- return 0;
+
+ return i915_gem_object_wait_moving_fence(obj, !!(flags & I915_WAIT_INTERRUPTIBLE));
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c
index dbdbdc344d87..7271fbf813fa 100644
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.c
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c
@@ -12,6 +12,7 @@
int i915_gemfs_init(struct drm_i915_private *i915)
{
+ char huge_opt[] = "huge=within_size"; /* r/w */
struct file_system_type *type;
struct vfsmount *gemfs;
char *opts;
@@ -31,10 +32,8 @@ int i915_gemfs_init(struct drm_i915_private *i915)
*/
opts = NULL;
- if (intel_vtd_active()) {
+ if (intel_vtd_active(i915)) {
if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
- static char huge_opt[] = "huge=within_size"; /* r/w */
-
opts = huge_opt;
drm_info(&i915->drm,
"Transparent Hugepage mode '%s'\n",
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index b2003133deaf..11f0aa65f8a3 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -22,6 +22,22 @@
#include "selftests/mock_region.h"
#include "selftests/i915_random.h"
+static struct i915_gem_context *hugepage_ctx(struct drm_i915_private *i915,
+ struct file *file)
+{
+ struct i915_gem_context *ctx = live_context(i915, file);
+ struct i915_address_space *vm;
+
+ if (IS_ERR(ctx))
+ return ctx;
+
+ vm = ctx->vm;
+ if (vm)
+ WRITE_ONCE(vm->scrub_64K, true);
+
+ return ctx;
+}
+
static const unsigned int page_sizes[] = {
I915_GTT_PAGE_SIZE_2M,
I915_GTT_PAGE_SIZE_64K,
@@ -552,7 +568,7 @@ out_unpin:
out_put:
i915_gem_object_put(obj);
out_region:
- intel_memory_region_put(mem);
+ intel_memory_region_destroy(mem);
return err;
}
@@ -959,6 +975,8 @@ static int igt_mock_ppgtt_64K(void *arg)
__i915_gem_object_put_pages(obj);
i915_gem_object_unlock(obj);
i915_gem_object_put(obj);
+
+ i915_gem_drain_freed_objects(i915);
}
}
@@ -1080,10 +1098,6 @@ static int __igt_write_huge(struct intel_context *ce,
if (IS_ERR(vma))
return PTR_ERR(vma);
- err = i915_vma_unbind(vma);
- if (err)
- return err;
-
err = i915_vma_pin(vma, size, 0, flags | offset);
if (err) {
/*
@@ -1117,7 +1131,7 @@ out_vma_unpin:
return err;
}
-static int igt_write_huge(struct i915_gem_context *ctx,
+static int igt_write_huge(struct drm_i915_private *i915,
struct drm_i915_gem_object *obj)
{
struct i915_gem_engines *engines;
@@ -1127,6 +1141,8 @@ static int igt_write_huge(struct i915_gem_context *ctx,
IGT_TIMEOUT(end_time);
unsigned int max_page_size;
unsigned int count;
+ struct i915_gem_context *ctx;
+ struct file *file;
u64 max;
u64 num;
u64 size;
@@ -1134,6 +1150,16 @@ static int igt_write_huge(struct i915_gem_context *ctx,
int i, n;
int err = 0;
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ctx = hugepage_ctx(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out;
+ }
+
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
size = obj->base.size;
@@ -1153,7 +1179,7 @@ static int igt_write_huge(struct i915_gem_context *ctx,
}
i915_gem_context_unlock_engines(ctx);
if (!n)
- return 0;
+ goto out;
/*
* To keep things interesting when alternating between engines in our
@@ -1215,6 +1241,8 @@ static int igt_write_huge(struct i915_gem_context *ctx,
kfree(order);
+out:
+ fput(file);
return err;
}
@@ -1277,8 +1305,7 @@ static u32 igt_random_size(struct rnd_state *prng,
static int igt_ppgtt_smoke_huge(void *arg)
{
- struct i915_gem_context *ctx = arg;
- struct drm_i915_private *i915 = ctx->i915;
+ struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
I915_RND_STATE(prng);
struct {
@@ -1302,6 +1329,7 @@ static int igt_ppgtt_smoke_huge(void *arg)
u32 min = backends[i].min;
u32 max = backends[i].max;
u32 size = max;
+
try_again:
size = igt_random_size(&prng, min, rounddown_pow_of_two(size));
@@ -1336,7 +1364,7 @@ try_again:
goto out_unpin;
}
- err = igt_write_huge(ctx, obj);
+ err = igt_write_huge(i915, obj);
if (err) {
pr_err("%s write-huge failed with size=%u, i=%d\n",
__func__, size, i);
@@ -1363,8 +1391,7 @@ out_put:
static int igt_ppgtt_sanity_check(void *arg)
{
- struct i915_gem_context *ctx = arg;
- struct drm_i915_private *i915 = ctx->i915;
+ struct drm_i915_private *i915 = arg;
unsigned int supported = INTEL_INFO(i915)->page_sizes;
struct {
igt_create_fn fn;
@@ -1431,7 +1458,7 @@ static int igt_ppgtt_sanity_check(void *arg)
if (pages)
obj->mm.page_sizes.sg = pages;
- err = igt_write_huge(ctx, obj);
+ err = igt_write_huge(i915, obj);
i915_gem_object_lock(obj, NULL);
i915_gem_object_unpin_pages(obj);
@@ -1458,15 +1485,27 @@ out:
static int igt_tmpfs_fallback(void *arg)
{
- struct i915_gem_context *ctx = arg;
- struct drm_i915_private *i915 = ctx->i915;
+ struct drm_i915_private *i915 = arg;
+ struct i915_address_space *vm;
+ struct i915_gem_context *ctx;
struct vfsmount *gemfs = i915->mm.gemfs;
- struct i915_address_space *vm = i915_gem_context_get_eb_vm(ctx);
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
+ struct file *file;
u32 *vaddr;
int err = 0;
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ctx = hugepage_ctx(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out;
+ }
+ vm = i915_gem_context_get_eb_vm(ctx);
+
/*
* Make sure that we don't burst into a ball of flames upon falling back
* to tmpfs, which we rely on if on the off-chance we encouter a failure
@@ -1510,33 +1549,47 @@ out_restore:
i915->mm.gemfs = gemfs;
i915_vm_put(vm);
+out:
+ fput(file);
return err;
}
static int igt_shrink_thp(void *arg)
{
- struct i915_gem_context *ctx = arg;
- struct drm_i915_private *i915 = ctx->i915;
- struct i915_address_space *vm = i915_gem_context_get_eb_vm(ctx);
+ struct drm_i915_private *i915 = arg;
+ struct i915_address_space *vm;
+ struct i915_gem_context *ctx;
struct drm_i915_gem_object *obj;
struct i915_gem_engines_iter it;
struct intel_context *ce;
struct i915_vma *vma;
+ struct file *file;
unsigned int flags = PIN_USER;
unsigned int n;
bool should_swap;
- int err = 0;
+ int err;
+
+ if (!igt_can_allocate_thp(i915)) {
+ pr_info("missing THP support, skipping\n");
+ return 0;
+ }
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ctx = hugepage_ctx(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out;
+ }
+ vm = i915_gem_context_get_eb_vm(ctx);
/*
* Sanity check shrinking huge-paged object -- make sure nothing blows
* up.
*/
- if (!igt_can_allocate_thp(i915)) {
- pr_info("missing THP support, skipping\n");
- goto out_vm;
- }
-
obj = i915_gem_object_create_shmem(i915, SZ_2M);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
@@ -1626,7 +1679,8 @@ out_put:
i915_gem_object_put(obj);
out_vm:
i915_vm_put(vm);
-
+out:
+ fput(file);
return err;
}
@@ -1651,7 +1705,7 @@ int i915_gem_huge_page_mock_selftests(void)
mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
mkwrite_device_info(dev_priv)->ppgtt_size = 48;
- ppgtt = i915_ppgtt_create(&dev_priv->gt, 0);
+ ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_unlock;
@@ -1687,36 +1741,14 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_ppgtt_smoke_huge),
SUBTEST(igt_ppgtt_sanity_check),
};
- struct i915_gem_context *ctx;
- struct i915_address_space *vm;
- struct file *file;
- int err;
if (!HAS_PPGTT(i915)) {
pr_info("PPGTT not supported, skipping live-selftests\n");
return 0;
}
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
- file = mock_file(i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- ctx = live_context(i915, file);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto out_file;
- }
-
- vm = ctx->vm;
- if (vm)
- WRITE_ONCE(vm->scrub_64K, true);
-
- err = i915_subtests(tests, ctx);
-
-out_file:
- fput(file);
- return err;
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 8402ed925a69..75947e9dada2 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -592,7 +592,7 @@ int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_client_tiled_blits),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
return i915_live_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index b32f7fed2d9c..3f41fe5ec9d4 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -88,9 +88,9 @@ static int live_nop_switch(void *arg)
rq = i915_request_get(this);
i915_request_add(this);
}
- if (i915_request_wait(rq, 0, HZ) < 0) {
+ if (i915_request_wait(rq, 0, 10 * HZ) < 0) {
pr_err("Failed to populate %d contexts\n", nctx);
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(to_gt(i915));
i915_request_put(rq);
err = -EIO;
goto out_file;
@@ -146,7 +146,7 @@ static int live_nop_switch(void *arg)
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Switching between %ld contexts timed out\n",
prime);
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(to_gt(i915));
i915_request_put(rq);
break;
}
@@ -1223,7 +1223,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
return 0;
if (flags & TEST_RESET)
- igt_global_reset_lock(&i915->gt);
+ igt_global_reset_lock(to_gt(i915));
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
@@ -1306,7 +1306,7 @@ out_put:
out_unlock:
if (flags & TEST_RESET)
- igt_global_reset_unlock(&i915->gt);
+ igt_global_reset_unlock(to_gt(i915));
if (ret)
pr_err("%s: Failed with %d!\n", name, ret);
@@ -1481,10 +1481,10 @@ static int check_scratch(struct i915_address_space *vm, u64 offset)
static int write_to_scratch(struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
+ struct drm_i915_gem_object *obj,
u64 offset, u32 value)
{
struct drm_i915_private *i915 = ctx->i915;
- struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
struct i915_request *rq;
struct i915_vma *vma;
@@ -1497,15 +1497,9 @@ static int write_to_scratch(struct i915_gem_context *ctx,
if (err)
return err;
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
- if (IS_ERR(cmd)) {
- err = PTR_ERR(cmd);
- goto out;
- }
+ if (IS_ERR(cmd))
+ return PTR_ERR(cmd);
*cmd++ = MI_STORE_DWORD_IMM_GEN4;
if (GRAPHICS_VER(i915) >= 8) {
@@ -1569,17 +1563,19 @@ err_unpin:
i915_vma_unpin(vma);
out_vm:
i915_vm_put(vm);
-out:
- i915_gem_object_put(obj);
+
+ if (!err)
+ err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
+
return err;
}
static int read_from_scratch(struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
+ struct drm_i915_gem_object *obj,
u64 offset, u32 *value)
{
struct drm_i915_private *i915 = ctx->i915;
- struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
const u32 result = 0x100;
struct i915_request *rq;
@@ -1594,10 +1590,6 @@ static int read_from_scratch(struct i915_gem_context *ctx,
if (err)
return err;
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
if (GRAPHICS_VER(i915) >= 8) {
const u32 GPR0 = engine->mmio_base + 0x600;
@@ -1615,7 +1607,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
- goto out;
+ goto err_unpin;
}
memset(cmd, POISON_INUSE, PAGE_SIZE);
@@ -1651,7 +1643,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
- goto out;
+ goto err_unpin;
}
memset(cmd, POISON_INUSE, PAGE_SIZE);
@@ -1722,8 +1714,10 @@ err_unpin:
i915_vma_unpin(vma);
out_vm:
i915_vm_put(vm);
-out:
- i915_gem_object_put(obj);
+
+ if (!err)
+ err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
+
return err;
}
@@ -1757,6 +1751,7 @@ static int igt_vm_isolation(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_a, *ctx_b;
+ struct drm_i915_gem_object *obj_a, *obj_b;
unsigned long num_engines, count;
struct intel_engine_cs *engine;
struct igt_live_test t;
@@ -1810,6 +1805,18 @@ static int igt_vm_isolation(void *arg)
vm_total = ctx_a->vm->total;
GEM_BUG_ON(ctx_b->vm->total != vm_total);
+ obj_a = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj_a)) {
+ err = PTR_ERR(obj_a);
+ goto out_file;
+ }
+
+ obj_b = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj_b)) {
+ err = PTR_ERR(obj_b);
+ goto put_a;
+ }
+
count = 0;
num_engines = 0;
for_each_uabi_engine(engine, i915) {
@@ -1832,13 +1839,13 @@ static int igt_vm_isolation(void *arg)
I915_GTT_PAGE_SIZE, vm_total,
sizeof(u32), alignof_dword);
- err = write_to_scratch(ctx_a, engine,
+ err = write_to_scratch(ctx_a, engine, obj_a,
offset, 0xdeadbeef);
if (err == 0)
- err = read_from_scratch(ctx_b, engine,
+ err = read_from_scratch(ctx_b, engine, obj_b,
offset, &value);
if (err)
- goto out_file;
+ goto put_b;
if (value != expected) {
pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
@@ -1847,7 +1854,7 @@ static int igt_vm_isolation(void *arg)
lower_32_bits(offset),
this);
err = -EINVAL;
- goto out_file;
+ goto put_b;
}
this++;
@@ -1858,6 +1865,10 @@ static int igt_vm_isolation(void *arg)
pr_info("Checked %lu scratch offsets across %lu engines\n",
count, num_engines);
+put_b:
+ i915_gem_object_put(obj_b);
+put_a:
+ i915_gem_object_put(obj_a);
out_file:
if (igt_live_test_end(&t))
err = -EIO;
@@ -1877,7 +1888,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_vm_isolation),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
return i915_live_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index 4a6bb64c3a35..3cc74b0fed06 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -102,7 +102,7 @@ static int igt_dmabuf_import_same_driver_lmem(void *arg)
obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &lmem, 1);
if (IS_ERR(obj)) {
pr_err("__i915_gem_object_create_user failed with err=%ld\n",
- PTR_ERR(dmabuf));
+ PTR_ERR(obj));
err = PTR_ERR(obj);
goto out_ret;
}
@@ -158,7 +158,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
regions, num_regions);
if (IS_ERR(obj)) {
pr_err("__i915_gem_object_create_user failed with err=%ld\n",
- PTR_ERR(dmabuf));
+ PTR_ERR(obj));
err = PTR_ERR(obj);
goto out_ret;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
index 28a700f08b49..ecb691c81d1e 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
@@ -4,6 +4,7 @@
*/
#include "gt/intel_migrate.h"
+#include "gem/i915_gem_ttm_move.h"
static int igt_fill_check_buffer(struct drm_i915_gem_object *obj,
bool fill)
@@ -227,17 +228,38 @@ out_put:
return err;
}
+static int igt_lmem_pages_failsafe_migrate(void *arg)
+{
+ int fail_gpu, fail_alloc, ret;
+
+ for (fail_gpu = 0; fail_gpu < 2; ++fail_gpu) {
+ for (fail_alloc = 0; fail_alloc < 2; ++fail_alloc) {
+ pr_info("Simulated failure modes: gpu: %d, alloc: %d\n",
+ fail_gpu, fail_alloc);
+ i915_ttm_migrate_set_failure_modes(fail_gpu,
+ fail_alloc);
+ ret = igt_lmem_pages_migrate(arg);
+ if (ret)
+ goto out_err;
+ }
+ }
+
+out_err:
+ i915_ttm_migrate_set_failure_modes(false, false);
+ return ret;
+}
+
int i915_gem_migrate_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_smem_create_migrate),
SUBTEST(igt_lmem_create_migrate),
SUBTEST(igt_same_create_migrate),
- SUBTEST(igt_lmem_pages_migrate),
+ SUBTEST(igt_lmem_pages_failsafe_migrate),
};
if (!HAS_LMEM(i915))
return 0;
- return intel_gt_live_subtests(tests, &i915->gt);
+ return intel_gt_live_subtests(tests, to_gt(i915));
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 6d30cdfa80f3..743e6ab2c40b 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -84,6 +84,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
struct rnd_state *prng)
{
const unsigned long npages = obj->base.size / PAGE_SIZE;
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt_view view;
struct i915_vma *vma;
unsigned long page;
@@ -141,7 +142,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
if (offset >= obj->base.size)
goto out;
- intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);
+ intel_gt_flush_ggtt_writes(to_gt(i915));
p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
cpu = kmap(p) + offset_in_page(offset);
@@ -175,6 +176,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
{
const unsigned int nreal = obj->scratch / PAGE_SIZE;
const unsigned long npages = obj->base.size / PAGE_SIZE;
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_vma *vma;
unsigned long page;
int err;
@@ -234,7 +236,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
if (offset >= obj->base.size)
continue;
- intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);
+ intel_gt_flush_ggtt_writes(to_gt(i915));
p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
cpu = kmap(p) + offset_in_page(offset);
@@ -616,14 +618,14 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
static void disable_retire_worker(struct drm_i915_private *i915)
{
i915_gem_driver_unregister__shrinker(i915);
- intel_gt_pm_get(&i915->gt);
- cancel_delayed_work_sync(&i915->gt.requests.retire_work);
+ intel_gt_pm_get(to_gt(i915));
+ cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work);
}
static void restore_retire_worker(struct drm_i915_private *i915)
{
igt_flush_test(i915);
- intel_gt_pm_put(&i915->gt);
+ intel_gt_pm_put(to_gt(i915));
i915_gem_driver_register__shrinker(i915);
}
@@ -651,8 +653,8 @@ static int igt_mmap_offset_exhaustion(void *arg)
/* Disable background reaper */
disable_retire_worker(i915);
- GEM_BUG_ON(!i915->gt.awake);
- intel_gt_retire_requests(&i915->gt);
+ GEM_BUG_ON(!to_gt(i915)->awake);
+ intel_gt_retire_requests(to_gt(i915));
i915_gem_drain_freed_objects(i915);
/* Trim the device mmap space to only a page */
@@ -728,7 +730,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
/* Now fill with busy dead objects that we expect to reap */
for (loop = 0; loop < 3; loop++) {
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
break;
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
@@ -942,7 +944,7 @@ static int __igt_mmap(struct drm_i915_private *i915,
}
if (type == I915_MMAP_TYPE_GTT)
- intel_gt_flush_ggtt_writes(&i915->gt);
+ intel_gt_flush_ggtt_writes(to_gt(i915));
err = wc_check(obj);
if (err == -ENXIO)
@@ -1049,7 +1051,7 @@ static int __igt_mmap_access(struct drm_i915_private *i915,
goto out_unmap;
}
- intel_gt_flush_ggtt_writes(&i915->gt);
+ intel_gt_flush_ggtt_writes(to_gt(i915));
err = access_process_vm(current, addr, &x, sizeof(x), 0);
if (err != sizeof(x)) {
@@ -1065,7 +1067,7 @@ static int __igt_mmap_access(struct drm_i915_private *i915,
goto out_unmap;
}
- intel_gt_flush_ggtt_writes(&i915->gt);
+ intel_gt_flush_ggtt_writes(to_gt(i915));
err = __get_user(y, ptr);
if (err) {
@@ -1165,7 +1167,7 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
}
if (type == I915_MMAP_TYPE_GTT)
- intel_gt_flush_ggtt_writes(&i915->gt);
+ intel_gt_flush_ggtt_writes(to_gt(i915));
for_each_uabi_engine(engine, i915) {
struct i915_request *rq;
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index 890191f286e3..6e9292918bfc 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -185,7 +185,6 @@ static void gen6_alloc_va_range(struct i915_address_space *vm,
pt = stash->pt[0];
__i915_gem_object_pin_pages(pt->base);
- i915_gem_object_make_unshrinkable(pt->base);
fill32_px(pt, vm->scratch[0]->encode);
@@ -262,30 +261,14 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
- __i915_vma_put(ppgtt->vma);
-
gen6_ppgtt_free_pd(ppgtt);
free_scratch(vm);
mutex_destroy(&ppgtt->flush);
- mutex_destroy(&ppgtt->pin_mutex);
free_pd(&ppgtt->base.vm, ppgtt->base.pd);
}
-static int pd_vma_set_pages(struct i915_vma *vma)
-{
- vma->pages = ERR_PTR(-ENODEV);
- return 0;
-}
-
-static void pd_vma_clear_pages(struct i915_vma *vma)
-{
- GEM_BUG_ON(!vma->pages);
-
- vma->pages = NULL;
-}
-
static void pd_vma_bind(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
struct i915_vma *vma,
@@ -325,43 +308,10 @@ static void pd_vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
}
static const struct i915_vma_ops pd_vma_ops = {
- .set_pages = pd_vma_set_pages,
- .clear_pages = pd_vma_clear_pages,
.bind_vma = pd_vma_bind,
.unbind_vma = pd_vma_unbind,
};
-static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
-{
- struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt;
- struct i915_vma *vma;
-
- GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
- GEM_BUG_ON(size > ggtt->vm.total);
-
- vma = i915_vma_alloc();
- if (!vma)
- return ERR_PTR(-ENOMEM);
-
- i915_active_init(&vma->active, NULL, NULL, 0);
-
- kref_init(&vma->ref);
- mutex_init(&vma->pages_mutex);
- vma->vm = i915_vm_get(&ggtt->vm);
- vma->ops = &pd_vma_ops;
- vma->private = ppgtt;
-
- vma->size = size;
- vma->fence_size = size;
- atomic_set(&vma->flags, I915_VMA_GGTT);
- vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
-
- INIT_LIST_HEAD(&vma->obj_link);
- INIT_LIST_HEAD(&vma->closed_link);
-
- return vma;
-}
-
int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww)
{
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
@@ -378,42 +328,92 @@ int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww)
if (atomic_add_unless(&ppgtt->pin_count, 1, 0))
return 0;
- if (mutex_lock_interruptible(&ppgtt->pin_mutex))
- return -EINTR;
+ /* grab the ppgtt resv to pin the object */
+ err = i915_vm_lock_objects(&ppgtt->base.vm, ww);
+ if (err)
+ return err;
/*
* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
* allocator works in address space sizes, so it's multiplied by page
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
- err = 0;
- if (!atomic_read(&ppgtt->pin_count))
+ if (!atomic_read(&ppgtt->pin_count)) {
err = i915_ggtt_pin(ppgtt->vma, ww, GEN6_PD_ALIGN, PIN_HIGH);
+
+ GEM_BUG_ON(ppgtt->vma->fence);
+ clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(ppgtt->vma));
+ }
if (!err)
atomic_inc(&ppgtt->pin_count);
- mutex_unlock(&ppgtt->pin_mutex);
return err;
}
-void gen6_ppgtt_unpin(struct i915_ppgtt *base)
+static int pd_dummy_obj_get_pages(struct drm_i915_gem_object *obj)
{
- struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
+ obj->mm.pages = ZERO_SIZE_PTR;
+ return 0;
+}
- GEM_BUG_ON(!atomic_read(&ppgtt->pin_count));
- if (atomic_dec_and_test(&ppgtt->pin_count))
- i915_vma_unpin(ppgtt->vma);
+static void pd_dummy_obj_put_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
}
-void gen6_ppgtt_unpin_all(struct i915_ppgtt *base)
+static const struct drm_i915_gem_object_ops pd_dummy_obj_ops = {
+ .name = "pd_dummy_obj",
+ .get_pages = pd_dummy_obj_get_pages,
+ .put_pages = pd_dummy_obj_put_pages,
+};
+
+static struct i915_page_directory *
+gen6_alloc_top_pd(struct gen6_ppgtt *ppgtt)
{
- struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
+ struct i915_ggtt * const ggtt = ppgtt->base.vm.gt->ggtt;
+ struct i915_page_directory *pd;
+ int err;
- if (!atomic_read(&ppgtt->pin_count))
- return;
+ pd = __alloc_pd(I915_PDES);
+ if (unlikely(!pd))
+ return ERR_PTR(-ENOMEM);
- i915_vma_unpin(ppgtt->vma);
- atomic_set(&ppgtt->pin_count, 0);
+ pd->pt.base = __i915_gem_object_create_internal(ppgtt->base.vm.gt->i915,
+ &pd_dummy_obj_ops,
+ I915_PDES * SZ_4K);
+ if (IS_ERR(pd->pt.base)) {
+ err = PTR_ERR(pd->pt.base);
+ pd->pt.base = NULL;
+ goto err_pd;
+ }
+
+ pd->pt.base->base.resv = i915_vm_resv_get(&ppgtt->base.vm);
+ pd->pt.base->shares_resv_from = &ppgtt->base.vm;
+
+ ppgtt->vma = i915_vma_instance(pd->pt.base, &ggtt->vm, NULL);
+ if (IS_ERR(ppgtt->vma)) {
+ err = PTR_ERR(ppgtt->vma);
+ ppgtt->vma = NULL;
+ goto err_pd;
+ }
+
+ /* The dummy object we create is special; override its ops. */
+ ppgtt->vma->ops = &pd_vma_ops;
+ ppgtt->vma->private = ppgtt;
+ return pd;
+
+err_pd:
+ free_pd(&ppgtt->base.vm, pd);
+ return ERR_PTR(err);
+}
+
+void gen6_ppgtt_unpin(struct i915_ppgtt *base)
+{
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
+
+ GEM_BUG_ON(!atomic_read(&ppgtt->pin_count));
+ if (atomic_dec_and_test(&ppgtt->pin_count))
+ i915_vma_unpin(ppgtt->vma);
}
struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt)
@@ -427,7 +427,6 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt)
return ERR_PTR(-ENOMEM);
mutex_init(&ppgtt->flush);
- mutex_init(&ppgtt->pin_mutex);
ppgtt_init(&ppgtt->base, gt, 0);
ppgtt->base.vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen6_pte_t));
@@ -440,21 +439,16 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt)
ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
ppgtt->base.vm.alloc_pt_dma = alloc_pt_dma;
+ ppgtt->base.vm.alloc_scratch_dma = alloc_pt_dma;
ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
- ppgtt->base.pd = __alloc_pd(I915_PDES);
- if (!ppgtt->base.pd) {
- err = -ENOMEM;
- goto err_free;
- }
-
err = gen6_ppgtt_init_scratch(ppgtt);
if (err)
- goto err_pd;
+ goto err_free;
- ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
- if (IS_ERR(ppgtt->vma)) {
- err = PTR_ERR(ppgtt->vma);
+ ppgtt->base.pd = gen6_alloc_top_pd(ppgtt);
+ if (IS_ERR(ppgtt->base.pd)) {
+ err = PTR_ERR(ppgtt->base.pd);
goto err_scratch;
}
@@ -462,10 +456,7 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt)
err_scratch:
free_scratch(&ppgtt->base.vm);
-err_pd:
- free_pd(&ppgtt->base.vm, ppgtt->base.pd);
err_free:
- mutex_destroy(&ppgtt->pin_mutex);
kfree(ppgtt);
return ERR_PTR(err);
}
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.h b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h
index 6a61a5c3a85a..5e5cf2ec3309 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.h
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h
@@ -19,7 +19,6 @@ struct gen6_ppgtt {
u32 pp_dir;
atomic_t pin_count;
- struct mutex pin_mutex;
bool scan_for_unused_pt;
};
@@ -71,7 +70,6 @@ static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base)
int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww);
void gen6_ppgtt_unpin(struct i915_ppgtt *base);
-void gen6_ppgtt_unpin_all(struct i915_ppgtt *base);
void gen6_ppgtt_enable(struct intel_gt *gt);
void gen7_ppgtt_enable(struct intel_gt *gt);
struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index 461844dffd7e..e320610dd0b8 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -42,7 +42,7 @@ int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
vf_flush_wa = true;
/* WaForGAMHang:kbl */
- if (IS_KBL_GT_STEP(rq->engine->i915, 0, STEP_C0))
+ if (IS_KBL_GRAPHICS_STEP(rq->engine->i915, 0, STEP_C0))
dc_flush_wa = true;
}
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 037a9a6e4889..b012c50f7ce7 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -18,7 +18,7 @@
static u64 gen8_pde_encode(const dma_addr_t addr,
const enum i915_cache_level level)
{
- u64 pde = addr | _PAGE_PRESENT | _PAGE_RW;
+ u64 pde = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW;
if (level != I915_CACHE_NONE)
pde |= PPAT_CACHED_PDE;
@@ -32,10 +32,10 @@ static u64 gen8_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
u32 flags)
{
- gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
+ gen8_pte_t pte = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW;
if (unlikely(flags & PTE_READ_ONLY))
- pte &= ~_PAGE_RW;
+ pte &= ~GEN8_PAGE_RW;
if (flags & PTE_LM)
pte |= GEN12_PPGTT_PTE_LM;
@@ -301,7 +301,6 @@ static void __gen8_ppgtt_alloc(struct i915_address_space * const vm,
pt = stash->pt[!!lvl];
__i915_gem_object_pin_pages(pt->base);
- i915_gem_object_make_unshrinkable(pt->base);
fill_px(pt, vm->scratch[lvl]->encode);
@@ -652,7 +651,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
vm->scratch[0]->encode =
gen8_pte_encode(px_dma(vm->scratch[0]),
- I915_CACHE_LLC, pte_flags);
+ I915_CACHE_NONE, pte_flags);
for (i = 1; i <= vm->top; i++) {
struct drm_i915_gem_object *obj;
@@ -668,7 +667,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
}
fill_px(obj, vm->scratch[i - 1]->encode);
- obj->encode = gen8_pde_encode(px_dma(obj), I915_CACHE_LLC);
+ obj->encode = gen8_pde_encode(px_dma(obj), I915_CACHE_NONE);
vm->scratch[i] = obj;
}
@@ -777,10 +776,29 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
*/
ppgtt->vm.has_read_only = !IS_GRAPHICS_VER(gt->i915, 11, 12);
- if (HAS_LMEM(gt->i915))
+ if (HAS_LMEM(gt->i915)) {
ppgtt->vm.alloc_pt_dma = alloc_pt_lmem;
- else
+
+ /*
+ * On some platforms the hw has dropped support for 4K GTT pages
+ * when dealing with LMEM, and due to the design of 64K GTT
+ * pages in the hw, we can only mark the *entire* page-table as
+ * operating in 64K GTT mode, since the enable bit is still on
+ * the pde, and not the pte. And since we still need to allow
+ * 4K GTT pages for SMEM objects, we can't have a "normal" 4K
+ * page-table with scratch pointing to LMEM, since that's
+ * undefined from the hw pov. The simplest solution is to just
+ * move the 64K scratch page to SMEM on such platforms and call
+ * it a day, since that should work for all configurations.
+ */
+ if (HAS_64K_PAGES(gt->i915))
+ ppgtt->vm.alloc_scratch_dma = alloc_pt_dma;
+ else
+ ppgtt->vm.alloc_scratch_dma = alloc_pt_lmem;
+ } else {
ppgtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ppgtt->vm.alloc_scratch_dma = alloc_pt_dma;
+ }
err = gen8_init_scratch(&ppgtt->vm);
if (err)
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 5634d14052bc..ba083d800a08 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -219,7 +219,7 @@ int __intel_context_do_pin_ww(struct intel_context *ce,
*/
err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww);
- if (!err && ce->ring->vma->obj)
+ if (!err)
err = i915_gem_object_lock(ce->ring->vma->obj, ww);
if (!err && ce->state)
err = i915_gem_object_lock(ce->state->obj, ww);
@@ -228,17 +228,17 @@ int __intel_context_do_pin_ww(struct intel_context *ce,
if (err)
return err;
- err = i915_active_acquire(&ce->active);
+ err = ce->ops->pre_pin(ce, ww, &vaddr);
if (err)
goto err_ctx_unpin;
- err = ce->ops->pre_pin(ce, ww, &vaddr);
+ err = i915_active_acquire(&ce->active);
if (err)
- goto err_release;
+ goto err_post_unpin;
err = mutex_lock_interruptible(&ce->pin_mutex);
if (err)
- goto err_post_unpin;
+ goto err_release;
intel_engine_pm_might_get(ce->engine);
@@ -273,11 +273,11 @@ int __intel_context_do_pin_ww(struct intel_context *ce,
err_unlock:
mutex_unlock(&ce->pin_mutex);
+err_release:
+ i915_active_release(&ce->active);
err_post_unpin:
if (!handoff)
ce->ops->post_unpin(ce);
-err_release:
- i915_active_release(&ce->active);
err_ctx_unpin:
intel_context_post_unpin(ce);
@@ -364,7 +364,7 @@ static int __intel_context_active(struct i915_active *active)
return 0;
}
-static int __i915_sw_fence_call
+static int
sw_fence_dummy_notify(struct i915_sw_fence *sf,
enum i915_sw_fence_notify state)
{
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 246c37d72cd7..d8c74bbf9aae 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -211,7 +211,8 @@ static inline void intel_context_enter(struct intel_context *ce)
static inline void intel_context_mark_active(struct intel_context *ce)
{
- lockdep_assert_held(&ce->timeline->mutex);
+ lockdep_assert(lockdep_is_held(&ce->timeline->mutex) ||
+ test_bit(CONTEXT_IS_PARKING, &ce->flags));
++ce->active_count;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 9e0177dc5484..30cd81ad8911 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -118,6 +118,7 @@ struct intel_context {
#define CONTEXT_LRCA_DIRTY 9
#define CONTEXT_GUC_INIT 10
#define CONTEXT_PERMA_PIN 11
+#define CONTEXT_IS_PARKING 12
struct {
u64 timeout_us;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index ff6753ccb129..352254e001b4 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -325,6 +325,38 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
engine->id = id;
engine->legacy_idx = INVALID_ENGINE;
engine->mask = BIT(id);
+ if (GRAPHICS_VER(gt->i915) >= 11) {
+ static const u32 engine_reset_domains[] = {
+ [RCS0] = GEN11_GRDOM_RENDER,
+ [BCS0] = GEN11_GRDOM_BLT,
+ [VCS0] = GEN11_GRDOM_MEDIA,
+ [VCS1] = GEN11_GRDOM_MEDIA2,
+ [VCS2] = GEN11_GRDOM_MEDIA3,
+ [VCS3] = GEN11_GRDOM_MEDIA4,
+ [VCS4] = GEN11_GRDOM_MEDIA5,
+ [VCS5] = GEN11_GRDOM_MEDIA6,
+ [VCS6] = GEN11_GRDOM_MEDIA7,
+ [VCS7] = GEN11_GRDOM_MEDIA8,
+ [VECS0] = GEN11_GRDOM_VECS,
+ [VECS1] = GEN11_GRDOM_VECS2,
+ [VECS2] = GEN11_GRDOM_VECS3,
+ [VECS3] = GEN11_GRDOM_VECS4,
+ };
+ GEM_BUG_ON(id >= ARRAY_SIZE(engine_reset_domains) ||
+ !engine_reset_domains[id]);
+ engine->reset_domain = engine_reset_domains[id];
+ } else {
+ static const u32 engine_reset_domains[] = {
+ [RCS0] = GEN6_GRDOM_RENDER,
+ [BCS0] = GEN6_GRDOM_BLT,
+ [VCS0] = GEN6_GRDOM_MEDIA,
+ [VCS1] = GEN8_GRDOM_MEDIA2,
+ [VECS0] = GEN6_GRDOM_VECS,
+ };
+ GEM_BUG_ON(id >= ARRAY_SIZE(engine_reset_domains) ||
+ !engine_reset_domains[id]);
+ engine->reset_domain = engine_reset_domains[id];
+ }
engine->i915 = i915;
engine->gt = gt;
engine->uncore = gt->uncore;
@@ -363,7 +395,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
DRIVER_CAPS(i915)->has_logical_contexts = true;
ewma__engine_latency_init(&engine->latency);
- seqcount_init(&engine->stats.lock);
+ seqcount_init(&engine->stats.execlists.lock);
ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
@@ -1676,14 +1708,18 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
{
+ struct i915_vma_snapshot *vsnap = &rq->batch_snapshot;
void *ring;
int size;
+ if (!i915_vma_snapshot_present(vsnap))
+ vsnap = NULL;
+
drm_printf(m,
"[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
rq->head, rq->postfix, rq->tail,
- rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
- rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+ vsnap ? upper_32_bits(vsnap->gtt_offset) : ~0u,
+ vsnap ? lower_32_bits(vsnap->gtt_offset) : ~0u);
size = rq->tail - rq->head;
if (rq->tail < rq->head)
@@ -1915,22 +1951,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
intel_engine_print_breadcrumbs(engine, m);
}
-static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
- ktime_t *now)
-{
- ktime_t total = engine->stats.total;
-
- /*
- * If the engine is executing something at the moment
- * add it to the total.
- */
- *now = ktime_get();
- if (READ_ONCE(engine->stats.active))
- total = ktime_add(total, ktime_sub(*now, engine->stats.start));
-
- return total;
-}
-
/**
* intel_engine_get_busy_time() - Return current accumulated engine busyness
* @engine: engine to report on
@@ -1940,15 +1960,7 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
*/
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
{
- unsigned int seq;
- ktime_t total;
-
- do {
- seq = read_seqcount_begin(&engine->stats.lock);
- total = __intel_engine_get_busy_time(engine, now);
- } while (read_seqcount_retry(&engine->stats.lock, seq));
-
- return total;
+ return engine->busyness(engine, now);
}
struct intel_context *
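For reference, the per-engine reset_domain introduced in intel_engine_setup() above replaces open-coded GRDOM lookups in the reset path; a minimal sketch of how a caller might fold a set of engines into a single hardware reset mask follows (the helper name is hypothetical, iteration uses the existing for_each_engine_masked() pattern):

static u32 reset_domains_for_engines(struct intel_gt *gt,
				     intel_engine_mask_t engine_mask)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t tmp;
	u32 hw_mask = 0;

	/* Each engine now carries its own GRDOM bit; accumulate them. */
	for_each_engine_masked(engine, gt, engine_mask, tmp)
		hw_mask |= engine->reset_domain;

	return hw_mask;
}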
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index a1334b48dde7..b0a4a2dbe3ee 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -26,7 +26,7 @@ static void dbg_poison_ce(struct intel_context *ce)
int type = i915_coherent_map_type(ce->engine->i915, obj, true);
void *map;
- if (!i915_gem_object_trylock(obj))
+ if (!i915_gem_object_trylock(obj, NULL))
return;
map = i915_gem_object_pin_map(obj, type);
@@ -80,39 +80,6 @@ static int __engine_unpark(struct intel_wakeref *wf)
return 0;
}
-#if IS_ENABLED(CONFIG_LOCKDEP)
-
-static unsigned long __timeline_mark_lock(struct intel_context *ce)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);
-
- return flags;
-}
-
-static void __timeline_mark_unlock(struct intel_context *ce,
- unsigned long flags)
-{
- mutex_release(&ce->timeline->mutex.dep_map, _THIS_IP_);
- local_irq_restore(flags);
-}
-
-#else
-
-static unsigned long __timeline_mark_lock(struct intel_context *ce)
-{
- return 0;
-}
-
-static void __timeline_mark_unlock(struct intel_context *ce,
- unsigned long flags)
-{
-}
-
-#endif /* !IS_ENABLED(CONFIG_LOCKDEP) */
-
static void duration(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct i915_request *rq = to_request(fence);
@@ -159,7 +126,6 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
struct intel_context *ce = engine->kernel_context;
struct i915_request *rq;
- unsigned long flags;
bool result = true;
/*
@@ -214,7 +180,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
* engine->wakeref.count, we may see the request completion and retire
* it causing an underflow of the engine->wakeref.
*/
- flags = __timeline_mark_lock(ce);
+ set_bit(CONTEXT_IS_PARKING, &ce->flags);
GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);
rq = __i915_request_create(ce, GFP_NOWAIT);
@@ -246,7 +212,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
result = false;
out_unlock:
- __timeline_mark_unlock(ce, flags);
+ clear_bit(CONTEXT_IS_PARKING, &ce->flags);
return result;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_stats.h b/drivers/gpu/drm/i915/gt/intel_engine_stats.h
index 24fbdd94351a..8e762d683e50 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_stats.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_stats.h
@@ -15,45 +15,46 @@
static inline void intel_engine_context_in(struct intel_engine_cs *engine)
{
+ struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
unsigned long flags;
- if (engine->stats.active) {
- engine->stats.active++;
+ if (stats->active) {
+ stats->active++;
return;
}
/* The writer is serialised; but the pmu reader may be from hardirq */
local_irq_save(flags);
- write_seqcount_begin(&engine->stats.lock);
+ write_seqcount_begin(&stats->lock);
- engine->stats.start = ktime_get();
- engine->stats.active++;
+ stats->start = ktime_get();
+ stats->active++;
- write_seqcount_end(&engine->stats.lock);
+ write_seqcount_end(&stats->lock);
local_irq_restore(flags);
- GEM_BUG_ON(!engine->stats.active);
+ GEM_BUG_ON(!stats->active);
}
static inline void intel_engine_context_out(struct intel_engine_cs *engine)
{
+ struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
unsigned long flags;
- GEM_BUG_ON(!engine->stats.active);
- if (engine->stats.active > 1) {
- engine->stats.active--;
+ GEM_BUG_ON(!stats->active);
+ if (stats->active > 1) {
+ stats->active--;
return;
}
local_irq_save(flags);
- write_seqcount_begin(&engine->stats.lock);
+ write_seqcount_begin(&stats->lock);
- engine->stats.active--;
- engine->stats.total =
- ktime_add(engine->stats.total,
- ktime_sub(ktime_get(), engine->stats.start));
+ stats->active--;
+ stats->total = ktime_add(stats->total,
+ ktime_sub(ktime_get(), stats->start));
- write_seqcount_end(&engine->stats.lock);
+ write_seqcount_end(&stats->lock);
local_irq_restore(flags);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index e0f773585c29..36365bdbe1ee 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -257,6 +257,55 @@ struct intel_engine_execlists {
#define INTEL_ENGINE_CS_MAX_NAME 8
+struct intel_engine_execlists_stats {
+ /**
+ * @active: Number of contexts currently scheduled in.
+ */
+ unsigned int active;
+
+ /**
+ * @lock: Lock protecting the below fields.
+ */
+ seqcount_t lock;
+
+ /**
+ * @total: Total time this engine was busy.
+ *
+ * Accumulated time not counting the most recent block in cases where
+ * engine is currently busy (active > 0).
+ */
+ ktime_t total;
+
+ /**
+ * @start: Timestamp of the last idle to active transition.
+ *
+ * Idle is defined as active == 0, active is active > 0.
+ */
+ ktime_t start;
+};
+
+struct intel_engine_guc_stats {
+ /**
+ * @running: Active state of the engine when busyness was last sampled.
+ */
+ bool running;
+
+ /**
+ * @prev_total: Previous value of total runtime clock cycles.
+ */
+ u32 prev_total;
+
+ /**
+ * @total_gt_clks: Total gt clock cycles this engine was busy.
+ */
+ u64 total_gt_clks;
+
+ /**
+ * @start_gt_clk: GT clock time of last idle to active transition.
+ */
+ u64 start_gt_clk;
+};
+
struct intel_engine_cs {
struct drm_i915_private *i915;
struct intel_gt *gt;
@@ -269,6 +318,7 @@ struct intel_engine_cs {
unsigned int guc_id;
intel_engine_mask_t mask;
+ u32 reset_domain;
/**
* @logical_mask: logical mask of engine, reported to user space via
* query IOCTL and used to communicate with the GuC in logical space.
@@ -439,6 +489,12 @@ struct intel_engine_cs {
void (*add_active_request)(struct i915_request *rq);
void (*remove_active_request)(struct i915_request *rq);
+ /*
+ * Get engine busyness and the time at which the busyness was sampled.
+ */
+ ktime_t (*busyness)(struct intel_engine_cs *engine,
+ ktime_t *now);
+
struct intel_engine_execlists execlists;
/*
@@ -488,30 +544,10 @@ struct intel_engine_cs {
u32 (*get_cmd_length_mask)(u32 cmd_header);
struct {
- /**
- * @active: Number of contexts currently scheduled in.
- */
- unsigned int active;
-
- /**
- * @lock: Lock protecting the below fields.
- */
- seqcount_t lock;
-
- /**
- * @total: Total time this engine was busy.
- *
- * Accumulated time not counting the most recent block in cases
- * where engine is currently busy (active > 0).
- */
- ktime_t total;
-
- /**
- * @start: Timestamp of the last idle to active transition.
- *
- * Idle is defined as active == 0, active is active > 0.
- */
- ktime_t start;
+ union {
+ struct intel_engine_execlists_stats execlists;
+ struct intel_engine_guc_stats guc;
+ };
/**
* @rps: Utilisation at last RPS sampling.
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
index 8f8bea08e734..9ce85a845105 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -116,7 +116,7 @@ static void set_scheduler_caps(struct drm_i915_private *i915)
disabled |= (I915_SCHEDULER_CAP_ENABLED |
I915_SCHEDULER_CAP_PRIORITY);
- if (intel_uc_uses_guc_submission(&i915->gt.uc))
+ if (intel_uc_uses_guc_submission(&to_gt(i915)->uc))
enabled |= I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP;
for (i = 0; i < ARRAY_SIZE(map); i++) {
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index bedb80057046..a69df5e9e77a 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -2186,7 +2186,8 @@ struct execlists_capture {
static void execlists_capture_work(struct work_struct *work)
{
struct execlists_capture *cap = container_of(work, typeof(*cap), work);
- const gfp_t gfp = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
+ const gfp_t gfp = __GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL |
+ __GFP_NOWARN;
struct intel_engine_cs *engine = cap->rq->engine;
struct intel_gt_coredump *gt = cap->error->gt;
struct intel_engine_capture_vma *vma;
@@ -3293,6 +3294,38 @@ static void execlists_release(struct intel_engine_cs *engine)
lrc_fini_wa_ctx(engine);
}
+static ktime_t __execlists_engine_busyness(struct intel_engine_cs *engine,
+ ktime_t *now)
+{
+ struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
+ ktime_t total = stats->total;
+
+ /*
+ * If the engine is executing something at the moment
+ * add it to the total.
+ */
+ *now = ktime_get();
+ if (READ_ONCE(stats->active))
+ total = ktime_add(total, ktime_sub(*now, stats->start));
+
+ return total;
+}
+
+static ktime_t execlists_engine_busyness(struct intel_engine_cs *engine,
+ ktime_t *now)
+{
+ struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
+ unsigned int seq;
+ ktime_t total;
+
+ do {
+ seq = read_seqcount_begin(&stats->lock);
+ total = __execlists_engine_busyness(engine, now);
+ } while (read_seqcount_retry(&stats->lock, seq));
+
+ return total;
+}
+
static void
logical_ring_default_vfuncs(struct intel_engine_cs *engine)
{
@@ -3349,6 +3382,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->emit_bb_start = gen8_emit_bb_start;
else
engine->emit_bb_start = gen8_emit_bb_start_noarb;
+
+ engine->busyness = execlists_engine_busyness;
}
static void logical_ring_default_irqs(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 555111c3bee5..5263dda7f8d5 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -22,9 +22,6 @@
#include "intel_gtt.h"
#include "gen8_ppgtt.h"
-static int
-i915_get_ggtt_vma_pages(struct i915_vma *vma);
-
static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
unsigned long color,
u64 *start,
@@ -106,7 +103,7 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
* Query intel_iommu to see if we need the workaround. Presumably that
* was loaded first.
*/
- if (!intel_vtd_active())
+ if (!intel_vtd_active(i915))
return false;
if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915))
@@ -209,7 +206,7 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
u32 flags)
{
- gen8_pte_t pte = addr | _PAGE_PRESENT;
+ gen8_pte_t pte = addr | GEN8_PAGE_PRESENT;
if (flags & PTE_LM)
pte |= GEN12_GGTT_PTE_LM;
@@ -892,21 +889,6 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return 0;
}
-int ggtt_set_pages(struct i915_vma *vma)
-{
- int ret;
-
- GEM_BUG_ON(vma->pages);
-
- ret = i915_get_ggtt_vma_pages(vma);
- if (ret)
- return ret;
-
- vma->page_sizes = vma->obj->mm.page_sizes;
-
- return 0;
-}
-
static void gen6_gmch_remove(struct i915_address_space *vm)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
@@ -941,6 +923,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
size = gen8_get_total_gtt_size(snb_gmch_ctl);
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY;
ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
@@ -967,8 +950,6 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
- ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
- ggtt->vm.vma_ops.clear_pages = clear_pages;
ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
@@ -1094,6 +1075,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
ggtt->vm.clear_range = nop_clear_range;
if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
@@ -1117,8 +1099,6 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
- ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
- ggtt->vm.vma_ops.clear_pages = clear_pages;
return ggtt_probe_common(ggtt, size);
}
@@ -1146,6 +1126,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
(struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
if (needs_idle_maps(i915)) {
drm_notice(&i915->drm,
@@ -1162,8 +1143,6 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
- ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
- ggtt->vm.vma_ops.clear_pages = clear_pages;
if (unlikely(ggtt->do_idle_maps))
drm_notice(&i915->drm,
@@ -1229,11 +1208,11 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915)
{
int ret;
- ret = ggtt_probe_hw(&i915->ggtt, &i915->gt);
+ ret = ggtt_probe_hw(&i915->ggtt, to_gt(i915));
if (ret)
return ret;
- if (intel_vtd_active())
+ if (intel_vtd_active(i915))
drm_info(&i915->drm, "VT-d active for gfx access\n");
return 0;
@@ -1333,382 +1312,3 @@ void i915_ggtt_resume(struct i915_ggtt *ggtt)
intel_ggtt_restore_fences(ggtt);
}
-
-static struct scatterlist *
-rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
- unsigned int width, unsigned int height,
- unsigned int src_stride, unsigned int dst_stride,
- struct sg_table *st, struct scatterlist *sg)
-{
- unsigned int column, row;
- unsigned int src_idx;
-
- for (column = 0; column < width; column++) {
- unsigned int left;
-
- src_idx = src_stride * (height - 1) + column + offset;
- for (row = 0; row < height; row++) {
- st->nents++;
- /*
- * We don't need the pages, but need to initialize
- * the entries so the sg list can be happily traversed.
- * The only thing we need are DMA addresses.
- */
- sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
- sg_dma_address(sg) =
- i915_gem_object_get_dma_address(obj, src_idx);
- sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
- sg = sg_next(sg);
- src_idx -= src_stride;
- }
-
- left = (dst_stride - height) * I915_GTT_PAGE_SIZE;
-
- if (!left)
- continue;
-
- st->nents++;
-
- /*
- * The DE ignores the PTEs for the padding tiles, the sg entry
- * here is just a conenience to indicate how many padding PTEs
- * to insert at this spot.
- */
- sg_set_page(sg, NULL, left, 0);
- sg_dma_address(sg) = 0;
- sg_dma_len(sg) = left;
- sg = sg_next(sg);
- }
-
- return sg;
-}
-
-static noinline struct sg_table *
-intel_rotate_pages(struct intel_rotation_info *rot_info,
- struct drm_i915_gem_object *obj)
-{
- unsigned int size = intel_rotation_info_size(rot_info);
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct sg_table *st;
- struct scatterlist *sg;
- int ret = -ENOMEM;
- int i;
-
- /* Allocate target SG list. */
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- goto err_st_alloc;
-
- ret = sg_alloc_table(st, size, GFP_KERNEL);
- if (ret)
- goto err_sg_alloc;
-
- st->nents = 0;
- sg = st->sgl;
-
- for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
- sg = rotate_pages(obj, rot_info->plane[i].offset,
- rot_info->plane[i].width, rot_info->plane[i].height,
- rot_info->plane[i].src_stride,
- rot_info->plane[i].dst_stride,
- st, sg);
-
- return st;
-
-err_sg_alloc:
- kfree(st);
-err_st_alloc:
-
- drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
- obj->base.size, rot_info->plane[0].width,
- rot_info->plane[0].height, size);
-
- return ERR_PTR(ret);
-}
-
-static struct scatterlist *
-add_padding_pages(unsigned int count,
- struct sg_table *st, struct scatterlist *sg)
-{
- st->nents++;
-
- /*
- * The DE ignores the PTEs for the padding tiles, the sg entry
- * here is just a convenience to indicate how many padding PTEs
- * to insert at this spot.
- */
- sg_set_page(sg, NULL, count * I915_GTT_PAGE_SIZE, 0);
- sg_dma_address(sg) = 0;
- sg_dma_len(sg) = count * I915_GTT_PAGE_SIZE;
- sg = sg_next(sg);
-
- return sg;
-}
-
-static struct scatterlist *
-remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj,
- unsigned int offset, unsigned int alignment_pad,
- unsigned int width, unsigned int height,
- unsigned int src_stride, unsigned int dst_stride,
- struct sg_table *st, struct scatterlist *sg,
- unsigned int *gtt_offset)
-{
- unsigned int row;
-
- if (!width || !height)
- return sg;
-
- if (alignment_pad)
- sg = add_padding_pages(alignment_pad, st, sg);
-
- for (row = 0; row < height; row++) {
- unsigned int left = width * I915_GTT_PAGE_SIZE;
-
- while (left) {
- dma_addr_t addr;
- unsigned int length;
-
- /*
- * We don't need the pages, but need to initialize
- * the entries so the sg list can be happily traversed.
- * The only thing we need are DMA addresses.
- */
-
- addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
-
- length = min(left, length);
-
- st->nents++;
-
- sg_set_page(sg, NULL, length, 0);
- sg_dma_address(sg) = addr;
- sg_dma_len(sg) = length;
- sg = sg_next(sg);
-
- offset += length / I915_GTT_PAGE_SIZE;
- left -= length;
- }
-
- offset += src_stride - width;
-
- left = (dst_stride - width) * I915_GTT_PAGE_SIZE;
-
- if (!left)
- continue;
-
- sg = add_padding_pages(left >> PAGE_SHIFT, st, sg);
- }
-
- *gtt_offset += alignment_pad + dst_stride * height;
-
- return sg;
-}
-
-static struct scatterlist *
-remap_contiguous_pages(struct drm_i915_gem_object *obj,
- unsigned int obj_offset,
- unsigned int count,
- struct sg_table *st, struct scatterlist *sg)
-{
- struct scatterlist *iter;
- unsigned int offset;
-
- iter = i915_gem_object_get_sg_dma(obj, obj_offset, &offset);
- GEM_BUG_ON(!iter);
-
- do {
- unsigned int len;
-
- len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
- count << PAGE_SHIFT);
- sg_set_page(sg, NULL, len, 0);
- sg_dma_address(sg) =
- sg_dma_address(iter) + (offset << PAGE_SHIFT);
- sg_dma_len(sg) = len;
-
- st->nents++;
- count -= len >> PAGE_SHIFT;
- if (count == 0)
- return sg;
-
- sg = __sg_next(sg);
- iter = __sg_next(iter);
- offset = 0;
- } while (1);
-}
-
-static struct scatterlist *
-remap_linear_color_plane_pages(struct drm_i915_gem_object *obj,
- unsigned int obj_offset, unsigned int alignment_pad,
- unsigned int size,
- struct sg_table *st, struct scatterlist *sg,
- unsigned int *gtt_offset)
-{
- if (!size)
- return sg;
-
- if (alignment_pad)
- sg = add_padding_pages(alignment_pad, st, sg);
-
- sg = remap_contiguous_pages(obj, obj_offset, size, st, sg);
- sg = sg_next(sg);
-
- *gtt_offset += alignment_pad + size;
-
- return sg;
-}
-
-static struct scatterlist *
-remap_color_plane_pages(const struct intel_remapped_info *rem_info,
- struct drm_i915_gem_object *obj,
- int color_plane,
- struct sg_table *st, struct scatterlist *sg,
- unsigned int *gtt_offset)
-{
- unsigned int alignment_pad = 0;
-
- if (rem_info->plane_alignment)
- alignment_pad = ALIGN(*gtt_offset, rem_info->plane_alignment) - *gtt_offset;
-
- if (rem_info->plane[color_plane].linear)
- sg = remap_linear_color_plane_pages(obj,
- rem_info->plane[color_plane].offset,
- alignment_pad,
- rem_info->plane[color_plane].size,
- st, sg,
- gtt_offset);
-
- else
- sg = remap_tiled_color_plane_pages(obj,
- rem_info->plane[color_plane].offset,
- alignment_pad,
- rem_info->plane[color_plane].width,
- rem_info->plane[color_plane].height,
- rem_info->plane[color_plane].src_stride,
- rem_info->plane[color_plane].dst_stride,
- st, sg,
- gtt_offset);
-
- return sg;
-}
-
-static noinline struct sg_table *
-intel_remap_pages(struct intel_remapped_info *rem_info,
- struct drm_i915_gem_object *obj)
-{
- unsigned int size = intel_remapped_info_size(rem_info);
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct sg_table *st;
- struct scatterlist *sg;
- unsigned int gtt_offset = 0;
- int ret = -ENOMEM;
- int i;
-
- /* Allocate target SG list. */
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- goto err_st_alloc;
-
- ret = sg_alloc_table(st, size, GFP_KERNEL);
- if (ret)
- goto err_sg_alloc;
-
- st->nents = 0;
- sg = st->sgl;
-
- for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
- sg = remap_color_plane_pages(rem_info, obj, i, st, sg, &gtt_offset);
-
- i915_sg_trim(st);
-
- return st;
-
-err_sg_alloc:
- kfree(st);
-err_st_alloc:
-
- drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
- obj->base.size, rem_info->plane[0].width,
- rem_info->plane[0].height, size);
-
- return ERR_PTR(ret);
-}
-
-static noinline struct sg_table *
-intel_partial_pages(const struct i915_ggtt_view *view,
- struct drm_i915_gem_object *obj)
-{
- struct sg_table *st;
- struct scatterlist *sg;
- unsigned int count = view->partial.size;
- int ret = -ENOMEM;
-
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- goto err_st_alloc;
-
- ret = sg_alloc_table(st, count, GFP_KERNEL);
- if (ret)
- goto err_sg_alloc;
-
- st->nents = 0;
-
- sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl);
-
- sg_mark_end(sg);
- i915_sg_trim(st); /* Drop any unused tail entries. */
-
- return st;
-
-err_sg_alloc:
- kfree(st);
-err_st_alloc:
- return ERR_PTR(ret);
-}
-
-static int
-i915_get_ggtt_vma_pages(struct i915_vma *vma)
-{
- int ret;
-
- /*
- * The vma->pages are only valid within the lifespan of the borrowed
- * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
- * must be the vma->pages. A simple rule is that vma->pages must only
- * be accessed when the obj->mm.pages are pinned.
- */
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
-
- switch (vma->ggtt_view.type) {
- default:
- GEM_BUG_ON(vma->ggtt_view.type);
- fallthrough;
- case I915_GGTT_VIEW_NORMAL:
- vma->pages = vma->obj->mm.pages;
- return 0;
-
- case I915_GGTT_VIEW_ROTATED:
- vma->pages =
- intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
- break;
-
- case I915_GGTT_VIEW_REMAPPED:
- vma->pages =
- intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
- break;
-
- case I915_GGTT_VIEW_PARTIAL:
- vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
- break;
- }
-
- ret = 0;
- if (IS_ERR(vma->pages)) {
- ret = PTR_ERR(vma->pages);
- vma->pages = NULL;
- drm_err(&vma->vm->i915->drm,
- "Failed to get pages for VMA view type %u (%d)!\n",
- vma->ggtt_view.type, ret);
- }
- return ret;
-}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index f2422d48be32..f98f0fb21efb 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -25,11 +25,8 @@
#include "shmem_utils.h"
#include "pxp/intel_pxp.h"
-void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
+void __intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
- gt->i915 = i915;
- gt->uncore = &i915->uncore;
-
spin_lock_init(&gt->irq_lock);
INIT_LIST_HEAD(&gt->closed_vma);
@@ -48,6 +45,12 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
intel_rps_init_early(&gt->rps);
}
+void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
+{
+ gt->i915 = i915;
+ gt->uncore = &i915->uncore;
+}
+
int intel_gt_probe_lmem(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 74e771871a9b..3ace129eb2af 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -35,6 +35,7 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
}
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
+void __intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt);
int intel_gt_probe_lmem(struct intel_gt *gt);
int intel_gt_init_mmio(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
index acc49c56a9f3..9db3dcbd917f 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
@@ -9,11 +9,6 @@
#include "intel_engine_pm.h"
#include "intel_gt_buffer_pool.h"
-static struct intel_gt *to_gt(struct intel_gt_buffer_pool *pool)
-{
- return container_of(pool, struct intel_gt, buffer_pool);
-}
-
static struct list_head *
bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz)
{
@@ -141,7 +136,7 @@ static struct intel_gt_buffer_pool_node *
node_create(struct intel_gt_buffer_pool *pool, size_t sz,
enum i915_map_type type)
{
- struct intel_gt *gt = to_gt(pool);
+ struct intel_gt *gt = container_of(pool, struct intel_gt, buffer_pool);
struct intel_gt_buffer_pool_node *node;
struct drm_i915_gem_object *obj;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h
index e307ceb99031..17e79b735cfe 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h
@@ -10,11 +10,7 @@
struct intel_gt;
-#define DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(__name) \
- static int __name ## _open(struct inode *inode, struct file *file) \
-{ \
- return single_open(file, __name ## _show, inode->i_private); \
-} \
+#define __GT_DEBUGFS_ATTRIBUTE_FOPS(__name) \
static const struct file_operations __name ## _fops = { \
.owner = THIS_MODULE, \
.open = __name ## _open, \
@@ -23,6 +19,21 @@ static const struct file_operations __name ## _fops = { \
.release = single_release, \
}
+#define DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(__name) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __name ## _show, inode->i_private); \
+} \
+__GT_DEBUGFS_ATTRIBUTE_FOPS(__name)
+
+#define DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE_WITH_SIZE(__name, __size_vf) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open_size(file, __name ## _show, inode->i_private, \
+ __size_vf(inode->i_private)); \
+} \
+__GT_DEBUGFS_ATTRIBUTE_FOPS(__name)
+
void intel_gt_debugfs_register(struct intel_gt *gt);
struct intel_gt_debugfs_file {
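A short usage sketch for the new DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE_WITH_SIZE() variant above: the size callback receives the same private pointer handed to debugfs at file-creation time and is used to pre-size the seq_file buffer via single_open_size(). The names below (ecc_log, ecc_log_size) are illustrative, and treating i_private as the per-gt private data is an assumption modelled on the existing attribute users:

static size_t ecc_log_size(void *data)
{
	/* Worst-case buffer size for the dump; purely illustrative. */
	return SZ_64K;
}

static int ecc_log_show(struct seq_file *m, void *unused)
{
	/* m->private is the pointer passed to single_open_size(). */
	struct intel_gt *gt = m->private;

	seq_printf(m, "gt %p: ...\n", gt);
	return 0;
}

DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE_WITH_SIZE(ecc_log, ecc_log_size);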
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 524eaf678790..c0fa41e4c803 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -86,6 +86,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
intel_rc6_unpark(&gt->rc6);
intel_rps_unpark(&gt->rps);
i915_pmu_gt_unparked(i915);
+ intel_guc_busyness_unpark(gt);
intel_gt_unpark_requests(gt);
runtime_begin(gt);
@@ -104,6 +105,7 @@ static int __gt_park(struct intel_wakeref *wf)
runtime_end(gt);
intel_gt_park_requests(gt);
+ intel_guc_busyness_park(gt);
i915_vma_parked(gt);
i915_pmu_gt_parked(i915);
intel_rps_park(&gt->rps);
@@ -301,7 +303,7 @@ void intel_gt_suspend_prepare(struct intel_gt *gt)
user_forcewake(gt, true);
wait_for_suspend(gt);
- intel_pxp_suspend(&gt->pxp, false);
+ intel_pxp_suspend_prepare(&gt->pxp);
}
static suspend_state_t pm_suspend_target(void)
@@ -326,6 +328,7 @@ void intel_gt_suspend_late(struct intel_gt *gt)
GEM_BUG_ON(gt->awake);
intel_uc_suspend(&gt->uc);
+ intel_pxp_suspend(&gt->pxp);
/*
* On disabling the device, we want to turn off HW access to memory
@@ -353,7 +356,7 @@ void intel_gt_suspend_late(struct intel_gt *gt)
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
- intel_pxp_suspend(&gt->pxp, true);
+ intel_pxp_runtime_suspend(&gt->pxp);
intel_uc_runtime_suspend(&gt->uc);
GT_TRACE(gt, "\n");
@@ -371,7 +374,7 @@ int intel_gt_runtime_resume(struct intel_gt *gt)
if (ret)
return ret;
- intel_pxp_resume(&gt->pxp);
+ intel_pxp_runtime_resume(&gt->pxp);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 67d14afa6623..a94be0306464 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -6,6 +6,9 @@
#include <linux/slab.h> /* fault-inject.h is not standalone! */
#include <linux/fault-inject.h>
+#include <linux/sched/mm.h>
+
+#include <drm/drm_cache.h>
#include "gem/i915_gem_lmem.h"
#include "i915_trace.h"
@@ -221,19 +224,6 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
INIT_LIST_HEAD(&vm->bound_list);
}
-void clear_pages(struct i915_vma *vma)
-{
- GEM_BUG_ON(!vma->pages);
-
- if (vma->pages != vma->obj->mm.pages) {
- sg_free_table(vma->pages);
- kfree(vma->pages);
- }
- vma->pages = NULL;
-
- memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
-}
-
void *__px_vaddr(struct drm_i915_gem_object *p)
{
enum i915_map_type type;
@@ -273,6 +263,7 @@ static void poison_scratch_page(struct drm_i915_gem_object *scratch)
val = POISON_FREE;
memset(vaddr, val, scratch->base.size);
+ drm_clflush_virt_range(vaddr, scratch->base.size);
}
int setup_scratch_page(struct i915_address_space *vm)
@@ -298,7 +289,7 @@ int setup_scratch_page(struct i915_address_space *vm)
do {
struct drm_i915_gem_object *obj;
- obj = vm->alloc_pt_dma(vm, size);
+ obj = vm->alloc_scratch_dma(vm, size);
if (IS_ERR(obj))
goto skip;
@@ -334,6 +325,18 @@ skip:
if (size == I915_GTT_PAGE_SIZE_4K)
return -ENOMEM;
+ /*
+ * If we need 64K minimum GTT pages for device local-memory,
+ * like on XEHPSDV, then we need to fail the allocation here,
+ * otherwise we can't safely support the insertion of
+ * local-memory pages for this vm, since the HW expects the
+ * correct physical alignment and size when the page-table is
+ * operating in 64K GTT mode, which includes any scratch PTEs,
+ * since userspace can still touch them.
+ */
+ if (HAS_64K_PAGES(vm->i915))
+ return -ENOMEM;
+
size = I915_GTT_PAGE_SIZE_4K;
} while (1);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index dfeaef680aac..177b42b935a1 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -135,6 +135,9 @@ typedef u64 gen8_pte_t;
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
#define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8))
+#define GEN8_PAGE_PRESENT BIT_ULL(0)
+#define GEN8_PAGE_RW BIT_ULL(1)
+
#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M BIT(7)
@@ -206,9 +209,6 @@ struct i915_vma_ops {
*/
void (*unbind_vma)(struct i915_address_space *vm,
struct i915_vma *vma);
-
- int (*set_pages)(struct i915_vma *vma);
- void (*clear_pages)(struct i915_vma *vma);
};
struct i915_address_space {
@@ -265,6 +265,8 @@ struct i915_address_space {
struct drm_i915_gem_object *
(*alloc_pt_dma)(struct i915_address_space *vm, int sz);
+ struct drm_i915_gem_object *
+ (*alloc_scratch_dma)(struct i915_address_space *vm, int sz);
u64 (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level,
@@ -596,10 +598,6 @@ release_pd_entry(struct i915_page_directory * const pd,
const struct drm_i915_gem_object * const scratch);
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);
-int ggtt_set_pages(struct i915_vma *vma);
-int ppgtt_set_pages(struct i915_vma *vma);
-void clear_pages(struct i915_vma *vma);
-
void ppgtt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
struct i915_vma *vma,
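
Illustrative sketch (an assumption, not code from this patch): GEN8_PAGE_PRESENT and GEN8_PAGE_RW are the valid/writable bits of a gen8-style PTE, so a minimal encoder in the spirit of the existing gen8 one would OR them into every valid entry together with the local-memory bit; this assumes the PTE_LM/PTE_READ_ONLY flags and GEN12_PPGTT_PTE_LM definition from intel_gtt.h, and elides the caching attributes for @level.

static u64 example_pte_encode(dma_addr_t addr,
			      enum i915_cache_level level,
			      u32 flags)
{
	gen8_pte_t pte = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW;

	if (flags & PTE_READ_ONLY)
		pte &= ~GEN8_PAGE_RW;		/* drop write access */

	if (flags & PTE_LM)
		pte |= GEN12_PPGTT_PTE_LM;	/* page lives in device local memory */

	return pte;
}
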
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 56156cf18c41..b3489599e4de 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1167,6 +1167,11 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
cs = gen12_emit_cmd_buf_wa(ce, cs);
cs = gen12_emit_restore_scratch(ce, cs);
+ /* Wa_16013000631:dg2 */
+ if (IS_DG2_GRAPHICS_STEP(ce->engine->i915, G10, STEP_B0, STEP_C0) ||
+ IS_DG2_G11(ce->engine->i915))
+ cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0);
+
return cs;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
index afb1cce9a352..18b44af56969 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -13,7 +13,6 @@
struct insert_pte_data {
u64 offset;
- bool is_lmem;
};
#define CHUNK_SZ SZ_8M /* ~1ms at 8GiB/s preemption delay */
@@ -40,7 +39,7 @@ static void insert_pte(struct i915_address_space *vm,
struct insert_pte_data *d = data;
vm->insert_page(vm, px_dma(pt), d->offset, I915_CACHE_NONE,
- d->is_lmem ? PTE_LM : 0);
+ i915_gem_object_is_lmem(pt->base) ? PTE_LM : 0);
d->offset += PAGE_SIZE;
}
@@ -134,8 +133,7 @@ static struct i915_address_space *migrate_vm(struct intel_gt *gt)
goto err_vm;
/* Now allow the GPU to rewrite the PTE via its own ppGTT */
- d.is_lmem = i915_gem_object_is_lmem(vm->vm.scratch[0]);
- vm->vm.foreach(&vm->vm, base, base + sz, insert_pte, &d);
+ vm->vm.foreach(&vm->vm, base, d.offset - base, insert_pte, &d);
}
return &vm->vm;
@@ -281,10 +279,10 @@ static int emit_pte(struct i915_request *rq,
GEM_BUG_ON(GRAPHICS_VER(rq->engine->i915) < 8);
/* Compute the page directory offset for the target address range */
- offset += (u64)rq->engine->instance << 32;
offset >>= 12;
offset *= sizeof(u64);
offset += 2 * CHUNK_SZ;
+ offset += (u64)rq->engine->instance << 32;
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
@@ -406,7 +404,7 @@ static int emit_copy(struct i915_request *rq, int size)
int
intel_context_migrate_copy(struct intel_context *ce,
- struct dma_fence *await,
+ const struct i915_deps *deps,
struct scatterlist *src,
enum i915_cache_level src_cache_level,
bool src_is_lmem,
@@ -433,8 +431,8 @@ intel_context_migrate_copy(struct intel_context *ce,
goto out_ce;
}
- if (await) {
- err = i915_request_await_dma_fence(rq, await);
+ if (deps) {
+ err = i915_request_await_deps(rq, deps);
if (err)
goto out_rq;
@@ -444,7 +442,7 @@ intel_context_migrate_copy(struct intel_context *ce,
goto out_rq;
}
- await = NULL;
+ deps = NULL;
}
/* The PTE updates + copy must not be interrupted. */
@@ -527,7 +525,7 @@ static int emit_clear(struct i915_request *rq, int size, u32 value)
int
intel_context_migrate_clear(struct intel_context *ce,
- struct dma_fence *await,
+ const struct i915_deps *deps,
struct scatterlist *sg,
enum i915_cache_level cache_level,
bool is_lmem,
@@ -552,8 +550,8 @@ intel_context_migrate_clear(struct intel_context *ce,
goto out_ce;
}
- if (await) {
- err = i915_request_await_dma_fence(rq, await);
+ if (deps) {
+ err = i915_request_await_deps(rq, deps);
if (err)
goto out_rq;
@@ -563,7 +561,7 @@ intel_context_migrate_clear(struct intel_context *ce,
goto out_rq;
}
- await = NULL;
+ deps = NULL;
}
/* The PTE updates + clear must not be interrupted. */
@@ -601,7 +599,7 @@ out_ce:
int intel_migrate_copy(struct intel_migrate *m,
struct i915_gem_ww_ctx *ww,
- struct dma_fence *await,
+ const struct i915_deps *deps,
struct scatterlist *src,
enum i915_cache_level src_cache_level,
bool src_is_lmem,
@@ -626,7 +624,7 @@ int intel_migrate_copy(struct intel_migrate *m,
if (err)
goto out;
- err = intel_context_migrate_copy(ce, await,
+ err = intel_context_migrate_copy(ce, deps,
src, src_cache_level, src_is_lmem,
dst, dst_cache_level, dst_is_lmem,
out);
@@ -640,7 +638,7 @@ out:
int
intel_migrate_clear(struct intel_migrate *m,
struct i915_gem_ww_ctx *ww,
- struct dma_fence *await,
+ const struct i915_deps *deps,
struct scatterlist *sg,
enum i915_cache_level cache_level,
bool is_lmem,
@@ -663,7 +661,7 @@ intel_migrate_clear(struct intel_migrate *m,
if (err)
goto out;
- err = intel_context_migrate_clear(ce, await, sg, cache_level,
+ err = intel_context_migrate_clear(ce, deps, sg, cache_level,
is_lmem, value, out);
intel_context_unpin(ce);
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.h b/drivers/gpu/drm/i915/gt/intel_migrate.h
index 4e18e755a00b..ccc677ec4aa3 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.h
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.h
@@ -11,6 +11,7 @@
#include "intel_migrate_types.h"
struct dma_fence;
+struct i915_deps;
struct i915_request;
struct i915_gem_ww_ctx;
struct intel_gt;
@@ -23,7 +24,7 @@ struct intel_context *intel_migrate_create_context(struct intel_migrate *m);
int intel_migrate_copy(struct intel_migrate *m,
struct i915_gem_ww_ctx *ww,
- struct dma_fence *await,
+ const struct i915_deps *deps,
struct scatterlist *src,
enum i915_cache_level src_cache_level,
bool src_is_lmem,
@@ -33,7 +34,7 @@ int intel_migrate_copy(struct intel_migrate *m,
struct i915_request **out);
int intel_context_migrate_copy(struct intel_context *ce,
- struct dma_fence *await,
+ const struct i915_deps *deps,
struct scatterlist *src,
enum i915_cache_level src_cache_level,
bool src_is_lmem,
@@ -45,7 +46,7 @@ int intel_context_migrate_copy(struct intel_context *ce,
int
intel_migrate_clear(struct intel_migrate *m,
struct i915_gem_ww_ctx *ww,
- struct dma_fence *await,
+ const struct i915_deps *deps,
struct scatterlist *sg,
enum i915_cache_level cache_level,
bool is_lmem,
@@ -53,7 +54,7 @@ intel_migrate_clear(struct intel_migrate *m,
struct i915_request **out);
int
intel_context_migrate_clear(struct intel_context *ce,
- struct dma_fence *await,
+ const struct i915_deps *deps,
struct scatterlist *sg,
enum i915_cache_level cache_level,
bool is_lmem,
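
Illustrative sketch (not part of this patch; ce and sgt are assumed to be a pinned migrate context and the pages to clear): with the dma_fence parameter replaced by an i915_deps list, the simplest caller passes NULL, meaning there are no prior dependencies to await before the blitter clear.

static int example_clear(struct intel_context *ce, struct scatterlist *sgt)
{
	struct i915_request *rq = NULL;
	int err;

	err = intel_context_migrate_clear(ce, NULL /* no deps */,
					  sgt, I915_CACHE_NONE,
					  false /* !lmem */, 0 /* value */,
					  &rq);
	if (rq) {
		i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
		i915_request_put(rq);
	}

	return err;
}
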
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 15f9ada28a7a..9c253ba593c6 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -424,7 +424,7 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
table->unused_entries_index = I915_MOCS_PTE;
if (IS_DG2(i915)) {
- if (IS_DG2_GT_STEP(i915, G10, STEP_A0, STEP_B0)) {
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
table->size = ARRAY_SIZE(dg2_mocs_table_g10_ax);
table->table = dg2_mocs_table_g10_ax;
} else {
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index 4396bfd630d8..083b3090c69c 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -289,16 +289,6 @@ void i915_vm_free_pt_stash(struct i915_address_space *vm,
}
}
-int ppgtt_set_pages(struct i915_vma *vma)
-{
- GEM_BUG_ON(vma->pages);
-
- vma->pages = vma->obj->mm.pages;
- vma->page_sizes = vma->obj->mm.page_sizes;
-
- return 0;
-}
-
void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
unsigned long lmem_pt_obj_flags)
{
@@ -315,6 +305,4 @@ void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
- ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
- ppgtt->vm.vma_ops.clear_pages = clear_pages;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 43093dd2d0c9..c3155ee58689 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -117,10 +117,17 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
GEN6_RC_CTL_RC6_ENABLE |
GEN6_RC_CTL_EI_MODE(1);
- pg_enable =
- GEN9_RENDER_PG_ENABLE |
- GEN9_MEDIA_PG_ENABLE |
- GEN11_MEDIA_SAMPLER_PG_ENABLE;
+ /* Wa_16011777198 - Render powergating must remain disabled */
+ if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
+ IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0))
+ pg_enable =
+ GEN9_MEDIA_PG_ENABLE |
+ GEN11_MEDIA_SAMPLER_PG_ENABLE;
+ else
+ pg_enable =
+ GEN9_RENDER_PG_ENABLE |
+ GEN9_MEDIA_PG_ENABLE |
+ GEN11_MEDIA_SAMPLER_PG_ENABLE;
if (GRAPHICS_VER(gt->i915) >= 12) {
for (i = 0; i < I915_MAX_VCS; i++)
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index afb35d2e5c73..fde2dcb59809 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -66,12 +66,16 @@ static void release_fake_lmem_bar(struct intel_memory_region *mem)
DMA_ATTR_FORCE_CONTIGUOUS);
}
-static void
+static int
region_lmem_release(struct intel_memory_region *mem)
{
- intel_region_ttm_fini(mem);
+ int ret;
+
+ ret = intel_region_ttm_fini(mem);
io_mapping_fini(&mem->iomap);
release_fake_lmem_bar(mem);
+
+ return ret;
}
static int
@@ -158,7 +162,7 @@ intel_gt_setup_fake_lmem(struct intel_gt *gt)
static bool get_legacy_lowmem_region(struct intel_uncore *uncore,
u64 *start, u32 *size)
{
- if (!IS_DG1_GT_STEP(uncore->i915, STEP_A0, STEP_C0))
+ if (!IS_DG1_GRAPHICS_STEP(uncore->i915, STEP_A0, STEP_C0))
return false;
*start = 0;
@@ -193,6 +197,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
struct intel_uncore *uncore = gt->uncore;
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct intel_memory_region *mem;
+ resource_size_t min_page_size;
resource_size_t io_start;
resource_size_t lmem_size;
int err;
@@ -207,10 +212,12 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
if (GEM_WARN_ON(lmem_size > pci_resource_len(pdev, 2)))
return ERR_PTR(-ENODEV);
+ min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
+ I915_GTT_PAGE_SIZE_4K;
mem = intel_memory_region_create(i915,
0,
lmem_size,
- I915_GTT_PAGE_SIZE_4K,
+ min_page_size,
io_start,
INTEL_MEMORY_LOCAL,
0,
@@ -231,7 +238,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
return mem;
err_region_put:
- intel_memory_region_put(mem);
+ intel_memory_region_destroy(mem);
return ERR_PTR(err);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index d8fe35f2281d..7be0002d9d70 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -297,13 +297,6 @@ static int gen6_reset_engines(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- static const u32 hw_engine_mask[] = {
- [RCS0] = GEN6_GRDOM_RENDER,
- [BCS0] = GEN6_GRDOM_BLT,
- [VCS0] = GEN6_GRDOM_MEDIA,
- [VCS1] = GEN8_GRDOM_MEDIA2,
- [VECS0] = GEN6_GRDOM_VECS,
- };
struct intel_engine_cs *engine;
u32 hw_mask;
@@ -314,8 +307,7 @@ static int gen6_reset_engines(struct intel_gt *gt,
hw_mask = 0;
for_each_engine_masked(engine, gt, engine_mask, tmp) {
- GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
- hw_mask |= hw_engine_mask[engine->id];
+ hw_mask |= engine->reset_domain;
}
}
@@ -492,22 +484,6 @@ static int gen11_reset_engines(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- static const u32 hw_engine_mask[] = {
- [RCS0] = GEN11_GRDOM_RENDER,
- [BCS0] = GEN11_GRDOM_BLT,
- [VCS0] = GEN11_GRDOM_MEDIA,
- [VCS1] = GEN11_GRDOM_MEDIA2,
- [VCS2] = GEN11_GRDOM_MEDIA3,
- [VCS3] = GEN11_GRDOM_MEDIA4,
- [VCS4] = GEN11_GRDOM_MEDIA5,
- [VCS5] = GEN11_GRDOM_MEDIA6,
- [VCS6] = GEN11_GRDOM_MEDIA7,
- [VCS7] = GEN11_GRDOM_MEDIA8,
- [VECS0] = GEN11_GRDOM_VECS,
- [VECS1] = GEN11_GRDOM_VECS2,
- [VECS2] = GEN11_GRDOM_VECS3,
- [VECS3] = GEN11_GRDOM_VECS4,
- };
struct intel_engine_cs *engine;
intel_engine_mask_t tmp;
u32 reset_mask, unlock_mask = 0;
@@ -518,8 +494,7 @@ static int gen11_reset_engines(struct intel_gt *gt,
} else {
reset_mask = 0;
for_each_engine_masked(engine, gt, engine_mask, tmp) {
- GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
- reset_mask |= hw_engine_mask[engine->id];
+ reset_mask |= engine->reset_domain;
ret = gen11_lock_sfc(engine, &reset_mask, &unlock_mask);
if (ret)
goto sfc_unlock;
@@ -1367,20 +1342,27 @@ void intel_gt_handle_error(struct intel_gt *gt,
/* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
synchronize_rcu_expedited();
- /* Prevent any other reset-engine attempt. */
- for_each_engine(engine, gt, tmp) {
- while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
- &gt->reset.flags))
- wait_on_bit(&gt->reset.flags,
- I915_RESET_ENGINE + engine->id,
- TASK_UNINTERRUPTIBLE);
+ /*
+ * Prevent any other reset-engine attempt. We don't do this for GuC
+ * submission since the GuC owns the per-engine reset, not the i915.
+ */
+ if (!intel_uc_uses_guc_submission(&gt->uc)) {
+ for_each_engine(engine, gt, tmp) {
+ while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags))
+ wait_on_bit(&gt->reset.flags,
+ I915_RESET_ENGINE + engine->id,
+ TASK_UNINTERRUPTIBLE);
+ }
}
intel_gt_reset_global(gt, engine_mask, msg);
- for_each_engine(engine, gt, tmp)
- clear_bit_unlock(I915_RESET_ENGINE + engine->id,
- &gt->reset.flags);
+ if (!intel_uc_uses_guc_submission(&gt->uc)) {
+ for_each_engine(engine, gt, tmp)
+ clear_bit_unlock(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags);
+ }
clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
smp_mb__after_atomic();
wake_up_all(&gt->reset.queue);
@@ -1441,6 +1423,7 @@ void intel_gt_set_wedged_on_init(struct intel_gt *gt)
BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES >
I915_WEDGED_ON_INIT);
intel_gt_set_wedged(gt);
+ i915_disable_error_state(gt->i915, -ENODEV);
set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
/* Wedged on init is non-recoverable */
@@ -1450,6 +1433,7 @@ void intel_gt_set_wedged_on_init(struct intel_gt *gt)
void intel_gt_set_wedged_on_fini(struct intel_gt *gt)
{
intel_gt_set_wedged(gt);
+ i915_disable_error_state(gt->i915, -ENODEV);
set_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
intel_gt_retire_requests(gt); /* cleanup any wedged requests */
}
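
Illustrative sketch (an assumption about the setup side, not code from this patch): with the lookup tables gone, each engine is expected to carry its own reset domain, filled in once at engine init from data equivalent to the removed tables, e.g.:

static const u32 gen11_engine_reset_domains[] = {
	[RCS0]  = GEN11_GRDOM_RENDER,
	[BCS0]  = GEN11_GRDOM_BLT,
	[VCS0]  = GEN11_GRDOM_MEDIA,
	[VCS1]  = GEN11_GRDOM_MEDIA2,
	[VECS0] = GEN11_GRDOM_VECS,
	[VECS1] = GEN11_GRDOM_VECS2,
};

/* during engine setup, before any reset can be attempted */
engine->reset_domain = gen11_engine_reset_domains[engine->id];
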
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 586dca1731ce..3e6fac0340ef 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -1357,7 +1357,7 @@ retry:
err = i915_gem_object_lock(timeline->hwsp_ggtt->obj, &ww);
if (!err && gen7_wa_vma)
err = i915_gem_object_lock(gen7_wa_vma->obj, &ww);
- if (!err && engine->legacy.ring->vma->obj)
+ if (!err)
err = i915_gem_object_lock(engine->legacy.ring->vma->obj, &ww);
if (!err)
err = intel_timeline_pin(timeline, &ww);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 8f5bce298574..bd35e45d3aaa 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -937,8 +937,70 @@ void intel_rps_park(struct intel_rps *rps)
GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
}
+u32 intel_rps_get_boost_frequency(struct intel_rps *rps)
+{
+ struct intel_guc_slpc *slpc;
+
+ if (rps_uses_slpc(rps)) {
+ slpc = rps_to_slpc(rps);
+
+ return slpc->boost_freq;
+ } else {
+ return intel_gpu_freq(rps, rps->boost_freq);
+ }
+}
+
+static int rps_set_boost_freq(struct intel_rps *rps, u32 val)
+{
+ bool boost = false;
+
+ /* Validate against (static) hardware limits */
+ val = intel_freq_opcode(rps, val);
+ if (val < rps->min_freq || val > rps->max_freq)
+ return -EINVAL;
+
+ mutex_lock(&rps->lock);
+ if (val != rps->boost_freq) {
+ rps->boost_freq = val;
+ boost = atomic_read(&rps->num_waiters);
+ }
+ mutex_unlock(&rps->lock);
+ if (boost)
+ schedule_work(&rps->work);
+
+ return 0;
+}
+
+int intel_rps_set_boost_frequency(struct intel_rps *rps, u32 freq)
+{
+ struct intel_guc_slpc *slpc;
+
+ if (rps_uses_slpc(rps)) {
+ slpc = rps_to_slpc(rps);
+
+ return intel_guc_slpc_set_boost_freq(slpc, freq);
+ } else {
+ return rps_set_boost_freq(rps, freq);
+ }
+}
+
+void intel_rps_dec_waiters(struct intel_rps *rps)
+{
+ struct intel_guc_slpc *slpc;
+
+ if (rps_uses_slpc(rps)) {
+ slpc = rps_to_slpc(rps);
+
+ intel_guc_slpc_dec_waiters(slpc);
+ } else {
+ atomic_dec(&rps->num_waiters);
+ }
+}
+
void intel_rps_boost(struct i915_request *rq)
{
+ struct intel_guc_slpc *slpc;
+
if (i915_request_signaled(rq) || i915_request_has_waitboost(rq))
return;
@@ -946,6 +1008,16 @@ void intel_rps_boost(struct i915_request *rq)
if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) {
struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
+ if (rps_uses_slpc(rps)) {
+ slpc = rps_to_slpc(rps);
+
+ /* Return if the old value is non-zero */
+ if (!atomic_fetch_inc(&slpc->num_waiters))
+ schedule_work(&slpc->boost_work);
+
+ return;
+ }
+
if (atomic_fetch_inc(&rps->num_waiters))
return;
@@ -2155,6 +2227,65 @@ u32 intel_rps_read_state_cap(struct intel_rps *rps)
return intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
}
+static void intel_rps_set_manual(struct intel_rps *rps, bool enable)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u32 state = enable ? GEN9_RPSWCTL_ENABLE : GEN9_RPSWCTL_DISABLE;
+
+ /* Allow punit to process software requests */
+ intel_uncore_write(uncore, GEN6_RP_CONTROL, state);
+}
+
+void intel_rps_raise_unslice(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u32 rp0_unslice_req;
+
+ mutex_lock(&rps->lock);
+
+ if (rps_uses_slpc(rps)) {
+ /* RP limits have not been initialized yet for SLPC path */
+ rp0_unslice_req = ((intel_rps_read_state_cap(rps) >> 0)
+ & 0xff) * GEN9_FREQ_SCALER;
+
+ intel_rps_set_manual(rps, true);
+ intel_uncore_write(uncore, GEN6_RPNSWREQ,
+ ((rp0_unslice_req <<
+ GEN9_SW_REQ_UNSLICE_RATIO_SHIFT) |
+ GEN9_IGNORE_SLICE_RATIO));
+ intel_rps_set_manual(rps, false);
+ } else {
+ intel_rps_set(rps, rps->rp0_freq);
+ }
+
+ mutex_unlock(&rps->lock);
+}
+
+void intel_rps_lower_unslice(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u32 rpn_unslice_req;
+
+ mutex_lock(&rps->lock);
+
+ if (rps_uses_slpc(rps)) {
+ /* RP limits have not been initialized yet for SLPC path */
+ rpn_unslice_req = ((intel_rps_read_state_cap(rps) >> 16)
+ & 0xff) * GEN9_FREQ_SCALER;
+
+ intel_rps_set_manual(rps, true);
+ intel_uncore_write(uncore, GEN6_RPNSWREQ,
+ ((rpn_unslice_req <<
+ GEN9_SW_REQ_UNSLICE_RATIO_SHIFT) |
+ GEN9_IGNORE_SLICE_RATIO));
+ intel_rps_set_manual(rps, false);
+ } else {
+ intel_rps_set(rps, rps->min_freq);
+ }
+
+ mutex_unlock(&rps->lock);
+}
+
/* External interface for intel_ips.ko */
static struct drm_i915_private __rcu *ips_mchdev;
@@ -2231,7 +2362,7 @@ unsigned long i915_read_mch_val(void)
return 0;
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- struct intel_ips *ips = &i915->gt.rps.ips;
+ struct intel_ips *ips = &to_gt(i915)->rps.ips;
spin_lock_irq(&mchdev_lock);
chipset_val = __ips_chipset_val(ips);
@@ -2258,7 +2389,7 @@ bool i915_gpu_raise(void)
if (!i915)
return false;
- rps = &i915->gt.rps;
+ rps = &to_gt(i915)->rps;
spin_lock_irq(&mchdev_lock);
if (rps->max_freq_softlimit < rps->max_freq)
@@ -2285,7 +2416,7 @@ bool i915_gpu_lower(void)
if (!i915)
return false;
- rps = &i915->gt.rps;
+ rps = &to_gt(i915)->rps;
spin_lock_irq(&mchdev_lock);
if (rps->max_freq_softlimit > rps->min_freq)
@@ -2311,7 +2442,7 @@ bool i915_gpu_busy(void)
if (!i915)
return false;
- ret = i915->gt.awake;
+ ret = to_gt(i915)->awake;
drm_dev_put(&i915->drm);
return ret;
@@ -2334,11 +2465,11 @@ bool i915_gpu_turbo_disable(void)
if (!i915)
return false;
- rps = &i915->gt.rps;
+ rps = &to_gt(i915)->rps;
spin_lock_irq(&mchdev_lock);
rps->max_freq_softlimit = rps->min_freq;
- ret = !__gen5_rps_set(&i915->gt.rps, rps->min_freq);
+ ret = !__gen5_rps_set(&to_gt(i915)->rps, rps->min_freq);
spin_unlock_irq(&mchdev_lock);
drm_dev_put(&i915->drm);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
index 11960d64ca82..c6d76a3d1331 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -23,6 +23,9 @@ void intel_rps_disable(struct intel_rps *rps);
void intel_rps_park(struct intel_rps *rps);
void intel_rps_unpark(struct intel_rps *rps);
void intel_rps_boost(struct i915_request *rq);
+void intel_rps_dec_waiters(struct intel_rps *rps);
+u32 intel_rps_get_boost_frequency(struct intel_rps *rps);
+int intel_rps_set_boost_frequency(struct intel_rps *rps, u32 freq);
int intel_rps_set(struct intel_rps *rps, u8 val);
void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive);
@@ -42,6 +45,8 @@ u32 intel_rps_get_rpn_frequency(struct intel_rps *rps);
u32 intel_rps_read_punit_req(struct intel_rps *rps);
u32 intel_rps_read_punit_req_frequency(struct intel_rps *rps);
u32 intel_rps_read_state_cap(struct intel_rps *rps);
+void intel_rps_raise_unslice(struct intel_rps *rps);
+void intel_rps_lower_unslice(struct intel_rps *rps);
void gen5_rps_irq_handler(struct intel_rps *rps);
void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
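
Illustrative sketch (hypothetical handlers, not part of this patch): the new wrappers let callers such as the sysfs boost-frequency attributes stay agnostic of whether SLPC or the legacy RPS path is in use; the SLPC/legacy split is handled inside intel_rps.

static ssize_t boost_freq_mhz_show(struct intel_rps *rps, char *buf)
{
	return sysfs_emit(buf, "%u\n", intel_rps_get_boost_frequency(rps));
}

static ssize_t boost_freq_mhz_store(struct intel_rps *rps,
				    const char *buf, size_t count)
{
	u32 val;
	int err;

	err = kstrtou32(buf, 0, &val);
	if (err)
		return err;

	err = intel_rps_set_boost_frequency(rps, val);

	return err ?: count;
}
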
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index e1f362530889..ab3277a3d593 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -482,7 +482,7 @@ static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
gen9_ctx_workarounds_init(engine, wal);
/* WaToEnableHwFixForPushConstHWBug:kbl */
- if (IS_KBL_GT_STEP(i915, STEP_C0, STEP_FOREVER))
+ if (IS_KBL_GRAPHICS_STEP(i915, STEP_C0, STEP_FOREVER))
wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
@@ -560,6 +560,22 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
/*
* These settings aren't actually workarounds, but general tuning settings that
+ * need to be programmed on the dg2 platform.
+ */
+static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ wa_write_clr_set(wal, GEN11_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
+ REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
+ wa_add(wal,
+ FF_MODE2,
+ FF_MODE2_TDS_TIMER_MASK,
+ FF_MODE2_TDS_TIMER_128,
+ 0, false);
+}
+
+/*
+ * These settings aren't actually workarounds, but general tuning settings that
* need to be programmed on several platforms.
*/
static void gen12_ctx_gt_tuning_init(struct intel_engine_cs *engine,
@@ -621,13 +637,6 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
FF_MODE2_GS_TIMER_MASK,
FF_MODE2_GS_TIMER_224,
0, false);
-
- /*
- * Wa_14012131227:dg1
- * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p
- */
- wa_masked_en(wal, GEN7_COMMON_SLICE_CHICKEN1,
- GEN9_RHWO_OPTIMIZATION_DISABLE);
}
static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -644,6 +653,42 @@ static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
}
+static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ dg2_ctx_gt_tuning_init(engine, wal);
+
+ /* Wa_16011186671:dg2_g11 */
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) {
+ wa_masked_dis(wal, VFLSKPD, DIS_MULT_MISS_RD_SQUASH);
+ wa_masked_en(wal, VFLSKPD, DIS_OVER_FETCH_CACHE);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) {
+ /* Wa_14010469329:dg2_g10 */
+ wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
+ XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE);
+
+ /*
+ * Wa_22010465075:dg2_g10
+ * Wa_22010613112:dg2_g10
+ * Wa_14010698770:dg2_g10
+ */
+ wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
+ GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
+ }
+
+ /* Wa_16013271637:dg2 */
+ wa_masked_en(wal, SLICE_COMMON_ECO_CHICKEN1,
+ MSC_MSAA_REODER_BUF_BYPASS_DISABLE);
+
+ /* Wa_22012532006:dg2 */
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_C0) ||
+ IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0))
+ wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
+ DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA);
+}
+
static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
@@ -730,7 +775,11 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
if (engine->class != RENDER_CLASS)
goto done;
- if (IS_DG1(i915))
+ if (IS_DG2(i915))
+ dg2_ctx_workarounds_init(engine, wal);
+ else if (IS_XEHPSDV(i915))
+ ; /* noop; none at this time */
+ else if (IS_DG1(i915))
dg1_ctx_workarounds_init(engine, wal);
else if (GRAPHICS_VER(i915) == 12)
gen12_ctx_workarounds_init(engine, wal);
@@ -878,10 +927,51 @@ hsw_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
}
static void
+gen9_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+ const struct sseu_dev_info *sseu = &to_gt(i915)->info.sseu;
+ unsigned int slice, subslice;
+ u32 mcr, mcr_mask;
+
+ GEM_BUG_ON(GRAPHICS_VER(i915) != 9);
+
+ /*
+ * WaProgramMgsrForCorrectSliceSpecificMmioReads:gen9,glk,kbl,cml
+ * Before any MMIO read into slice/subslice specific registers, MCR
+ * packet control register needs to be programmed to point to any
+ * enabled s/ss pair. Otherwise, incorrect values will be returned.
+ * This means each subsequent MMIO read will be forwarded to a
+ * specific s/ss combination, but this is OK since these registers
+ * are consistent across s/ss in almost all cases. In the rare
+ * occasions, such as INSTDONE, where this value is dependent
+ * on s/ss combo, the read should be done with read_subslice_reg.
+ */
+ slice = ffs(sseu->slice_mask) - 1;
+ GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask));
+ subslice = ffs(intel_sseu_get_subslices(sseu, slice));
+ GEM_BUG_ON(!subslice);
+ subslice--;
+
+ /*
+ * We use GEN8_MCR..() macros to calculate the |mcr| value for
+ * Gen9 to address WaProgramMgsrForCorrectSliceSpecificMmioReads
+ */
+ mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
+ mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
+
+ drm_dbg(&i915->drm, "MCR slice:%d/subslice:%d = %x\n", slice, subslice, mcr);
+
+ wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
+}
+
+static void
gen9_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = gt->i915;
+ /* WaProgramMgsrForCorrectSliceSpecificMmioReads:glk,kbl,cml,gen9 */
+ gen9_wa_init_mcr(i915, wal);
+
/* WaDisableKillLogic:bxt,skl,kbl */
if (!IS_COFFEELAKE(i915) && !IS_COMETLAKE(i915))
wa_write_or(wal,
@@ -916,7 +1006,7 @@ skl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:skl */
- if (IS_SKL_GT_STEP(gt->i915, STEP_A0, STEP_H0))
+ if (IS_SKL_GRAPHICS_STEP(gt->i915, STEP_A0, STEP_H0))
wa_write_or(wal,
GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
@@ -928,7 +1018,7 @@ kbl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
gen9_gt_workarounds_init(gt, wal);
/* WaDisableDynamicCreditSharing:kbl */
- if (IS_KBL_GT_STEP(gt->i915, 0, STEP_C0))
+ if (IS_KBL_GRAPHICS_STEP(gt->i915, 0, STEP_C0))
wa_write_or(wal,
GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
@@ -1134,9 +1224,18 @@ icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_L3_COH_PIPE);
+ /* Wa_1407352427:icl,ehl */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+ PSDUNIT_CLKGATE_DIS);
+
+ /* Wa_1406680159:icl,ehl */
+ wa_write_or(wal,
+ SUBSLICE_UNIT_LEVEL_CLKGATE,
+ GWUNIT_CLKGATE_DIS);
+
/* Wa_1607087056:icl,ehl,jsl */
if (IS_ICELAKE(i915) ||
- IS_JSL_EHL_GT_STEP(i915, STEP_A0, STEP_B0))
+ IS_JSL_EHL_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
wa_write_or(wal,
SLICE_UNIT_LEVEL_CLKGATE,
L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
@@ -1190,19 +1289,19 @@ tgl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
gen12_gt_workarounds_init(gt, wal);
/* Wa_1409420604:tgl */
- if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0))
+ if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
wa_write_or(wal,
SUBSLICE_UNIT_LEVEL_CLKGATE2,
CPSSUNIT_CLKGATE_DIS);
/* Wa_1607087056:tgl also known as BUG:1409180338 */
- if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0))
+ if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
wa_write_or(wal,
SLICE_UNIT_LEVEL_CLKGATE,
L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
/* Wa_1408615072:tgl[a0] */
- if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0))
+ if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
VSUNIT_CLKGATE_DIS_TGL);
}
@@ -1215,7 +1314,7 @@ dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
gen12_gt_workarounds_init(gt, wal);
/* Wa_1607087056:dg1 */
- if (IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0))
+ if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
wa_write_or(wal,
SLICE_UNIT_LEVEL_CLKGATE,
L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
@@ -1236,7 +1335,179 @@ dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
static void
xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
+ struct drm_i915_private *i915 = gt->i915;
+
+ xehp_init_mcr(gt, wal);
+
+ /* Wa_1409757795:xehpsdv */
+ wa_write_or(wal, SCCGCTL94DC, CG3DDISURB);
+
+ /* Wa_18011725039:xehpsdv */
+ if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)) {
+ wa_masked_dis(wal, MLTICTXCTL, TDONRENDER);
+ wa_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH);
+ }
+
+ /* Wa_16011155590:xehpsdv */
+ if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
+ TSGUNIT_CLKGATE_DIS);
+
+ /* Wa_14011780169:xehpsdv */
+ if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_B0, STEP_FOREVER)) {
+ wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS |
+ GAMTLBVDBOX7_CLKGATE_DIS |
+ GAMTLBVDBOX6_CLKGATE_DIS |
+ GAMTLBVDBOX5_CLKGATE_DIS |
+ GAMTLBVDBOX4_CLKGATE_DIS |
+ GAMTLBVDBOX3_CLKGATE_DIS |
+ GAMTLBVDBOX2_CLKGATE_DIS |
+ GAMTLBVDBOX1_CLKGATE_DIS |
+ GAMTLBVDBOX0_CLKGATE_DIS |
+ GAMTLBKCR_CLKGATE_DIS |
+ GAMTLBGUC_CLKGATE_DIS |
+ GAMTLBBLT_CLKGATE_DIS);
+ wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS |
+ GAMTLBGFXA1_CLKGATE_DIS |
+ GAMTLBCOMPA0_CLKGATE_DIS |
+ GAMTLBCOMPA1_CLKGATE_DIS |
+ GAMTLBCOMPB0_CLKGATE_DIS |
+ GAMTLBCOMPB1_CLKGATE_DIS |
+ GAMTLBCOMPC0_CLKGATE_DIS |
+ GAMTLBCOMPC1_CLKGATE_DIS |
+ GAMTLBCOMPD0_CLKGATE_DIS |
+ GAMTLBCOMPD1_CLKGATE_DIS |
+ GAMTLBMERT_CLKGATE_DIS |
+ GAMTLBVEBOX3_CLKGATE_DIS |
+ GAMTLBVEBOX2_CLKGATE_DIS |
+ GAMTLBVEBOX1_CLKGATE_DIS |
+ GAMTLBVEBOX0_CLKGATE_DIS);
+ }
+
+ /* Wa_14012362059:xehpsdv */
+ wa_write_or(wal, GEN12_MERT_MOD_CTRL, FORCE_MISS_FTLB);
+
+ /* Wa_16012725990:xehpsdv */
+ if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_FOREVER))
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, VFUNIT_CLKGATE_DIS);
+
+ /* Wa_14011060649:xehpsdv */
+ wa_14011060649(gt, wal);
+
+ /* Wa_14014368820:xehpsdv */
+ wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS |
+ GLOBAL_INVALIDATION_MODE);
+}
+
+static void
+dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ struct intel_engine_cs *engine;
+ int id;
+
xehp_init_mcr(gt, wal);
+
+ /* Wa_14011060649:dg2 */
+ wa_14011060649(gt, wal);
+
+ /*
+ * Although there are per-engine instances of these registers,
+ * they technically exist outside the engine itself and are not
+ * impacted by engine resets. Furthermore, they're part of the
+ * GuC blacklist so trying to treat them as engine workarounds
+ * will result in GuC initialization failure and a wedged GPU.
+ */
+ for_each_engine(engine, gt, id) {
+ if (engine->class != VIDEO_DECODE_CLASS)
+ continue;
+
+ /* Wa_16010515920:dg2_g10 */
+ if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0))
+ wa_write_or(wal, VDBOX_CGCTL3F18(engine->mmio_base),
+ ALNUNIT_CLKGATE_DIS);
+ }
+
+ if (IS_DG2_G10(gt->i915)) {
+ /* Wa_22010523718:dg2 */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
+ CG3DDISCFEG_CLKGATE_DIS);
+
+ /* Wa_14011006942:dg2 */
+ wa_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE,
+ DSS_ROUTER_CLKGATE_DIS);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0)) {
+ /* Wa_14010680813:dg2_g10 */
+ wa_write_or(wal, GEN12_GAMSTLB_CTRL, CONTROL_BLOCK_CLKGATE_DIS |
+ EGRESS_BLOCK_CLKGATE_DIS | TAG_BLOCK_CLKGATE_DIS);
+
+ /* Wa_14010948348:dg2_g10 */
+ wa_write_or(wal, UNSLCGCTL9430, MSQDUNIT_CLKGATE_DIS);
+
+ /* Wa_14011037102:dg2_g10 */
+ wa_write_or(wal, UNSLCGCTL9444, LTCDD_CLKGATE_DIS);
+
+ /* Wa_14011371254:dg2_g10 */
+ wa_write_or(wal, SLICE_UNIT_LEVEL_CLKGATE, NODEDSS_CLKGATE_DIS);
+
+ /* Wa_14011431319:dg2_g10 */
+ wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS |
+ GAMTLBVDBOX7_CLKGATE_DIS |
+ GAMTLBVDBOX6_CLKGATE_DIS |
+ GAMTLBVDBOX5_CLKGATE_DIS |
+ GAMTLBVDBOX4_CLKGATE_DIS |
+ GAMTLBVDBOX3_CLKGATE_DIS |
+ GAMTLBVDBOX2_CLKGATE_DIS |
+ GAMTLBVDBOX1_CLKGATE_DIS |
+ GAMTLBVDBOX0_CLKGATE_DIS |
+ GAMTLBKCR_CLKGATE_DIS |
+ GAMTLBGUC_CLKGATE_DIS |
+ GAMTLBBLT_CLKGATE_DIS);
+ wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS |
+ GAMTLBGFXA1_CLKGATE_DIS |
+ GAMTLBCOMPA0_CLKGATE_DIS |
+ GAMTLBCOMPA1_CLKGATE_DIS |
+ GAMTLBCOMPB0_CLKGATE_DIS |
+ GAMTLBCOMPB1_CLKGATE_DIS |
+ GAMTLBCOMPC0_CLKGATE_DIS |
+ GAMTLBCOMPC1_CLKGATE_DIS |
+ GAMTLBCOMPD0_CLKGATE_DIS |
+ GAMTLBCOMPD1_CLKGATE_DIS |
+ GAMTLBMERT_CLKGATE_DIS |
+ GAMTLBVEBOX3_CLKGATE_DIS |
+ GAMTLBVEBOX2_CLKGATE_DIS |
+ GAMTLBVEBOX1_CLKGATE_DIS |
+ GAMTLBVEBOX0_CLKGATE_DIS);
+
+ /* Wa_14010569222:dg2_g10 */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
+ GAMEDIA_CLKGATE_DIS);
+
+ /* Wa_14011028019:dg2_g10 */
+ wa_write_or(wal, SSMCGCTL9530, RTFUNIT_CLKGATE_DIS);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0) ||
+ IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0)) {
+ /* Wa_14012362059:dg2 */
+ wa_write_or(wal, GEN12_MERT_MOD_CTRL, FORCE_MISS_FTLB);
+ }
+
+ /* Wa_1509235366:dg2 */
+ wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS |
+ GLOBAL_INVALIDATION_MODE);
+
+ /* Wa_14014830051:dg2 */
+ wa_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);
+
+ /*
+ * The following are not actually "workarounds" but rather
+ * recommended tuning settings documented in the bspec's
+ * performance guide section.
+ */
+ wa_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
+ wa_write_or(wal, GEN12_SQCM, EN_32B_ACCESS);
}
static void
@@ -1244,7 +1515,9 @@ gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = gt->i915;
- if (IS_XEHPSDV(i915))
+ if (IS_DG2(i915))
+ dg2_gt_workarounds_init(gt, wal);
+ else if (IS_XEHPSDV(i915))
xehpsdv_gt_workarounds_init(gt, wal);
else if (IS_DG1(i915))
dg1_gt_workarounds_init(gt, wal);
@@ -1518,7 +1791,7 @@ static void cfl_whitelist_build(struct intel_engine_cs *engine)
RING_FORCE_TO_NONPRIV_RANGE_4);
}
-static void cml_whitelist_build(struct intel_engine_cs *engine)
+static void allow_read_ctx_timestamp(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
@@ -1526,6 +1799,11 @@ static void cml_whitelist_build(struct intel_engine_cs *engine)
whitelist_reg_ext(w,
RING_CTX_TIMESTAMP(engine->mmio_base),
RING_FORCE_TO_NONPRIV_ACCESS_RD);
+}
+
+static void cml_whitelist_build(struct intel_engine_cs *engine)
+{
+ allow_read_ctx_timestamp(engine);
cfl_whitelist_build(engine);
}
@@ -1534,6 +1812,8 @@ static void icl_whitelist_build(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
+ allow_read_ctx_timestamp(engine);
+
switch (engine->class) {
case RENDER_CLASS:
/* WaAllowUMDToModifyHalfSliceChicken7:icl */
@@ -1569,15 +1849,9 @@ static void icl_whitelist_build(struct intel_engine_cs *engine)
/* hucStatus2RegOffset */
whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
RING_FORCE_TO_NONPRIV_ACCESS_RD);
- whitelist_reg_ext(w,
- RING_CTX_TIMESTAMP(engine->mmio_base),
- RING_FORCE_TO_NONPRIV_ACCESS_RD);
break;
default:
- whitelist_reg_ext(w,
- RING_CTX_TIMESTAMP(engine->mmio_base),
- RING_FORCE_TO_NONPRIV_ACCESS_RD);
break;
}
}
@@ -1586,6 +1860,8 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
+ allow_read_ctx_timestamp(engine);
+
switch (engine->class) {
case RENDER_CLASS:
/*
@@ -1602,16 +1878,17 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine)
RING_FORCE_TO_NONPRIV_ACCESS_RD |
RING_FORCE_TO_NONPRIV_RANGE_4);
- /* Wa_1808121037:tgl */
+ /*
+ * Wa_1808121037:tgl
+ * Wa_14012131227:dg1
+ * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p
+ */
whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
/* Wa_1806527549:tgl */
whitelist_reg(w, HIZ_CHICKEN);
break;
default:
- whitelist_reg_ext(w,
- RING_CTX_TIMESTAMP(engine->mmio_base),
- RING_FORCE_TO_NONPRIV_ACCESS_RD);
break;
}
}
@@ -1623,13 +1900,46 @@ static void dg1_whitelist_build(struct intel_engine_cs *engine)
tgl_whitelist_build(engine);
/* GEN:BUG:1409280441:dg1 */
- if (IS_DG1_GT_STEP(engine->i915, STEP_A0, STEP_B0) &&
+ if (IS_DG1_GRAPHICS_STEP(engine->i915, STEP_A0, STEP_B0) &&
(engine->class == RENDER_CLASS ||
engine->class == COPY_ENGINE_CLASS))
whitelist_reg_ext(w, RING_ID(engine->mmio_base),
RING_FORCE_TO_NONPRIV_ACCESS_RD);
}
+static void xehpsdv_whitelist_build(struct intel_engine_cs *engine)
+{
+ allow_read_ctx_timestamp(engine);
+}
+
+static void dg2_whitelist_build(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ allow_read_ctx_timestamp(engine);
+
+ switch (engine->class) {
+ case RENDER_CLASS:
+ /*
+ * Wa_1507100340:dg2_g10
+ *
+ * This covers 4 registers which are next to one another:
+ * - PS_INVOCATION_COUNT
+ * - PS_INVOCATION_COUNT_UDW
+ * - PS_DEPTH_COUNT
+ * - PS_DEPTH_COUNT_UDW
+ */
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0))
+ whitelist_reg_ext(w, PS_INVOCATION_COUNT,
+ RING_FORCE_TO_NONPRIV_ACCESS_RD |
+ RING_FORCE_TO_NONPRIV_RANGE_4);
+
+ break;
+ default:
+ break;
+ }
+}
+
void intel_engine_init_whitelist(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
@@ -1637,7 +1947,11 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
wa_init_start(w, "whitelist", engine->name);
- if (IS_DG1(i915))
+ if (IS_DG2(i915))
+ dg2_whitelist_build(engine);
+ else if (IS_XEHPSDV(i915))
+ xehpsdv_whitelist_build(engine);
+ else if (IS_DG1(i915))
dg1_whitelist_build(engine);
else if (GRAPHICS_VER(i915) == 12)
tgl_whitelist_build(engine);
@@ -1711,13 +2025,119 @@ engine_fake_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
CMD_CCTL_MOCS_OVERRIDE(mocs, mocs));
}
}
+
+static bool needs_wa_1308578152(struct intel_engine_cs *engine)
+{
+ u64 dss_mask = intel_sseu_get_subslices(&engine->gt->info.sseu, 0);
+
+ return (dss_mask & GENMASK(GEN_DSS_PER_GSLICE - 1, 0)) == 0;
+}
+
static void
rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
- if (IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0) ||
- IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0)) {
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) {
+ /* Wa_14013392000:dg2_g11 */
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_ENABLE_LARGE_GRF_MODE);
+
+ /* Wa_16011620976:dg2_g11 */
+ wa_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0) ||
+ IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) {
+ /* Wa_14012419201:dg2 */
+ wa_masked_en(wal, GEN9_ROW_CHICKEN4,
+ GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_C0) ||
+ IS_DG2_G11(engine->i915)) {
+ /*
+ * Wa_22012826095:dg2
+ * Wa_22013059131:dg2
+ */
+ wa_write_clr_set(wal, LSC_CHICKEN_BIT_0_UDW,
+ MAXREQS_PER_BANK,
+ REG_FIELD_PREP(MAXREQS_PER_BANK, 2));
+
+ /* Wa_22013059131:dg2 */
+ wa_write_or(wal, LSC_CHICKEN_BIT_0,
+ FORCE_1_SUB_MESSAGE_PER_FRAGMENT);
+ }
+
+ /* Wa_1308578152:dg2_g10 when first gslice is fused off */
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_C0) &&
+ needs_wa_1308578152(engine)) {
+ wa_masked_dis(wal, GEN12_CS_DEBUG_MODE1_CCCSUNIT_BE_COMMON,
+ GEN12_REPLAY_MODE_GRANULARITY);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_FOREVER) ||
+ IS_DG2_G11(engine->i915)) {
+ /* Wa_22013037850:dg2 */
+ wa_write_or(wal, LSC_CHICKEN_BIT_0_UDW,
+ DISABLE_128B_EVICTION_COMMAND_UDW);
+
+ /* Wa_22012856258:dg2 */
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2,
+ GEN12_DISABLE_READ_SUPPRESSION);
+
+ /*
+ * Wa_22010960976:dg2
+ * Wa_14013347512:dg2
+ */
+ wa_masked_dis(wal, GEN12_HDC_CHICKEN0,
+ LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) {
+ /*
+ * Wa_1608949956:dg2_g10
+ * Wa_14010198302:dg2_g10
+ */
+ wa_masked_en(wal, GEN8_ROW_CHICKEN,
+ MDQ_ARBITRATION_MODE | UGM_BACKUP_MODE);
+
+ /*
+ * Wa_14010918519:dg2_g10
+ *
+ * LSC_CHICKEN_BIT_0 always reads back as 0 in this stepping,
+ * so ignoring verification.
+ */
+ wa_add(wal, LSC_CHICKEN_BIT_0_UDW, 0,
+ FORCE_SLM_FENCE_SCOPE_TO_TILE | FORCE_UGM_FENCE_SCOPE_TO_TILE,
+ 0, false);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) {
+ /* Wa_22010430635:dg2 */
+ wa_masked_en(wal,
+ GEN9_ROW_CHICKEN4,
+ GEN12_DISABLE_GRF_CLEAR);
+
+ /* Wa_14010648519:dg2 */
+ wa_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_C0) ||
+ IS_DG2_G11(engine->i915)) {
+ /* Wa_22012654132:dg2 */
+ wa_add(wal, GEN10_CACHE_MODE_SS, 0,
+ _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
+ 0 /* write-only, so skip validation */,
+ true);
+ }
+
+ /* Wa_14013202645:dg2 */
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_C0) ||
+ IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0))
+ wa_write_or(wal, RT_CTRL, DIS_NULL_QUERY);
+
+ if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
+ IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) {
/*
* Wa_1607138336:tgl[a0],dg1[a0]
* Wa_1607063988:tgl[a0],dg1[a0]
@@ -1727,7 +2147,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);
}
- if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0)) {
+ if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) {
/*
* Wa_1606679103:tgl
* (see also Wa_1606682166:icl)
@@ -1762,7 +2182,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
}
if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
- IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0) ||
+ IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
/* Wa_1409804808:tgl,rkl,dg1[a0],adl-s,adl-p */
wa_masked_en(wal, GEN7_ROW_CHICKEN2,
@@ -1775,8 +2195,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
}
-
- if (IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0) ||
+ if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
/*
* Wa_1607030317:tgl
@@ -1859,15 +2278,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
- /* Wa_1407352427:icl,ehl */
- wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
- PSDUNIT_CLKGATE_DIS);
-
- /* Wa_1406680159:icl,ehl */
- wa_write_or(wal,
- SUBSLICE_UNIT_LEVEL_CLKGATE,
- GWUNIT_CLKGATE_DIS);
-
/*
* Wa_1408767742:icl[a2..forever],ehl[all]
* Wa_1605460711:icl[a0..c0]
@@ -2138,7 +2548,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
struct drm_i915_private *i915 = engine->i915;
/* WaKBLVECSSemaphoreWaitPoll:kbl */
- if (IS_KBL_GT_STEP(i915, STEP_A0, STEP_F0)) {
+ if (IS_KBL_GRAPHICS_STEP(i915, STEP_A0, STEP_F0)) {
wa_write(wal,
RING_SEMA_WAIT_POLL(engine->mmio_base),
1);
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 8b89215afe46..c0637bf799a3 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -17,7 +17,7 @@ static int mock_timeline_pin(struct intel_timeline *tl)
{
int err;
- if (WARN_ON(!i915_gem_object_trylock(tl->hwsp_ggtt->obj)))
+ if (WARN_ON(!i915_gem_object_trylock(tl->hwsp_ggtt->obj, NULL)))
return -EBUSY;
err = intel_timeline_pin_map(tl);
@@ -35,9 +35,31 @@ static void mock_timeline_unpin(struct intel_timeline *tl)
atomic_dec(&tl->pin_count);
}
+static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
+{
+ struct i915_address_space *vm = &ggtt->vm;
+ struct drm_i915_private *i915 = vm->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ goto err;
+
+ return vma;
+
+err:
+ i915_gem_object_put(obj);
+ return vma;
+}
+
static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
{
- const unsigned long sz = PAGE_SIZE / 2;
+ const unsigned long sz = PAGE_SIZE;
struct intel_ring *ring;
ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
@@ -50,15 +72,11 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
ring->vaddr = (void *)(ring + 1);
atomic_set(&ring->pin_count, 1);
- ring->vma = i915_vma_alloc();
- if (!ring->vma) {
+ ring->vma = create_ring_vma(engine->gt->ggtt, PAGE_SIZE);
+ if (IS_ERR(ring->vma)) {
kfree(ring);
return NULL;
}
- i915_active_init(&ring->vma->active, NULL, NULL, 0);
- __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(ring->vma));
- __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &ring->vma->node.flags);
- ring->vma->node.size = sz;
intel_ring_update_space(ring);
@@ -67,8 +85,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
static void mock_ring_free(struct intel_ring *ring)
{
- i915_active_fini(&ring->vma->active);
- i915_vma_free(ring->vma);
+ i915_vma_put(ring->vma);
kfree(ring);
}
@@ -125,6 +142,7 @@ static void mock_context_unpin(struct intel_context *ce)
static void mock_context_post_unpin(struct intel_context *ce)
{
+ i915_vma_unpin(ce->ring->vma);
}
static void mock_context_destroy(struct kref *ref)
@@ -169,7 +187,7 @@ static int mock_context_alloc(struct intel_context *ce)
static int mock_context_pre_pin(struct intel_context *ce,
struct i915_gem_ww_ctx *ww, void **unused)
{
- return 0;
+ return i915_vma_pin_ww(ce->ring->vma, ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
}
static int mock_context_pin(struct intel_context *ce, void *unused)
@@ -327,7 +345,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
struct mock_engine *engine;
GEM_BUG_ON(id >= I915_NUM_ENGINES);
- GEM_BUG_ON(!i915->gt.uncore);
+ GEM_BUG_ON(!to_gt(i915)->uncore);
engine = kzalloc(sizeof(*engine) + PAGE_SIZE, GFP_KERNEL);
if (!engine)
@@ -335,8 +353,8 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
/* minimal engine setup for requests */
engine->base.i915 = i915;
- engine->base.gt = &i915->gt;
- engine->base.uncore = i915->gt.uncore;
+ engine->base.gt = to_gt(i915);
+ engine->base.uncore = to_gt(i915)->uncore;
snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
engine->base.id = id;
engine->base.mask = BIT(id);
@@ -359,8 +377,8 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
engine->base.release = mock_engine_release;
- i915->gt.engine[id] = &engine->base;
- i915->gt.engine_class[0][id] = &engine->base;
+ to_gt(i915)->engine[id] = &engine->base;
+ to_gt(i915)->engine_class[0][id] = &engine->base;
/* fake hw queue */
spin_lock_init(&engine->hw_lock);
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index fa7b99a671dd..76fbae358072 100644
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -442,7 +442,7 @@ int intel_context_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_active_context),
SUBTEST(live_remote_context),
};
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
if (intel_gt_is_wedged(gt))
return 0;
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine.c b/drivers/gpu/drm/i915/gt/selftest_engine.c
index 262764f6d90a..57fea9ea1705 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine.c
@@ -12,7 +12,7 @@ int intel_engine_live_selftests(struct drm_i915_private *i915)
live_engine_pm_selftests,
NULL,
};
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
typeof(*tests) *fn;
for (fn = tests; *fn; fn++) {
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index 64abf5feabfa..1b75f478d1b8 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
@@ -361,10 +361,10 @@ int intel_engine_cs_perf_selftests(struct drm_i915_private *i915)
SUBTEST(perf_mi_noop),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
- return intel_gt_live_subtests(tests, &i915->gt);
+ return intel_gt_live_subtests(tests, to_gt(i915));
}
static int intel_mmio_bases_check(void *arg)
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index 6e6e4d747cca..273d440a53e3 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -378,13 +378,13 @@ int intel_heartbeat_live_selftests(struct drm_i915_private *i915)
int saved_hangcheck;
int err;
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
saved_hangcheck = i915->params.enable_hangcheck;
i915->params.enable_hangcheck = INT_MAX;
- err = intel_gt_live_subtests(tests, &i915->gt);
+ err = intel_gt_live_subtests(tests, to_gt(i915));
i915->params.enable_hangcheck = saved_hangcheck;
return err;
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
index 75569666105d..8af261831470 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
@@ -214,6 +214,31 @@ static int live_engine_timestamps(void *arg)
return 0;
}
+static int __spin_until_busier(struct intel_engine_cs *engine, ktime_t busyness)
+{
+ ktime_t start, unused, dt;
+
+ if (!intel_engine_uses_guc(engine))
+ return 0;
+
+ /*
+ * In GuC mode of submission, the busyness stats may get updated after
+ * the batch starts running. Poll for a change in busyness and time out
+ * after 10 ms.
+ */
+ start = ktime_get();
+ while (intel_engine_get_busy_time(engine, &unused) == busyness) {
+ dt = ktime_get() - start;
+ if (dt > 10000000) {
+ pr_err("active wait timed out %lld\n", dt);
+ ENGINE_TRACE(engine, "active wait time out %lld\n", dt);
+ return -ETIME;
+ }
+ }
+
+ return 0;
+}
+
static int live_engine_busy_stats(void *arg)
{
struct intel_gt *gt = arg;
@@ -232,6 +257,7 @@ static int live_engine_busy_stats(void *arg)
GEM_BUG_ON(intel_gt_pm_is_awake(gt));
for_each_engine(engine, gt, id) {
struct i915_request *rq;
+ ktime_t busyness, dummy;
ktime_t de, dt;
ktime_t t[2];
@@ -274,16 +300,23 @@ static int live_engine_busy_stats(void *arg)
}
i915_request_add(rq);
+ busyness = intel_engine_get_busy_time(engine, &dummy);
if (!igt_wait_for_spinner(&spin, rq)) {
intel_gt_set_wedged(engine->gt);
err = -ETIME;
goto end;
}
+ err = __spin_until_busier(engine, busyness);
+ if (err) {
+ GEM_TRACE_DUMP();
+ goto end;
+ }
+
ENGINE_TRACE(engine, "measuring busy time\n");
preempt_disable();
de = intel_engine_get_busy_time(engine, &t[0]);
- udelay(100);
+ mdelay(10);
de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de);
preempt_enable();
dt = ktime_sub(t[1], t[0]);
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index b367ecfa42de..e10da897e07a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -4502,11 +4502,11 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_virtual_reset),
};
- if (i915->gt.submission_method != INTEL_SUBMISSION_ELSP)
+ if (to_gt(i915)->submission_method != INTEL_SUBMISSION_ELSP)
return 0;
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
- return intel_gt_live_subtests(tests, &i915->gt);
+ return intel_gt_live_subtests(tests, to_gt(i915));
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index 55c5cdb99f45..8bf62a5826cc 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -193,10 +193,10 @@ int intel_gt_pm_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_gt_resume),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
- return intel_gt_live_subtests(tests, &i915->gt);
+ return intel_gt_live_subtests(tests, to_gt(i915));
}
int intel_gt_pm_late_selftests(struct drm_i915_private *i915)
@@ -210,8 +210,8 @@ int intel_gt_pm_late_selftests(struct drm_i915_private *i915)
SUBTEST(live_rc6_ctx_wa),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
- return intel_gt_live_subtests(tests, &i915->gt);
+ return intel_gt_live_subtests(tests, to_gt(i915));
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 7e2d99dd012d..15d63435ec4d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -471,7 +471,8 @@ static int igt_reset_nop_engine(void *arg)
count = 0;
st_engine_heartbeat_disable(engine);
- set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags));
do {
int i;
@@ -528,7 +529,7 @@ static int igt_reset_nop_engine(void *arg)
break;
}
} while (time_before(jiffies, end_time));
- clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
st_engine_heartbeat_enable(engine);
pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
@@ -582,7 +583,8 @@ static int igt_reset_fail_engine(void *arg)
}
st_engine_heartbeat_disable(engine);
- set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags));
force_reset_timeout(engine);
err = intel_engine_reset(engine, NULL);
@@ -679,7 +681,7 @@ static int igt_reset_fail_engine(void *arg)
out:
pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
skip:
- clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
st_engine_heartbeat_enable(engine);
intel_context_put(ce);
@@ -734,7 +736,8 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
reset_engine_count = i915_reset_engine_count(global, engine);
st_engine_heartbeat_disable(engine);
- set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags));
count = 0;
do {
struct i915_request *rq = NULL;
@@ -824,7 +827,7 @@ restore:
if (err)
break;
} while (time_before(jiffies, end_time));
- clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
st_engine_heartbeat_enable(engine);
pr_info("%s: Completed %lu %s resets\n",
engine->name, count, active ? "active" : "idle");
@@ -1042,7 +1045,8 @@ static int __igt_reset_engines(struct intel_gt *gt,
yield(); /* start all threads before we begin */
st_engine_heartbeat_disable_no_pm(engine);
- set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags));
do {
struct i915_request *rq = NULL;
struct intel_selftest_saved_policy saved;
@@ -1165,7 +1169,7 @@ restore:
if (err)
break;
} while (time_before(jiffies, end_time));
- clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
st_engine_heartbeat_enable_no_pm(engine);
pr_info("i915_reset_engine(%s:%s): %lu resets\n",
@@ -2014,7 +2018,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_reset_evict_fence),
SUBTEST(igt_handle_error),
};
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
intel_wakeref_t wakeref;
int err;
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index b0977a3b699b..618c905daa19 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -1847,5 +1847,5 @@ int intel_lrc_live_selftests(struct drm_i915_private *i915)
if (!HAS_LOGICAL_RING_CONTEXTS(i915))
return 0;
- return intel_gt_live_subtests(tests, &i915->gt);
+ return intel_gt_live_subtests(tests, to_gt(i915));
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
index 12ef2837c89b..fa4293d2944f 100644
--- a/drivers/gpu/drm/i915/gt/selftest_migrate.c
+++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c
@@ -49,6 +49,7 @@ static int copy(struct intel_migrate *migrate,
if (IS_ERR(src))
return 0;
+ sz = src->base.size;
dst = i915_gem_object_create_internal(i915, sz);
if (IS_ERR(dst))
goto err_free_src;
@@ -441,7 +442,7 @@ int intel_migrate_live_selftests(struct drm_i915_private *i915)
SUBTEST(thread_global_copy),
SUBTEST(thread_global_clear),
};
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
if (!gt->migrate.context)
return 0;
@@ -464,7 +465,7 @@ create_init_lmem_internal(struct intel_gt *gt, size_t sz, bool try_lmem)
return obj;
}
- i915_gem_object_trylock(obj);
+ i915_gem_object_trylock(obj, NULL);
err = i915_gem_object_pin_pages(obj);
if (err) {
i915_gem_object_unlock(obj);
@@ -657,7 +658,7 @@ int intel_migrate_perf_selftests(struct drm_i915_private *i915)
SUBTEST(perf_clear_blt),
SUBTEST(perf_copy_blt),
};
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
if (intel_gt_is_wedged(gt))
return 0;
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
index 13d25bf2a94a..c1d861333c44 100644
--- a/drivers/gpu/drm/i915/gt/selftest_mocs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -451,5 +451,5 @@ int intel_mocs_live_selftests(struct drm_i915_private *i915)
if (!get_mocs_settings(i915, &table))
return 0;
- return intel_gt_live_subtests(tests, &i915->gt);
+ return intel_gt_live_subtests(tests, to_gt(i915));
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 7a50c9f4071b..8a873f6bda7f 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -376,7 +376,7 @@ int intel_reset_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_atomic_reset),
SUBTEST(igt_atomic_engine_reset),
};
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
if (!intel_has_gpu_reset(gt))
return 0;
diff --git a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
index 041954408d0f..70f9ac1ec2c7 100644
--- a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
@@ -291,8 +291,8 @@ int intel_ring_submission_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_ctx_switch_wa),
};
- if (i915->gt.submission_method > INTEL_SUBMISSION_RING)
+ if (to_gt(i915)->submission_method > INTEL_SUBMISSION_RING)
return 0;
- return intel_gt_live_subtests(tests, &i915->gt);
+ return intel_gt_live_subtests(tests, to_gt(i915));
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_slpc.c b/drivers/gpu/drm/i915/gt/selftest_slpc.c
index 9334bad131a2..b768cea5943d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_slpc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_slpc.c
@@ -39,7 +39,7 @@ static int slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 freq)
static int live_slpc_clamp_min(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
struct intel_rps *rps = &gt->rps;
struct intel_engine_cs *engine;
@@ -166,7 +166,7 @@ static int live_slpc_clamp_min(void *arg)
static int live_slpc_clamp_max(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
struct intel_guc_slpc *slpc;
struct intel_rps *rps;
struct intel_engine_cs *engine;
@@ -304,7 +304,7 @@ int intel_slpc_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_slpc_clamp_min),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
return i915_live_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index d0b6a3afcf44..e2eb686a9763 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -159,7 +159,7 @@ static int mock_hwsp_freelist(void *arg)
INIT_RADIX_TREE(&state.cachelines, GFP_KERNEL);
state.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed);
- state.gt = &i915->gt;
+ state.gt = to_gt(i915);
/*
* Create a bunch of timelines and check that their HWSP do not overlap.
@@ -1416,8 +1416,8 @@ int intel_timeline_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_hwsp_rollover_user),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
- return intel_gt_live_subtests(tests, &i915->gt);
+ return intel_gt_live_subtests(tests, to_gt(i915));
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 962e91ba3be4..0287c2573c51 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -1387,8 +1387,8 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_engine_reset_workarounds),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
- return intel_gt_live_subtests(tests, &i915->gt);
+ return intel_gt_live_subtests(tests, to_gt(i915));
}
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
index ba10bd374cee..fe5d7d261797 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
@@ -144,6 +144,7 @@ enum intel_guc_action {
INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE = 0x4600,
INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
INTEL_GUC_ACTION_RESET_CLIENT = 0x5507,
+ INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
INTEL_GUC_ACTION_LIMIT
};
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 31cf9fb48c7e..f9240d4baa69 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -95,6 +95,11 @@ struct intel_guc {
*/
struct ida guc_ids;
/**
+ * @num_guc_ids: Number of guc_ids; selftests can reduce this number
+ * while testing.
+ */
+ int num_guc_ids;
+ /**
* @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc
*/
unsigned long *guc_ids_bitmap;
@@ -138,6 +143,8 @@ struct intel_guc {
u32 ads_regset_size;
/** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
u32 ads_golden_ctxt_size;
+ /** @ads_engine_usage_size: size of engine usage in the ADS */
+ u32 ads_engine_usage_size;
/** @lrc_desc_pool: object allocated to hold the GuC LRC descriptor pool */
struct i915_vma *lrc_desc_pool;
@@ -172,6 +179,41 @@ struct intel_guc {
/** @send_mutex: used to serialize the intel_guc_send actions */
struct mutex send_mutex;
+
+ /**
+ * @timestamp: GT timestamp object that stores a copy of the timestamp
+ * and adjusts it for overflow using a worker.
+ */
+ struct {
+ /**
+ * @lock: Lock protecting the below fields and the engine stats.
+ */
+ spinlock_t lock;
+
+ /**
+ * @gt_stamp: 64 bit extended value of the GT timestamp.
+ */
+ u64 gt_stamp;
+
+ /**
+ * @ping_delay: Period for polling the GT timestamp for
+ * overflow.
+ */
+ unsigned long ping_delay;
+
+ /**
+ * @work: Periodic work to adjust GT timestamp, engine and
+ * context usage for overflows.
+ */
+ struct delayed_work work;
+ } timestamp;
+
+#ifdef CONFIG_DRM_I915_SELFTEST
+ /**
+ * @number_guc_id_stolen: The number of guc_ids that have been stolen
+ */
+ int number_guc_id_stolen;
+#endif
};
static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 621c893a009f..1a1edae67e4e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -26,6 +26,8 @@
* | guc_policies |
* +---------------------------------------+
* | guc_gt_system_info |
+ * +---------------------------------------+
+ * | guc_engine_usage |
* +---------------------------------------+ <== static
* | guc_mmio_reg[countA] (engine 0.0) |
* | guc_mmio_reg[countB] (engine 0.1) |
@@ -47,6 +49,7 @@ struct __guc_ads_blob {
struct guc_ads ads;
struct guc_policies policies;
struct guc_gt_system_info system_info;
+ struct guc_engine_usage engine_usage;
/* From here on, location is dynamic! Refer to above diagram. */
struct guc_mmio_reg regset[0];
} __packed;
@@ -628,3 +631,21 @@ void intel_guc_ads_reset(struct intel_guc *guc)
guc_ads_private_data_reset(guc);
}
+
+u32 intel_guc_engine_usage_offset(struct intel_guc *guc)
+{
+ struct __guc_ads_blob *blob = guc->ads_blob;
+ u32 base = intel_guc_ggtt_offset(guc, guc->ads_vma);
+ u32 offset = base + ptr_offset(blob, engine_usage);
+
+ return offset;
+}
+
+struct guc_engine_usage_record *intel_guc_engine_usage(struct intel_engine_cs *engine)
+{
+ struct intel_guc *guc = &engine->gt->uc.guc;
+ struct __guc_ads_blob *blob = guc->ads_blob;
+ u8 guc_class = engine_class_to_guc_class(engine->class);
+
+ return &blob->engine_usage.engines[guc_class][ilog2(engine->logical_mask)];
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
index 3d85051d57e4..e74c110facff 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
@@ -6,8 +6,11 @@
#ifndef _INTEL_GUC_ADS_H_
#define _INTEL_GUC_ADS_H_
+#include <linux/types.h>
+
struct intel_guc;
struct drm_printer;
+struct intel_engine_cs;
int intel_guc_ads_create(struct intel_guc *guc);
void intel_guc_ads_destroy(struct intel_guc *guc);
@@ -15,5 +18,7 @@ void intel_guc_ads_init_late(struct intel_guc *guc);
void intel_guc_ads_reset(struct intel_guc *guc);
void intel_guc_ads_print_policy_info(struct intel_guc *guc,
struct drm_printer *p);
+struct guc_engine_usage_record *intel_guc_engine_usage(struct intel_engine_cs *engine);
+u32 intel_guc_engine_usage_offset(struct intel_guc *guc);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index a0cc34be7b56..aa6dd6415202 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -523,6 +523,15 @@ static inline bool ct_deadlocked(struct intel_guc_ct *ct)
CT_ERROR(ct, "Communication stalled for %lld ms, desc status=%#x,%#x\n",
ktime_ms_delta(ktime_get(), ct->stall_time),
send->status, recv->status);
+ CT_ERROR(ct, "H2G Space: %u (Bytes)\n",
+ atomic_read(&ct->ctbs.send.space) * 4);
+ CT_ERROR(ct, "Head: %u (Dwords)\n", ct->ctbs.send.desc->head);
+ CT_ERROR(ct, "Tail: %u (Dwords)\n", ct->ctbs.send.desc->tail);
+ CT_ERROR(ct, "G2H Space: %u (Bytes)\n",
+ atomic_read(&ct->ctbs.recv.space) * 4);
+ CT_ERROR(ct, "Head: %u\n (Dwords)", ct->ctbs.recv.desc->head);
+ CT_ERROR(ct, "Tail: %u\n (Dwords)", ct->ctbs.recv.desc->tail);
+
ct->ctbs.send.broken = true;
}
@@ -582,12 +591,19 @@ static inline bool h2g_has_room(struct intel_guc_ct *ct, u32 len_dw)
static int has_room_nb(struct intel_guc_ct *ct, u32 h2g_dw, u32 g2h_dw)
{
+ bool h2g = h2g_has_room(ct, h2g_dw);
+ bool g2h = g2h_has_room(ct, g2h_dw);
+
lockdep_assert_held(&ct->ctbs.send.lock);
- if (unlikely(!h2g_has_room(ct, h2g_dw) || !g2h_has_room(ct, g2h_dw))) {
+ if (unlikely(!h2g || !g2h)) {
if (ct->stall_time == KTIME_MAX)
ct->stall_time = ktime_get();
+ /* Be paranoid and kick G2H tasklet to free credits */
+ if (!g2h)
+ tasklet_hi_schedule(&ct->receive_tasklet);
+
if (unlikely(ct_deadlocked(ct)))
return -EPIPE;
else
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index 196424be0998..31420ce1ce6b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -40,9 +40,8 @@ static void guc_prepare_xfer(struct intel_uncore *uncore)
}
}
-/* Copy RSA signature from the fw image to HW for verification */
-static int guc_xfer_rsa(struct intel_uc_fw *guc_fw,
- struct intel_uncore *uncore)
+static int guc_xfer_rsa_mmio(struct intel_uc_fw *guc_fw,
+ struct intel_uncore *uncore)
{
u32 rsa[UOS_RSA_SCRATCH_COUNT];
size_t copied;
@@ -58,6 +57,27 @@ static int guc_xfer_rsa(struct intel_uc_fw *guc_fw,
return 0;
}
+static int guc_xfer_rsa_vma(struct intel_uc_fw *guc_fw,
+ struct intel_uncore *uncore)
+{
+ struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
+
+ intel_uncore_write(uncore, UOS_RSA_SCRATCH(0),
+ intel_guc_ggtt_offset(guc, guc_fw->rsa_data));
+
+ return 0;
+}
+
+/* Copy RSA signature from the fw image to HW for verification */
+static int guc_xfer_rsa(struct intel_uc_fw *guc_fw,
+ struct intel_uncore *uncore)
+{
+ if (guc_fw->rsa_data)
+ return guc_xfer_rsa_vma(guc_fw, uncore);
+ else
+ return guc_xfer_rsa_mmio(guc_fw, uncore);
+}
+
/*
* Read the GuC status register (GUC_STATUS) and store it in the
* specified location; then return a boolean indicating whether
@@ -142,7 +162,10 @@ int intel_guc_fw_upload(struct intel_guc *guc)
/*
* Note that GuC needs the CSS header plus uKernel code to be copied
* by the DMA engine in one operation, whereas the RSA signature is
- * loaded via MMIO.
+ * loaded separately, either by copying it to the UOS_RSA_SCRATCH
+ * register (if key size <= 256 bytes) or through a ggtt-pinned vma (if
+ * key size > 256 bytes). The RSA size and therefore the way we provide
+ * it to the HW is fixed for each platform and hard-coded in the bootrom.
*/
ret = guc_xfer_rsa(&guc->fw, uncore);
if (ret)
@@ -164,6 +187,6 @@ int intel_guc_fw_upload(struct intel_guc *guc)
return 0;
out:
- intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_FAIL);
+ intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 722933e26347..7072e30e99f4 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -294,6 +294,19 @@ struct guc_ads {
u32 reserved[15];
} __packed;
+/* Engine usage stats */
+struct guc_engine_usage_record {
+ u32 current_context_index;
+ u32 last_switch_in_stamp;
+ u32 reserved0;
+ u32 total_runtime;
+ u32 reserved1[4];
+} __packed;
+
+struct guc_engine_usage {
+ struct guc_engine_usage_record engines[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
+} __packed;
+
/* GuC logging structures */
enum guc_log_buffer_type {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
index ac1ee1d5ce10..fe6ab7550a14 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
@@ -15,9 +15,12 @@
struct intel_guc;
-#ifdef CONFIG_DRM_I915_DEBUG_GUC
+#if defined(CONFIG_DRM_I915_DEBUG_GUC)
#define CRASH_BUFFER_SIZE SZ_2M
#define DEBUG_BUFFER_SIZE SZ_16M
+#elif defined(CONFIG_DRM_I915_DEBUG_GEM)
+#define CRASH_BUFFER_SIZE SZ_1M
+#define DEBUG_BUFFER_SIZE SZ_2M
#else
#define CRASH_BUFFER_SIZE SZ_8K
#define DEBUG_BUFFER_SIZE SZ_64K
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
index 46026c2c1722..ddfbe334689f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
@@ -10,28 +10,80 @@
#include "intel_guc.h"
#include "intel_guc_log.h"
#include "intel_guc_log_debugfs.h"
+#include "intel_uc.h"
+
+static u32 obj_to_guc_log_dump_size(struct drm_i915_gem_object *obj)
+{
+ u32 size;
+
+ if (!obj)
+ return PAGE_SIZE;
+
+ /* "0x%08x 0x%08x 0x%08x 0x%08x\n" => 16 bytes -> 44 chars => x2.75 */
+ size = ((obj->base.size * 11) + 3) / 4;
+
+ /* Add padding for final blank line, any extra header info, etc. */
+ size = PAGE_ALIGN(size + PAGE_SIZE);
+
+ return size;
+}
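Editorial note, as a quick sanity check of the sizing above: a 2 MiB log object expands to (2 MiB * 11 + 3) / 4 ≈ 5.5 MiB of text at the 2.75x hexdump ratio, and the PAGE_ALIGN(size + PAGE_SIZE) step then rounds that up by roughly one extra page to cover the trailing blank line and any header text.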
+
+static u32 guc_log_dump_size(struct intel_guc_log *log)
+{
+ struct intel_guc *guc = log_to_guc(log);
+
+ if (!intel_guc_is_supported(guc))
+ return PAGE_SIZE;
+
+ if (!log->vma)
+ return PAGE_SIZE;
+
+ return obj_to_guc_log_dump_size(log->vma->obj);
+}
static int guc_log_dump_show(struct seq_file *m, void *data)
{
struct drm_printer p = drm_seq_file_printer(m);
+ int ret;
- return intel_guc_log_dump(m->private, &p, false);
+ ret = intel_guc_log_dump(m->private, &p, false);
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && seq_has_overflowed(m))
+ pr_warn_once("preallocated size:%zx for %s exceeded\n",
+ m->size, __func__);
+
+ return ret;
+}
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE_WITH_SIZE(guc_log_dump, guc_log_dump_size);
+
+static u32 guc_load_err_dump_size(struct intel_guc_log *log)
+{
+ struct intel_guc *guc = log_to_guc(log);
+ struct intel_uc *uc = container_of(guc, struct intel_uc, guc);
+
+ if (!intel_guc_is_supported(guc))
+ return PAGE_SIZE;
+
+ return obj_to_guc_log_dump_size(uc->load_err_log);
}
-DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(guc_log_dump);
static int guc_load_err_log_dump_show(struct seq_file *m, void *data)
{
struct drm_printer p = drm_seq_file_printer(m);
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && seq_has_overflowed(m))
+ pr_warn_once("preallocated size:%zx for %s exceeded\n",
+ m->size, __func__);
+
return intel_guc_log_dump(m->private, &p, true);
}
-DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(guc_load_err_log_dump);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE_WITH_SIZE(guc_load_err_log_dump, guc_load_err_dump_size);
static int guc_log_level_get(void *data, u64 *val)
{
struct intel_guc_log *log = data;
- if (!intel_guc_is_used(log_to_guc(log)))
+ if (!log->vma)
return -ENODEV;
*val = intel_guc_log_get_level(log);
@@ -43,7 +95,7 @@ static int guc_log_level_set(void *data, u64 val)
{
struct intel_guc_log *log = data;
- if (!intel_guc_is_used(log_to_guc(log)))
+ if (!log->vma)
return -ENODEV;
return intel_guc_log_set_level(log, val);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index 65a3e7fdb2b2..13b27b8ff74e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -79,29 +79,6 @@ static void slpc_mem_set_disabled(struct slpc_shared_data *data,
slpc_mem_set_param(data, enable_id, 0);
}
-int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
-{
- struct intel_guc *guc = slpc_to_guc(slpc);
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
- u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
- int err;
-
- GEM_BUG_ON(slpc->vma);
-
- err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
- if (unlikely(err)) {
- drm_err(&i915->drm,
- "Failed to allocate SLPC struct (err=%pe)\n",
- ERR_PTR(err));
- return err;
- }
-
- slpc->max_freq_softlimit = 0;
- slpc->min_freq_softlimit = 0;
-
- return err;
-}
-
static u32 slpc_get_state(struct intel_guc_slpc *slpc)
{
struct slpc_shared_data *data;
@@ -203,6 +180,86 @@ static int slpc_unset_param(struct intel_guc_slpc *slpc,
return guc_action_slpc_unset_param(guc, id);
}
+static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
+{
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ struct intel_guc *guc = slpc_to_guc(slpc);
+ intel_wakeref_t wakeref;
+ int ret = 0;
+
+ lockdep_assert_held(&slpc->lock);
+
+ if (!intel_guc_is_ready(guc))
+ return -ENODEV;
+
+ /*
+ * Unlike intel_guc_slpc_set_min_freq(), the softlimit is not updated
+ * here since this is used to temporarily change the min freq, for
+ * example during a waitboost. The caller is responsible for checking
+ * bounds.
+ */
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ ret = slpc_set_param(slpc,
+ SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
+ freq);
+ if (ret)
+ drm_err(&i915->drm, "Unable to force min freq to %u: %d",
+ freq, ret);
+ }
+
+ return ret;
+}
+
+static void slpc_boost_work(struct work_struct *work)
+{
+ struct intel_guc_slpc *slpc = container_of(work, typeof(*slpc), boost_work);
+
+ /*
+ * Raise min freq to boost. It's possible that
+ * this is greater than current max. But it will
+ * certainly be limited by RP0. An error setting
+ * the min param is not fatal.
+ */
+ mutex_lock(&slpc->lock);
+ if (atomic_read(&slpc->num_waiters)) {
+ slpc_force_min_freq(slpc, slpc->boost_freq);
+ slpc->num_boosts++;
+ }
+ mutex_unlock(&slpc->lock);
+}
+
+int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
+{
+ struct intel_guc *guc = slpc_to_guc(slpc);
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
+ int err;
+
+ GEM_BUG_ON(slpc->vma);
+
+ err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
+ if (unlikely(err)) {
+ drm_err(&i915->drm,
+ "Failed to allocate SLPC struct (err=%pe)\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ slpc->max_freq_softlimit = 0;
+ slpc->min_freq_softlimit = 0;
+
+ slpc->boost_freq = 0;
+ atomic_set(&slpc->num_waiters, 0);
+ slpc->num_boosts = 0;
+
+ mutex_init(&slpc->lock);
+ INIT_WORK(&slpc->boost_work, slpc_boost_work);
+
+ return err;
+}
+
static const char *slpc_global_state_to_string(enum slpc_global_state state)
{
switch (state) {
@@ -393,7 +450,11 @@ int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
val > slpc->max_freq_softlimit)
return -EINVAL;
+ /* Need a lock now since waitboost can be modifying min as well */
+ mutex_lock(&slpc->lock);
+
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
ret = slpc_set_param(slpc,
SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
val);
@@ -406,6 +467,8 @@ int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
if (!ret)
slpc->min_freq_softlimit = val;
+ mutex_unlock(&slpc->lock);
+
return ret;
}
@@ -522,6 +585,9 @@ static void slpc_get_rp_values(struct intel_guc_slpc *slpc)
GT_FREQUENCY_MULTIPLIER;
slpc->min_freq = REG_FIELD_GET(RPN_CAP_MASK, rp_state_cap) *
GT_FREQUENCY_MULTIPLIER;
+
+ if (!slpc->boost_freq)
+ slpc->boost_freq = slpc->rp0_freq;
}
/*
@@ -557,7 +623,7 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
if (unlikely(ret < 0))
return ret;
- intel_guc_pm_intrmsk_enable(&i915->gt);
+ intel_guc_pm_intrmsk_enable(to_gt(i915));
slpc_get_rp_values(slpc);
@@ -588,6 +654,47 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
return 0;
}
+int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val)
+{
+ int ret = 0;
+
+ if (val < slpc->min_freq || val > slpc->rp0_freq)
+ return -EINVAL;
+
+ mutex_lock(&slpc->lock);
+
+ if (slpc->boost_freq != val) {
+ /* Apply only if there are active waiters */
+ if (atomic_read(&slpc->num_waiters)) {
+ ret = slpc_force_min_freq(slpc, val);
+ if (ret) {
+ ret = -EIO;
+ goto done;
+ }
+ }
+
+ slpc->boost_freq = val;
+ }
+
+done:
+ mutex_unlock(&slpc->lock);
+ return ret;
+}
+
+void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc)
+{
+ /*
+ * Return min back to the softlimit.
+ * This is called during request retire,
+ * so we don't need to fail that if the
+ * set_param fails.
+ */
+ mutex_lock(&slpc->lock);
+ if (atomic_dec_and_test(&slpc->num_waiters))
+ slpc_force_min_freq(slpc, slpc->min_freq_softlimit);
+ mutex_unlock(&slpc->lock);
+}
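Editorial sketch (not part of the patch) of how a waiter path is expected to drive the boost interface above. The real call sites live in the RPS and request-retire code, which are outside this hunk, so the helper names here are hypothetical; only the num_waiters/boost_work fields added in this series are assumed.

	/* On a request wait: first waiter queues the worker, later waiters
	 * only take a reference. slpc_boost_work() then raises min freq. */
	static void example_boost_on_wait(struct intel_guc_slpc *slpc)
	{
		if (!atomic_fetch_inc(&slpc->num_waiters))
			schedule_work(&slpc->boost_work);
	}

	/* On request retire: drop the reference; the last waiter restores
	 * the min frequency back to the softlimit. */
	static void example_unboost_on_retire(struct intel_guc_slpc *slpc)
	{
		intel_guc_slpc_dec_waiters(slpc);
	}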
+
int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p)
{
struct drm_i915_private *i915 = slpc_to_i915(slpc);
@@ -611,6 +718,8 @@ int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p
slpc_decode_max_freq(slpc));
drm_printf(p, "\tMin freq: %u MHz\n",
slpc_decode_min_freq(slpc));
+ drm_printf(p, "\twaitboosts: %u\n",
+ slpc->num_boosts);
}
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
index e45054d5b9b4..0caa8fee3c04 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
@@ -34,9 +34,12 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc);
void intel_guc_slpc_fini(struct intel_guc_slpc *slpc);
int intel_guc_slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 val);
int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val);
+int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val);
int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val);
int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val);
int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p);
void intel_guc_pm_intrmsk_enable(struct intel_gt *gt);
+void intel_guc_slpc_boost(struct intel_guc_slpc *slpc);
+void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
index 41d13527666f..bf5b9a563c09 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
@@ -6,6 +6,9 @@
#ifndef _INTEL_GUC_SLPC_TYPES_H_
#define _INTEL_GUC_SLPC_TYPES_H_
+#include <linux/atomic.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
#include <linux/types.h>
#define SLPC_RESET_TIMEOUT_MS 5
@@ -20,10 +23,20 @@ struct intel_guc_slpc {
u32 min_freq;
u32 rp0_freq;
u32 rp1_freq;
+ u32 boost_freq;
/* frequency softlimits */
u32 min_freq_softlimit;
u32 max_freq_softlimit;
+
+ /*
+ * Protects set/reset of boost freq and the value of num_waiters.
+ */
+ struct mutex lock;
+
+ struct work_struct boost_work;
+ atomic_t num_waiters;
+ u32 num_boosts;
};
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index c48557dfa04c..e7517206af82 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -13,6 +13,7 @@
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
@@ -21,6 +22,7 @@
#include "gt/intel_mocs.h"
#include "gt/intel_ring.h"
+#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"
@@ -143,7 +145,8 @@ guc_create_parallel(struct intel_engine_cs **engines,
* use should be low and 1/16 should be sufficient. Minimum of 32 guc_ids for
* multi-lrc.
*/
-#define NUMBER_MULTI_LRC_GUC_ID (GUC_MAX_LRC_DESCRIPTORS / 16)
+#define NUMBER_MULTI_LRC_GUC_ID(guc) \
+ ((guc)->submission_state.num_guc_ids / 16)
/*
* Below is a set of functions which control the GuC scheduling state which
@@ -1038,8 +1041,6 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
spin_unlock(&ce->guc_state.lock);
- GEM_BUG_ON(!do_put && !destroyed);
-
if (pending_enable || destroyed || deregister) {
decr_outstanding_submission_g2h(guc);
if (deregister)
@@ -1077,6 +1078,271 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
xa_unlock_irqrestore(&guc->context_lookup, flags);
}
+/*
+ * GuC stores busyness stats for each engine at context in/out boundaries. A
+ * context 'in' logs execution start time, 'out' adds in -> out delta to total.
+ * i915/kmd accesses 'start', 'total' and 'context id' from memory shared with
+ * GuC.
+ *
+ * __i915_pmu_event_read samples engine busyness. When sampling, if context id
+ * is valid (!= ~0) and start is non-zero, the engine is considered to be
+ * active. For an active engine total busyness = total + (now - start), where
+ * 'now' is the time at which the busyness is sampled. For inactive engine,
+ * total busyness = total.
+ *
+ * All times are captured from GUCPMTIMESTAMP reg and are in gt clock domain.
+ *
+ * The start and total values provided by GuC are 32 bits and wrap around in a
+ * few minutes. Since perf pmu provides busyness as 64 bit monotonically
+ * increasing ns values, there is a need for this implementation to account for
+ * overflows and extend the GuC provided values to 64 bits before returning
+ * busyness to the user. In order to do that, a worker runs periodically at
+ * frequency = 1/8th the time it takes for the timestamp to wrap (i.e. once in
+ * 27 seconds for a gt clock frequency of 19.2 MHz).
+ */
+
+#define WRAP_TIME_CLKS U32_MAX
+#define POLL_TIME_CLKS (WRAP_TIME_CLKS >> 3)
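Editorial note, for scale (figures taken from the comment above): the 32-bit GUCPMTIMESTAMP wraps after U32_MAX GT clocks, which at a 19.2 MHz GT clock is about 2^32 / 19.2e6 ≈ 224 s. POLL_TIME_CLKS = U32_MAX >> 3 ≈ 5.37e8 clocks therefore corresponds to ≈ 28 s (the ~27 seconds quoted above), so the worker always re-samples well within one wrap period.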
+
+static void
+__extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
+{
+ u32 gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
+ u32 gt_stamp_last = lower_32_bits(guc->timestamp.gt_stamp);
+
+ if (new_start == lower_32_bits(*prev_start))
+ return;
+
+ if (new_start < gt_stamp_last &&
+ (new_start - gt_stamp_last) <= POLL_TIME_CLKS)
+ gt_stamp_hi++;
+
+ if (new_start > gt_stamp_last &&
+ (gt_stamp_last - new_start) <= POLL_TIME_CLKS && gt_stamp_hi)
+ gt_stamp_hi--;
+
+ *prev_start = ((u64)gt_stamp_hi << 32) | new_start;
+}
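A worked example of the rollover handling above (editorial note): suppose guc->timestamp.gt_stamp is 0x1_FFFFF000 (gt_stamp_hi = 1, gt_stamp_last = 0xFFFFF000) and GuC reports new_start = 0x00000800, i.e. the context switched in just after the 32-bit counter wrapped. Then new_start < gt_stamp_last and, in u32 arithmetic, new_start - gt_stamp_last = 0x1800 <= POLL_TIME_CLKS, so gt_stamp_hi is bumped to 2 and *prev_start becomes 0x2_00000800, placing the start time in the correct 32-bit epoch. The mirror case (gt_stamp already refreshed past the wrap while new_start was recorded just before it) decrements gt_stamp_hi instead.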
+
+static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
+{
+ struct guc_engine_usage_record *rec = intel_guc_engine_usage(engine);
+ struct intel_engine_guc_stats *stats = &engine->stats.guc;
+ struct intel_guc *guc = &engine->gt->uc.guc;
+ u32 last_switch = rec->last_switch_in_stamp;
+ u32 ctx_id = rec->current_context_index;
+ u32 total = rec->total_runtime;
+
+ lockdep_assert_held(&guc->timestamp.lock);
+
+ stats->running = ctx_id != ~0U && last_switch;
+ if (stats->running)
+ __extend_last_switch(guc, &stats->start_gt_clk, last_switch);
+
+ /*
+ * Instead of adjusting the total for overflow, just add the
+ * difference from the previous sample to stats->total_gt_clks.
+ */
+ if (total && total != ~0U) {
+ stats->total_gt_clks += (u32)(total - stats->prev_total);
+ stats->prev_total = total;
+ }
+}
+
+static void guc_update_pm_timestamp(struct intel_guc *guc,
+ struct intel_engine_cs *engine,
+ ktime_t *now)
+{
+ u32 gt_stamp_now, gt_stamp_hi;
+
+ lockdep_assert_held(&guc->timestamp.lock);
+
+ gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
+ gt_stamp_now = intel_uncore_read(engine->uncore,
+ RING_TIMESTAMP(engine->mmio_base));
+ *now = ktime_get();
+
+ if (gt_stamp_now < lower_32_bits(guc->timestamp.gt_stamp))
+ gt_stamp_hi++;
+
+ guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_now;
+}
+
+/*
+ * Unlike the execlist mode of submission, total and active times are in terms of
+ * gt clocks. The *now parameter is retained to return the cpu time at which the
+ * busyness was sampled.
+ */
+static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
+{
+ struct intel_engine_guc_stats stats_saved, *stats = &engine->stats.guc;
+ struct i915_gpu_error *gpu_error = &engine->i915->gpu_error;
+ struct intel_gt *gt = engine->gt;
+ struct intel_guc *guc = &gt->uc.guc;
+ u64 total, gt_stamp_saved;
+ unsigned long flags;
+ u32 reset_count;
+ bool in_reset;
+
+ spin_lock_irqsave(&guc->timestamp.lock, flags);
+
+ /*
+ * If a reset happened, we risk reading partially updated engine
+ * busyness from GuC, so we just use the driver stored copy of busyness.
+ * Synchronize with gt reset using reset_count and the
+ * I915_RESET_BACKOFF flag. Note that reset flow updates the reset_count
+ * after the I915_RESET_BACKOFF flag, so ensure that the reset_count is
+ * usable by checking the flag afterwards.
+ */
+ reset_count = i915_reset_count(gpu_error);
+ in_reset = test_bit(I915_RESET_BACKOFF, &gt->reset.flags);
+
+ *now = ktime_get();
+
+ /*
+ * The active busyness depends on start_gt_clk and gt_stamp.
+ * gt_stamp is updated by i915 only when gt is awake and the
+ * start_gt_clk is derived from GuC state. To get a consistent
+ * view of activity, we query the GuC state only if gt is awake.
+ */
+ if (!in_reset && intel_gt_pm_get_if_awake(gt)) {
+ stats_saved = *stats;
+ gt_stamp_saved = guc->timestamp.gt_stamp;
+ guc_update_engine_gt_clks(engine);
+ guc_update_pm_timestamp(guc, engine, now);
+ intel_gt_pm_put_async(gt);
+ if (i915_reset_count(gpu_error) != reset_count) {
+ *stats = stats_saved;
+ guc->timestamp.gt_stamp = gt_stamp_saved;
+ }
+ }
+
+ total = intel_gt_clock_interval_to_ns(gt, stats->total_gt_clks);
+ if (stats->running) {
+ u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk;
+
+ total += intel_gt_clock_interval_to_ns(gt, clk);
+ }
+
+ spin_unlock_irqrestore(&guc->timestamp.lock, flags);
+
+ return ns_to_ktime(total);
+}
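Editorial note, to make the accumulation above concrete: with a 19.2 MHz GT clock, intel_gt_clock_interval_to_ns() maps 19200 GT clocks to 1 ms. So if stats->total_gt_clks holds 192000 clocks (10 ms of completed context time) and the engine is currently running with gt_stamp - start_gt_clk = 96000 clocks, the reported busyness is 10 ms + 5 ms = 15 ms, returned as a ktime_t.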
+
+static void __reset_guc_busyness_stats(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned long flags;
+ ktime_t unused;
+
+ cancel_delayed_work_sync(&guc->timestamp.work);
+
+ spin_lock_irqsave(&guc->timestamp.lock, flags);
+
+ for_each_engine(engine, gt, id) {
+ guc_update_pm_timestamp(guc, engine, &unused);
+ guc_update_engine_gt_clks(engine);
+ engine->stats.guc.prev_total = 0;
+ }
+
+ spin_unlock_irqrestore(&guc->timestamp.lock, flags);
+}
+
+static void __update_guc_busyness_stats(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned long flags;
+ ktime_t unused;
+
+ spin_lock_irqsave(&guc->timestamp.lock, flags);
+ for_each_engine(engine, gt, id) {
+ guc_update_pm_timestamp(guc, engine, &unused);
+ guc_update_engine_gt_clks(engine);
+ }
+ spin_unlock_irqrestore(&guc->timestamp.lock, flags);
+}
+
+static void guc_timestamp_ping(struct work_struct *wrk)
+{
+ struct intel_guc *guc = container_of(wrk, typeof(*guc),
+ timestamp.work.work);
+ struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
+ struct intel_gt *gt = guc_to_gt(guc);
+ intel_wakeref_t wakeref;
+ int srcu, ret;
+
+ /*
+ * Synchronize with gt reset to make sure the worker does not
+ * corrupt the engine/guc stats.
+ */
+ ret = intel_gt_reset_trylock(gt, &srcu);
+ if (ret)
+ return;
+
+ with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+ __update_guc_busyness_stats(guc);
+
+ intel_gt_reset_unlock(gt, srcu);
+
+ mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
+ guc->timestamp.ping_delay);
+}
+
+static int guc_action_enable_usage_stats(struct intel_guc *guc)
+{
+ u32 offset = intel_guc_engine_usage_offset(guc);
+ u32 action[] = {
+ INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF,
+ offset,
+ 0,
+ };
+
+ return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
+static void guc_init_engine_stats(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ intel_wakeref_t wakeref;
+
+ mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
+ guc->timestamp.ping_delay);
+
+ with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref) {
+ int ret = guc_action_enable_usage_stats(guc);
+
+ if (ret)
+ drm_err(&gt->i915->drm,
+ "Failed to enable usage stats: %d!\n", ret);
+ }
+}
+
+void intel_guc_busyness_park(struct intel_gt *gt)
+{
+ struct intel_guc *guc = &gt->uc.guc;
+
+ if (!guc_submission_initialized(guc))
+ return;
+
+ cancel_delayed_work(&guc->timestamp.work);
+ __update_guc_busyness_stats(guc);
+}
+
+void intel_guc_busyness_unpark(struct intel_gt *gt)
+{
+ struct intel_guc *guc = &gt->uc.guc;
+
+ if (!guc_submission_initialized(guc))
+ return;
+
+ mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
+ guc->timestamp.ping_delay);
+}
+
static inline bool
submission_disabled(struct intel_guc *guc)
{
@@ -1138,6 +1404,7 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc)
intel_gt_park_heartbeats(guc_to_gt(guc));
disable_submission(guc);
guc->interrupts.disable(guc);
+ __reset_guc_busyness_stats(guc);
/* Flush IRQ handler */
spin_lock_irq(&guc_to_gt(guc)->irq_lock);
@@ -1484,6 +1751,7 @@ static void destroyed_worker_func(struct work_struct *w);
*/
int intel_guc_submission_init(struct intel_guc *guc)
{
+ struct intel_gt *gt = guc_to_gt(guc);
int ret;
if (guc->lrc_desc_pool)
@@ -1508,10 +1776,14 @@ int intel_guc_submission_init(struct intel_guc *guc)
destroyed_worker_func);
guc->submission_state.guc_ids_bitmap =
- bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID, GFP_KERNEL);
+ bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
if (!guc->submission_state.guc_ids_bitmap)
return -ENOMEM;
+ spin_lock_init(&guc->timestamp.lock);
+ INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
+ guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
+
return 0;
}
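Editorial note, a worked example of the ping_delay formula above: with gt->clock_frequency = 19.2 MHz, POLL_TIME_CLKS / clock_frequency is 536870911 / 19200000 = 27 (integer division), so ping_delay = (27 + 1) * HZ, i.e. 28 seconds expressed in jiffies, and the worker wakes roughly every 28 s.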
@@ -1598,13 +1870,13 @@ static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
if (intel_context_is_parent(ce))
ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
- NUMBER_MULTI_LRC_GUC_ID,
+ NUMBER_MULTI_LRC_GUC_ID(guc),
order_base_2(ce->parallel.number_children
+ 1));
else
ret = ida_simple_get(&guc->submission_state.guc_ids,
- NUMBER_MULTI_LRC_GUC_ID,
- GUC_MAX_LRC_DESCRIPTORS,
+ NUMBER_MULTI_LRC_GUC_ID(guc),
+ guc->submission_state.num_guc_ids,
GFP_KERNEL | __GFP_RETRY_MAYFAIL |
__GFP_NOWARN);
if (unlikely(ret < 0))
@@ -1662,14 +1934,18 @@ static int steal_guc_id(struct intel_guc *guc, struct intel_context *ce)
GEM_BUG_ON(intel_context_is_parent(cn));
list_del_init(&cn->guc_id.link);
- ce->guc_id = cn->guc_id;
+ ce->guc_id.id = cn->guc_id.id;
- spin_lock(&ce->guc_state.lock);
+ spin_lock(&cn->guc_state.lock);
clr_context_registered(cn);
- spin_unlock(&ce->guc_state.lock);
+ spin_unlock(&cn->guc_state.lock);
set_context_guc_id_invalid(cn);
+#ifdef CONFIG_DRM_I915_SELFTEST
+ guc->number_guc_id_stolen++;
+#endif
+
return 0;
} else {
return -EAGAIN;
@@ -2373,7 +2649,6 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce)
unsigned long flags;
bool disabled;
- lockdep_assert_held(&guc->submission_state.lock);
GEM_BUG_ON(!intel_gt_pm_is_awake(gt));
GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id.id));
GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id));
@@ -2389,7 +2664,7 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce)
}
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
if (unlikely(disabled)) {
- __release_guc_id(guc, ce);
+ release_guc_id(guc, ce);
__guc_context_destroy(ce);
return;
}
@@ -2423,36 +2698,48 @@ static void __guc_context_destroy(struct intel_context *ce)
static void guc_flush_destroyed_contexts(struct intel_guc *guc)
{
- struct intel_context *ce, *cn;
+ struct intel_context *ce;
unsigned long flags;
GEM_BUG_ON(!submission_disabled(guc) &&
guc_submission_initialized(guc));
- spin_lock_irqsave(&guc->submission_state.lock, flags);
- list_for_each_entry_safe(ce, cn,
- &guc->submission_state.destroyed_contexts,
- destroyed_link) {
- list_del_init(&ce->destroyed_link);
- __release_guc_id(guc, ce);
+ while (!list_empty(&guc->submission_state.destroyed_contexts)) {
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
+ ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts,
+ struct intel_context,
+ destroyed_link);
+ if (ce)
+ list_del_init(&ce->destroyed_link);
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+
+ if (!ce)
+ break;
+
+ release_guc_id(guc, ce);
__guc_context_destroy(ce);
}
- spin_unlock_irqrestore(&guc->submission_state.lock, flags);
}
static void deregister_destroyed_contexts(struct intel_guc *guc)
{
- struct intel_context *ce, *cn;
+ struct intel_context *ce;
unsigned long flags;
- spin_lock_irqsave(&guc->submission_state.lock, flags);
- list_for_each_entry_safe(ce, cn,
- &guc->submission_state.destroyed_contexts,
- destroyed_link) {
- list_del_init(&ce->destroyed_link);
+ while (!list_empty(&guc->submission_state.destroyed_contexts)) {
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
+ ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts,
+ struct intel_context,
+ destroyed_link);
+ if (ce)
+ list_del_init(&ce->destroyed_link);
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+
+ if (!ce)
+ break;
+
guc_lrc_desc_unpin(ce);
}
- spin_unlock_irqrestore(&guc->submission_state.lock, flags);
}
static void destroyed_worker_func(struct work_struct *w)
@@ -3369,7 +3656,9 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
engine->emit_flush = gen12_emit_flush_xcs;
}
engine->set_default_submission = guc_set_default_submission;
+ engine->busyness = guc_engine_busyness;
+ engine->flags |= I915_ENGINE_SUPPORTS_STATS;
engine->flags |= I915_ENGINE_HAS_PREEMPTION;
engine->flags |= I915_ENGINE_HAS_TIMESLICES;
@@ -3468,6 +3757,7 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
void intel_guc_submission_enable(struct intel_guc *guc)
{
guc_init_lrc_mapping(guc);
+ guc_init_engine_stats(guc);
}
void intel_guc_submission_disable(struct intel_guc *guc)
@@ -3494,6 +3784,7 @@ static bool __guc_submission_selected(struct intel_guc *guc)
void intel_guc_submission_init_early(struct intel_guc *guc)
{
+ guc->submission_state.num_guc_ids = GUC_MAX_LRC_DESCRIPTORS;
guc->submission_supported = __guc_submission_supported(guc);
guc->submission_selected = __guc_submission_selected(guc);
}
@@ -3695,6 +3986,7 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc,
const u32 *msg, u32 len)
{
struct intel_context *ce;
+ unsigned long flags;
int desc_idx;
if (unlikely(len != 1)) {
@@ -3703,11 +3995,24 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc,
}
desc_idx = msg[0];
+
+ /*
+ * The context lookup uses the xarray, but lookups only require the RCU
+ * lock, not the full spinlock. So take the lock explicitly here and hold
+ * it until a reference has been taken on the context, ensuring it can't
+ * be destroyed asynchronously before the reset is done.
+ */
+ xa_lock_irqsave(&guc->context_lookup, flags);
ce = g2h_context_lookup(guc, desc_idx);
+ if (ce)
+ intel_context_get(ce);
+ xa_unlock_irqrestore(&guc->context_lookup, flags);
+
if (unlikely(!ce))
return -EPROTO;
guc_handle_context_reset(guc, ce);
+ intel_context_put(ce);
return 0;
}
@@ -3728,11 +4033,12 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
const u32 *msg, u32 len)
{
struct intel_engine_cs *engine;
+ struct intel_gt *gt = guc_to_gt(guc);
u8 guc_class, instance;
u32 reason;
if (unlikely(len != 3)) {
- drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
+ drm_err(&gt->i915->drm, "Invalid length %u", len);
return -EPROTO;
}
@@ -3742,12 +4048,19 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
engine = guc_lookup_engine(guc, guc_class, instance);
if (unlikely(!engine)) {
- drm_err(&guc_to_gt(guc)->i915->drm,
+ drm_err(&gt->i915->drm,
"Invalid engine %d:%d", guc_class, instance);
return -EPROTO;
}
- intel_gt_handle_error(guc_to_gt(guc), engine->mask,
+ /*
+ * This is an unexpected failure of a hardware feature. So, log a real
+ * error message, not just the informational one that comes with the reset.
+ */
+ drm_err(&gt->i915->drm, "GuC engine reset request failed on %d:%d (%s) because 0x%08X",
+ guc_class, instance, engine->name, reason);
+
+ intel_gt_handle_error(gt, engine->mask,
I915_ERROR_CAPTURE,
"GuC failed to reset %s (reason=0x%08x)\n",
engine->name, reason);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index c7ef44fa0c36..5a95a9f0a8e3 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -28,6 +28,8 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc,
void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
struct i915_request *hung_rq,
struct drm_printer *m);
+void intel_guc_busyness_park(struct intel_gt *gt);
+void intel_guc_busyness_unpark(struct intel_gt *gt);
bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index ff4b6869b80b..d10b227ac4aa 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -54,65 +54,6 @@ void intel_huc_init_early(struct intel_huc *huc)
}
}
-static int intel_huc_rsa_data_create(struct intel_huc *huc)
-{
- struct intel_gt *gt = huc_to_gt(huc);
- struct intel_guc *guc = &gt->uc.guc;
- struct i915_vma *vma;
- size_t copied;
- void *vaddr;
- int err;
-
- err = i915_inject_probe_error(gt->i915, -ENXIO);
- if (err)
- return err;
-
- /*
- * HuC firmware will sit above GUC_GGTT_TOP and will not map
- * through GTT. Unfortunately, this means GuC cannot perform
- * the HuC auth. as the rsa offset now falls within the GuC
- * inaccessible range. We resort to perma-pinning an additional
- * vma within the accessible range that only contains the rsa
- * signature. The GuC can use this extra pinning to perform
- * the authentication since its GGTT offset will be GuC
- * accessible.
- */
- GEM_BUG_ON(huc->fw.rsa_size > PAGE_SIZE);
- vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
- i915_coherent_map_type(gt->i915,
- vma->obj, true));
- if (IS_ERR(vaddr)) {
- i915_vma_unpin_and_release(&vma, 0);
- err = PTR_ERR(vaddr);
- goto unpin_out;
- }
-
- copied = intel_uc_fw_copy_rsa(&huc->fw, vaddr, vma->size);
- i915_gem_object_unpin_map(vma->obj);
-
- if (copied < huc->fw.rsa_size) {
- err = -ENOMEM;
- goto unpin_out;
- }
-
- huc->rsa_data = vma;
-
- return 0;
-
-unpin_out:
- i915_vma_unpin_and_release(&vma, 0);
- return err;
-}
-
-static void intel_huc_rsa_data_destroy(struct intel_huc *huc)
-{
- i915_vma_unpin_and_release(&huc->rsa_data, 0);
-}
-
int intel_huc_init(struct intel_huc *huc)
{
struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
@@ -122,21 +63,10 @@ int intel_huc_init(struct intel_huc *huc)
if (err)
goto out;
- /*
- * HuC firmware image is outside GuC accessible range.
- * Copy the RSA signature out of the image into
- * a perma-pinned region set aside for it
- */
- err = intel_huc_rsa_data_create(huc);
- if (err)
- goto out_fini;
-
intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOADABLE);
return 0;
-out_fini:
- intel_uc_fw_fini(&huc->fw);
out:
i915_probe_error(i915, "failed with %d\n", err);
return err;
@@ -147,7 +77,6 @@ void intel_huc_fini(struct intel_huc *huc)
if (!intel_uc_fw_is_loadable(&huc->fw))
return;
- intel_huc_rsa_data_destroy(huc);
intel_uc_fw_fini(&huc->fw);
}
@@ -177,7 +106,7 @@ int intel_huc_auth(struct intel_huc *huc)
goto fail;
ret = intel_guc_auth_huc(guc,
- intel_guc_ggtt_offset(guc, huc->rsa_data));
+ intel_guc_ggtt_offset(guc, huc->fw.rsa_data));
if (ret) {
DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
goto fail;
@@ -199,7 +128,7 @@ int intel_huc_auth(struct intel_huc *huc)
fail:
i915_probe_error(gt->i915, "HuC: Authentication failed %d\n", ret);
- intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_FAIL);
+ intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
index daee43b661d4..ae8c8a6c8cc8 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -15,8 +15,6 @@ struct intel_huc {
struct intel_uc_fw fw;
/* HuC-specific additions */
- struct i915_vma *rsa_data;
-
struct {
i915_reg_t reg;
u32 mask;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 8f17005ce85f..09ed29df67bc 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -8,6 +8,7 @@
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
+#include "gt/intel_rps.h"
#include "intel_uc.h"
#include "i915_drv.h"
@@ -462,6 +463,8 @@ static int __uc_init_hw(struct intel_uc *uc)
else
attempts = 1;
+ intel_rps_raise_unslice(&uc_to_gt(uc)->rps);
+
while (attempts--) {
/*
* Always reset the GuC just before (re)loading, so
@@ -499,6 +502,9 @@ static int __uc_init_hw(struct intel_uc *uc)
ret = intel_guc_slpc_enable(&guc->slpc);
if (ret)
goto err_submission;
+ } else {
+ /* Restore GT back to RPn for non-SLPC path */
+ intel_rps_lower_unslice(&uc_to_gt(uc)->rps);
}
drm_info(&i915->drm, "%s firmware %s version %u.%u %s:%s\n",
@@ -529,6 +535,9 @@ err_submission:
err_log_capture:
__uc_capture_load_err_log(uc);
err_out:
+ /* Return GT back to RPn */
+ intel_rps_lower_unslice(&uc_to_gt(uc)->rps);
+
__uc_sanitize(uc);
if (!ret) {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 3aa87be4f2e4..a5af05bde6f2 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -48,22 +48,39 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
* Note that RKL and ADL-S have the same GuC/HuC device ID's and use the same
* firmware as TGL.
*/
-#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
- fw_def(ALDERLAKE_P, 0, guc_def(adlp, 62, 0, 3), huc_def(tgl, 7, 9, 3)) \
- fw_def(ALDERLAKE_S, 0, guc_def(tgl, 62, 0, 0), huc_def(tgl, 7, 9, 3)) \
- fw_def(DG1, 0, guc_def(dg1, 62, 0, 0), huc_def(dg1, 7, 9, 3)) \
- fw_def(ROCKETLAKE, 0, guc_def(tgl, 62, 0, 0), huc_def(tgl, 7, 9, 3)) \
- fw_def(TIGERLAKE, 0, guc_def(tgl, 62, 0, 0), huc_def(tgl, 7, 9, 3)) \
- fw_def(JASPERLAKE, 0, guc_def(ehl, 62, 0, 0), huc_def(ehl, 9, 0, 0)) \
- fw_def(ELKHARTLAKE, 0, guc_def(ehl, 62, 0, 0), huc_def(ehl, 9, 0, 0)) \
- fw_def(ICELAKE, 0, guc_def(icl, 62, 0, 0), huc_def(icl, 9, 0, 0)) \
- fw_def(COMETLAKE, 5, guc_def(cml, 62, 0, 0), huc_def(cml, 4, 0, 0)) \
- fw_def(COMETLAKE, 0, guc_def(kbl, 62, 0, 0), huc_def(kbl, 4, 0, 0)) \
- fw_def(COFFEELAKE, 0, guc_def(kbl, 62, 0, 0), huc_def(kbl, 4, 0, 0)) \
- fw_def(GEMINILAKE, 0, guc_def(glk, 62, 0, 0), huc_def(glk, 4, 0, 0)) \
- fw_def(KABYLAKE, 0, guc_def(kbl, 62, 0, 0), huc_def(kbl, 4, 0, 0)) \
- fw_def(BROXTON, 0, guc_def(bxt, 62, 0, 0), huc_def(bxt, 2, 0, 0)) \
- fw_def(SKYLAKE, 0, guc_def(skl, 62, 0, 0), huc_def(skl, 2, 0, 0))
+#define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_def) \
+ fw_def(ALDERLAKE_P, 0, guc_def(adlp, 62, 0, 3)) \
+ fw_def(ALDERLAKE_S, 0, guc_def(tgl, 62, 0, 0)) \
+ fw_def(DG1, 0, guc_def(dg1, 62, 0, 0)) \
+ fw_def(ROCKETLAKE, 0, guc_def(tgl, 62, 0, 0)) \
+ fw_def(TIGERLAKE, 0, guc_def(tgl, 62, 0, 0)) \
+ fw_def(JASPERLAKE, 0, guc_def(ehl, 62, 0, 0)) \
+ fw_def(ELKHARTLAKE, 0, guc_def(ehl, 62, 0, 0)) \
+ fw_def(ICELAKE, 0, guc_def(icl, 62, 0, 0)) \
+ fw_def(COMETLAKE, 5, guc_def(cml, 62, 0, 0)) \
+ fw_def(COMETLAKE, 0, guc_def(kbl, 62, 0, 0)) \
+ fw_def(COFFEELAKE, 0, guc_def(kbl, 62, 0, 0)) \
+ fw_def(GEMINILAKE, 0, guc_def(glk, 62, 0, 0)) \
+ fw_def(KABYLAKE, 0, guc_def(kbl, 62, 0, 0)) \
+ fw_def(BROXTON, 0, guc_def(bxt, 62, 0, 0)) \
+ fw_def(SKYLAKE, 0, guc_def(skl, 62, 0, 0))
+
+#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \
+ fw_def(ALDERLAKE_P, 0, huc_def(tgl, 7, 9, 3)) \
+ fw_def(ALDERLAKE_S, 0, huc_def(tgl, 7, 9, 3)) \
+ fw_def(DG1, 0, huc_def(dg1, 7, 9, 3)) \
+ fw_def(ROCKETLAKE, 0, huc_def(tgl, 7, 9, 3)) \
+ fw_def(TIGERLAKE, 0, huc_def(tgl, 7, 9, 3)) \
+ fw_def(JASPERLAKE, 0, huc_def(ehl, 9, 0, 0)) \
+ fw_def(ELKHARTLAKE, 0, huc_def(ehl, 9, 0, 0)) \
+ fw_def(ICELAKE, 0, huc_def(icl, 9, 0, 0)) \
+ fw_def(COMETLAKE, 5, huc_def(cml, 4, 0, 0)) \
+ fw_def(COMETLAKE, 0, huc_def(kbl, 4, 0, 0)) \
+ fw_def(COFFEELAKE, 0, huc_def(kbl, 4, 0, 0)) \
+ fw_def(GEMINILAKE, 0, huc_def(glk, 4, 0, 0)) \
+ fw_def(KABYLAKE, 0, huc_def(kbl, 4, 0, 0)) \
+ fw_def(BROXTON, 0, huc_def(bxt, 2, 0, 0)) \
+ fw_def(SKYLAKE, 0, huc_def(skl, 2, 0, 0))
#define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \
"i915/" \
@@ -79,11 +96,11 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
__MAKE_UC_FW_PATH(prefix_, "_huc_", major_, minor_, bld_num_)
/* All blobs need to be declared via MODULE_FIRMWARE() */
-#define INTEL_UC_MODULE_FW(platform_, revid_, guc_, huc_) \
- MODULE_FIRMWARE(guc_); \
- MODULE_FIRMWARE(huc_);
+#define INTEL_UC_MODULE_FW(platform_, revid_, uc_) \
+ MODULE_FIRMWARE(uc_);
-INTEL_UC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH, MAKE_HUC_FW_PATH)
+INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
+INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH)
/* The below structs and macros are used to iterate across the list of blobs */
struct __packed uc_fw_blob {
@@ -106,31 +123,47 @@ struct __packed uc_fw_blob {
struct __packed uc_fw_platform_requirement {
enum intel_platform p;
u8 rev; /* first platform rev using this FW */
- const struct uc_fw_blob blobs[INTEL_UC_FW_NUM_TYPES];
+ const struct uc_fw_blob blob;
};
-#define MAKE_FW_LIST(platform_, revid_, guc_, huc_) \
+#define MAKE_FW_LIST(platform_, revid_, uc_) \
{ \
.p = INTEL_##platform_, \
.rev = revid_, \
- .blobs[INTEL_UC_FW_TYPE_GUC] = guc_, \
- .blobs[INTEL_UC_FW_TYPE_HUC] = huc_, \
+ .blob = uc_, \
},
+struct fw_blobs_by_type {
+ const struct uc_fw_platform_requirement *blobs;
+ u32 count;
+};
+
static void
__uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
- static const struct uc_fw_platform_requirement fw_blobs[] = {
- INTEL_UC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB, HUC_FW_BLOB)
+ static const struct uc_fw_platform_requirement blobs_guc[] = {
+ INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB)
+ };
+ static const struct uc_fw_platform_requirement blobs_huc[] = {
+ INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB)
};
+ static const struct fw_blobs_by_type blobs_all[INTEL_UC_FW_NUM_TYPES] = {
+ [INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
+ [INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) },
+ };
+ static const struct uc_fw_platform_requirement *fw_blobs;
enum intel_platform p = INTEL_INFO(i915)->platform;
+ u32 fw_count;
u8 rev = INTEL_REVID(i915);
int i;
- for (i = 0; i < ARRAY_SIZE(fw_blobs) && p <= fw_blobs[i].p; i++) {
+ GEM_BUG_ON(uc_fw->type >= ARRAY_SIZE(blobs_all));
+ fw_blobs = blobs_all[uc_fw->type].blobs;
+ fw_count = blobs_all[uc_fw->type].count;
+
+ for (i = 0; i < fw_count && p <= fw_blobs[i].p; i++) {
if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) {
- const struct uc_fw_blob *blob =
- &fw_blobs[i].blobs[uc_fw->type];
+ const struct uc_fw_blob *blob = &fw_blobs[i].blob;
uc_fw->path = blob->path;
uc_fw->major_ver_wanted = blob->major;
uc_fw->minor_ver_wanted = blob->minor;
@@ -140,7 +173,7 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
/* make sure the list is ordered as expected */
if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) {
- for (i = 1; i < ARRAY_SIZE(fw_blobs); i++) {
+ for (i = 1; i < fw_count; i++) {
if (fw_blobs[i].p < fw_blobs[i - 1].p)
continue;
@@ -322,13 +355,6 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
/* now RSA */
- if (unlikely(css->key_size_dw != UOS_RSA_SCRATCH_COUNT)) {
- drm_warn(&i915->drm, "%s firmware %s: unexpected key size: %u != %u\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
- css->key_size_dw, UOS_RSA_SCRATCH_COUNT);
- err = -EPROTO;
- goto fail;
- }
uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
/* At least, it should have header, uCode and RSA. Size of all three. */
@@ -540,10 +566,79 @@ fail:
i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
err);
- intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_FAIL);
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
return err;
}
+static inline bool uc_fw_need_rsa_in_memory(struct intel_uc_fw *uc_fw)
+{
+ /*
+ * The HW reads the GuC RSA from memory if the key size is > 256 bytes,
+ * while it reads it from the 64 RSA registers if it is smaller.
+ * The HuC RSA is always read from memory.
+ */
+ return uc_fw->type == INTEL_UC_FW_TYPE_HUC || uc_fw->rsa_size > 256;
+}
+
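[Editor's note, not part of the patch] The 256-byte cut-off above corresponds to the register-backed path: per the in-code comment there are 64 RSA registers, each one u32 wide, which the removed key-size check earlier referenced via UOS_RSA_SCRATCH_COUNT. A one-line illustration of that arithmetic; RSA_REG_BYTES is a made-up name:

	/* Illustrative arithmetic only. */
	#define RSA_REG_BYTES	(UOS_RSA_SCRATCH_COUNT * sizeof(u32))	/* 64 * 4 == 256 */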
+static int uc_fw_rsa_data_create(struct intel_uc_fw *uc_fw)
+{
+ struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
+ struct i915_vma *vma;
+ size_t copied;
+ void *vaddr;
+ int err;
+
+ err = i915_inject_probe_error(gt->i915, -ENXIO);
+ if (err)
+ return err;
+
+ if (!uc_fw_need_rsa_in_memory(uc_fw))
+ return 0;
+
+ /*
+ * uC firmwares will sit above GUC_GGTT_TOP and will not map through
+ * GGTT. Unfortunately, this means that the GuC HW cannot perform the uC
+ * authentication from memory, as the RSA offset now falls within the
+ * GuC inaccessible range. We resort to perma-pinning an additional vma
+ * within the accessible range that only contains the RSA signature.
+ * The GuC HW can use this extra pinning to perform the authentication
+ * since its GGTT offset will be GuC accessible.
+ */
+ GEM_BUG_ON(uc_fw->rsa_size > PAGE_SIZE);
+ vma = intel_guc_allocate_vma(&gt->uc.guc, PAGE_SIZE);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
+ i915_coherent_map_type(gt->i915, vma->obj, true));
+ if (IS_ERR(vaddr)) {
+ i915_vma_unpin_and_release(&vma, 0);
+ err = PTR_ERR(vaddr);
+ goto unpin_out;
+ }
+
+ copied = intel_uc_fw_copy_rsa(uc_fw, vaddr, vma->size);
+ i915_gem_object_unpin_map(vma->obj);
+
+ if (copied < uc_fw->rsa_size) {
+ err = -ENOMEM;
+ goto unpin_out;
+ }
+
+ uc_fw->rsa_data = vma;
+
+ return 0;
+
+unpin_out:
+ i915_vma_unpin_and_release(&vma, 0);
+ return err;
+}
+
+static void uc_fw_rsa_data_destroy(struct intel_uc_fw *uc_fw)
+{
+ i915_vma_unpin_and_release(&uc_fw->rsa_data, 0);
+}
+
int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
{
int err;
@@ -558,14 +653,29 @@ int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
if (err) {
DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
intel_uc_fw_type_repr(uc_fw->type), err);
- intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_FAIL);
+ goto out;
}
+ err = uc_fw_rsa_data_create(uc_fw);
+ if (err) {
+ DRM_DEBUG_DRIVER("%s fw rsa data creation failed, err=%d\n",
+ intel_uc_fw_type_repr(uc_fw->type), err);
+ goto out_unpin;
+ }
+
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin_pages(uc_fw->obj);
+out:
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_INIT_FAIL);
return err;
}
void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
{
+ uc_fw_rsa_data_destroy(uc_fw);
+
if (i915_gem_object_has_pinned_pages(uc_fw->obj))
i915_gem_object_unpin_pages(uc_fw->obj);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 1e00bf65639e..d9d1dc0b4cbb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -32,11 +32,12 @@ struct intel_gt;
* | | MISSING <--/ | \--> ERROR |
* | fetch | V |
* | | AVAILABLE |
- * +------------+- | -+
+ * +------------+- | \ -+
+ * | | | \--> INIT FAIL |
* | init | V |
* | | /------> LOADABLE <----<-----------\ |
* +------------+- \ / \ \ \ -+
- * | | FAIL <--< \--> TRANSFERRED \ |
+ * | | LOAD FAIL <--< \--> TRANSFERRED \ |
* | upload | \ / \ / |
* | | \---------/ \--> RUNNING |
* +------------+---------------------------------------------------+
@@ -50,8 +51,9 @@ enum intel_uc_fw_status {
INTEL_UC_FIRMWARE_MISSING, /* blob not found on the system */
INTEL_UC_FIRMWARE_ERROR, /* invalid format or version */
INTEL_UC_FIRMWARE_AVAILABLE, /* blob found and copied in mem */
+ INTEL_UC_FIRMWARE_INIT_FAIL, /* failed to prepare fw objects for load */
INTEL_UC_FIRMWARE_LOADABLE, /* all fw-required objects are ready */
- INTEL_UC_FIRMWARE_FAIL, /* failed to xfer or init/auth the fw */
+ INTEL_UC_FIRMWARE_LOAD_FAIL, /* failed to xfer or init/auth the fw */
INTEL_UC_FIRMWARE_TRANSFERRED, /* dma xfer done */
INTEL_UC_FIRMWARE_RUNNING /* init/auth done */
};
@@ -84,6 +86,7 @@ struct intel_uc_fw {
* or during a GT reset (mutex guarantees single threaded).
*/
struct i915_vma dummy;
+ struct i915_vma *rsa_data;
/*
* The firmware build process will generate a version header file with major and
@@ -130,10 +133,12 @@ const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
return "ERROR";
case INTEL_UC_FIRMWARE_AVAILABLE:
return "AVAILABLE";
+ case INTEL_UC_FIRMWARE_INIT_FAIL:
+ return "INIT FAIL";
case INTEL_UC_FIRMWARE_LOADABLE:
return "LOADABLE";
- case INTEL_UC_FIRMWARE_FAIL:
- return "FAIL";
+ case INTEL_UC_FIRMWARE_LOAD_FAIL:
+ return "LOAD FAIL";
case INTEL_UC_FIRMWARE_TRANSFERRED:
return "TRANSFERRED";
case INTEL_UC_FIRMWARE_RUNNING:
@@ -155,7 +160,8 @@ static inline int intel_uc_fw_status_to_error(enum intel_uc_fw_status status)
return -ENOENT;
case INTEL_UC_FIRMWARE_ERROR:
return -ENOEXEC;
- case INTEL_UC_FIRMWARE_FAIL:
+ case INTEL_UC_FIRMWARE_INIT_FAIL:
+ case INTEL_UC_FIRMWARE_LOAD_FAIL:
return -EIO;
case INTEL_UC_FIRMWARE_SELECTED:
return -ESTALE;
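[Editor's note, not part of the patch] A minimal sketch of how the split failure states surface to a caller. intel_uc_fw_status_to_error() and intel_uc_fw_status_repr() come from the hunks above; the wrapper name and the direct uc_fw->status read are assumptions for illustration:

	static int check_uc_fw_usable(const struct intel_uc_fw *uc_fw)
	{
		int err = intel_uc_fw_status_to_error(uc_fw->status);

		/*
		 * INIT_FAIL and LOAD_FAIL both map to -EIO, but the repr
		 * string now tells the two failure points apart in the logs.
		 */
		if (err)
			pr_debug("uC fw not usable: %s (%pe)\n",
				 intel_uc_fw_status_repr(uc_fw->status),
				 ERR_PTR(err));

		return err;
	}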
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index fb0e4a7bd8ca..d3327b802b76 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -3,8 +3,21 @@
* Copyright © 2021 Intel Corporation
*/
+#include "selftests/igt_spinner.h"
#include "selftests/intel_scheduler_helpers.h"
+static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
+{
+ int err = 0;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (spin && !igt_wait_for_spinner(spin, rq))
+ err = -ETIMEDOUT;
+
+ return err;
+}
+
static struct i915_request *nop_user_request(struct intel_context *ce,
struct i915_request *from)
{
@@ -110,12 +123,172 @@ err:
return ret;
}
+/*
+ * intel_guc_steal_guc_ids - Test to exhaust all guc_ids and then steal one
+ *
+ * This test creates a spinner which is used to block all subsequent submissions
+ * until it completes. Next, a loop creates a context and a NOP request each
+ * iteration until the guc_ids are exhausted (request creation returns -EAGAIN).
+ * The spinner is ended, unblocking all requests created in the loop. At this
+ * point all guc_ids are exhausted but are available to steal. Try to create
+ * another request which should successfully steal a guc_id. Wait on last
+ * request to complete, idle GPU, verify a guc_id was stolen via a counter, and
+ * exit the test. Test also artificially reduces the number of guc_ids so the
+ * test runs in a timely manner.
+ */
+static int intel_guc_steal_guc_ids(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_guc *guc = &gt->uc.guc;
+ int ret, sv, context_index = 0;
+ intel_wakeref_t wakeref;
+ struct intel_engine_cs *engine;
+ struct intel_context **ce;
+ struct igt_spinner spin;
+ struct i915_request *spin_rq = NULL, *rq, *last = NULL;
+ int number_guc_id_stolen = guc->number_guc_id_stolen;
+
+ ce = kzalloc(sizeof(*ce) * GUC_MAX_LRC_DESCRIPTORS, GFP_KERNEL);
+ if (!ce) {
+ pr_err("Context array allocation failed\n");
+ return -ENOMEM;
+ }
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+ engine = intel_selftest_find_any_engine(gt);
+ sv = guc->submission_state.num_guc_ids;
+ guc->submission_state.num_guc_ids = 4096;
+
+ /* Create spinner to block requests in below loop */
+ ce[context_index] = intel_context_create(engine);
+ if (IS_ERR(ce[context_index])) {
+ ret = PTR_ERR(ce[context_index]);
+ ce[context_index] = NULL;
+ pr_err("Failed to create context: %d\n", ret);
+ goto err_wakeref;
+ }
+ ret = igt_spinner_init(&spin, engine->gt);
+ if (ret) {
+ pr_err("Failed to create spinner: %d\n", ret);
+ goto err_contexts;
+ }
+ spin_rq = igt_spinner_create_request(&spin, ce[context_index],
+ MI_ARB_CHECK);
+ if (IS_ERR(spin_rq)) {
+ ret = PTR_ERR(spin_rq);
+ pr_err("Failed to create spinner request: %d\n", ret);
+ goto err_contexts;
+ }
+ ret = request_add_spin(spin_rq, &spin);
+ if (ret) {
+ pr_err("Failed to add Spinner request: %d\n", ret);
+ goto err_spin_rq;
+ }
+
+ /* Use all guc_ids */
+ while (ret != -EAGAIN) {
+ ce[++context_index] = intel_context_create(engine);
+ if (IS_ERR(ce[context_index])) {
+ ret = PTR_ERR(ce[context_index--]);
+ ce[context_index] = NULL;
+ pr_err("Failed to create context: %d\n", ret);
+ goto err_spin_rq;
+ }
+
+ rq = nop_user_request(ce[context_index], spin_rq);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ rq = NULL;
+ if (ret != -EAGAIN) {
+ pr_err("Failed to create request, %d: %d\n",
+ context_index, ret);
+ goto err_spin_rq;
+ }
+ } else {
+ if (last)
+ i915_request_put(last);
+ last = rq;
+ }
+ }
+
+ /* Release blocked requests */
+ igt_spinner_end(&spin);
+ ret = intel_selftest_wait_for_rq(spin_rq);
+ if (ret) {
+ pr_err("Spin request failed to complete: %d\n", ret);
+ i915_request_put(last);
+ goto err_spin_rq;
+ }
+ i915_request_put(spin_rq);
+ igt_spinner_fini(&spin);
+ spin_rq = NULL;
+
+ /* Wait for last request */
+ ret = i915_request_wait(last, 0, HZ * 30);
+ i915_request_put(last);
+ if (ret < 0) {
+ pr_err("Last request failed to complete: %d\n", ret);
+ goto err_spin_rq;
+ }
+
+ /* Try to steal guc_id */
+ rq = nop_user_request(ce[context_index], NULL);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ pr_err("Failed to steal guc_id, %d: %d\n", context_index, ret);
+ goto err_spin_rq;
+ }
+
+ /* Wait for request with stolen guc_id */
+ ret = i915_request_wait(rq, 0, HZ);
+ i915_request_put(rq);
+ if (ret < 0) {
+ pr_err("Request with stolen guc_id failed to complete: %d\n",
+ ret);
+ goto err_spin_rq;
+ }
+
+ /* Wait for idle */
+ ret = intel_gt_wait_for_idle(gt, HZ * 30);
+ if (ret < 0) {
+ pr_err("GT failed to idle: %d\n", ret);
+ goto err_spin_rq;
+ }
+
+ /* Verify a guc_id was stolen */
+ if (guc->number_guc_id_stolen == number_guc_id_stolen) {
+ pr_err("No guc_id was stolen");
+ ret = -EINVAL;
+ } else {
+ ret = 0;
+ }
+
+err_spin_rq:
+ if (spin_rq) {
+ igt_spinner_end(&spin);
+ intel_selftest_wait_for_rq(spin_rq);
+ i915_request_put(spin_rq);
+ igt_spinner_fini(&spin);
+ intel_gt_wait_for_idle(gt, HZ * 30);
+ }
+err_contexts:
+ for (; context_index >= 0 && ce[context_index]; --context_index)
+ intel_context_put(ce[context_index]);
+err_wakeref:
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ kfree(ce);
+ guc->submission_state.num_guc_ids = sv;
+
+ return ret;
+}
+
int intel_guc_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(intel_guc_scrub_ctbs),
+ SUBTEST(intel_guc_steal_guc_ids),
};
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
if (intel_gt_is_wedged(gt))
return 0;
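[Editor's note, not part of the patch] The new guc_id-stealing test is built on the igt_spinner blocking idiom used above; a stripped-down sketch of that pattern, assuming an already-created context ce on engine:

	struct igt_spinner spin;
	struct i915_request *spin_rq;
	int err;

	err = igt_spinner_init(&spin, engine->gt);
	if (err)
		return err;

	/* Submit a request that spins on the GPU, blocking later submissions. */
	spin_rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
	if (IS_ERR(spin_rq))
		return PTR_ERR(spin_rq);
	err = request_add_spin(spin_rq, &spin);

	/* ... queue the work that must stay blocked behind the spinner ... */

	igt_spinner_end(&spin);			/* release everything queued behind it */
	err = intel_selftest_wait_for_rq(spin_rq);
	i915_request_put(spin_rq);
	igt_spinner_fini(&spin);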
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
index 50953c8e8b53..1297ddbf7f88 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
@@ -167,7 +167,7 @@ int intel_guc_multi_lrc_live_selftests(struct drm_i915_private *i915)
static const struct i915_subtest tests[] = {
SUBTEST(intel_guc_multi_lrc_basic),
};
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
if (intel_gt_is_wedged(gt))
return 0;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 53d0cb327539..99d1781fa5f0 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -446,17 +446,17 @@ static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
return (e->val64 != 0);
else
- return (e->val64 & _PAGE_PRESENT);
+ return (e->val64 & GEN8_PAGE_PRESENT);
}
static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
- e->val64 &= ~_PAGE_PRESENT;
+ e->val64 &= ~GEN8_PAGE_PRESENT;
}
static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
{
- e->val64 |= _PAGE_PRESENT;
+ e->val64 |= GEN8_PAGE_PRESENT;
}
static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e)
@@ -2439,7 +2439,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
/* The entry parameters like present/writeable/cache type
* set to the same as i915's scratch page tree.
*/
- se.val64 |= _PAGE_PRESENT | _PAGE_RW;
+ se.val64 |= GEN8_PAGE_PRESENT | GEN8_PAGE_RW;
if (type == GTT_TYPE_PPGTT_PDE_PT)
se.val64 |= PPAT_CACHED;
@@ -2896,7 +2896,7 @@ void intel_gvt_restore_ggtt(struct intel_gvt *gvt)
offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
for (idx = 0; idx < num_low; idx++) {
pte = mm->ggtt_mm.host_ggtt_aperture[idx];
- if (pte & _PAGE_PRESENT)
+ if (pte & GEN8_PAGE_PRESENT)
write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
}
@@ -2904,7 +2904,7 @@ void intel_gvt_restore_ggtt(struct intel_gvt *gvt)
offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
for (idx = 0; idx < num_hi; idx++) {
pte = mm->ggtt_mm.host_ggtt_hidden[idx];
- if (pte & _PAGE_PRESENT)
+ if (pte & GEN8_PAGE_PRESENT)
write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
}
}
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index cbac409f6c8a..f0b69e4dcb52 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -205,7 +205,7 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
spin_lock_init(&gvt->scheduler.mmio_context_lock);
mutex_init(&gvt->lock);
mutex_init(&gvt->sched_lock);
- gvt->gt = &i915->gt;
+ gvt->gt = to_gt(i915);
i915->gvt = gvt;
init_device_info(gvt);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 6c804102528b..42a0c9ae0a73 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1386,7 +1386,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
enum intel_engine_id i;
int ret;
- ppgtt = i915_ppgtt_create(&i915->gt, I915_BO_ALLOC_PM_EARLY);
+ ppgtt = i915_ppgtt_create(to_gt(i915), I915_BO_ALLOC_PM_EARLY);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 3103c1e1fd14..ee2b3a375362 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -426,8 +426,9 @@ replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
return true;
}
-int i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
+int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
{
+ struct dma_fence *fence = &rq->fence;
struct i915_active_fence *active;
int err;
@@ -436,7 +437,7 @@ int i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
if (err)
return err;
- active = active_instance(ref, idx);
+ active = active_instance(ref, i915_request_timeline(rq)->fence_context);
if (!active) {
err = -ENOMEM;
goto out;
@@ -477,29 +478,6 @@ __i915_active_set_fence(struct i915_active *ref,
return prev;
}
-static struct i915_active_fence *
-__active_fence(struct i915_active *ref, u64 idx)
-{
- struct active_node *it;
-
- it = __active_lookup(ref, idx);
- if (unlikely(!it)) { /* Contention with parallel tree builders! */
- spin_lock_irq(&ref->tree_lock);
- it = __active_lookup(ref, idx);
- spin_unlock_irq(&ref->tree_lock);
- }
- GEM_BUG_ON(!it); /* slot must be preallocated */
-
- return &it->base;
-}
-
-struct dma_fence *
-__i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
-{
- /* Only valid while active, see i915_active_acquire_for_context() */
- return __i915_active_set_fence(ref, __active_fence(ref, idx), fence);
-}
-
struct dma_fence *
i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
{
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index 5fcdb0e2bc9e..7eb44132183a 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -164,26 +164,11 @@ void __i915_active_init(struct i915_active *ref,
__i915_active_init(ref, active, retire, flags, &__mkey, &__wkey); \
} while (0)
-struct dma_fence *
-__i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence);
-int i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence);
-
-static inline int
-i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
-{
- return i915_active_ref(ref,
- i915_request_timeline(rq)->fence_context,
- &rq->fence);
-}
+int i915_active_add_request(struct i915_active *ref, struct i915_request *rq);
struct dma_fence *
i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f);
-static inline bool i915_active_has_exclusive(struct i915_active *ref)
-{
- return rcu_access_pointer(ref->excl.fence);
-}
-
int __i915_active_wait(struct i915_active *ref, int state);
static inline int i915_active_wait(struct i915_active *ref)
{
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 682011c07ad7..e0e052cdf8b8 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -64,7 +64,8 @@ static int i915_capabilities(struct seq_file *m, void *data)
intel_device_info_print_static(INTEL_INFO(i915), &p);
intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
- intel_gt_info_print(&i915->gt.info, &p);
+ i915_print_iommu_status(i915, &p);
+ intel_gt_info_print(&to_gt(i915)->info, &p);
intel_driver_caps_print(&i915->caps, &p);
kernel_param_lock(THIS_MODULE);
@@ -292,7 +293,7 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file)
gpu = NULL;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- gpu = i915_gpu_coredump(&i915->gt, ALL_ENGINES);
+ gpu = i915_gpu_coredump(to_gt(i915), ALL_ENGINES);
if (IS_ERR(gpu))
return PTR_ERR(gpu);
@@ -350,7 +351,7 @@ static const struct file_operations i915_error_state_fops = {
static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
struct drm_printer p = drm_seq_file_printer(m);
intel_gt_pm_frequency_dump(gt, &p);
@@ -438,11 +439,11 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_rps *rps = &dev_priv->gt.rps;
+ struct intel_rps *rps = &to_gt(dev_priv)->rps;
seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps)));
seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps)));
- seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
+ seq_printf(m, "GPU busy? %s\n", yesno(to_gt(dev_priv)->awake));
seq_printf(m, "Boosts outstanding? %d\n",
atomic_read(&rps->num_waiters));
seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
@@ -475,7 +476,7 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
seq_printf(m, "Runtime power status: %s\n",
enableddisabled(!dev_priv->power_domains.init_wakeref));
- seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
+ seq_printf(m, "GPU idle: %s\n", yesno(!to_gt(dev_priv)->awake));
seq_printf(m, "IRQs disabled: %s\n",
yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
@@ -507,18 +508,18 @@ static int i915_engine_info(struct seq_file *m, void *unused)
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
seq_printf(m, "GT awake? %s [%d], %llums\n",
- yesno(i915->gt.awake),
- atomic_read(&i915->gt.wakeref.count),
- ktime_to_ms(intel_gt_get_awake_time(&i915->gt)));
+ yesno(to_gt(i915)->awake),
+ atomic_read(&to_gt(i915)->wakeref.count),
+ ktime_to_ms(intel_gt_get_awake_time(to_gt(i915))));
seq_printf(m, "CS timestamp frequency: %u Hz, %d ns\n",
- i915->gt.clock_frequency,
- i915->gt.clock_period_ns);
+ to_gt(i915)->clock_frequency,
+ to_gt(i915)->clock_period_ns);
p = drm_seq_file_printer(m);
for_each_uabi_engine(engine, i915)
intel_engine_dump(engine, &p, "%s\n", engine->name);
- intel_gt_show_timelines(&i915->gt, &p, i915_request_show_with_schedule);
+ intel_gt_show_timelines(to_gt(i915), &p, i915_request_show_with_schedule);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
@@ -557,14 +558,14 @@ static int i915_wedged_get(void *data, u64 *val)
{
struct drm_i915_private *i915 = data;
- return intel_gt_debugfs_reset_show(&i915->gt, val);
+ return intel_gt_debugfs_reset_show(to_gt(i915), val);
}
static int i915_wedged_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
- return intel_gt_debugfs_reset_store(&i915->gt, val);
+ return intel_gt_debugfs_reset_store(to_gt(i915), val);
}
DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
@@ -580,7 +581,7 @@ i915_perf_noa_delay_set(void *data, u64 val)
* This would lead to infinite waits as we're doing timestamp
* difference on the CS with only 32bits.
*/
- if (intel_gt_ns_to_clock_interval(&i915->gt, val) > U32_MAX)
+ if (intel_gt_ns_to_clock_interval(to_gt(i915), val) > U32_MAX)
return -EINVAL;
atomic64_set(&i915->perf.noa_programming_delay, val);
@@ -665,16 +666,18 @@ static int
i915_drop_caches_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
+ unsigned int flags;
int ret;
DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
val, val & DROP_ALL);
- ret = gt_drop_caches(&i915->gt, val);
+ ret = gt_drop_caches(to_gt(i915), val);
if (ret)
return ret;
fs_reclaim_acquire(GFP_KERNEL);
+ flags = memalloc_noreclaim_save();
if (val & DROP_BOUND)
i915_gem_shrink(NULL, i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
@@ -683,6 +686,7 @@ i915_drop_caches_set(void *data, u64 val)
if (val & DROP_SHRINK_ALL)
i915_gem_shrink_all(i915);
+ memalloc_noreclaim_restore(flags);
fs_reclaim_release(GFP_KERNEL);
if (val & DROP_RCU)
@@ -701,7 +705,7 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
static int i915_sseu_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
return intel_sseu_status(m, gt);
}
@@ -710,14 +714,14 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
- return intel_gt_pm_debugfs_forcewake_user_open(&i915->gt);
+ return intel_gt_pm_debugfs_forcewake_user_open(to_gt(i915));
}
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
- return intel_gt_pm_debugfs_forcewake_user_release(&i915->gt);
+ return intel_gt_pm_debugfs_forcewake_user_release(to_gt(i915));
}
static const struct file_operations i915_forcewake_fops = {
diff --git a/drivers/gpu/drm/i915/i915_debugfs_params.c b/drivers/gpu/drm/i915/i915_debugfs_params.c
index 20424275d41e..783c8676eee2 100644
--- a/drivers/gpu/drm/i915/i915_debugfs_params.c
+++ b/drivers/gpu/drm/i915/i915_debugfs_params.c
@@ -40,8 +40,8 @@ static int notify_guc(struct drm_i915_private *i915)
{
int ret = 0;
- if (intel_uc_uses_guc_submission(&i915->gt.uc))
- ret = intel_guc_global_policies_update(&i915->gt.uc.guc);
+ if (intel_uc_uses_guc_submission(&to_gt(i915)->uc))
+ ret = intel_guc_global_policies_update(&to_gt(i915)->uc.guc);
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_deps.c b/drivers/gpu/drm/i915/i915_deps.c
new file mode 100644
index 000000000000..999210b37325
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_deps.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <linux/dma-fence.h>
+#include <linux/slab.h>
+
+#include <drm/ttm/ttm_bo_api.h>
+
+#include "i915_deps.h"
+
+/**
+ * DOC: Set of utilities to dynamically collect dependencies into a
+ * structure which is fed into the GT migration code.
+ *
+ * Once we can do async unbinding, this is also needed to coalesce
+ * the migration fence with the unbind fences if these are coalesced
+ * post-migration.
+ *
+ * While collecting the individual dependencies, we store the refcounted
+ * struct dma_fence pointers in a realloc-managed pointer array, since
+ * that can be easily fed into a dma_fence_array. Other options are
+ * available, like for example an xarray for similarity with drm/sched.
+ * Can be changed easily if needed.
+ *
+ * A struct i915_deps needs to be initialized using i915_deps_init().
+ * If i915_deps_add_dependency() or i915_deps_add_resv() returns an
+ * error code, it will internally call i915_deps_fini(), which frees
+ * all internal references and allocations.
+ */
+
+/* Min number of fence pointers in the array when an allocation occurs. */
+#define I915_DEPS_MIN_ALLOC_CHUNK 8U
+
+static void i915_deps_reset_fences(struct i915_deps *deps)
+{
+ if (deps->fences != &deps->single)
+ kfree(deps->fences);
+ deps->num_deps = 0;
+ deps->fences_size = 1;
+ deps->fences = &deps->single;
+}
+
+/**
+ * i915_deps_init - Initialize an i915_deps structure
+ * @deps: Pointer to the i915_deps structure to initialize.
+ * @gfp: The allocation mode for subsequent allocations.
+ */
+void i915_deps_init(struct i915_deps *deps, gfp_t gfp)
+{
+ deps->fences = NULL;
+ deps->gfp = gfp;
+ i915_deps_reset_fences(deps);
+}
+
+/**
+ * i915_deps_fini - Finalize an i915_deps structure
+ * @deps: Pointer to the i915_deps structure to finalize.
+ *
+ * This function drops all fence references taken, conditionally frees and
+ * then resets the fences array.
+ */
+void i915_deps_fini(struct i915_deps *deps)
+{
+ unsigned int i;
+
+ for (i = 0; i < deps->num_deps; ++i)
+ dma_fence_put(deps->fences[i]);
+
+ if (deps->fences != &deps->single)
+ kfree(deps->fences);
+}
+
+static int i915_deps_grow(struct i915_deps *deps, struct dma_fence *fence,
+ const struct ttm_operation_ctx *ctx)
+{
+ int ret;
+
+ if (deps->num_deps >= deps->fences_size) {
+ unsigned int new_size = 2 * deps->fences_size;
+ struct dma_fence **new_fences;
+
+ new_size = max(new_size, I915_DEPS_MIN_ALLOC_CHUNK);
+ new_fences = kmalloc_array(new_size, sizeof(*new_fences), deps->gfp);
+ if (!new_fences)
+ goto sync;
+
+ memcpy(new_fences, deps->fences,
+ deps->fences_size * sizeof(*new_fences));
+ swap(new_fences, deps->fences);
+ if (new_fences != &deps->single)
+ kfree(new_fences);
+ deps->fences_size = new_size;
+ }
+ deps->fences[deps->num_deps++] = dma_fence_get(fence);
+ return 0;
+
+sync:
+ if (ctx->no_wait_gpu && !dma_fence_is_signaled(fence)) {
+ ret = -EBUSY;
+ goto unref;
+ }
+
+ ret = dma_fence_wait(fence, ctx->interruptible);
+ if (ret)
+ goto unref;
+
+ ret = fence->error;
+ if (ret)
+ goto unref;
+
+ return 0;
+
+unref:
+ i915_deps_fini(deps);
+ return ret;
+}
+
+/**
+ * i915_deps_sync - Wait for all the fences in the dependency collection
+ * @deps: Pointer to the i915_deps structure whose fences are to be waited for.
+ * @ctx: Pointer to a struct ttm_operation_ctx indicating how the waits
+ * should be performed.
+ *
+ * This function waits for fences in the dependency collection. If it
+ * encounters an error during the wait or a fence error, the wait for
+ * further fences is aborted and the error returned.
+ *
+ * Return: Zero if successful, negative error code on error.
+ */
+int i915_deps_sync(const struct i915_deps *deps, const struct ttm_operation_ctx *ctx)
+{
+ struct dma_fence **fences = deps->fences;
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < deps->num_deps; ++i, ++fences) {
+ if (ctx->no_wait_gpu && !dma_fence_is_signaled(*fences)) {
+ ret = -EBUSY;
+ break;
+ }
+
+ ret = dma_fence_wait(*fences, ctx->interruptible);
+ if (!ret)
+ ret = (*fences)->error;
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * i915_deps_add_dependency - Add a fence to the dependency collection
+ * @deps: Pointer to the i915_deps structure a fence is to be added to.
+ * @fence: The fence to add.
+ * @ctx: Pointer to a struct ttm_operation_ctx indicating how waits are to
+ * be performed if waiting.
+ *
+ * Adds a fence to the dependency collection, and takes a reference on it.
+ * If the fence context is not zero and there was a later fence from the
+ * same fence context already added, then the fence is not added to the
+ * dependency collection. If the fence context is not zero and there was
+ * an earlier fence already added, then the fence will replace the older
+ * fence from the same context and the reference on the earlier fence will
+ * be dropped.
+ * If there is a failure to allocate memory to accommodate the new fence to
+ * be added, the new fence will instead be waited for and an error may
+ * be returned, depending on the value of @ctx, or if there was a fence
+ * error. If an error is returned, the dependency collection will be
+ * finalized and all fence references dropped.
+ *
+ * Return: 0 on success. Negative error code on error.
+ */
+int i915_deps_add_dependency(struct i915_deps *deps,
+ struct dma_fence *fence,
+ const struct ttm_operation_ctx *ctx)
+{
+ unsigned int i;
+ int ret;
+
+ if (!fence)
+ return 0;
+
+ if (dma_fence_is_signaled(fence)) {
+ ret = fence->error;
+ if (ret)
+ i915_deps_fini(deps);
+ return ret;
+ }
+
+ for (i = 0; i < deps->num_deps; ++i) {
+ struct dma_fence *entry = deps->fences[i];
+
+ if (!entry->context || entry->context != fence->context)
+ continue;
+
+ if (dma_fence_is_later(fence, entry)) {
+ dma_fence_put(entry);
+ deps->fences[i] = dma_fence_get(fence);
+ }
+
+ return 0;
+ }
+
+ return i915_deps_grow(deps, fence, ctx);
+}
+
+/**
+ * i915_deps_add_resv - Add the fences of a reservation object to a dependency
+ * collection.
+ * @deps: Pointer to the i915_deps structure a fence is to be added to.
+ * @resv: The reservation object whose fences are to be added.
+ * @ctx: Pointer to a struct ttm_operation_ctx indicating how waits are to
+ * be performed if waiting.
+ *
+ * Calls i915_deps_add_dependency() on the indicated fences of @resv.
+ *
+ * Return: Zero on success. Negative error code on error.
+ */
+int i915_deps_add_resv(struct i915_deps *deps, struct dma_resv *resv,
+ const struct ttm_operation_ctx *ctx)
+{
+ struct dma_resv_iter iter;
+ struct dma_fence *fence;
+
+ dma_resv_assert_held(resv);
+ dma_resv_for_each_fence(&iter, resv, true, fence) {
+ int ret = i915_deps_add_dependency(deps, fence, ctx);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_deps.h b/drivers/gpu/drm/i915/i915_deps.h
new file mode 100644
index 000000000000..d76c0106c910
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_deps.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _I915_DEPS_H_
+#define _I915_DEPS_H_
+
+#include <linux/types.h>
+
+struct ttm_operation_ctx;
+struct dma_fence;
+struct dma_resv;
+
+/**
+ * struct i915_deps - Collect dependencies into a single dma-fence
+ * @single: Storage for pointer if the collection is a single fence.
+ * @fences: Allocated array of fence pointers if more than a single fence;
+ * otherwise points to the address of @single.
+ * @num_deps: Current number of dependency fences.
+ * @fences_size: Size of the @fences array in number of pointers.
+ * @gfp: Allocation mode.
+ */
+struct i915_deps {
+ struct dma_fence *single;
+ struct dma_fence **fences;
+ unsigned int num_deps;
+ unsigned int fences_size;
+ gfp_t gfp;
+};
+
+void i915_deps_init(struct i915_deps *deps, gfp_t gfp);
+
+void i915_deps_fini(struct i915_deps *deps);
+
+int i915_deps_add_dependency(struct i915_deps *deps,
+ struct dma_fence *fence,
+ const struct ttm_operation_ctx *ctx);
+
+int i915_deps_add_resv(struct i915_deps *deps, struct dma_resv *resv,
+ const struct ttm_operation_ctx *ctx);
+
+int i915_deps_sync(const struct i915_deps *deps,
+ const struct ttm_operation_ctx *ctx);
+#endif
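[Editor's note, not part of the patch] Putting the new header together, a minimal usage sketch of the i915_deps API; obj, fence and the GFP/ctx choices are assumptions for illustration:

	struct ttm_operation_ctx ctx = { .interruptible = true };
	struct i915_deps deps;
	int err;

	i915_deps_init(&deps, GFP_KERNEL);

	/*
	 * Collect what the operation must wait for; the reservation lock of
	 * obj must already be held, and on error these helpers call
	 * i915_deps_fini() internally.
	 */
	err = i915_deps_add_resv(&deps, obj->base.resv, &ctx);
	if (!err)
		err = i915_deps_add_dependency(&deps, fence, &ctx);

	/* Either feed &deps to the GT migration code, or wait synchronously: */
	if (!err)
		err = i915_deps_sync(&deps, &ctx);

	i915_deps_fini(&deps);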
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index ca3b599f6c38..95174938b160 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -291,7 +291,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
static void sanitize_gpu(struct drm_i915_private *i915)
{
if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
- __intel_gt_reset(&i915->gt, ALL_ENGINES);
+ __intel_gt_reset(to_gt(i915), ALL_ENGINES);
}
/**
@@ -314,8 +314,9 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_device_info_subplatform_init(dev_priv);
intel_step_init(dev_priv);
+ intel_gt_init_early(to_gt(dev_priv), dev_priv);
intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
- intel_uncore_init_early(&dev_priv->uncore, dev_priv);
+ intel_uncore_init_early(&dev_priv->uncore, to_gt(dev_priv));
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
@@ -346,7 +347,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_wopcm_init_early(&dev_priv->wopcm);
- intel_gt_init_early(&dev_priv->gt, dev_priv);
+ __intel_gt_init_early(to_gt(dev_priv), dev_priv);
i915_gem_init_early(dev_priv);
@@ -367,7 +368,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
err_gem:
i915_gem_cleanup_early(dev_priv);
- intel_gt_driver_late_release(&dev_priv->gt);
+ intel_gt_driver_late_release(to_gt(dev_priv));
intel_region_ttm_device_fini(dev_priv);
err_ttm:
vlv_suspend_cleanup(dev_priv);
@@ -386,7 +387,7 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
intel_irq_fini(dev_priv);
intel_power_domains_cleanup(dev_priv);
i915_gem_cleanup_early(dev_priv);
- intel_gt_driver_late_release(&dev_priv->gt);
+ intel_gt_driver_late_release(to_gt(dev_priv));
intel_region_ttm_device_fini(dev_priv);
vlv_suspend_cleanup(dev_priv);
i915_workqueues_cleanup(dev_priv);
@@ -417,15 +418,19 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
if (ret < 0)
return ret;
- ret = intel_uncore_init_mmio(&dev_priv->uncore);
+ ret = intel_uncore_setup_mmio(&dev_priv->uncore);
if (ret < 0)
goto err_bridge;
+ ret = intel_uncore_init_mmio(&dev_priv->uncore);
+ if (ret)
+ goto err_mmio;
+
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev_priv);
intel_device_info_runtime_init(dev_priv);
- ret = intel_gt_init_mmio(&dev_priv->gt);
+ ret = intel_gt_init_mmio(to_gt(dev_priv));
if (ret)
goto err_uncore;
@@ -437,6 +442,8 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
err_uncore:
intel_teardown_mchbar(dev_priv);
intel_uncore_fini_mmio(&dev_priv->uncore);
+err_mmio:
+ intel_uncore_cleanup_mmio(&dev_priv->uncore);
err_bridge:
pci_dev_put(dev_priv->bridge_dev);
@@ -451,6 +458,7 @@ static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
intel_teardown_mchbar(dev_priv);
intel_uncore_fini_mmio(&dev_priv->uncore);
+ intel_uncore_cleanup_mmio(&dev_priv->uncore);
pci_dev_put(dev_priv->bridge_dev);
}
@@ -579,9 +587,9 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (ret)
goto err_ggtt;
- intel_gt_init_hw_early(&dev_priv->gt, &dev_priv->ggtt);
+ intel_gt_init_hw_early(to_gt(dev_priv), &dev_priv->ggtt);
- ret = intel_gt_probe_lmem(&dev_priv->gt);
+ ret = intel_gt_probe_lmem(to_gt(dev_priv));
if (ret)
goto err_mem_regions;
@@ -694,7 +702,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
/* Depends on sysfs having been initialized */
i915_perf_register(dev_priv);
- intel_gt_driver_register(&dev_priv->gt);
+ intel_gt_driver_register(to_gt(dev_priv));
intel_display_driver_register(dev_priv);
@@ -722,7 +730,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
intel_display_driver_unregister(dev_priv);
- intel_gt_driver_unregister(&dev_priv->gt);
+ intel_gt_driver_unregister(to_gt(dev_priv));
i915_perf_unregister(dev_priv);
i915_pmu_unregister(dev_priv);
@@ -733,6 +741,12 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
i915_gem_driver_unregister(dev_priv);
}
+void
+i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p)
+{
+ drm_printf(p, "iommu: %s\n", enableddisabled(intel_vtd_active(i915)));
+}
+
static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
if (drm_debug_enabled(DRM_UT_DRIVER)) {
@@ -748,7 +762,8 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
- intel_gt_info_print(&dev_priv->gt.info, &p);
+ i915_print_iommu_status(dev_priv, &p);
+ intel_gt_info_print(&to_gt(dev_priv)->info, &p);
}
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
@@ -809,7 +824,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return PTR_ERR(i915);
/* Disable nuclear pageflip by default on pre-ILK */
- if (!i915->params.nuclear_pageflip && match_info->graphics_ver < 5)
+ if (!i915->params.nuclear_pageflip && match_info->graphics.ver < 5)
i915->drm.driver_features &= ~DRIVER_ATOMIC;
/*
@@ -1370,7 +1385,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_uncore_resume_early(&dev_priv->uncore);
- intel_gt_check_and_clear_faults(&dev_priv->gt);
+ intel_gt_check_and_clear_faults(to_gt(dev_priv));
intel_display_power_resume_early(dev_priv);
@@ -1553,7 +1568,7 @@ static int intel_runtime_suspend(struct device *kdev)
*/
i915_gem_runtime_suspend(dev_priv);
- intel_gt_runtime_suspend(&dev_priv->gt);
+ intel_gt_runtime_suspend(to_gt(dev_priv));
intel_runtime_pm_disable_interrupts(dev_priv);
@@ -1569,7 +1584,7 @@ static int intel_runtime_suspend(struct device *kdev)
intel_runtime_pm_enable_interrupts(dev_priv);
- intel_gt_runtime_resume(&dev_priv->gt);
+ intel_gt_runtime_resume(to_gt(dev_priv));
enable_rpm_wakeref_asserts(rpm);
@@ -1657,7 +1672,7 @@ static int intel_runtime_resume(struct device *kdev)
* No point of rolling back things in case of an error, as the best
* we can do is to hope that things will still work (and disable RPM).
*/
- intel_gt_runtime_resume(&dev_priv->gt);
+ intel_gt_runtime_resume(to_gt(dev_priv));
/*
* On VLV/CHV display interrupts are part of the display
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c7ce23da6ffa..3967748ba347 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -168,8 +168,6 @@ struct i915_hotplug {
I915_GEM_DOMAIN_VERTEX)
struct drm_i915_private;
-struct i915_mm_struct;
-struct i915_mmu_object;
struct drm_i915_file_private {
struct drm_i915_private *dev_priv;
@@ -466,7 +464,7 @@ struct i915_gem_mm {
* List of objects which are pending destruction.
*/
struct llist_head free_list;
- struct work_struct free_work;
+ struct delayed_work free_work;
/**
* Count of objects pending destructions. Used to skip needlessly
* waiting on an RCU barrier if no objects are waiting to be freed.
@@ -993,7 +991,7 @@ struct drm_i915_private {
struct i915_perf perf;
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
- struct intel_gt gt;
+ struct intel_gt gt0;
struct {
struct i915_gem_contexts {
@@ -1065,6 +1063,11 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
return pci_get_drvdata(pdev);
}
+static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
+{
+ return &i915->gt0;
+}
+
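[Editor's note, not part of the patch] The rest of this series applies the same mechanical conversion at every call site: stop poking at the embedded gt and go through the accessor, which for now simply returns &i915->gt0 (presumably groundwork for multiple GTs). Before/after in one line each:

	gt = &i915->gt;		/* old: open-coded field access */
	gt = to_gt(i915);	/* new: accessor */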
/* Simple iterator over all initialised engines */
#define for_each_engine(engine__, dev_priv__, id__) \
for ((id__) = 0; \
@@ -1122,15 +1125,15 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
#define IP_VER(ver, rel) ((ver) << 8 | (rel))
-#define GRAPHICS_VER(i915) (INTEL_INFO(i915)->graphics_ver)
-#define GRAPHICS_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->graphics_ver, \
- INTEL_INFO(i915)->graphics_rel)
+#define GRAPHICS_VER(i915) (INTEL_INFO(i915)->graphics.ver)
+#define GRAPHICS_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->graphics.ver, \
+ INTEL_INFO(i915)->graphics.rel)
#define IS_GRAPHICS_VER(i915, from, until) \
(GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until))
-#define MEDIA_VER(i915) (INTEL_INFO(i915)->media_ver)
-#define MEDIA_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->media_ver, \
- INTEL_INFO(i915)->media_rel)
+#define MEDIA_VER(i915) (INTEL_INFO(i915)->media.ver)
+#define MEDIA_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->media.arch, \
+ INTEL_INFO(i915)->media.rel)
#define IS_MEDIA_VER(i915, from, until) \
(MEDIA_VER(i915) >= (from) && MEDIA_VER(i915) <= (until))
@@ -1143,15 +1146,20 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
#define HAS_DSB(dev_priv) (INTEL_INFO(dev_priv)->display.has_dsb)
#define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.display_step)
-#define INTEL_GT_STEP(__i915) (RUNTIME_INFO(__i915)->step.gt_step)
+#define INTEL_GRAPHICS_STEP(__i915) (RUNTIME_INFO(__i915)->step.graphics_step)
+#define INTEL_MEDIA_STEP(__i915) (RUNTIME_INFO(__i915)->step.media_step)
#define IS_DISPLAY_STEP(__i915, since, until) \
(drm_WARN_ON(&(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \
INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) < (until))
-#define IS_GT_STEP(__i915, since, until) \
- (drm_WARN_ON(&(__i915)->drm, INTEL_GT_STEP(__i915) == STEP_NONE), \
- INTEL_GT_STEP(__i915) >= (since) && INTEL_GT_STEP(__i915) < (until))
+#define IS_GRAPHICS_STEP(__i915, since, until) \
+ (drm_WARN_ON(&(__i915)->drm, INTEL_GRAPHICS_STEP(__i915) == STEP_NONE), \
+ INTEL_GRAPHICS_STEP(__i915) >= (since) && INTEL_GRAPHICS_STEP(__i915) < (until))
+
+#define IS_MEDIA_STEP(__i915, since, until) \
+ (drm_WARN_ON(&(__i915)->drm, INTEL_MEDIA_STEP(__i915) == STEP_NONE), \
+ INTEL_MEDIA_STEP(__i915) >= (since) && INTEL_MEDIA_STEP(__i915) < (until))
static __always_inline unsigned int
__platform_mask_index(const struct intel_runtime_info *info,
@@ -1328,15 +1336,15 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_TGL_Y(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_ULX)
-#define IS_SKL_GT_STEP(p, since, until) (IS_SKYLAKE(p) && IS_GT_STEP(p, since, until))
+#define IS_SKL_GRAPHICS_STEP(p, since, until) (IS_SKYLAKE(p) && IS_GRAPHICS_STEP(p, since, until))
-#define IS_KBL_GT_STEP(dev_priv, since, until) \
- (IS_KABYLAKE(dev_priv) && IS_GT_STEP(dev_priv, since, until))
+#define IS_KBL_GRAPHICS_STEP(dev_priv, since, until) \
+ (IS_KABYLAKE(dev_priv) && IS_GRAPHICS_STEP(dev_priv, since, until))
#define IS_KBL_DISPLAY_STEP(dev_priv, since, until) \
(IS_KABYLAKE(dev_priv) && IS_DISPLAY_STEP(dev_priv, since, until))
-#define IS_JSL_EHL_GT_STEP(p, since, until) \
- (IS_JSL_EHL(p) && IS_GT_STEP(p, since, until))
+#define IS_JSL_EHL_GRAPHICS_STEP(p, since, until) \
+ (IS_JSL_EHL(p) && IS_GRAPHICS_STEP(p, since, until))
#define IS_JSL_EHL_DISPLAY_STEP(p, since, until) \
(IS_JSL_EHL(p) && IS_DISPLAY_STEP(p, since, until))
@@ -1344,19 +1352,19 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
(IS_TIGERLAKE(__i915) && \
IS_DISPLAY_STEP(__i915, since, until))
-#define IS_TGL_UY_GT_STEP(__i915, since, until) \
+#define IS_TGL_UY_GRAPHICS_STEP(__i915, since, until) \
((IS_TGL_U(__i915) || IS_TGL_Y(__i915)) && \
- IS_GT_STEP(__i915, since, until))
+ IS_GRAPHICS_STEP(__i915, since, until))
-#define IS_TGL_GT_STEP(__i915, since, until) \
+#define IS_TGL_GRAPHICS_STEP(__i915, since, until) \
(IS_TIGERLAKE(__i915) && !(IS_TGL_U(__i915) || IS_TGL_Y(__i915)) && \
- IS_GT_STEP(__i915, since, until))
+ IS_GRAPHICS_STEP(__i915, since, until))
#define IS_RKL_DISPLAY_STEP(p, since, until) \
(IS_ROCKETLAKE(p) && IS_DISPLAY_STEP(p, since, until))
-#define IS_DG1_GT_STEP(p, since, until) \
- (IS_DG1(p) && IS_GT_STEP(p, since, until))
+#define IS_DG1_GRAPHICS_STEP(p, since, until) \
+ (IS_DG1(p) && IS_GRAPHICS_STEP(p, since, until))
#define IS_DG1_DISPLAY_STEP(p, since, until) \
(IS_DG1(p) && IS_DISPLAY_STEP(p, since, until))
@@ -1364,20 +1372,20 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
(IS_ALDERLAKE_S(__i915) && \
IS_DISPLAY_STEP(__i915, since, until))
-#define IS_ADLS_GT_STEP(__i915, since, until) \
+#define IS_ADLS_GRAPHICS_STEP(__i915, since, until) \
(IS_ALDERLAKE_S(__i915) && \
- IS_GT_STEP(__i915, since, until))
+ IS_GRAPHICS_STEP(__i915, since, until))
#define IS_ADLP_DISPLAY_STEP(__i915, since, until) \
(IS_ALDERLAKE_P(__i915) && \
IS_DISPLAY_STEP(__i915, since, until))
-#define IS_ADLP_GT_STEP(__i915, since, until) \
+#define IS_ADLP_GRAPHICS_STEP(__i915, since, until) \
(IS_ALDERLAKE_P(__i915) && \
- IS_GT_STEP(__i915, since, until))
+ IS_GRAPHICS_STEP(__i915, since, until))
-#define IS_XEHPSDV_GT_STEP(__i915, since, until) \
- (IS_XEHPSDV(__i915) && IS_GT_STEP(__i915, since, until))
+#define IS_XEHPSDV_GRAPHICS_STEP(__i915, since, until) \
+ (IS_XEHPSDV(__i915) && IS_GRAPHICS_STEP(__i915, since, until))
/*
* DG2 hardware steppings are a bit unusual. The hardware design was forked
@@ -1393,9 +1401,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
* and stepping-specific logic will be applied with a general DG2-wide stepping
* number.
*/
-#define IS_DG2_GT_STEP(__i915, variant, since, until) \
+#define IS_DG2_GRAPHICS_STEP(__i915, variant, since, until) \
(IS_SUBPLATFORM(__i915, INTEL_DG2, INTEL_SUBPLATFORM_##variant) && \
- IS_GT_STEP(__i915, since, until))
+ IS_GRAPHICS_STEP(__i915, since, until))
#define IS_DG2_DISPLAY_STEP(__i915, since, until) \
(IS_DG2(__i915) && \
@@ -1512,6 +1520,14 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_MSLICES(dev_priv) \
(INTEL_INFO(dev_priv)->has_mslices)
+/*
+ * Set this flag when the platform requires 64K GTT page sizes or larger for
+ * device local memory access. This flag also implies that we require, or
+ * at least support, the compact PT layout for the ppGTT when using the 64K
+ * GTT pages.
+ */
+#define HAS_64K_PAGES(dev_priv) (INTEL_INFO(dev_priv)->has_64k_pages)
+
#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
@@ -1525,7 +1541,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_PXP(dev_priv) ((IS_ENABLED(CONFIG_DRM_I915_PXP) && \
INTEL_INFO(dev_priv)->has_pxp) && \
- VDBOX_MASK(&dev_priv->gt))
+ VDBOX_MASK(to_gt(dev_priv)))
#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
@@ -1559,26 +1575,27 @@ static inline bool run_as_guest(void)
#define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \
IS_ALDERLAKE_S(dev_priv))
-static inline bool intel_vtd_active(void)
+static inline bool intel_vtd_active(struct drm_i915_private *i915)
{
-#ifdef CONFIG_INTEL_IOMMU
- if (intel_iommu_gfx_mapped)
+ if (device_iommu_mapped(i915->drm.dev))
return true;
-#endif
/* Running as a guest, we assume the host is enforcing VT'd */
return run_as_guest();
}
+void
+i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p);
+
static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
{
- return GRAPHICS_VER(dev_priv) >= 6 && intel_vtd_active();
+ return GRAPHICS_VER(dev_priv) >= 6 && intel_vtd_active(dev_priv);
}
static inline bool
intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915)
{
- return IS_BROXTON(i915) && intel_vtd_active();
+ return IS_BROXTON(i915) && intel_vtd_active(i915);
}
static inline bool
@@ -1607,7 +1624,8 @@ static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
* armed the work again.
*/
while (atomic_read(&i915->mm.free_count)) {
- flush_work(&i915->mm.free_work);
+ flush_delayed_work(&i915->mm.free_work);
+ flush_delayed_work(&i915->bdev.wq);
rcu_barrier();
}
}
@@ -1640,13 +1658,10 @@ i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
u64 size, u64 alignment, u64 flags);
-static inline struct i915_vma * __must_check
+struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
- u64 size, u64 alignment, u64 flags)
-{
- return i915_gem_object_ggtt_pin_ww(obj, NULL, view, size, alignment, flags);
-}
+ u64 size, u64 alignment, u64 flags);
int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
unsigned long flags);
@@ -1722,6 +1737,10 @@ int i915_gem_evict_vm(struct i915_address_space *vm);
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
phys_addr_t size);
+struct drm_i915_gem_object *
+__i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
+ const struct drm_i915_gem_object_ops *ops,
+ phys_addr_t size);
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 981e383d1a5d..915bf431f320 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -764,7 +764,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* perspective, requiring manual detiling by the client.
*/
if (!i915_gem_object_has_struct_page(obj) ||
- cpu_write_needs_clflush(obj))
+ i915_gem_cpu_write_needs_clflush(obj))
/* Note that the gtt paths might fail with non-page-backed user
* pointers (e.g. gtt mappings when moving data between
* textures). Fallback to the shmem path in that case.
@@ -877,6 +877,8 @@ i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
int ret;
+ GEM_WARN_ON(!ww);
+
if (flags & PIN_MAPPABLE &&
(!view || view->type == I915_GGTT_VIEW_NORMAL)) {
/*
@@ -936,10 +938,7 @@ new_vma:
return ERR_PTR(ret);
}
- if (ww)
- ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL);
- else
- ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
+ ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL);
if (ret)
return ERR_PTR(ret);
@@ -959,6 +958,29 @@ new_vma:
return vma;
}
+struct i915_vma * __must_check
+i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view,
+ u64 size, u64 alignment, u64 flags)
+{
+ struct i915_gem_ww_ctx ww;
+ struct i915_vma *ret;
+ int err;
+
+ for_i915_gem_ww(&ww, err, true) {
+ err = i915_gem_object_lock(obj, &ww);
+ if (err)
+ continue;
+
+ ret = i915_gem_object_ggtt_pin_ww(obj, &ww, view, size,
+ alignment, flags);
+ if (IS_ERR(ret))
+ err = PTR_ERR(ret);
+ }
+
+ return err ? ERR_PTR(err) : ret;
+}
+
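[Editor's note, not part of the patch] For readers unfamiliar with the ww-lock helper used in the new wrapper above, a bare-bones sketch of the for_i915_gem_ww() retry idiom; the -EDEADLK backoff happens inside the macro and is assumed here rather than shown by the patch:

	struct i915_gem_ww_ctx ww;
	int err;

	for_i915_gem_ww(&ww, err, true) {	/* true: interruptible waits */
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;	/* -EDEADLK backs off and retries the loop */

		/* ... do the pinning/mapping work under the ww lock ... */
	}
	/* err holds the final result once the loop exits */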
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -1005,7 +1027,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
obj->ops->adjust_lru(obj);
}
- if (i915_gem_object_has_pages(obj)) {
+ if (i915_gem_object_has_pages(obj) ||
+ i915_gem_object_has_self_managed_shrink_list(obj)) {
unsigned long flags;
spin_lock_irqsave(&i915->mm.obj_lock, flags);
@@ -1048,7 +1071,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
if (ret)
return ret;
- intel_uc_fetch_firmwares(&dev_priv->gt.uc);
+ intel_uc_fetch_firmwares(&to_gt(dev_priv)->uc);
intel_wopcm_init(&dev_priv->wopcm);
ret = i915_init_ggtt(dev_priv);
@@ -1068,7 +1091,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
*/
intel_init_clock_gating(dev_priv);
- ret = intel_gt_init(&dev_priv->gt);
+ ret = intel_gt_init(to_gt(dev_priv));
if (ret)
goto err_unlock;
@@ -1084,7 +1107,7 @@ err_unlock:
i915_gem_drain_workqueue(dev_priv);
if (ret != -EIO)
- intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
+ intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);
if (ret == -EIO) {
/*
@@ -1092,10 +1115,10 @@ err_unlock:
* as wedged. But we only want to do this when the GPU is angry,
* for all other failure, such as an allocation failure, bail.
*/
- if (!intel_gt_is_wedged(&dev_priv->gt)) {
+ if (!intel_gt_is_wedged(to_gt(dev_priv))) {
i915_probe_error(dev_priv,
"Failed to initialize GPU, declaring it wedged!\n");
- intel_gt_set_wedged(&dev_priv->gt);
+ intel_gt_set_wedged(to_gt(dev_priv));
}
/* Minimal basic recovery for KMS */
@@ -1126,7 +1149,7 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
i915_gem_suspend_late(dev_priv);
- intel_gt_driver_remove(&dev_priv->gt);
+ intel_gt_driver_remove(to_gt(dev_priv));
dev_priv->uabi_engines = RB_ROOT;
/* Flush any outstanding unpin_work. */
@@ -1137,9 +1160,9 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
- intel_gt_driver_release(&dev_priv->gt);
+ intel_gt_driver_release(to_gt(dev_priv));
- intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
+ intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);
i915_gem_drain_freed_objects(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index 77490cb5ff9c..7f80ad247bc8 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -13,7 +13,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_private *i915 = to_i915(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
- const struct sseu_dev_info *sseu = &i915->gt.info.sseu;
+ const struct sseu_dev_info *sseu = &to_gt(i915)->info.sseu;
drm_i915_getparam_t *param = data;
int value = 0;
@@ -82,8 +82,8 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
break;
case I915_PARAM_HAS_GPU_RESET:
value = i915->params.enable_hangcheck &&
- intel_has_gpu_reset(&i915->gt);
- if (value && intel_has_reset_engine(&i915->gt))
+ intel_has_gpu_reset(to_gt(i915));
+ if (value && intel_has_reset_engine(to_gt(i915)))
value = 2;
break;
case I915_PARAM_HAS_RESOURCE_STREAMER:
@@ -96,7 +96,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = sseu->min_eu_in_pool;
break;
case I915_PARAM_HUC_STATUS:
- value = intel_huc_check_status(&i915->gt.uc.huc);
+ value = intel_huc_check_status(&to_gt(i915)->uc.huc);
if (value < 0)
return value;
break;
@@ -158,7 +158,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
break;
case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
- value = i915->gt.clock_frequency;
+ value = to_gt(i915)->clock_frequency;
break;
case I915_PARAM_MMAP_GTT_COHERENT:
value = INTEL_INFO(i915)->has_coherent_ggtt;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 2a2d7643b551..5ae812d60abe 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -48,8 +48,9 @@
#include "i915_gpu_error.h"
#include "i915_memcpy.h"
#include "i915_scatterlist.h"
+#include "i915_vma_snapshot.h"
-#define ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
+#define ALLOW_FAIL (__GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)
static void __sg_set_buf(struct scatterlist *sg,
@@ -275,16 +276,16 @@ static bool compress_start(struct i915_vma_compress *c)
static void *compress_next_page(struct i915_vma_compress *c,
struct i915_vma_coredump *dst)
{
- void *page;
+ void *page_addr;
+ struct page *page;
- if (dst->page_count >= dst->num_pages)
- return ERR_PTR(-ENOSPC);
-
- page = pool_alloc(&c->pool, ALLOW_FAIL);
- if (!page)
+ page_addr = pool_alloc(&c->pool, ALLOW_FAIL);
+ if (!page_addr)
return ERR_PTR(-ENOMEM);
- return dst->pages[dst->page_count++] = page;
+ page = virt_to_page(page_addr);
+ list_add_tail(&page->lru, &dst->page_list);
+ return page_addr;
}
static int compress_page(struct i915_vma_compress *c,
@@ -397,7 +398,7 @@ static int compress_page(struct i915_vma_compress *c,
if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
memcpy(ptr, src, PAGE_SIZE);
- dst->pages[dst->page_count++] = ptr;
+ list_add_tail(&virt_to_page(ptr)->lru, &dst->page_list);
cond_resched();
return 0;
@@ -504,7 +505,7 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
const char *header,
const struct i915_gem_context_coredump *ctx)
{
- const u32 period = m->i915->gt.clock_period_ns;
+ const u32 period = to_gt(m->i915)->clock_period_ns;
err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",
header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
@@ -614,7 +615,7 @@ static void print_error_vma(struct drm_i915_error_state_buf *m,
const struct i915_vma_coredump *vma)
{
char out[ASCII85_BUFSZ];
- int page;
+ struct page *page;
if (!vma)
return;
@@ -628,16 +629,17 @@ static void print_error_vma(struct drm_i915_error_state_buf *m,
err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes);
err_compression_marker(m);
- for (page = 0; page < vma->page_count; page++) {
+ list_for_each_entry(page, &vma->page_list, lru) {
int i, len;
+ const u32 *addr = page_address(page);
len = PAGE_SIZE;
- if (page == vma->page_count - 1)
+ if (page == list_last_entry(&vma->page_list, typeof(*page), lru))
len -= vma->unused;
len = ascii85_encode_len(len);
for (i = 0; i < len; i++)
- err_puts(m, ascii85_encode(vma->pages[page][i], out));
+ err_puts(m, ascii85_encode(addr[i], out));
}
err_puts(m, "\n");
}
@@ -946,10 +948,12 @@ static void i915_vma_coredump_free(struct i915_vma_coredump *vma)
{
while (vma) {
struct i915_vma_coredump *next = vma->next;
- int page;
+ struct page *page, *n;
- for (page = 0; page < vma->page_count; page++)
- free_page((unsigned long)vma->pages[page]);
+ list_for_each_entry_safe(page, n, &vma->page_list, lru) {
+ list_del_init(&page->lru);
+ __free_page(page);
+ }
kfree(vma);
vma = next;
@@ -1009,25 +1013,21 @@ void __i915_gpu_coredump_free(struct kref *error_ref)
static struct i915_vma_coredump *
i915_vma_coredump_create(const struct intel_gt *gt,
- const struct i915_vma *vma,
- const char *name,
+ const struct i915_vma_snapshot *vsnap,
struct i915_vma_compress *compress)
{
struct i915_ggtt *ggtt = gt->ggtt;
const u64 slot = ggtt->error_capture.start;
struct i915_vma_coredump *dst;
- unsigned long num_pages;
struct sgt_iter iter;
int ret;
might_sleep();
- if (!vma || !vma->pages || !compress)
+ if (!vsnap || !vsnap->pages || !compress)
return NULL;
- num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
- num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worstcase zlib growth */
- dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), ALLOW_FAIL);
+ dst = kmalloc(sizeof(*dst), ALLOW_FAIL);
if (!dst)
return NULL;
@@ -1036,14 +1036,13 @@ i915_vma_coredump_create(const struct intel_gt *gt,
return NULL;
}
- strcpy(dst->name, name);
+ INIT_LIST_HEAD(&dst->page_list);
+ strcpy(dst->name, vsnap->name);
dst->next = NULL;
- dst->gtt_offset = vma->node.start;
- dst->gtt_size = vma->node.size;
- dst->gtt_page_sizes = vma->page_sizes.gtt;
- dst->num_pages = num_pages;
- dst->page_count = 0;
+ dst->gtt_offset = vsnap->gtt_offset;
+ dst->gtt_size = vsnap->gtt_size;
+ dst->gtt_page_sizes = vsnap->page_sizes;
dst->unused = 0;
ret = -EINVAL;
@@ -1051,7 +1050,7 @@ i915_vma_coredump_create(const struct intel_gt *gt,
void __iomem *s;
dma_addr_t dma;
- for_each_sgt_daddr(dma, iter, vma->pages) {
+ for_each_sgt_daddr(dma, iter, vsnap->pages) {
mutex_lock(&ggtt->error_mutex);
ggtt->vm.insert_page(&ggtt->vm, dma, slot,
I915_CACHE_NONE, 0);
@@ -1069,11 +1068,11 @@ i915_vma_coredump_create(const struct intel_gt *gt,
if (ret)
break;
}
- } else if (__i915_gem_object_is_lmem(vma->obj)) {
- struct intel_memory_region *mem = vma->obj->mm.region;
+ } else if (vsnap->mr && vsnap->mr->type != INTEL_MEMORY_SYSTEM) {
+ struct intel_memory_region *mem = vsnap->mr;
dma_addr_t dma;
- for_each_sgt_daddr(dma, iter, vma->pages) {
+ for_each_sgt_daddr(dma, iter, vsnap->pages) {
void __iomem *s;
s = io_mapping_map_wc(&mem->iomap,
@@ -1089,7 +1088,7 @@ i915_vma_coredump_create(const struct intel_gt *gt,
} else {
struct page *page;
- for_each_sgt_page(page, iter, vma->pages) {
+ for_each_sgt_page(page, iter, vsnap->pages) {
void *s;
drm_clflush_pages(&page, 1);
@@ -1106,8 +1105,13 @@ i915_vma_coredump_create(const struct intel_gt *gt,
}
if (ret || compress_flush(compress, dst)) {
- while (dst->page_count--)
- pool_free(&compress->pool, dst->pages[dst->page_count]);
+ struct page *page, *n;
+
+ list_for_each_entry_safe_reverse(page, n, &dst->page_list, lru) {
+ list_del_init(&page->lru);
+ pool_free(&compress->pool, page_address(page));
+ }
+
kfree(dst);
dst = NULL;
}
@@ -1320,38 +1324,72 @@ static bool record_context(struct i915_gem_context_coredump *e,
struct intel_engine_capture_vma {
struct intel_engine_capture_vma *next;
- struct i915_vma *vma;
+ struct i915_vma_snapshot *vsnap;
char name[16];
+ bool lockdep_cookie;
};
static struct intel_engine_capture_vma *
-capture_vma(struct intel_engine_capture_vma *next,
- struct i915_vma *vma,
- const char *name,
- gfp_t gfp)
+capture_vma_snapshot(struct intel_engine_capture_vma *next,
+ struct i915_vma_snapshot *vsnap,
+ gfp_t gfp)
{
struct intel_engine_capture_vma *c;
- if (!vma)
+ if (!i915_vma_snapshot_present(vsnap))
return next;
c = kmalloc(sizeof(*c), gfp);
if (!c)
return next;
- if (!i915_active_acquire_if_busy(&vma->active)) {
+ if (!i915_vma_snapshot_resource_pin(vsnap, &c->lockdep_cookie)) {
kfree(c);
return next;
}
- strcpy(c->name, name);
- c->vma = vma; /* reference held while active */
+ strcpy(c->name, vsnap->name);
+ c->vsnap = vsnap;
+ i915_vma_snapshot_get(vsnap);
c->next = next;
return c;
}
static struct intel_engine_capture_vma *
+capture_vma(struct intel_engine_capture_vma *next,
+ struct i915_vma *vma,
+ const char *name,
+ gfp_t gfp)
+{
+ struct i915_vma_snapshot *vsnap;
+
+ if (!vma)
+ return next;
+
+ /*
+ * If the vma isn't pinned, then the vma should be snapshotted
+ * to a struct i915_vma_snapshot at command submission time.
+ * Not here.
+ */
+ GEM_WARN_ON(!i915_vma_is_pinned(vma));
+ if (!i915_vma_is_pinned(vma))
+ return next;
+
+ vsnap = i915_vma_snapshot_alloc(gfp);
+ if (!vsnap)
+ return next;
+
+ i915_vma_snapshot_init(vsnap, vma, name);
+ next = capture_vma_snapshot(next, vsnap, gfp);
+
+ /* FIXME: Replace on async unbind. */
+ i915_vma_snapshot_put(vsnap);
+
+ return next;
+}
+
+static struct intel_engine_capture_vma *
capture_user(struct intel_engine_capture_vma *capture,
const struct i915_request *rq,
gfp_t gfp)
@@ -1359,7 +1397,7 @@ capture_user(struct intel_engine_capture_vma *capture,
struct i915_capture_list *c;
for (c = rq->capture_list; c; c = c->next)
- capture = capture_vma(capture, c->vma, "user", gfp);
+ capture = capture_vma_snapshot(capture, c->vma_snapshot, gfp);
return capture;
}
@@ -1373,6 +1411,33 @@ static void add_vma(struct intel_engine_coredump *ee,
}
}
+static struct i915_vma_coredump *
+create_vma_coredump(const struct intel_gt *gt, struct i915_vma *vma,
+ const char *name, struct i915_vma_compress *compress)
+{
+ struct i915_vma_coredump *ret;
+ struct i915_vma_snapshot tmp;
+
+ if (!vma)
+ return NULL;
+
+ GEM_WARN_ON(!i915_vma_is_pinned(vma));
+ i915_vma_snapshot_init_onstack(&tmp, vma, name);
+ ret = i915_vma_coredump_create(gt, &tmp, compress);
+ i915_vma_snapshot_put_onstack(&tmp);
+
+ return ret;
+}
+
+static void add_vma_coredump(struct intel_engine_coredump *ee,
+ const struct intel_gt *gt,
+ struct i915_vma *vma,
+ const char *name,
+ struct i915_vma_compress *compress)
+{
+ add_vma(ee, create_vma_coredump(gt, vma, name, compress));
+}
+
struct intel_engine_coredump *
intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp)
{
@@ -1406,7 +1471,7 @@ intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
* as the simplest method to avoid being overwritten
* by userspace.
*/
- vma = capture_vma(vma, rq->batch, "batch", gfp);
+ vma = capture_vma_snapshot(vma, &rq->batch_snapshot, gfp);
vma = capture_user(vma, rq, gfp);
vma = capture_vma(vma, rq->ring->vma, "ring", gfp);
vma = capture_vma(vma, rq->context->state, "HW context", gfp);
@@ -1427,30 +1492,24 @@ intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
while (capture) {
struct intel_engine_capture_vma *this = capture;
- struct i915_vma *vma = this->vma;
+ struct i915_vma_snapshot *vsnap = this->vsnap;
add_vma(ee,
i915_vma_coredump_create(engine->gt,
- vma, this->name,
- compress));
+ vsnap, compress));
- i915_active_release(&vma->active);
+ i915_vma_snapshot_resource_unpin(vsnap, this->lockdep_cookie);
+ i915_vma_snapshot_put(vsnap);
capture = this->next;
kfree(this);
}
- add_vma(ee,
- i915_vma_coredump_create(engine->gt,
- engine->status_page.vma,
- "HW Status",
- compress));
+ add_vma_coredump(ee, engine->gt, engine->status_page.vma,
+ "HW Status", compress);
- add_vma(ee,
- i915_vma_coredump_create(engine->gt,
- engine->wa_ctx.vma,
- "WA context",
- compress));
+ add_vma_coredump(ee, engine->gt, engine->wa_ctx.vma,
+ "WA context", compress);
}
static struct intel_engine_coredump *
@@ -1486,17 +1545,25 @@ capture_engine(struct intel_engine_cs *engine,
}
}
if (rq)
- capture = intel_engine_coredump_add_request(ee, rq,
- ATOMIC_MAYFAIL);
+ rq = i915_request_get_rcu(rq);
+
+ if (!rq)
+ goto no_request_capture;
+
+ capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL);
if (!capture) {
-no_request_capture:
- kfree(ee);
- return NULL;
+ i915_request_put(rq);
+ goto no_request_capture;
}
intel_engine_coredump_add_vma(ee, capture, compress);
+ i915_request_put(rq);
return ee;
+
+no_request_capture:
+ kfree(ee);
+ return NULL;
}
static void
@@ -1550,10 +1617,8 @@ gt_record_uc(struct intel_gt_coredump *gt,
*/
error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL);
error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL);
- error_uc->guc_log =
- i915_vma_coredump_create(gt->_gt,
- uc->guc.log.vma, "GuC log buffer",
- compress);
+ error_uc->guc_log = create_vma_coredump(gt->_gt, uc->guc.log.vma,
+ "GuC log buffer", compress);
return error_uc;
}
@@ -1750,10 +1815,7 @@ static void capture_gen(struct i915_gpu_coredump *error)
error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count);
error->suspended = i915->runtime_pm.suspended;
- error->iommu = -1;
-#ifdef CONFIG_INTEL_IOMMU
- error->iommu = intel_iommu_gfx_mapped;
-#endif
+ error->iommu = intel_vtd_active(i915);
error->reset_count = i915_reset_count(&i915->gpu_error);
error->suspend_count = i915->suspend_count;
@@ -1784,7 +1846,7 @@ i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
error->time = ktime_get_real();
error->boottime = ktime_get_boottime();
- error->uptime = ktime_sub(ktime_get(), i915->gt.last_init_time);
+ error->uptime = ktime_sub(ktime_get(), to_gt(i915)->last_init_time);
error->capture = jiffies;
capture_gen(error);
@@ -1839,8 +1901,8 @@ void i915_vma_capture_finish(struct intel_gt_coredump *gt,
kfree(compress);
}
-struct i915_gpu_coredump *
-i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask)
+static struct i915_gpu_coredump *
+__i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
struct drm_i915_private *i915 = gt->i915;
struct i915_gpu_coredump *error;
@@ -1881,6 +1943,22 @@ i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask)
return error;
}
+struct i915_gpu_coredump *
+i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask)
+{
+ static DEFINE_MUTEX(capture_mutex);
+ int ret = mutex_lock_interruptible(&capture_mutex);
+ struct i915_gpu_coredump *dump;
+
+ if (ret)
+ return ERR_PTR(ret);
+
+ dump = __i915_gpu_coredump(gt, engine_mask);
+ mutex_unlock(&capture_mutex);
+
+ return dump;
+}
+
void i915_error_state_store(struct i915_gpu_coredump *error)
{
struct drm_i915_private *i915;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index b98d8cdbe4f2..5aedf5129814 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -39,10 +39,8 @@ struct i915_vma_coredump {
u64 gtt_size;
u32 gtt_page_sizes;
- int num_pages;
- int page_count;
int unused;
- u32 *pages[];
+ struct list_head page_list;
};
struct i915_request_coredump {
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 71171338f2df..21f75b069fa8 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1040,7 +1040,7 @@ static void ivb_parity_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), l3_parity.error_work);
- struct intel_gt *gt = &dev_priv->gt;
+ struct intel_gt *gt = to_gt(dev_priv);
u32 error_status, row, bank, subbank;
char *parity_event[6];
u32 misccpctl;
@@ -1718,9 +1718,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
if (gt_iir)
- gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
+ gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
if (pm_iir)
- gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
+ gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -1777,7 +1777,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
- gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
+ gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
hotplug_status = i9xx_hpd_irq_ack(dev_priv);
@@ -2108,7 +2108,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
}
if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
- gen5_rps_irq_handler(&dev_priv->gt.rps);
+ gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
}
static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
@@ -2189,9 +2189,9 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
if (gt_iir) {
raw_reg_write(regs, GTIIR, gt_iir);
if (GRAPHICS_VER(i915) >= 6)
- gen6_gt_irq_handler(&i915->gt, gt_iir);
+ gen6_gt_irq_handler(to_gt(i915), gt_iir);
else
- gen5_gt_irq_handler(&i915->gt, gt_iir);
+ gen5_gt_irq_handler(to_gt(i915), gt_iir);
ret = IRQ_HANDLED;
}
@@ -2209,7 +2209,7 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
if (pm_iir) {
raw_reg_write(regs, GEN6_PMIIR, pm_iir);
- gen6_rps_irq_handler(&i915->gt.rps, pm_iir);
+ gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
ret = IRQ_HANDLED;
}
}
@@ -2635,7 +2635,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
}
/* Find, queue (onto bottom-halves), then clear each source */
- gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
+ gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
if (master_ctl & ~GEN8_GT_IRQS) {
@@ -2715,7 +2715,7 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
struct drm_i915_private *i915 = arg;
void __iomem * const regs = i915->uncore.regs;
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
u32 master_ctl;
u32 gu_misc_iir;
@@ -2771,8 +2771,8 @@ static inline void dg1_master_intr_enable(void __iomem * const regs)
static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
struct drm_i915_private * const i915 = arg;
- struct intel_gt *gt = &i915->gt;
- void __iomem * const regs = i915->uncore.regs;
+ struct intel_gt *gt = to_gt(i915);
+ void __iomem * const regs = gt->uncore->regs;
u32 master_tile_ctl, master_ctl;
u32 gu_misc_iir;
@@ -3075,7 +3075,7 @@ static void ilk_irq_reset(struct drm_i915_private *dev_priv)
intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
}
- gen5_gt_irq_reset(&dev_priv->gt);
+ gen5_gt_irq_reset(to_gt(dev_priv));
ibx_irq_reset(dev_priv);
}
@@ -3085,7 +3085,7 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
- gen5_gt_irq_reset(&dev_priv->gt);
+ gen5_gt_irq_reset(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -3119,7 +3119,7 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
gen8_master_intr_disable(dev_priv->uncore.regs);
- gen8_gt_irq_reset(&dev_priv->gt);
+ gen8_gt_irq_reset(to_gt(dev_priv));
gen8_display_irq_reset(dev_priv);
GEN3_IRQ_RESET(uncore, GEN8_PCU_);
@@ -3173,11 +3173,12 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_gt *gt = to_gt(dev_priv);
+ struct intel_uncore *uncore = gt->uncore;
gen11_master_intr_disable(dev_priv->uncore.regs);
- gen11_gt_irq_reset(&dev_priv->gt);
+ gen11_gt_irq_reset(gt);
gen11_display_irq_reset(dev_priv);
GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
@@ -3186,11 +3187,12 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv)
static void dg1_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_gt *gt = to_gt(dev_priv);
+ struct intel_uncore *uncore = gt->uncore;
dg1_master_intr_disable(dev_priv->uncore.regs);
- gen11_gt_irq_reset(&dev_priv->gt);
+ gen11_gt_irq_reset(gt);
gen11_display_irq_reset(dev_priv);
GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
@@ -3250,7 +3252,7 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
- gen8_gt_irq_reset(&dev_priv->gt);
+ gen8_gt_irq_reset(to_gt(dev_priv));
GEN3_IRQ_RESET(uncore, GEN8_PCU_);
@@ -3707,7 +3709,7 @@ static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
ibx_irq_postinstall(dev_priv);
- gen5_gt_irq_postinstall(&dev_priv->gt);
+ gen5_gt_irq_postinstall(to_gt(dev_priv));
GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
display_mask | extra_mask);
@@ -3744,7 +3746,7 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
- gen5_gt_irq_postinstall(&dev_priv->gt);
+ gen5_gt_irq_postinstall(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -3850,7 +3852,7 @@ static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
else if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_postinstall(dev_priv);
- gen8_gt_irq_postinstall(&dev_priv->gt);
+ gen8_gt_irq_postinstall(to_gt(dev_priv));
gen8_de_irq_postinstall(dev_priv);
gen8_master_intr_enable(dev_priv->uncore.regs);
@@ -3869,13 +3871,14 @@ static void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_gt *gt = to_gt(dev_priv);
+ struct intel_uncore *uncore = gt->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
icp_irq_postinstall(dev_priv);
- gen11_gt_irq_postinstall(&dev_priv->gt);
+ gen11_gt_irq_postinstall(gt);
gen11_de_irq_postinstall(dev_priv);
GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
@@ -3886,10 +3889,11 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_gt *gt = to_gt(dev_priv);
+ struct intel_uncore *uncore = gt->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
- gen11_gt_irq_postinstall(&dev_priv->gt);
+ gen11_gt_irq_postinstall(gt);
GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
@@ -3900,13 +3904,13 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
GEN11_DISPLAY_IRQ_ENABLE);
}
- dg1_master_intr_enable(dev_priv->uncore.regs);
- intel_uncore_posting_read(&dev_priv->uncore, DG1_MSTR_TILE_INTR);
+ dg1_master_intr_enable(uncore->regs);
+ intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
}
static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
- gen8_gt_irq_postinstall(&dev_priv->gt);
+ gen8_gt_irq_postinstall(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -4069,7 +4073,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_cs_irq(dev_priv->gt.engine[RCS0], iir);
+ intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4177,7 +4181,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_cs_irq(dev_priv->gt.engine[RCS0], iir);
+ intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4322,11 +4326,11 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_cs_irq(dev_priv->gt.engine[RCS0],
+ intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
iir);
if (iir & I915_BSD_USER_INTERRUPT)
- intel_engine_cs_irq(dev_priv->gt.engine[VCS0],
+ intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
iir >> 25);
if (iir & I915_MASTER_ERROR_INTERRUPT)
@@ -4377,7 +4381,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
- dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;
+ to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;
if (!HAS_DISPLAY(dev_priv))
return;
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index e07f4cfea63a..525ae832aa9a 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -140,6 +140,9 @@ i915_param_named_unsafe(invert_brightness, int, 0400,
i915_param_named(disable_display, bool, 0400,
"Disable display (default: false)");
+i915_param_named(memtest, bool, 0400,
+ "Perform a read/write test of all device memory on module load (default: off)");
+
i915_param_named(mmio_debug, int, 0400,
"Enable the MMIO debug code for the first N failures (default: off). "
"This may negatively affect performance.");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 8d725b64592d..c9d53ff910a0 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -64,6 +64,7 @@ struct drm_printer;
param(char *, guc_firmware_path, NULL, 0400) \
param(char *, huc_firmware_path, NULL, 0400) \
param(char *, dmc_firmware_path, NULL, 0400) \
+ param(bool, memtest, false, 0400) \
param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO), 0600) \
param(int, edp_vswing, 0, 0400) \
param(unsigned int, reset, 3, 0600) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 960c358990bc..8261b6455747 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -31,8 +31,8 @@
#define PLATFORM(x) .platform = (x)
#define GEN(x) \
- .graphics_ver = (x), \
- .media_ver = (x), \
+ .graphics.ver = (x), \
+ .media.ver = (x), \
.display.ver = (x)
#define I845_PIPE_OFFSETS \
@@ -904,7 +904,7 @@ static const struct intel_device_info rkl_info = {
static const struct intel_device_info dg1_info = {
GEN12_FEATURES,
DGFX_FEATURES,
- .graphics_rel = 10,
+ .graphics.rel = 10,
PLATFORM(INTEL_DG1),
.display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
.require_force_probe = 1,
@@ -998,8 +998,8 @@ static const struct intel_device_info adl_p_info = {
I915_GTT_PAGE_SIZE_2M
#define XE_HP_FEATURES \
- .graphics_ver = 12, \
- .graphics_rel = 50, \
+ .graphics.ver = 12, \
+ .graphics.rel = 50, \
XE_HP_PAGE_SIZES, \
.dma_mask_size = 46, \
.has_64bit_reloc = 1, \
@@ -1017,8 +1017,8 @@ static const struct intel_device_info adl_p_info = {
.ppgtt_type = INTEL_PPGTT_FULL
#define XE_HPM_FEATURES \
- .media_ver = 12, \
- .media_rel = 50
+ .media.ver = 12, \
+ .media.rel = 50
__maybe_unused
static const struct intel_device_info xehpsdv_info = {
@@ -1027,6 +1027,7 @@ static const struct intel_device_info xehpsdv_info = {
DGFX_FEATURES,
PLATFORM(INTEL_XEHPSDV),
.display = { },
+ .has_64k_pages = 1,
.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) |
BIT(VECS0) | BIT(VECS1) | BIT(VECS2) | BIT(VECS3) |
@@ -1041,9 +1042,10 @@ static const struct intel_device_info dg2_info = {
XE_HPM_FEATURES,
XE_LPD_FEATURES,
DGFX_FEATURES,
- .graphics_rel = 55,
- .media_rel = 55,
+ .graphics.rel = 55,
+ .media.rel = 55,
PLATFORM(INTEL_DG2),
+ .has_64k_pages = 1,
.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) |
BIT(VECS0) | BIT(VECS1) |
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 2f01b8c0284c..170bba913c30 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -4443,7 +4443,7 @@ void i915_perf_init(struct drm_i915_private *i915)
mutex_init(&perf->lock);
/* Choose a representative limit */
- oa_sample_rate_hard_limit = i915->gt.clock_frequency / 2;
+ oa_sample_rate_hard_limit = to_gt(i915)->clock_frequency / 2;
mutex_init(&perf->metrics_lock);
idr_init_base(&perf->metrics_idr, 1);
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 0b488d49694c..ea655161793e 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -210,8 +210,8 @@ static void init_rc6(struct i915_pmu *pmu)
struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
intel_wakeref_t wakeref;
- with_intel_runtime_pm(i915->gt.uncore->rpm, wakeref) {
- pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
+ with_intel_runtime_pm(to_gt(i915)->uncore->rpm, wakeref) {
+ pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(to_gt(i915));
pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur =
pmu->sample[__I915_SAMPLE_RC6].cur;
pmu->sleep_last = ktime_get_raw();
@@ -222,7 +222,7 @@ static void park_rc6(struct drm_i915_private *i915)
{
struct i915_pmu *pmu = &i915->pmu;
- pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
+ pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(to_gt(i915));
pmu->sleep_last = ktime_get_raw();
}
@@ -419,7 +419,7 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
struct drm_i915_private *i915 =
container_of(hrtimer, struct drm_i915_private, pmu.timer);
struct i915_pmu *pmu = &i915->pmu;
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
unsigned int period_ns;
ktime_t now;
@@ -476,7 +476,7 @@ engine_event_status(struct intel_engine_cs *engine,
static int
config_status(struct drm_i915_private *i915, u64 config)
{
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
switch (config) {
case I915_PMU_ACTUAL_FREQUENCY:
@@ -601,10 +601,10 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
val = READ_ONCE(pmu->irq_count);
break;
case I915_PMU_RC6_RESIDENCY:
- val = get_rc6(&i915->gt);
+ val = get_rc6(to_gt(i915));
break;
case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
- val = ktime_to_ns(intel_gt_get_awake_time(&i915->gt));
+ val = ktime_to_ns(intel_gt_get_awake_time(to_gt(i915)));
break;
}
}
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 51b368be0fc4..2dfbc22857a3 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -31,7 +31,7 @@ static int copy_query_item(void *query_hdr, size_t query_sz,
static int query_topology_info(struct drm_i915_private *dev_priv,
struct drm_i915_query_item *query_item)
{
- const struct sseu_dev_info *sseu = &dev_priv->gt.info.sseu;
+ const struct sseu_dev_info *sseu = &to_gt(dev_priv)->info.sseu;
struct drm_i915_query_topology_info topo;
u32 slice_length, subslice_length, eu_length, total_length;
int ret;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5b502c8f0cfb..e20e832162b4 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -501,6 +501,18 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ECOBITS_PPGTT_CACHE64B (3 << 8)
#define ECOBITS_PPGTT_CACHE4B (0 << 8)
+#define GEN12_GAMCNTRL_CTRL _MMIO(0xcf54)
+#define INVALIDATION_BROADCAST_MODE_DIS REG_BIT(12)
+#define GLOBAL_INVALIDATION_MODE REG_BIT(2)
+
+#define GEN12_GAMSTLB_CTRL _MMIO(0xcf4c)
+#define CONTROL_BLOCK_CLKGATE_DIS REG_BIT(12)
+#define EGRESS_BLOCK_CLKGATE_DIS REG_BIT(11)
+#define TAG_BLOCK_CLKGATE_DIS REG_BIT(7)
+
+#define GEN12_MERT_MOD_CTRL _MMIO(0xcf28)
+#define FORCE_MISS_FTLB REG_BIT(3)
+
#define GAB_CTL _MMIO(0x24000)
#define GAB_CTL_CONT_AFTER_PAGEFAULT (1 << 8)
@@ -722,6 +734,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN12_OA_TLB_INV_CR _MMIO(0xceec)
+#define GEN12_SQCM _MMIO(0x8724)
+#define EN_32B_ACCESS REG_BIT(30)
+
/* Gen12 OAR unit */
#define GEN12_OAR_OACONTROL _MMIO(0x2960)
#define GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT 1
@@ -773,6 +788,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define EU_PERF_CNTL5 _MMIO(0xe55c)
#define EU_PERF_CNTL6 _MMIO(0xe65c)
+#define RT_CTRL _MMIO(0xe530)
+#define DIS_NULL_QUERY REG_BIT(10)
+
/*
* OA Boolean state
*/
@@ -2666,6 +2684,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */
#define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */
+#define GUCPMTIMESTAMP _MMIO(0xC3E8)
+
/* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */
#define GEN8_RING_CS_GPR(base, n) _MMIO((base) + 0x600 + (n) * 8)
#define GEN8_RING_CS_GPR_UDW(base, n) _MMIO((base) + 0x600 + (n) * 8 + 4)
@@ -2776,6 +2796,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define VDBOX_CGCTL3F10(base) _MMIO((base) + 0x3f10)
#define IECPUNIT_CLKGATE_DIS REG_BIT(22)
+#define VDBOX_CGCTL3F18(base) _MMIO((base) + 0x3f18)
+#define ALNUNIT_CLKGATE_DIS REG_BIT(13)
+
#define ERROR_GEN6 _MMIO(0x40a0)
#define GEN7_ERR_INT _MMIO(0x44040)
#define ERR_INT_POISON (1 << 31)
@@ -2874,6 +2897,15 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2)
#define GEN11_ENABLE_32_PLANE_MODE (1 << 7)
+#define SCCGCTL94DC _MMIO(0x94dc)
+#define CG3DDISURB REG_BIT(14)
+
+#define MLTICTXCTL _MMIO(0xb170)
+#define TDONRENDER REG_BIT(2)
+
+#define L3SQCREG1_CCS0 _MMIO(0xb200)
+#define FLUSHALLNONCOH REG_BIT(5)
+
/* WaClearTdlStateAckDirtyBits */
#define GEN8_STATE_ACK _MMIO(0x20F0)
#define GEN9_STATE_ACK_SLICE1 _MMIO(0x20F8)
@@ -3110,7 +3142,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN9_RCS_FE_FSM2 _MMIO(0x22a4)
#define GEN10_CACHE_MODE_SS _MMIO(0xe420)
-#define FLOAT_BLEND_OPTIMIZATION_ENABLE (1 << 4)
+#define ENABLE_PREFETCH_INTO_IC REG_BIT(3)
+#define FLOAT_BLEND_OPTIMIZATION_ENABLE REG_BIT(4)
/* Fuse readout registers for GT */
#define HSW_PAVP_FUSE1 _MMIO(0x911C)
@@ -4289,21 +4322,62 @@ enum {
/*
* GEN10 clock gating regs
*/
+
+#define UNSLCGCTL9440 _MMIO(0x9440)
+#define GAMTLBOACS_CLKGATE_DIS REG_BIT(28)
+#define GAMTLBVDBOX5_CLKGATE_DIS REG_BIT(27)
+#define GAMTLBVDBOX6_CLKGATE_DIS REG_BIT(26)
+#define GAMTLBVDBOX3_CLKGATE_DIS REG_BIT(24)
+#define GAMTLBVDBOX4_CLKGATE_DIS REG_BIT(23)
+#define GAMTLBVDBOX7_CLKGATE_DIS REG_BIT(22)
+#define GAMTLBVDBOX2_CLKGATE_DIS REG_BIT(21)
+#define GAMTLBVDBOX0_CLKGATE_DIS REG_BIT(17)
+#define GAMTLBKCR_CLKGATE_DIS REG_BIT(16)
+#define GAMTLBGUC_CLKGATE_DIS REG_BIT(15)
+#define GAMTLBBLT_CLKGATE_DIS REG_BIT(14)
+#define GAMTLBVDBOX1_CLKGATE_DIS REG_BIT(6)
+
+#define UNSLCGCTL9444 _MMIO(0x9444)
+#define GAMTLBGFXA0_CLKGATE_DIS REG_BIT(30)
+#define GAMTLBGFXA1_CLKGATE_DIS REG_BIT(29)
+#define GAMTLBCOMPA0_CLKGATE_DIS REG_BIT(28)
+#define GAMTLBCOMPA1_CLKGATE_DIS REG_BIT(27)
+#define GAMTLBCOMPB0_CLKGATE_DIS REG_BIT(26)
+#define GAMTLBCOMPB1_CLKGATE_DIS REG_BIT(25)
+#define GAMTLBCOMPC0_CLKGATE_DIS REG_BIT(24)
+#define GAMTLBCOMPC1_CLKGATE_DIS REG_BIT(23)
+#define GAMTLBCOMPD0_CLKGATE_DIS REG_BIT(22)
+#define GAMTLBCOMPD1_CLKGATE_DIS REG_BIT(21)
+#define GAMTLBMERT_CLKGATE_DIS REG_BIT(20)
+#define GAMTLBVEBOX3_CLKGATE_DIS REG_BIT(19)
+#define GAMTLBVEBOX2_CLKGATE_DIS REG_BIT(18)
+#define GAMTLBVEBOX1_CLKGATE_DIS REG_BIT(17)
+#define GAMTLBVEBOX0_CLKGATE_DIS REG_BIT(16)
+#define LTCDD_CLKGATE_DIS REG_BIT(10)
+
#define SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4)
#define SARBUNIT_CLKGATE_DIS (1 << 5)
#define RCCUNIT_CLKGATE_DIS (1 << 7)
#define MSCUNIT_CLKGATE_DIS (1 << 10)
+#define NODEDSS_CLKGATE_DIS REG_BIT(12)
#define L3_CLKGATE_DIS REG_BIT(16)
#define L3_CR2X_CLKGATE_DIS REG_BIT(17)
#define SUBSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9524)
-#define GWUNIT_CLKGATE_DIS (1 << 16)
+#define DSS_ROUTER_CLKGATE_DIS REG_BIT(28)
+#define GWUNIT_CLKGATE_DIS REG_BIT(16)
#define SUBSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x9528)
#define CPSSUNIT_CLKGATE_DIS REG_BIT(9)
+#define SSMCGCTL9530 _MMIO(0x9530)
+#define RTFUNIT_CLKGATE_DIS REG_BIT(18)
+
#define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434)
#define VFUNIT_CLKGATE_DIS REG_BIT(20)
+#define TSGUNIT_CLKGATE_DIS REG_BIT(17) /* XEHPSDV */
+#define CG3DDISCFEG_CLKGATE_DIS REG_BIT(17) /* DG2 */
+#define GAMEDIA_CLKGATE_DIS REG_BIT(11)
#define HSUNIT_CLKGATE_DIS REG_BIT(8)
#define VSUNIT_CLKGATE_DIS REG_BIT(3)
@@ -8368,6 +8442,9 @@ enum {
#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248)
#define GEN12_DISABLE_POSH_BUSY_FF_DOP_CG REG_BIT(11)
+#define GEN12_CS_DEBUG_MODE1_CCCSUNIT_BE_COMMON _MMIO(0x20EC)
+#define GEN12_REPLAY_MODE_GRANULARITY REG_BIT(0)
+
#define GEN8_CS_CHICKEN1 _MMIO(0x2580)
#define GEN9_PREEMPT_3D_OBJECT_LEVEL (1 << 0)
#define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1))
@@ -8391,9 +8468,10 @@ enum {
#define GEN8_ERRDETBCTRL (1 << 9)
#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304)
- #define DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN REG_BIT(12)
- #define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC REG_BIT(11)
- #define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE REG_BIT(9)
+#define DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN REG_BIT(12)
+#define XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE REG_BIT(12)
+#define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC REG_BIT(11)
+#define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE REG_BIT(9)
#define HIZ_CHICKEN _MMIO(0x7018)
# define CHV_HZ_8X8_MODE_IN_1X REG_BIT(15)
@@ -8447,6 +8525,12 @@ enum {
#define GEN8_LQSC_FLUSH_COHERENT_LINES (1 << 21)
#define GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(22)
+#define GEN11_L3SQCREG5 _MMIO(0xb158)
+#define L3_PWM_TIMER_INIT_VAL_MASK REG_GENMASK(9, 0)
+
+#define XEHP_L3SCQREG7 _MMIO(0xb188)
+#define BLEND_FILL_CACHING_OPT_DIS REG_BIT(3)
+
/* GEN8 chicken */
#define HDC_CHICKEN0 _MMIO(0x7300)
#define ICL_HDC_MODE _MMIO(0xE5F4)
@@ -8457,6 +8541,12 @@ enum {
#define HDC_FORCE_NON_COHERENT (1 << 4)
#define HDC_BARRIER_PERFORMANCE_DISABLE (1 << 10)
+#define GEN12_HDC_CHICKEN0 _MMIO(0xE5F0)
+#define LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK REG_GENMASK(13, 11)
+
+#define SARB_CHICKEN1 _MMIO(0xe90c)
+#define COMP_CKN_IN REG_GENMASK(30, 29)
+
#define GEN8_HDC_CHICKEN1 _MMIO(0x7304)
/* GEN9 chicken */
@@ -8488,6 +8578,10 @@ enum {
#define DG2_RENDER_CCSTAG_4_3_EN REG_BIT(12)
#define PER_PIXEL_ALPHA_BYPASS_EN REG_BIT(7)
+#define VFLSKPD _MMIO(0x62a8)
+#define DIS_OVER_FETCH_CACHE REG_BIT(1)
+#define DIS_MULT_MISS_RD_SQUASH REG_BIT(0)
+
#define FF_MODE2 _MMIO(0x6604)
#define FF_MODE2_GS_TIMER_MASK REG_GENMASK(31, 24)
#define FF_MODE2_GS_TIMER_224 REG_FIELD_PREP(FF_MODE2_GS_TIMER_MASK, 224)
@@ -9311,6 +9405,9 @@ enum {
#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1 << 14)
#define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1 << 28)
+#define UNSLCGCTL9430 _MMIO(0x9430)
+#define MSQDUNIT_CLKGATE_DIS REG_BIT(3)
+
#define GEN6_GFXPAUSE _MMIO(0xA000)
#define GEN6_RPNSWREQ _MMIO(0xA008)
#define GEN6_TURBO_DISABLE (1 << 31)
@@ -9320,6 +9417,7 @@ enum {
#define GEN6_OFFSET(x) ((x) << 19)
#define GEN6_AGGRESSIVE_TURBO (0 << 15)
#define GEN9_SW_REQ_UNSLICE_RATIO_SHIFT 23
+#define GEN9_IGNORE_SLICE_RATIO (0 << 0)
#define GEN6_RC_VIDEO_FREQ _MMIO(0xA00C)
#define GEN6_RC_CONTROL _MMIO(0xA090)
@@ -9355,6 +9453,9 @@ enum {
#define GEN6_RP_UP_BUSY_CONT (0x4 << 3)
#define GEN6_RP_DOWN_IDLE_AVG (0x2 << 0)
#define GEN6_RP_DOWN_IDLE_CONT (0x1 << 0)
+#define GEN6_RPSWCTL_SHIFT 9
+#define GEN9_RPSWCTL_ENABLE (0x2 << GEN6_RPSWCTL_SHIFT)
+#define GEN9_RPSWCTL_DISABLE (0x0 << GEN6_RPSWCTL_SHIFT)
#define GEN6_RP_UP_THRESHOLD _MMIO(0xA02C)
#define GEN6_RP_DOWN_THRESHOLD _MMIO(0xA030)
#define GEN6_RP_CUR_UP_EI _MMIO(0xA050)
@@ -9626,24 +9727,39 @@ enum {
#define GEN9_CCS_TLB_PREFETCH_ENABLE (1 << 3)
#define GEN8_ROW_CHICKEN _MMIO(0xe4f0)
-#define FLOW_CONTROL_ENABLE (1 << 15)
-#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1 << 8)
-#define STALL_DOP_GATING_DISABLE (1 << 5)
-#define THROTTLE_12_5 (7 << 2)
-#define DISABLE_EARLY_EOT (1 << 1)
+#define FLOW_CONTROL_ENABLE REG_BIT(15)
+#define UGM_BACKUP_MODE REG_BIT(13)
+#define MDQ_ARBITRATION_MODE REG_BIT(12)
+#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE REG_BIT(8)
+#define STALL_DOP_GATING_DISABLE REG_BIT(5)
+#define THROTTLE_12_5 REG_GENMASK(4, 2)
+#define DISABLE_EARLY_EOT REG_BIT(1)
#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4)
+#define GEN12_DISABLE_READ_SUPPRESSION REG_BIT(15)
#define GEN12_DISABLE_EARLY_READ REG_BIT(14)
+#define GEN12_ENABLE_LARGE_GRF_MODE REG_BIT(12)
#define GEN12_PUSH_CONST_DEREF_HOLD_DIS REG_BIT(8)
+#define LSC_CHICKEN_BIT_0 _MMIO(0xe7c8)
+#define FORCE_1_SUB_MESSAGE_PER_FRAGMENT REG_BIT(15)
+#define LSC_CHICKEN_BIT_0_UDW _MMIO(0xe7c8 + 4)
+#define DIS_CHAIN_2XSIMD8 REG_BIT(55 - 32)
+#define FORCE_SLM_FENCE_SCOPE_TO_TILE REG_BIT(42 - 32)
+#define FORCE_UGM_FENCE_SCOPE_TO_TILE REG_BIT(41 - 32)
+#define MAXREQS_PER_BANK REG_GENMASK(39 - 32, 37 - 32)
+#define DISABLE_128B_EVICTION_COMMAND_UDW REG_BIT(36 - 32)
+
#define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4)
#define DOP_CLOCK_GATING_DISABLE (1 << 0)
#define PUSH_CONSTANT_DEREF_DISABLE (1 << 8)
#define GEN11_TDL_CLOCK_GATING_FIX_DISABLE (1 << 1)
-#define GEN9_ROW_CHICKEN4 _MMIO(0xe48c)
-#define GEN12_DISABLE_TDL_PUSH REG_BIT(9)
-#define GEN11_DIS_PICK_2ND_EU REG_BIT(7)
+#define GEN9_ROW_CHICKEN4 _MMIO(0xe48c)
+#define GEN12_DISABLE_GRF_CLEAR REG_BIT(13)
+#define GEN12_DISABLE_TDL_PUSH REG_BIT(9)
+#define GEN11_DIS_PICK_2ND_EU REG_BIT(7)
+#define GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX REG_BIT(4)
#define HSW_ROW_CHICKEN3 _MMIO(0xe49c)
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
@@ -9658,9 +9774,10 @@ enum {
#define GEN8_SAMPLER_POWER_BYPASS_DIS (1 << 1)
#define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194)
-#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR (1 << 8)
-#define GEN9_ENABLE_YV12_BUGFIX (1 << 4)
-#define GEN9_ENABLE_GPGPU_PREEMPTION (1 << 2)
+#define DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA REG_BIT(15)
+#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR REG_BIT(8)
+#define GEN9_ENABLE_YV12_BUGFIX REG_BIT(4)
+#define GEN9_ENABLE_GPGPU_PREEMPTION REG_BIT(2)
/* Audio */
#define G4X_AUD_VID_DID _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x62020)
@@ -12494,11 +12611,19 @@ enum skl_power_gate {
#define PMFLUSH_GAPL3UNBLOCK (1 << 21)
#define PMFLUSHDONE_LNEBLK (1 << 22)
+#define XEHP_L3NODEARBCFG _MMIO(0xb0b4)
+#define XEHP_LNESPARE REG_BIT(19)
+
#define GEN12_GLOBAL_MOCS(i) _MMIO(0x4000 + (i) * 4) /* Global MOCS regs */
#define GEN12_GSMBASE _MMIO(0x108100)
#define GEN12_DSMBASE _MMIO(0x1080C0)
+#define XEHP_CLOCK_GATE_DIS _MMIO(0x101014)
+#define SGSI_SIDECLK_DIS REG_BIT(17)
+#define SGGI_DIS REG_BIT(15)
+#define SGR_DIS REG_BIT(13)
+
/* gamt regs */
#define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4)
#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW 0x67F1427F /* max/min for LRA1/2 */
@@ -12883,4 +13008,7 @@ enum skl_power_gate {
#define CLKGATE_DIS_MISC _MMIO(0x46534)
#define CLKGATE_DIS_MISC_DMASC_GATING_DIS REG_BIT(21)
+#define SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731C)
+#define MSC_MSAA_REODER_BUF_BYPASS_DISABLE REG_BIT(14)
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 42cd17357771..76cf5ac91e94 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -29,6 +29,7 @@
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
+#include <linux/sched/mm.h>
#include "gem/i915_gem_context.h"
#include "gt/intel_breadcrumbs.h"
@@ -41,6 +42,7 @@
#include "gt/intel_rps.h"
#include "i915_active.h"
+#include "i915_deps.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_pm.h"
@@ -113,6 +115,10 @@ static void i915_fence_release(struct dma_fence *fence)
GEM_BUG_ON(rq->guc_prio != GUC_PRIO_INIT &&
rq->guc_prio != GUC_PRIO_FINI);
+ i915_request_free_capture_list(fetch_and_zero(&rq->capture_list));
+ if (i915_vma_snapshot_present(&rq->batch_snapshot))
+ i915_vma_snapshot_put_onstack(&rq->batch_snapshot);
+
/*
* The request is put onto a RCU freelist (i.e. the address
* is immediately reused), mark the fences as being freed now.
@@ -186,19 +192,6 @@ void i915_request_notify_execute_cb_imm(struct i915_request *rq)
__notify_execute_cb(rq, irq_work_imm);
}
-static void free_capture_list(struct i915_request *request)
-{
- struct i915_capture_list *capture;
-
- capture = fetch_and_zero(&request->capture_list);
- while (capture) {
- struct i915_capture_list *next = capture->next;
-
- kfree(capture);
- capture = next;
- }
-}
-
static void __i915_request_fill(struct i915_request *rq, u8 val)
{
void *vaddr = rq->ring->vaddr;
@@ -303,6 +296,38 @@ static void __rq_cancel_watchdog(struct i915_request *rq)
i915_request_put(rq);
}
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
+/**
+ * i915_request_free_capture_list - Free a capture list
+ * @capture: Pointer to the first list item or NULL
+ */
+void i915_request_free_capture_list(struct i915_capture_list *capture)
+{
+ while (capture) {
+ struct i915_capture_list *next = capture->next;
+
+ i915_vma_snapshot_put(capture->vma_snapshot);
+ kfree(capture);
+ capture = next;
+ }
+}
+
+#define assert_capture_list_is_null(_rq) GEM_BUG_ON((_rq)->capture_list)
+
+#define clear_capture_list(_rq) ((_rq)->capture_list = NULL)
+
+#else
+
+#define i915_request_free_capture_list(_a) do {} while (0)
+
+#define assert_capture_list_is_null(_a) do {} while (0)
+
+#define clear_capture_list(_rq) do {} while (0)
+
+#endif
+
bool i915_request_retire(struct i915_request *rq)
{
if (!__i915_request_is_complete(rq))
@@ -339,7 +364,7 @@ bool i915_request_retire(struct i915_request *rq)
}
if (test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags))
- atomic_dec(&rq->engine->gt->rps.num_waiters);
+ intel_rps_dec_waiters(&rq->engine->gt->rps);
/*
* We only loosely track inflight requests across preemption,
@@ -359,7 +384,6 @@ bool i915_request_retire(struct i915_request *rq)
intel_context_exit(rq->context);
intel_context_unpin(rq->context);
- free_capture_list(rq);
i915_sched_node_fini(&rq->sched);
i915_request_put(rq);
@@ -719,7 +743,7 @@ void i915_request_cancel(struct i915_request *rq, int error)
intel_context_cancel_request(rq->context, rq);
}
-static int __i915_sw_fence_call
+static int
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
struct i915_request *request =
@@ -755,7 +779,7 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
return NOTIFY_DONE;
}
-static int __i915_sw_fence_call
+static int
semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);
@@ -829,11 +853,18 @@ static void __i915_request_ctor(void *arg)
i915_sw_fence_init(&rq->submit, submit_notify);
i915_sw_fence_init(&rq->semaphore, semaphore_notify);
- rq->capture_list = NULL;
+ clear_capture_list(rq);
+ rq->batch_snapshot.present = false;
init_llist_head(&rq->execute_cb);
}
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#define clear_batch_ptr(_rq) ((_rq)->batch = NULL)
+#else
+#define clear_batch_ptr(_a) do {} while (0)
+#endif
+
struct i915_request *
__i915_request_create(struct intel_context *ce, gfp_t gfp)
{
@@ -925,10 +956,11 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
i915_sched_node_reinit(&rq->sched);
/* No zalloc, everything must be cleared after use */
- rq->batch = NULL;
+ clear_batch_ptr(rq);
__rq_init_watchdog(rq);
- GEM_BUG_ON(rq->capture_list);
+ assert_capture_list_is_null(rq);
GEM_BUG_ON(!llist_empty(&rq->execute_cb));
+ GEM_BUG_ON(i915_vma_snapshot_present(&rq->batch_snapshot));
/*
* Reserve space in the ring buffer for all the commands required to
@@ -1513,6 +1545,27 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
}
/**
+ * i915_request_await_deps - set this request to (async) wait upon a struct
+ * i915_deps dma_fence collection
+ * @rq: request we are wishing to use
+ * @deps: The struct i915_deps containing the dependencies.
+ *
+ * Returns 0 if successful, negative error code on error.
+ */
+int i915_request_await_deps(struct i915_request *rq, const struct i915_deps *deps)
+{
+ int i, err;
+
+ for (i = 0; i < deps->num_deps; ++i) {
+ err = i915_request_await_dma_fence(rq, deps->fences[i]);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
* i915_request_await_object - set this request to (async) wait upon a bo
* @to: request we are wishing to use
* @obj: object which may be in use on another ring.
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 3c6e8acd1457..170ee78c2858 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -40,19 +40,27 @@
#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_sw_fence.h"
+#include "i915_vma_snapshot.h"
#include <uapi/drm/i915_drm.h>
struct drm_file;
struct drm_i915_gem_object;
struct drm_printer;
+struct i915_deps;
struct i915_request;
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
struct i915_capture_list {
+ struct i915_vma_snapshot *vma_snapshot;
struct i915_capture_list *next;
- struct i915_vma *vma;
};
+void i915_request_free_capture_list(struct i915_capture_list *capture);
+#else
+#define i915_request_free_capture_list(_a) do {} while (0)
+#endif
+
#define RQ_TRACE(rq, fmt, ...) do { \
const struct i915_request *rq__ = (rq); \
ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt, \
@@ -289,10 +297,12 @@ struct i915_request {
/** Preallocate space in the ring for the emitting the request */
u32 reserved_space;
- /** Batch buffer related to this request if any (used for
- * error state dump only).
- */
- struct i915_vma *batch;
+ /** Batch buffer pointer for selftest internal use. */
+ I915_SELFTEST_DECLARE(struct i915_vma *batch);
+
+ struct i915_vma_snapshot batch_snapshot;
+
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
/**
* Additional buffers requested by userspace to be captured upon
* a GPU hang. The vma/obj on this list are protected by their
@@ -300,6 +310,7 @@ struct i915_request {
* on the active_list (of their final request).
*/
struct i915_capture_list *capture_list;
+#endif
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
@@ -401,6 +412,7 @@ int i915_request_await_object(struct i915_request *to,
bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
struct dma_fence *fence);
+int i915_request_await_deps(struct i915_request *rq, const struct i915_deps *deps);
int i915_request_await_execution(struct i915_request *rq,
struct dma_fence *fence);
@@ -647,7 +659,8 @@ i915_request_timeline(const struct i915_request *rq)
{
/* Valid only while the request is being constructed (or retired). */
return rcu_dereference_protected(rq->timeline,
- lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
+ lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex) ||
+ test_bit(CONTEXT_IS_PARKING, &rq->context->flags));
}
static inline struct i915_gem_context *
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c b/drivers/gpu/drm/i915/i915_scatterlist.c
index 4a6712dca838..41f2adb6a583 100644
--- a/drivers/gpu/drm/i915/i915_scatterlist.c
+++ b/drivers/gpu/drm/i915/i915_scatterlist.c
@@ -41,8 +41,32 @@ bool i915_sg_trim(struct sg_table *orig_st)
return true;
}
+static void i915_refct_sgt_release(struct kref *ref)
+{
+ struct i915_refct_sgt *rsgt =
+ container_of(ref, typeof(*rsgt), kref);
+
+ sg_free_table(&rsgt->table);
+ kfree(rsgt);
+}
+
+static const struct i915_refct_sgt_ops rsgt_ops = {
+ .release = i915_refct_sgt_release
+};
+
+/**
+ * i915_refct_sgt_init - Initialize a struct i915_refct_sgt with default ops
+ * @rsgt: The struct i915_refct_sgt to initialize.
+ * size: The size of the underlying memory buffer.
+ */
+void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size)
+{
+ __i915_refct_sgt_init(rsgt, size, &rsgt_ops);
+}
+
/**
- * i915_sg_from_mm_node - Create an sg_table from a struct drm_mm_node
+ * i915_rsgt_from_mm_node - Create a refcounted sg_table from a struct
+ * drm_mm_node
* @node: The drm_mm_node.
* @region_start: An offset to add to the dma addresses of the sg list.
*
@@ -50,25 +74,28 @@ bool i915_sg_trim(struct sg_table *orig_st)
* taking a maximum segment length into account, splitting into segments
* if necessary.
*
- * Return: A pointer to a kmalloced struct sg_table on success, negative
+ * Return: A pointer to a kmalloced struct i915_refct_sgt on success, negative
* error code cast to an error pointer on failure.
*/
-struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node,
- u64 region_start)
+struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
+ u64 region_start)
{
const u64 max_segment = SZ_1G; /* Do we have a limit on this? */
u64 segment_pages = max_segment >> PAGE_SHIFT;
u64 block_size, offset, prev_end;
+ struct i915_refct_sgt *rsgt;
struct sg_table *st;
struct scatterlist *sg;
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
+ rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
+ if (!rsgt)
return ERR_PTR(-ENOMEM);
+ i915_refct_sgt_init(rsgt, node->size << PAGE_SHIFT);
+ st = &rsgt->table;
if (sg_alloc_table(st, DIV_ROUND_UP(node->size, segment_pages),
GFP_KERNEL)) {
- kfree(st);
+ i915_refct_sgt_put(rsgt);
return ERR_PTR(-ENOMEM);
}
@@ -104,11 +131,11 @@ struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node,
sg_mark_end(sg);
i915_sg_trim(st);
- return st;
+ return rsgt;
}
/**
- * i915_sg_from_buddy_resource - Create an sg_table from a struct
+ * i915_rsgt_from_buddy_resource - Create a refcounted sg_table from a struct
* i915_buddy_block list
* @res: The struct i915_ttm_buddy_resource.
* @region_start: An offset to add to the dma addresses of the sg list.
@@ -117,11 +144,11 @@ struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node,
* taking a maximum segment length into account, splitting into segments
* if necessary.
*
- * Return: A pointer to a kmalloced struct sg_table on success, negative
+ * Return: A pointer to a kmalloced struct i915_refct_sgt on success, negative
* error code cast to an error pointer on failure.
*/
-struct sg_table *i915_sg_from_buddy_resource(struct ttm_resource *res,
- u64 region_start)
+struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
+ u64 region_start)
{
struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
const u64 size = res->num_pages << PAGE_SHIFT;
@@ -129,18 +156,21 @@ struct sg_table *i915_sg_from_buddy_resource(struct ttm_resource *res,
struct i915_buddy_mm *mm = bman_res->mm;
struct list_head *blocks = &bman_res->blocks;
struct i915_buddy_block *block;
+ struct i915_refct_sgt *rsgt;
struct scatterlist *sg;
struct sg_table *st;
resource_size_t prev_end;
GEM_BUG_ON(list_empty(blocks));
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
+ rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
+ if (!rsgt)
return ERR_PTR(-ENOMEM);
+ i915_refct_sgt_init(rsgt, size);
+ st = &rsgt->table;
if (sg_alloc_table(st, res->num_pages, GFP_KERNEL)) {
- kfree(st);
+ i915_refct_sgt_put(rsgt);
return ERR_PTR(-ENOMEM);
}
@@ -181,7 +211,7 @@ struct sg_table *i915_sg_from_buddy_resource(struct ttm_resource *res,
sg_mark_end(sg);
i915_sg_trim(st);
- return st;
+ return rsgt;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h
index b8bd5925b03f..12c6a1684081 100644
--- a/drivers/gpu/drm/i915/i915_scatterlist.h
+++ b/drivers/gpu/drm/i915/i915_scatterlist.h
@@ -144,10 +144,78 @@ static inline unsigned int i915_sg_segment_size(void)
bool i915_sg_trim(struct sg_table *orig_st);
-struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node,
- u64 region_start);
+/**
+ * struct i915_refct_sgt_ops - Operations structure for struct i915_refct_sgt
+ */
+struct i915_refct_sgt_ops {
+ /**
+ * release() - Free the memory of the struct i915_refct_sgt
+ * @ref: struct kref that is embedded in the struct i915_refct_sgt
+ */
+ void (*release)(struct kref *ref);
+};
+
+/**
+ * struct i915_refct_sgt - A refcounted scatter-gather table
+ * @kref: struct kref for refcounting
+ * @table: struct sg_table holding the scatter-gather table itself. Note that
+ * @table->sgl = NULL can be used to determine whether a scatter-gather table
+ * is present or not.
+ * @size: The size in bytes of the underlying memory buffer
+ * @ops: The operations structure.
+ */
+struct i915_refct_sgt {
+ struct kref kref;
+ struct sg_table table;
+ size_t size;
+ const struct i915_refct_sgt_ops *ops;
+};
+
+/**
+ * i915_refct_sgt_put - Put a refcounted sg-table
+ * @rsgt: The struct i915_refct_sgt to put.
+ */
+static inline void i915_refct_sgt_put(struct i915_refct_sgt *rsgt)
+{
+ if (rsgt)
+ kref_put(&rsgt->kref, rsgt->ops->release);
+}
+
+/**
+ * i915_refct_sgt_get - Get a refcounted sg-table
+ * @rsgt: The struct i915_refct_sgt to get.
+ */
+static inline struct i915_refct_sgt *
+i915_refct_sgt_get(struct i915_refct_sgt *rsgt)
+{
+ kref_get(&rsgt->kref);
+ return rsgt;
+}
+
+/**
+ * __i915_refct_sgt_init - Initialize a refcounted sg-list with a custom
+ * operations structure
+ * @rsgt: The struct i915_refct_sgt to initialize.
+ * @size: Size in bytes of the underlying memory buffer.
+ * @ops: A customized operations structure in case the refcounted sg-list
+ * is embedded into another structure.
+ */
+static inline void __i915_refct_sgt_init(struct i915_refct_sgt *rsgt,
+ size_t size,
+ const struct i915_refct_sgt_ops *ops)
+{
+ kref_init(&rsgt->kref);
+ rsgt->table.sgl = NULL;
+ rsgt->size = size;
+ rsgt->ops = ops;
+}
+
+void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size);
+
+struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
+ u64 region_start);
-struct sg_table *i915_sg_from_buddy_resource(struct ttm_resource *res,
- u64 region_start);
+struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
+ u64 region_start);
#endif
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 7ea0dbf81530..2a74a9a1cafe 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -18,7 +18,9 @@
#define I915_SW_FENCE_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
+#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG
static DEFINE_SPINLOCK(i915_sw_fence_lock);
+#endif
#define WQ_FLAG_BITS \
BITS_PER_TYPE(typeof_member(struct wait_queue_entry, flags))
@@ -34,7 +36,7 @@ enum {
static void *i915_sw_fence_debug_hint(void *addr)
{
- return (void *)(((struct i915_sw_fence *)addr)->flags & I915_SW_FENCE_MASK);
+ return (void *)(((struct i915_sw_fence *)addr)->fn);
}
#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
@@ -126,10 +128,7 @@ static inline void debug_fence_assert(struct i915_sw_fence *fence)
static int __i915_sw_fence_notify(struct i915_sw_fence *fence,
enum i915_sw_fence_notify state)
{
- i915_sw_fence_notify_t fn;
-
- fn = (i915_sw_fence_notify_t)(fence->flags & I915_SW_FENCE_MASK);
- return fn(fence, state);
+ return fence->fn(fence, state);
}
#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
@@ -242,10 +241,13 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence,
const char *name,
struct lock_class_key *key)
{
- BUG_ON(!fn || (unsigned long)fn & ~I915_SW_FENCE_MASK);
+ BUG_ON(!fn);
__init_waitqueue_head(&fence->wait, name, key);
- fence->flags = (unsigned long)fn;
+ fence->fn = fn;
+#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG
+ fence->flags = 0;
+#endif
i915_sw_fence_reinit(fence);
}
@@ -257,7 +259,6 @@ void i915_sw_fence_reinit(struct i915_sw_fence *fence)
atomic_set(&fence->pending, 1);
fence->error = 0;
- I915_SW_FENCE_BUG_ON(!fence->flags);
I915_SW_FENCE_BUG_ON(!list_empty(&fence->wait.head));
}
@@ -279,6 +280,7 @@ static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags,
return 0;
}
+#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG
static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
const struct i915_sw_fence * const signaler)
{
@@ -322,9 +324,6 @@ static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
unsigned long flags;
bool err;
- if (!IS_ENABLED(CONFIG_DRM_I915_SW_FENCE_CHECK_DAG))
- return false;
-
spin_lock_irqsave(&i915_sw_fence_lock, flags);
err = __i915_sw_fence_check_if_after(fence, signaler);
__i915_sw_fence_clear_checked_bit(fence);
@@ -332,6 +331,13 @@ static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
return err;
}
+#else
+static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
+ const struct i915_sw_fence * const signaler)
+{
+ return false;
+}
+#endif
static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
struct i915_sw_fence *signaler,
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 30a863353ee6..a7c603bc1b01 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -17,26 +17,27 @@
struct completion;
struct dma_resv;
+struct i915_sw_fence;
+
+enum i915_sw_fence_notify {
+ FENCE_COMPLETE,
+ FENCE_FREE
+};
+
+typedef int (*i915_sw_fence_notify_t)(struct i915_sw_fence *,
+ enum i915_sw_fence_notify state);
struct i915_sw_fence {
wait_queue_head_t wait;
+ i915_sw_fence_notify_t fn;
+#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG
unsigned long flags;
+#endif
atomic_t pending;
int error;
};
#define I915_SW_FENCE_CHECKED_BIT 0 /* used internally for DAG checking */
-#define I915_SW_FENCE_PRIVATE_BIT 1 /* available for use by owner */
-#define I915_SW_FENCE_MASK (~3)
-
-enum i915_sw_fence_notify {
- FENCE_COMPLETE,
- FENCE_FREE
-};
-
-typedef int (*i915_sw_fence_notify_t)(struct i915_sw_fence *,
- enum i915_sw_fence_notify state);
-#define __i915_sw_fence_call __aligned(4)
void __i915_sw_fence_init(struct i915_sw_fence *fence,
i915_sw_fence_notify_t fn,
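Since the callback pointer is now stored in the fence itself, notify functions no longer need the __i915_sw_fence_call alignment annotation. A hedged sketch of a callback using the relocated typedef and enum (my_data/my_notify are hypothetical; the existing i915_sw_fence_init() wrapper around __i915_sw_fence_init() and the NOTIFY_DONE return convention of existing callbacks are assumed):

	struct my_data {
		struct i915_sw_fence fence;
		/* ...owner state released on FENCE_FREE... */
	};

	static int my_notify(struct i915_sw_fence *fence,
			     enum i915_sw_fence_notify state)
	{
		struct my_data *data = container_of(fence, typeof(*data), fence);

		switch (state) {
		case FENCE_COMPLETE:
			/* All signalers have completed: start the dependent work. */
			break;
		case FENCE_FREE:
			/* Last reference dropped: tear down @data. */
			kfree(data); /* hypothetical: @data was kmalloc'ed by its owner */
			break;
		}

		return NOTIFY_DONE;
	}

	/* ...during construction... */
	i915_sw_fence_init(&data->fence, my_notify);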
diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.c b/drivers/gpu/drm/i915/i915_sw_fence_work.c
index 5b33ef23d54c..d2e56b387993 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence_work.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence_work.c
@@ -23,7 +23,7 @@ static void fence_work(struct work_struct *work)
dma_fence_put(&f->dma);
}
-static int __i915_sw_fence_call
+static int
fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
struct dma_fence_work *f = container_of(fence, typeof(*f), chain);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 1804f4142740..fae4d1f4f275 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -52,7 +52,7 @@ static u32 calc_residency(struct drm_i915_private *dev_priv,
u64 res = 0;
with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
- res = intel_rc6_residency_us(&dev_priv->gt.rc6, reg);
+ res = intel_rc6_residency_us(&to_gt(dev_priv)->rc6, reg);
return DIV_ROUND_CLOSEST_ULL(res, 1000);
}
@@ -260,7 +260,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &i915->gt.rps;
+ struct intel_rps *rps = &to_gt(i915)->rps;
return sysfs_emit(buf, "%d\n", intel_rps_read_actual_frequency(rps));
}
@@ -269,7 +269,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &i915->gt.rps;
+ struct intel_rps *rps = &to_gt(i915)->rps;
return sysfs_emit(buf, "%d\n", intel_rps_get_requested_frequency(rps));
}
@@ -277,9 +277,9 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &i915->gt.rps;
+ struct intel_rps *rps = &to_gt(i915)->rps;
- return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->boost_freq));
+ return sysfs_emit(buf, "%d\n", intel_rps_get_boost_frequency(rps));
}
static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
@@ -287,8 +287,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
const char *buf, size_t count)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &dev_priv->gt.rps;
- bool boost = false;
+ struct intel_rps *rps = &to_gt(dev_priv)->rps;
ssize_t ret;
u32 val;
@@ -296,28 +295,16 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
if (ret)
return ret;
- /* Validate against (static) hardware limits */
- val = intel_freq_opcode(rps, val);
- if (val < rps->min_freq || val > rps->max_freq)
- return -EINVAL;
-
- mutex_lock(&rps->lock);
- if (val != rps->boost_freq) {
- rps->boost_freq = val;
- boost = atomic_read(&rps->num_waiters);
- }
- mutex_unlock(&rps->lock);
- if (boost)
- schedule_work(&rps->work);
+ ret = intel_rps_set_boost_frequency(rps, val);
- return count;
+ return ret ?: count;
}
static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &dev_priv->gt.rps;
+ struct intel_rps *rps = &to_gt(dev_priv)->rps;
return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->efficient_freq));
}
@@ -325,7 +312,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_gt *gt = &dev_priv->gt;
+ struct intel_gt *gt = to_gt(dev_priv);
struct intel_rps *rps = &gt->rps;
return sysfs_emit(buf, "%d\n", intel_rps_get_max_frequency(rps));
@@ -336,7 +323,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
const char *buf, size_t count)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_gt *gt = &dev_priv->gt;
+ struct intel_gt *gt = to_gt(dev_priv);
struct intel_rps *rps = &gt->rps;
ssize_t ret;
u32 val;
@@ -353,7 +340,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
struct intel_rps *rps = &gt->rps;
return sysfs_emit(buf, "%d\n", intel_rps_get_min_frequency(rps));
@@ -364,7 +351,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
const char *buf, size_t count)
{
struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &i915->gt.rps;
+ struct intel_rps *rps = &to_gt(i915)->rps;
ssize_t ret;
u32 val;
@@ -394,7 +381,7 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &dev_priv->gt.rps;
+ struct intel_rps *rps = &to_gt(dev_priv)->rps;
u32 val;
if (attr == &dev_attr_gt_RP0_freq_mhz)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index bef795e265a6..29a858c53bdd 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -40,12 +40,12 @@
static struct kmem_cache *slab_vmas;
-struct i915_vma *i915_vma_alloc(void)
+static struct i915_vma *i915_vma_alloc(void)
{
return kmem_cache_zalloc(slab_vmas, GFP_KERNEL);
}
-void i915_vma_free(struct i915_vma *vma)
+static void i915_vma_free(struct i915_vma *vma)
{
return kmem_cache_free(slab_vmas, vma);
}
@@ -109,11 +109,9 @@ vma_create(struct drm_i915_gem_object *obj,
return ERR_PTR(-ENOMEM);
kref_init(&vma->ref);
- mutex_init(&vma->pages_mutex);
vma->vm = i915_vm_get(vm);
vma->ops = &vm->vma_ops;
vma->obj = obj;
- vma->resv = obj->base.resv;
vma->size = obj->base.size;
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
@@ -346,7 +344,7 @@ int i915_vma_wait_for_bind(struct i915_vma *vma)
fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
rcu_read_unlock();
if (fence) {
- err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT);
+ err = dma_fence_wait(fence, true);
dma_fence_put(fence);
}
}
@@ -354,6 +352,28 @@ int i915_vma_wait_for_bind(struct i915_vma *vma)
return err;
}
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+static int i915_vma_verify_bind_complete(struct i915_vma *vma)
+{
+ struct dma_fence *fence = i915_active_fence_get(&vma->active.excl);
+ int err;
+
+ if (!fence)
+ return 0;
+
+ if (dma_fence_is_signaled(fence))
+ err = fence->error;
+ else
+ err = -EBUSY;
+
+ dma_fence_put(fence);
+
+ return err;
+}
+#else
+#define i915_vma_verify_bind_complete(_vma) 0
+#endif
+
/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
* @vma: VMA to map
@@ -373,6 +393,7 @@ int i915_vma_bind(struct i915_vma *vma,
u32 bind_flags;
u32 vma_flags;
+ lockdep_assert_held(&vma->vm->mutex);
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(vma->size > vma->node.size);
@@ -394,7 +415,7 @@ int i915_vma_bind(struct i915_vma *vma,
if (bind_flags == 0)
return 0;
- GEM_BUG_ON(!vma->pages);
+ GEM_BUG_ON(!atomic_read(&vma->pages_count));
trace_i915_vma_bind(vma, bind_flags);
if (work && bind_flags & vma->vm->bind_async_flags) {
@@ -423,11 +444,16 @@ int i915_vma_bind(struct i915_vma *vma,
work->base.dma.error = 0; /* enable the queue_work() */
+ __i915_gem_object_pin_pages(vma->obj);
+ work->pinned = i915_gem_object_get(vma->obj);
+ } else {
if (vma->obj) {
- __i915_gem_object_pin_pages(vma->obj);
- work->pinned = i915_gem_object_get(vma->obj);
+ int ret;
+
+ ret = i915_gem_object_wait_moving_fence(vma->obj, true);
+ if (ret)
+ return ret;
}
- } else {
vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
}
@@ -449,6 +475,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
+ GEM_BUG_ON(i915_vma_verify_bind_complete(vma));
ptr = READ_ONCE(vma->iomap);
if (ptr == NULL) {
@@ -667,7 +694,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
}
color = 0;
- if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
+ if (i915_vm_has_cache_coloring(vma->vm))
color = vma->obj->cache_level;
if (flags & PIN_OFFSET_FIXED) {
@@ -789,40 +816,356 @@ unpinned:
return pinned;
}
-static int vma_get_pages(struct i915_vma *vma)
+static struct scatterlist *
+rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
+ unsigned int width, unsigned int height,
+ unsigned int src_stride, unsigned int dst_stride,
+ struct sg_table *st, struct scatterlist *sg)
{
- int err = 0;
- bool pinned_pages = false;
+ unsigned int column, row;
+ unsigned int src_idx;
- if (atomic_add_unless(&vma->pages_count, 1, 0))
- return 0;
+ for (column = 0; column < width; column++) {
+ unsigned int left;
- if (vma->obj) {
- err = i915_gem_object_pin_pages(vma->obj);
- if (err)
- return err;
- pinned_pages = true;
+ src_idx = src_stride * (height - 1) + column + offset;
+ for (row = 0; row < height; row++) {
+ st->nents++;
+ /*
+ * We don't need the pages, but need to initialize
+ * the entries so the sg list can be happily traversed.
+ * The only thing we need is DMA addresses.
+ */
+ sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
+ sg_dma_address(sg) =
+ i915_gem_object_get_dma_address(obj, src_idx);
+ sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
+ sg = sg_next(sg);
+ src_idx -= src_stride;
+ }
+
+ left = (dst_stride - height) * I915_GTT_PAGE_SIZE;
+
+ if (!left)
+ continue;
+
+ st->nents++;
+
+ /*
+ * The DE ignores the PTEs for the padding tiles, the sg entry
+ * here is just a conenience to indicate how many padding PTEs
+ * to insert at this spot.
+ */
+ sg_set_page(sg, NULL, left, 0);
+ sg_dma_address(sg) = 0;
+ sg_dma_len(sg) = left;
+ sg = sg_next(sg);
+ }
+
+ return sg;
+}
+
+static noinline struct sg_table *
+intel_rotate_pages(struct intel_rotation_info *rot_info,
+ struct drm_i915_gem_object *obj)
+{
+ unsigned int size = intel_rotation_info_size(rot_info);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct sg_table *st;
+ struct scatterlist *sg;
+ int ret = -ENOMEM;
+ int i;
+
+ /* Allocate target SG list. */
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto err_st_alloc;
+
+ ret = sg_alloc_table(st, size, GFP_KERNEL);
+ if (ret)
+ goto err_sg_alloc;
+
+ st->nents = 0;
+ sg = st->sgl;
+
+ for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
+ sg = rotate_pages(obj, rot_info->plane[i].offset,
+ rot_info->plane[i].width, rot_info->plane[i].height,
+ rot_info->plane[i].src_stride,
+ rot_info->plane[i].dst_stride,
+ st, sg);
+
+ return st;
+
+err_sg_alloc:
+ kfree(st);
+err_st_alloc:
+
+ drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
+ obj->base.size, rot_info->plane[0].width,
+ rot_info->plane[0].height, size);
+
+ return ERR_PTR(ret);
+}
+
+static struct scatterlist *
+remap_pages(struct drm_i915_gem_object *obj,
+ unsigned int offset, unsigned int alignment_pad,
+ unsigned int width, unsigned int height,
+ unsigned int src_stride, unsigned int dst_stride,
+ struct sg_table *st, struct scatterlist *sg)
+{
+ unsigned int row;
+
+ if (!width || !height)
+ return sg;
+
+ if (alignment_pad) {
+ st->nents++;
+
+ /*
+ * The DE ignores the PTEs for the padding tiles, the sg entry
+ * here is just a convenience to indicate how many padding PTEs
+ * to insert at this spot.
+ */
+ sg_set_page(sg, NULL, alignment_pad * 4096, 0);
+ sg_dma_address(sg) = 0;
+ sg_dma_len(sg) = alignment_pad * 4096;
+ sg = sg_next(sg);
}
- /* Allocations ahoy! */
- if (mutex_lock_interruptible(&vma->pages_mutex)) {
- err = -EINTR;
- goto unpin;
+ for (row = 0; row < height; row++) {
+ unsigned int left = width * I915_GTT_PAGE_SIZE;
+
+ while (left) {
+ dma_addr_t addr;
+ unsigned int length;
+
+ /*
+ * We don't need the pages, but need to initialize
+ * the entries so the sg list can be happily traversed.
+ * The only thing we need is DMA addresses.
+ */
+
+ addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
+
+ length = min(left, length);
+
+ st->nents++;
+
+ sg_set_page(sg, NULL, length, 0);
+ sg_dma_address(sg) = addr;
+ sg_dma_len(sg) = length;
+ sg = sg_next(sg);
+
+ offset += length / I915_GTT_PAGE_SIZE;
+ left -= length;
+ }
+
+ offset += src_stride - width;
+
+ left = (dst_stride - width) * I915_GTT_PAGE_SIZE;
+
+ if (!left)
+ continue;
+
+ st->nents++;
+
+ /*
+ * The DE ignores the PTEs for the padding tiles, the sg entry
+ * here is just a convenience to indicate how many padding PTEs
+ * to insert at this spot.
+ */
+ sg_set_page(sg, NULL, left, 0);
+ sg_dma_address(sg) = 0;
+ sg_dma_len(sg) = left;
+ sg = sg_next(sg);
}
- if (!atomic_read(&vma->pages_count)) {
- err = vma->ops->set_pages(vma);
- if (err)
- goto unlock;
- pinned_pages = false;
+ return sg;
+}
+
+static noinline struct sg_table *
+intel_remap_pages(struct intel_remapped_info *rem_info,
+ struct drm_i915_gem_object *obj)
+{
+ unsigned int size = intel_remapped_info_size(rem_info);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct sg_table *st;
+ struct scatterlist *sg;
+ unsigned int gtt_offset = 0;
+ int ret = -ENOMEM;
+ int i;
+
+ /* Allocate target SG list. */
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto err_st_alloc;
+
+ ret = sg_alloc_table(st, size, GFP_KERNEL);
+ if (ret)
+ goto err_sg_alloc;
+
+ st->nents = 0;
+ sg = st->sgl;
+
+ for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
+ unsigned int alignment_pad = 0;
+
+ if (rem_info->plane_alignment)
+ alignment_pad = ALIGN(gtt_offset, rem_info->plane_alignment) - gtt_offset;
+
+ sg = remap_pages(obj,
+ rem_info->plane[i].offset, alignment_pad,
+ rem_info->plane[i].width, rem_info->plane[i].height,
+ rem_info->plane[i].src_stride, rem_info->plane[i].dst_stride,
+ st, sg);
+
+ gtt_offset += alignment_pad +
+ rem_info->plane[i].dst_stride * rem_info->plane[i].height;
+ }
+
+ i915_sg_trim(st);
+
+ return st;
+
+err_sg_alloc:
+ kfree(st);
+err_st_alloc:
+
+ drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
+ obj->base.size, rem_info->plane[0].width,
+ rem_info->plane[0].height, size);
+
+ return ERR_PTR(ret);
+}
+
+static noinline struct sg_table *
+intel_partial_pages(const struct i915_ggtt_view *view,
+ struct drm_i915_gem_object *obj)
+{
+ struct sg_table *st;
+ struct scatterlist *sg, *iter;
+ unsigned int count = view->partial.size;
+ unsigned int offset;
+ int ret = -ENOMEM;
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto err_st_alloc;
+
+ ret = sg_alloc_table(st, count, GFP_KERNEL);
+ if (ret)
+ goto err_sg_alloc;
+
+ iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset);
+ GEM_BUG_ON(!iter);
+
+ sg = st->sgl;
+ st->nents = 0;
+ do {
+ unsigned int len;
+
+ len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
+ count << PAGE_SHIFT);
+ sg_set_page(sg, NULL, len, 0);
+ sg_dma_address(sg) =
+ sg_dma_address(iter) + (offset << PAGE_SHIFT);
+ sg_dma_len(sg) = len;
+
+ st->nents++;
+ count -= len >> PAGE_SHIFT;
+ if (count == 0) {
+ sg_mark_end(sg);
+ i915_sg_trim(st); /* Drop any unused tail entries. */
+
+ return st;
+ }
+
+ sg = __sg_next(sg);
+ iter = __sg_next(iter);
+ offset = 0;
+ } while (1);
+
+err_sg_alloc:
+ kfree(st);
+err_st_alloc:
+ return ERR_PTR(ret);
+}
+
+static int
+__i915_vma_get_pages(struct i915_vma *vma)
+{
+ struct sg_table *pages;
+ int ret;
+
+ /*
+ * The vma->pages are only valid within the lifespan of the borrowed
+ * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
+ * must be the vma->pages. A simple rule is that vma->pages must only
+ * be accessed when the obj->mm.pages are pinned.
+ */
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
+
+ switch (vma->ggtt_view.type) {
+ default:
+ GEM_BUG_ON(vma->ggtt_view.type);
+ fallthrough;
+ case I915_GGTT_VIEW_NORMAL:
+ pages = vma->obj->mm.pages;
+ break;
+
+ case I915_GGTT_VIEW_ROTATED:
+ pages =
+ intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
+ break;
+
+ case I915_GGTT_VIEW_REMAPPED:
+ pages =
+ intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
+ break;
+
+ case I915_GGTT_VIEW_PARTIAL:
+ pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
+ break;
}
+
+ ret = 0;
+ if (IS_ERR(pages)) {
+ ret = PTR_ERR(pages);
+ pages = NULL;
+ drm_err(&vma->vm->i915->drm,
+ "Failed to get pages for VMA view type %u (%d)!\n",
+ vma->ggtt_view.type, ret);
+ }
+
+ vma->pages = pages;
+
+ return ret;
+}
+
+I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
+{
+ int err;
+
+ if (atomic_add_unless(&vma->pages_count, 1, 0))
+ return 0;
+
+ err = i915_gem_object_pin_pages(vma->obj);
+ if (err)
+ return err;
+
+ err = __i915_vma_get_pages(vma);
+ if (err)
+ goto err_unpin;
+
+ vma->page_sizes = vma->obj->mm.page_sizes;
atomic_inc(&vma->pages_count);
-unlock:
- mutex_unlock(&vma->pages_mutex);
-unpin:
- if (pinned_pages)
- __i915_gem_object_unpin_pages(vma->obj);
+ return 0;
+
+err_unpin:
+ __i915_gem_object_unpin_pages(vma->obj);
return err;
}
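For reference, a sketch of how the now-exported pair is meant to be balanced (hypothetical selftest-style caller; only the helpers exported above are used):

	int err;

	err = i915_vma_get_pages(vma);
	if (err)
		return err;

	/* vma->pages and vma->page_sizes stay valid while the count is held. */

	i915_vma_put_pages(vma);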
@@ -830,18 +1173,31 @@ unpin:
static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
/* We allocate under vma_get_pages, so beware the shrinker */
- mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
+ struct sg_table *pages = READ_ONCE(vma->pages);
+
GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
+
if (atomic_sub_return(count, &vma->pages_count) == 0) {
- vma->ops->clear_pages(vma);
- GEM_BUG_ON(vma->pages);
- if (vma->obj)
- i915_gem_object_unpin_pages(vma->obj);
+ /*
+ * The atomic_sub_return is a read barrier for the READ_ONCE of
+ * vma->pages above.
+ *
+ * READ_ONCE is safe because this is either called from the same
+ * function (i915_vma_pin_ww), or guarded by vma->vm->mutex.
+ *
+ * TODO: We're leaving vma->pages dangling, until vma->obj->resv
+ * lock is required.
+ */
+ if (pages != vma->obj->mm.pages) {
+ sg_free_table(pages);
+ kfree(pages);
+ }
+
+ i915_gem_object_unpin_pages(vma->obj);
}
- mutex_unlock(&vma->pages_mutex);
}
-static void vma_put_pages(struct i915_vma *vma)
+I915_SELFTEST_EXPORT void i915_vma_put_pages(struct i915_vma *vma)
{
if (atomic_add_unless(&vma->pages_count, -1, 1))
return;
@@ -867,14 +1223,13 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
u64 size, u64 alignment, u64 flags)
{
struct i915_vma_work *work = NULL;
+ struct dma_fence *moving = NULL;
intel_wakeref_t wakeref = 0;
unsigned int bound;
int err;
-#ifdef CONFIG_PROVE_LOCKING
- if (debug_locks && !WARN_ON(!ww) && vma->resv)
- assert_vma_held(vma);
-#endif
+ assert_vma_held(vma);
+ GEM_BUG_ON(!ww);
BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
@@ -885,14 +1240,15 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
return 0;
- err = vma_get_pages(vma);
+ err = i915_vma_get_pages(vma);
if (err)
return err;
if (flags & PIN_GLOBAL)
wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
- if (flags & vma->vm->bind_async_flags) {
+ moving = vma->obj ? i915_gem_object_get_moving_fence(vma->obj) : NULL;
+ if (flags & vma->vm->bind_async_flags || moving) {
/* lock VM */
err = i915_vm_lock_objects(vma->vm, ww);
if (err)
@@ -906,6 +1262,8 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
work->vm = i915_vm_get(vma->vm);
+ dma_fence_work_chain(&work->base, moving);
+
/* Allocate enough page directories to used PTE */
if (vma->vm->allocate_va_range) {
err = i915_vm_alloc_pt_stash(vma->vm,
@@ -980,7 +1338,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
GEM_BUG_ON(!vma->pages);
err = i915_vma_bind(vma,
- vma->obj ? vma->obj->cache_level : 0,
+ vma->obj->cache_level,
flags, work);
if (err)
goto err_remove;
@@ -1010,7 +1368,11 @@ err_fence:
err_rpm:
if (wakeref)
intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
- vma_put_pages(vma);
+
+ if (moving)
+ dma_fence_put(moving);
+
+ i915_vma_put_pages(vma);
return err;
}
@@ -1025,23 +1387,15 @@ static void flush_idle_contexts(struct intel_gt *gt)
intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}
-int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
- u32 align, unsigned int flags)
+static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
+ u32 align, unsigned int flags)
{
struct i915_address_space *vm = vma->vm;
int err;
- GEM_BUG_ON(!i915_vma_is_ggtt(vma));
-
-#ifdef CONFIG_LOCKDEP
- WARN_ON(!ww && vma->resv && dma_resv_held(vma->resv));
-#endif
-
do {
- if (ww)
- err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
- else
- err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
+ err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
+
if (err != -ENOSPC) {
if (!err) {
err = i915_vma_wait_for_bind(vma);
@@ -1060,6 +1414,30 @@ int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
} while (1);
}
+int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
+ u32 align, unsigned int flags)
+{
+ struct i915_gem_ww_ctx _ww;
+ int err;
+
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+
+ if (ww)
+ return __i915_ggtt_pin(vma, ww, align, flags);
+
+#ifdef CONFIG_LOCKDEP
+ WARN_ON(dma_resv_held(vma->obj->base.resv));
+#endif
+
+ for_i915_gem_ww(&_ww, err, true) {
+ err = i915_gem_object_lock(vma->obj, &_ww);
+ if (!err)
+ err = __i915_ggtt_pin(vma, &_ww, align, flags);
+ }
+
+ return err;
+}
+
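A short usage note on the new wrapper (sketch, not from the patch): callers already inside a ww transaction pass their context through unchanged, while callers without one may now pass NULL and let i915_ggtt_pin() open its own transaction via for_i915_gem_ww():

	/* Within an existing ww transaction. */
	err = i915_ggtt_pin(vma, ww, align, flags);

	/* Stand-alone caller: the wrapper locks vma->obj itself. */
	err = i915_ggtt_pin(vma, NULL, align, flags);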
static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
{
/*
@@ -1113,6 +1491,7 @@ void i915_vma_reopen(struct i915_vma *vma)
void i915_vma_release(struct kref *ref)
{
struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
+ struct drm_i915_gem_object *obj = vma->obj;
if (drm_mm_node_allocated(&vma->node)) {
mutex_lock(&vma->vm->mutex);
@@ -1123,15 +1502,11 @@ void i915_vma_release(struct kref *ref)
}
GEM_BUG_ON(i915_vma_is_active(vma));
- if (vma->obj) {
- struct drm_i915_gem_object *obj = vma->obj;
-
- spin_lock(&obj->vma.lock);
- list_del(&vma->obj_link);
- if (!RB_EMPTY_NODE(&vma->obj_node))
- rb_erase(&vma->obj_node, &obj->vma.tree);
- spin_unlock(&obj->vma.lock);
- }
+ spin_lock(&obj->vma.lock);
+ list_del(&vma->obj_link);
+ if (!RB_EMPTY_NODE(&vma->obj_node))
+ rb_erase(&vma->obj_node, &obj->vma.tree);
+ spin_unlock(&obj->vma.lock);
__i915_vma_remove_closed(vma);
i915_vm_put(vma->vm);
@@ -1217,7 +1592,7 @@ __i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
return __i915_request_await_exclusive(rq, &vma->active);
}
-int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
+static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
{
int err;
@@ -1256,19 +1631,19 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
}
if (fence) {
- dma_resv_add_excl_fence(vma->resv, fence);
+ dma_resv_add_excl_fence(vma->obj->base.resv, fence);
obj->write_domain = I915_GEM_DOMAIN_RENDER;
obj->read_domains = 0;
}
} else {
if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
- err = dma_resv_reserve_shared(vma->resv, 1);
+ err = dma_resv_reserve_shared(vma->obj->base.resv, 1);
if (unlikely(err))
return err;
}
if (fence) {
- dma_resv_add_shared_fence(vma->resv, fence);
+ dma_resv_add_shared_fence(vma->obj->base.resv, fence);
obj->write_domain = 0;
}
}
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 648dbe744c96..32719431b3df 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -55,8 +55,6 @@ static inline bool i915_vma_is_active(const struct i915_vma *vma)
/* do not reserve memory to prevent deadlocks */
#define __EXEC_OBJECT_NO_RESERVE BIT(31)
-int __must_check __i915_vma_move_to_active(struct i915_vma *vma,
- struct i915_request *rq);
int __must_check _i915_vma_move_to_active(struct i915_vma *vma,
struct i915_request *rq,
struct dma_fence *fence,
@@ -234,16 +232,16 @@ static inline void __i915_vma_put(struct i915_vma *vma)
kref_put(&vma->ref, i915_vma_release);
}
-#define assert_vma_held(vma) dma_resv_assert_held((vma)->resv)
+#define assert_vma_held(vma) dma_resv_assert_held((vma)->obj->base.resv)
static inline void i915_vma_lock(struct i915_vma *vma)
{
- dma_resv_lock(vma->resv, NULL);
+ dma_resv_lock(vma->obj->base.resv, NULL);
}
static inline void i915_vma_unlock(struct i915_vma *vma)
{
- dma_resv_unlock(vma->resv);
+ dma_resv_unlock(vma->obj->base.resv);
}
int __must_check
@@ -418,9 +416,6 @@ static inline void i915_vma_clear_scanout(struct i915_vma *vma)
list_for_each_entry(V, &(OBJ)->vma.list, obj_link) \
for_each_until(!i915_vma_is_ggtt(V))
-struct i915_vma *i915_vma_alloc(void);
-void i915_vma_free(struct i915_vma *vma);
-
struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma);
void i915_vma_make_shrinkable(struct i915_vma *vma);
void i915_vma_make_purgeable(struct i915_vma *vma);
@@ -436,4 +431,7 @@ static inline int i915_vma_sync(struct i915_vma *vma)
void i915_vma_module_exit(void);
int i915_vma_module_init(void);
+I915_SELFTEST_DECLARE(int i915_vma_get_pages(struct i915_vma *vma));
+I915_SELFTEST_DECLARE(void i915_vma_put_pages(struct i915_vma *vma));
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_vma_snapshot.c b/drivers/gpu/drm/i915/i915_vma_snapshot.c
new file mode 100644
index 000000000000..2949ceea9884
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vma_snapshot.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "i915_vma_snapshot.h"
+#include "i915_vma_types.h"
+#include "i915_vma.h"
+
+/**
+ * i915_vma_snapshot_init - Initialize a struct i915_vma_snapshot from
+ * a struct i915_vma.
+ * @vsnap: The i915_vma_snapshot to init.
+ * @vma: A struct i915_vma used to initialize @vsnap.
+ * @name: Name associated with the snapshot. The character pointer needs to
+ * stay alive over the lifetime of the snapshot.
+ */
+void i915_vma_snapshot_init(struct i915_vma_snapshot *vsnap,
+ struct i915_vma *vma,
+ const char *name)
+{
+ if (!i915_vma_is_pinned(vma))
+ assert_object_held(vma->obj);
+
+ vsnap->name = name;
+ vsnap->size = vma->size;
+ vsnap->obj_size = vma->obj->base.size;
+ vsnap->gtt_offset = vma->node.start;
+ vsnap->gtt_size = vma->node.size;
+ vsnap->page_sizes = vma->page_sizes.gtt;
+ vsnap->pages = vma->pages;
+ vsnap->pages_rsgt = NULL;
+ vsnap->mr = NULL;
+ if (vma->obj->mm.rsgt)
+ vsnap->pages_rsgt = i915_refct_sgt_get(vma->obj->mm.rsgt);
+ vsnap->mr = vma->obj->mm.region;
+ kref_init(&vsnap->kref);
+ vsnap->vma_resource = &vma->active;
+ vsnap->onstack = false;
+ vsnap->present = true;
+}
+
+/**
+ * i915_vma_snapshot_init_onstack - Initialize a struct i915_vma_snapshot from
+ * a struct i915_vma, but avoid kfreeing it on last put.
+ * @vsnap: The i915_vma_snapshot to init.
+ * @vma: A struct i915_vma used to initialize @vsnap.
+ * @name: Name associated with the snapshot. The character pointer needs to
+ * stay alive over the lifetime of the snapshot.
+ */
+void i915_vma_snapshot_init_onstack(struct i915_vma_snapshot *vsnap,
+ struct i915_vma *vma,
+ const char *name)
+{
+ i915_vma_snapshot_init(vsnap, vma, name);
+ vsnap->onstack = true;
+}
+
+static void vma_snapshot_release(struct kref *ref)
+{
+ struct i915_vma_snapshot *vsnap =
+ container_of(ref, typeof(*vsnap), kref);
+
+ vsnap->present = false;
+ if (vsnap->pages_rsgt)
+ i915_refct_sgt_put(vsnap->pages_rsgt);
+ if (!vsnap->onstack)
+ kfree(vsnap);
+}
+
+/**
+ * i915_vma_snapshot_put - Put an i915_vma_snapshot pointer reference
+ * @vsnap: The pointer reference
+ */
+void i915_vma_snapshot_put(struct i915_vma_snapshot *vsnap)
+{
+ kref_put(&vsnap->kref, vma_snapshot_release);
+}
+
+/**
+ * i915_vma_snapshot_put_onstack - Put an onstack i915_vma_snapshot pointer
+ * reference and verify that the structure is released
+ * @vsnap: The pointer reference
+ *
+ * This function is intended to be paired with i915_vma_snapshot_init_onstack()
+ * and should be called before exiting the scope that declared, or before
+ * freeing, the structure that embeds @vsnap, to verify that all references
+ * have been released.
+ */
+void i915_vma_snapshot_put_onstack(struct i915_vma_snapshot *vsnap)
+{
+ if (!kref_put(&vsnap->kref, vma_snapshot_release))
+ GEM_BUG_ON(1);
+}
+
+/**
+ * i915_vma_snapshot_resource_pin - Temporarily block the memory the
+ * vma snapshot is pointing to from being released.
+ * @vsnap: The vma snapshot.
+ * @lockdep_cookie: Pointer to bool needed for lockdep support. This needs
+ * to be passed to the paired i915_vma_snapshot_resource_unpin.
+ *
+ * This function will temporarily try to hold up a fence or similar structure
+ * and will therefore enter a fence signaling critical section.
+ *
+ * Return: true if we succeeded in blocking the memory from being released,
+ * false otherwise.
+ */
+bool i915_vma_snapshot_resource_pin(struct i915_vma_snapshot *vsnap,
+ bool *lockdep_cookie)
+{
+ bool pinned = i915_active_acquire_if_busy(vsnap->vma_resource);
+
+ if (pinned)
+ *lockdep_cookie = dma_fence_begin_signalling();
+
+ return pinned;
+}
+
+/**
+ * i915_vma_snapshot_resource_unpin - Unblock vma snapshot memory from
+ * being released.
+ * @vsnap: The vma snapshot.
+ * @lockdep_cookie: Cookie returned from the matching i915_vma_snapshot_resource_pin().
+ *
+ * Might leave a fence signalling critical section and signal a fence.
+ */
+void i915_vma_snapshot_resource_unpin(struct i915_vma_snapshot *vsnap,
+ bool lockdep_cookie)
+{
+ dma_fence_end_signalling(lockdep_cookie);
+
+ return i915_active_release(vsnap->vma_resource);
+}
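A hedged usage sketch of the new snapshot API (not part of the patch; "batch" is an arbitrary snapshot name and the surrounding error-capture code is assumed):

	struct i915_vma_snapshot vsnap;
	bool lockdep_cookie;

	i915_vma_snapshot_init_onstack(&vsnap, vma, "batch");

	if (i915_vma_snapshot_resource_pin(&vsnap, &lockdep_cookie)) {
		/* vsnap.pages and vsnap.gtt_offset may be read out here. */
		i915_vma_snapshot_resource_unpin(&vsnap, lockdep_cookie);
	}

	i915_vma_snapshot_put_onstack(&vsnap);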
diff --git a/drivers/gpu/drm/i915/i915_vma_snapshot.h b/drivers/gpu/drm/i915/i915_vma_snapshot.h
new file mode 100644
index 000000000000..940581df4622
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vma_snapshot.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+#ifndef _I915_VMA_SNAPSHOT_H_
+#define _I915_VMA_SNAPSHOT_H_
+
+#include <linux/kref.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+struct i915_active;
+struct i915_refct_sgt;
+struct i915_vma;
+struct intel_memory_region;
+struct sg_table;
+
+/**
+ * DOC: Simple utilities for snapshotting GPU vma metadata, later used for
+ * error capture. Vi use a separate header for this to avoid issues due to
+ * recursive header includes.
+ */
+
+/**
+ * struct i915_vma_snapshot - Snapshot of vma metadata.
+ * @name: The name given to the snapshot.
+ * @size: The vma size in bytes.
+ * @obj_size: The size of the underlying object in bytes.
+ * @gtt_offset: The gtt offset the vma is bound to.
+ * @gtt_size: The size in bytes allocated for the vma in the GTT.
+ * @pages: The struct sg_table pointing to the pages bound.
+ * @pages_rsgt: The refcounted sg_table holding the reference for @pages if any.
+ * @mr: The memory region pointed for the pages bound.
+ * @kref: Reference for this structure.
+ * @vma_resource: FIXME: A means to keep the unbind fence from signaling.
+ * Temporarily while we have only sync unbinds, and still use the vma
+ * active, we use that. With async unbinding we need a signaling refcount
+ * for the unbind fence.
+ * @page_sizes: The vma GTT page sizes information.
+ * @onstack: Whether the structure shouldn't be freed on final put.
+ * @present: Whether the structure is present and initialized.
+ */
+struct i915_vma_snapshot {
+ const char *name;
+ size_t size;
+ size_t obj_size;
+ size_t gtt_offset;
+ size_t gtt_size;
+ struct sg_table *pages;
+ struct i915_refct_sgt *pages_rsgt;
+ struct intel_memory_region *mr;
+ struct kref kref;
+ struct i915_active *vma_resource;
+ u32 page_sizes;
+ bool onstack:1;
+ bool present:1;
+};
+
+void i915_vma_snapshot_init(struct i915_vma_snapshot *vsnap,
+ struct i915_vma *vma,
+ const char *name);
+
+void i915_vma_snapshot_init_onstack(struct i915_vma_snapshot *vsnap,
+ struct i915_vma *vma,
+ const char *name);
+
+void i915_vma_snapshot_put(struct i915_vma_snapshot *vsnap);
+
+void i915_vma_snapshot_put_onstack(struct i915_vma_snapshot *vsnap);
+
+bool i915_vma_snapshot_resource_pin(struct i915_vma_snapshot *vsnap,
+ bool *lockdep_cookie);
+
+void i915_vma_snapshot_resource_unpin(struct i915_vma_snapshot *vsnap,
+ bool lockdep_cookie);
+
+/**
+ * i915_vma_snapshot_alloc - Allocate a struct i915_vma_snapshot
+ * @gfp: Allocation mode.
+ *
+ * Return: A pointer to a struct i915_vma_snapshot if successful.
+ * NULL otherwise.
+ */
+static inline struct i915_vma_snapshot *i915_vma_snapshot_alloc(gfp_t gfp)
+{
+ return kmalloc(sizeof(struct i915_vma_snapshot), gfp);
+}
+
+/**
+ * i915_vma_snapshot_get - Take a reference on a struct i915_vma_snapshot
+ * @vsnap: The snapshot to reference.
+ *
+ * Return: A pointer to a struct i915_vma_snapshot.
+ */
+static inline struct i915_vma_snapshot *
+i915_vma_snapshot_get(struct i915_vma_snapshot *vsnap)
+{
+ kref_get(&vsnap->kref);
+ return vsnap;
+}
+
+/**
+ * i915_vma_snapshot_present - Whether a struct i915_vma_snapshot is
+ * present and initialized.
+ * @vsnap: The snapshot to check; may be NULL.
+ *
+ * Return: true if present and initialized; false otherwise.
+ */
+static inline bool
+i915_vma_snapshot_present(const struct i915_vma_snapshot *vsnap)
+{
+ return vsnap && vsnap->present;
+}
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index 4ee6e54799f4..ca575e129ced 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -187,7 +187,6 @@ struct i915_vma {
const struct i915_vma_ops *ops;
struct drm_i915_gem_object *obj;
- struct dma_resv *resv; /** Alias of obj->resv */
struct sg_table *pages;
void __iomem *iomap;
@@ -271,7 +270,6 @@ struct i915_vma {
#define I915_VMA_PAGES_BIAS 24
#define I915_VMA_PAGES_ACTIVE (BIT(24) | 1)
atomic_t pages_count; /* number of active binds to the pages */
- struct mutex pages_mutex; /* protect acquire/release of backing pages */
/**
* Support different GGTT views into the same object.
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 24e05f1ef486..93b251b25aba 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -83,33 +83,26 @@ const char *intel_platform_name(enum intel_platform platform)
return platform_names[platform];
}
-static const char *iommu_name(void)
-{
- const char *msg = "n/a";
-
-#ifdef CONFIG_INTEL_IOMMU
- msg = enableddisabled(intel_iommu_gfx_mapped);
-#endif
-
- return msg;
-}
-
void intel_device_info_print_static(const struct intel_device_info *info,
struct drm_printer *p)
{
- if (info->graphics_rel)
- drm_printf(p, "graphics version: %u.%02u\n", info->graphics_ver, info->graphics_rel);
+ if (info->graphics.rel)
+ drm_printf(p, "graphics version: %u.%02u\n", info->graphics.ver,
+ info->graphics.rel);
+ else
+ drm_printf(p, "graphics version: %u\n", info->graphics.ver);
+
+ if (info->media.rel)
+ drm_printf(p, "media version: %u.%02u\n", info->media.ver, info->media.rel);
else
- drm_printf(p, "graphics version: %u\n", info->graphics_ver);
+ drm_printf(p, "media version: %u\n", info->media.ver);
- if (info->media_rel)
- drm_printf(p, "media version: %u.%02u\n", info->media_ver, info->media_rel);
+ if (info->display.rel)
+ drm_printf(p, "display version: %u.%02u\n", info->display.ver, info->display.rel);
else
- drm_printf(p, "media version: %u\n", info->media_ver);
+ drm_printf(p, "display version: %u\n", info->display.ver);
- drm_printf(p, "display version: %u\n", info->display.ver);
drm_printf(p, "gt: %d\n", info->gt);
- drm_printf(p, "iommu: %s\n", iommu_name());
drm_printf(p, "memory-regions: %x\n", info->memory_regions);
drm_printf(p, "page-sizes: %x\n", info->page_sizes);
drm_printf(p, "platform: %s\n", intel_platform_name(info->platform));
@@ -385,7 +378,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
info->display.has_dsc = 0;
}
- if (GRAPHICS_VER(dev_priv) == 6 && intel_vtd_active()) {
+ if (GRAPHICS_VER(dev_priv) == 6 && intel_vtd_active(dev_priv)) {
drm_info(&dev_priv->drm,
"Disabling ppGTT for VT-d support\n");
info->ppgtt_type = INTEL_PPGTT_NONE;
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 2a4e32b4ebfd..3699b1c539ea 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -129,6 +129,7 @@ enum intel_ppgtt_type {
func(is_dgfx); \
/* Keep has_* in alphabetical order */ \
func(has_64bit_reloc); \
+ func(has_64k_pages); \
func(gpu_reset_clobbers_display); \
func(has_reset_engine); \
func(has_global_mocs); \
@@ -171,11 +172,14 @@ enum intel_ppgtt_type {
func(overlay_needs_physical); \
func(supports_tv);
+struct ip_version {
+ u8 ver;
+ u8 rel;
+};
+
struct intel_device_info {
- u8 graphics_ver;
- u8 graphics_rel;
- u8 media_ver;
- u8 media_rel;
+ struct ip_version graphics;
+ struct ip_version media;
intel_engine_mask_t platform_engine_mask; /* Engines supported by the HW */
@@ -200,6 +204,7 @@ struct intel_device_info {
struct {
u8 ver;
+ u8 rel;
u8 pipe_mask;
u8 cpu_transcoder_mask;
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 4e70c1a9ef2e..cf6e98962d82 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -109,7 +109,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
return 0;
}
- if (intel_uc_wants_guc_submission(&dev_priv->gt.uc)) {
+ if (intel_uc_wants_guc_submission(&to_gt(dev_priv)->uc)) {
drm_err(&dev_priv->drm,
"i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");
return -EIO;
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index e7f7e6627750..c70d7e286a51 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -3,6 +3,8 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/prandom.h>
+
#include "intel_memory_region.h"
#include "i915_drv.h"
#include "i915_ttm_buddy_manager.h"
@@ -29,6 +31,110 @@ static const struct {
},
};
+static int __iopagetest(struct intel_memory_region *mem,
+ u8 __iomem *va, int pagesize,
+ u8 value, resource_size_t offset,
+ const void *caller)
+{
+ int byte = prandom_u32_max(pagesize);
+ u8 result[3];
+
+ memset_io(va, value, pagesize); /* or GPF! */
+ wmb();
+
+ result[0] = ioread8(va);
+ result[1] = ioread8(va + byte);
+ result[2] = ioread8(va + pagesize - 1);
+ if (memchr_inv(result, value, sizeof(result))) {
+ dev_err(mem->i915->drm.dev,
+ "Failed to read back from memory region:%pR at [%pa + %pa] for %ps; wrote %x, read (%x, %x, %x)\n",
+ &mem->region, &mem->io_start, &offset, caller,
+ value, result[0], result[1], result[2]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int iopagetest(struct intel_memory_region *mem,
+ resource_size_t offset,
+ const void *caller)
+{
+ const u8 val[] = { 0x0, 0xa5, 0xc3, 0xf0 };
+ void __iomem *va;
+ int err;
+ int i;
+
+ va = ioremap_wc(mem->io_start + offset, PAGE_SIZE);
+ if (!va) {
+ dev_err(mem->i915->drm.dev,
+ "Failed to ioremap memory region [%pa + %pa] for %ps\n",
+ &mem->io_start, &offset, caller);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ err = __iopagetest(mem, va, PAGE_SIZE, val[i], offset, caller);
+ if (err)
+ break;
+
+ err = __iopagetest(mem, va, PAGE_SIZE, ~val[i], offset, caller);
+ if (err)
+ break;
+ }
+
+ iounmap(va);
+ return err;
+}
+
+static resource_size_t random_page(resource_size_t last)
+{
+ /* Limited to low 44b (16TiB), but should suffice for a spot check */
+ return prandom_u32_max(last >> PAGE_SHIFT) << PAGE_SHIFT;
+}
+
+static int iomemtest(struct intel_memory_region *mem,
+ bool test_all,
+ const void *caller)
+{
+ resource_size_t last = resource_size(&mem->region) - PAGE_SIZE;
+ resource_size_t page;
+ int err;
+
+ /*
+ * Quick test to check read/write access to the iomap (backing store).
+ *
+ * Write a byte, read it back. If the iomapping fails, we expect
+ * a GPF preventing further execution. If the backing store does not
+ * exist, the read back will return garbage. We check a couple of pages,
+ * the first and last of the specified region to confirm the backing
+ * store + iomap does cover the entire memory region; and we check
+ * a random offset within as a quick spot check for bad memory.
+ */
+
+ if (test_all) {
+ for (page = 0; page <= last; page += PAGE_SIZE) {
+ err = iopagetest(mem, page, caller);
+ if (err)
+ return err;
+ }
+ } else {
+ err = iopagetest(mem, 0, caller);
+ if (err)
+ return err;
+
+ err = iopagetest(mem, last, caller);
+ if (err)
+ return err;
+
+ err = iopagetest(mem, random_page(last), caller);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
struct intel_memory_region *
intel_memory_region_lookup(struct drm_i915_private *i915,
u16 class, u16 instance)
@@ -90,6 +196,21 @@ void intel_memory_region_debug(struct intel_memory_region *mr,
&mr->total, &mr->avail);
}
+static int intel_memory_region_memtest(struct intel_memory_region *mem,
+ void *caller)
+{
+ struct drm_i915_private *i915 = mem->i915;
+ int err = 0;
+
+ if (!mem->io_start)
+ return 0;
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) || i915->params.memtest)
+ err = iomemtest(mem, i915->params.memtest, caller);
+
+ return err;
+}
+
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
resource_size_t start,
@@ -126,9 +247,15 @@ intel_memory_region_create(struct drm_i915_private *i915,
goto err_free;
}
- kref_init(&mem->kref);
+ err = intel_memory_region_memtest(mem, (void *)_RET_IP_);
+ if (err)
+ goto err_release;
+
return mem;
+err_release:
+ if (mem->ops->release)
+ mem->ops->release(mem);
err_free:
kfree(mem);
return ERR_PTR(err);
@@ -144,28 +271,17 @@ void intel_memory_region_set_name(struct intel_memory_region *mem,
va_end(ap);
}
-static void __intel_memory_region_destroy(struct kref *kref)
+void intel_memory_region_destroy(struct intel_memory_region *mem)
{
- struct intel_memory_region *mem =
- container_of(kref, typeof(*mem), kref);
+ int ret = 0;
if (mem->ops->release)
- mem->ops->release(mem);
+ ret = mem->ops->release(mem);
+ GEM_WARN_ON(!list_empty_careful(&mem->objects.list));
mutex_destroy(&mem->objects.lock);
- kfree(mem);
-}
-
-struct intel_memory_region *
-intel_memory_region_get(struct intel_memory_region *mem)
-{
- kref_get(&mem->kref);
- return mem;
-}
-
-void intel_memory_region_put(struct intel_memory_region *mem)
-{
- kref_put(&mem->kref, __intel_memory_region_destroy);
+ if (!ret)
+ kfree(mem);
}
/* Global memory region registration -- only slight layer inversions! */
@@ -234,7 +350,7 @@ void intel_memory_regions_driver_release(struct drm_i915_private *i915)
fetch_and_zero(&i915->mm.regions[i]);
if (region)
- intel_memory_region_put(region);
+ intel_memory_region_destroy(region);
}
}
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 3feae3353d33..5625c9c38993 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -6,7 +6,6 @@
#ifndef __INTEL_MEMORY_REGION_H__
#define __INTEL_MEMORY_REGION_H__
-#include <linux/kref.h>
#include <linux/ioport.h>
#include <linux/mutex.h>
#include <linux/io-mapping.h>
@@ -51,7 +50,7 @@ struct intel_memory_region_ops {
unsigned int flags;
int (*init)(struct intel_memory_region *mem);
- void (*release)(struct intel_memory_region *mem);
+ int (*release)(struct intel_memory_region *mem);
int (*init_object)(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
@@ -71,8 +70,6 @@ struct intel_memory_region {
/* For fake LMEM */
struct drm_mm_node fake_mappable;
- struct kref kref;
-
resource_size_t io_start;
resource_size_t min_page_size;
resource_size_t total;
@@ -110,9 +107,7 @@ intel_memory_region_create(struct drm_i915_private *i915,
u16 instance,
const struct intel_memory_region_ops *ops);
-struct intel_memory_region *
-intel_memory_region_get(struct intel_memory_region *mem);
-void intel_memory_region_put(struct intel_memory_region *mem);
+void intel_memory_region_destroy(struct intel_memory_region *mem);
int intel_memory_regions_hw_probe(struct drm_i915_private *i915);
void intel_memory_regions_driver_release(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 3714f96f17b3..bdf97a8c9ef3 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -98,7 +98,7 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
* "Plane N strech max must be programmed to 11b (x1)
* when Async flips are enabled on that plane."
*/
- if (!IS_GEMINILAKE(dev_priv) && intel_vtd_active())
+ if (!IS_GEMINILAKE(dev_priv) && intel_vtd_active(dev_priv))
intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
SKL_PLANE1_STRETCH_MAX_MASK, SKL_PLANE1_STRETCH_MAX_X1);
}
@@ -7492,11 +7492,34 @@ static void dg1_init_clock_gating(struct drm_i915_private *dev_priv)
gen12lp_init_clock_gating(dev_priv);
/* Wa_1409836686:dg1[a0] */
- if (IS_DG1_GT_STEP(dev_priv, STEP_A0, STEP_B0))
+ if (IS_DG1_GRAPHICS_STEP(dev_priv, STEP_A0, STEP_B0))
intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) |
DPT_GATING_DIS);
}
+static void xehpsdv_init_clock_gating(struct drm_i915_private *dev_priv)
+{
+ /* Wa_22010146351:xehpsdv */
+ if (IS_XEHPSDV_GRAPHICS_STEP(dev_priv, STEP_A0, STEP_B0))
+ intel_uncore_rmw(&dev_priv->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS);
+}
+
+static void dg2_init_clock_gating(struct drm_i915_private *i915)
+{
+ /* Wa_22010954014:dg2_g10 */
+ if (IS_DG2_G10(i915))
+ intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0,
+ SGSI_SIDECLK_DIS);
+
+ /*
+ * Wa_14010733611:dg2_g10
+ * Wa_22010146351:dg2_g10
+ */
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0))
+ intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0,
+ SGR_DIS | SGGI_DIS);
+}
+
static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
{
if (!HAS_PCH_CNP(dev_priv))
@@ -7541,12 +7564,12 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
FBC_LLC_FULLY_OPEN);
/* WaDisableSDEUnitClockGating:kbl */
- if (IS_KBL_GT_STEP(dev_priv, 0, STEP_C0))
+ if (IS_KBL_GRAPHICS_STEP(dev_priv, 0, STEP_C0))
intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/* WaDisableGamClockGating:kbl */
- if (IS_KBL_GT_STEP(dev_priv, 0, STEP_C0))
+ if (IS_KBL_GRAPHICS_STEP(dev_priv, 0, STEP_C0))
intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
@@ -7911,6 +7934,8 @@ static const struct drm_i915_clock_gating_funcs platform##_clock_gating_funcs =
.init_clock_gating = platform##_init_clock_gating, \
}
+CG_FUNCS(dg2);
+CG_FUNCS(xehpsdv);
CG_FUNCS(adlp);
CG_FUNCS(dg1);
CG_FUNCS(gen12lp);
@@ -7947,7 +7972,11 @@ CG_FUNCS(nop);
*/
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
- if (IS_ALDERLAKE_P(dev_priv))
+ if (IS_DG2(dev_priv))
+ dev_priv->clock_gating_funcs = &dg2_clock_gating_funcs;
+ else if (IS_XEHPSDV(dev_priv))
+ dev_priv->clock_gating_funcs = &xehpsdv_clock_gating_funcs;
+ else if (IS_ALDERLAKE_P(dev_priv))
dev_priv->clock_gating_funcs = &adlp_clock_gating_funcs;
else if (IS_DG1(dev_priv))
dev_priv->clock_gating_funcs = &dg1_clock_gating_funcs;
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c
index 98c7339bf8ba..f2b888c16958 100644
--- a/drivers/gpu/drm/i915/intel_region_ttm.c
+++ b/drivers/gpu/drm/i915/intel_region_ttm.c
@@ -104,19 +104,50 @@ int intel_region_ttm_init(struct intel_memory_region *mem)
* memory region, and if it was registered with the TTM device,
* removes that registration.
*/
-void intel_region_ttm_fini(struct intel_memory_region *mem)
+int intel_region_ttm_fini(struct intel_memory_region *mem)
{
- int ret;
+ struct ttm_resource_manager *man = mem->region_private;
+ int ret = -EBUSY;
+ int count;
+
+ /*
+ * Put the region's move fences. This releases requests that
+ * may hold on to contexts and vms that may hold on to buffer
+ * objects placed in this region.
+ */
+ if (man)
+ ttm_resource_manager_cleanup(man);
+
+ /* Flush objects from region. */
+ for (count = 0; count < 10; ++count) {
+ i915_gem_flush_free_objects(mem->i915);
+
+ mutex_lock(&mem->objects.lock);
+ if (list_empty(&mem->objects.list))
+ ret = 0;
+ mutex_unlock(&mem->objects.lock);
+ if (!ret)
+ break;
+
+ msleep(20);
+ flush_delayed_work(&mem->i915->bdev.wq);
+ }
+
+ /* If we leaked objects, don't free the region, to avoid a use-after-free */
+ if (ret || !man)
+ return ret;
ret = i915_ttm_buddy_man_fini(&mem->i915->bdev,
intel_region_to_ttm_type(mem));
GEM_WARN_ON(ret);
mem->region_private = NULL;
+
+ return ret;
}
/**
- * intel_region_ttm_resource_to_st - Convert an opaque TTM resource manager resource
- * to an sg_table.
+ * intel_region_ttm_resource_to_rsgt -
+ * Convert an opaque TTM resource manager resource to a refcounted sg_table.
* @mem: The memory region.
* @res: The resource manager resource obtained from the TTM resource manager.
*
@@ -126,17 +157,18 @@ void intel_region_ttm_fini(struct intel_memory_region *mem)
*
* Return: A malloced sg_table on success, an error pointer on failure.
*/
-struct sg_table *intel_region_ttm_resource_to_st(struct intel_memory_region *mem,
- struct ttm_resource *res)
+struct i915_refct_sgt *
+intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
+ struct ttm_resource *res)
{
if (mem->is_range_manager) {
struct ttm_range_mgr_node *range_node =
to_ttm_range_mgr_node(res);
- return i915_sg_from_mm_node(&range_node->mm_nodes[0],
- mem->region.start);
+ return i915_rsgt_from_mm_node(&range_node->mm_nodes[0],
+ mem->region.start);
} else {
- return i915_sg_from_buddy_resource(res, mem->region.start);
+ return i915_rsgt_from_buddy_resource(res, mem->region.start);
}
}
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.h b/drivers/gpu/drm/i915/intel_region_ttm.h
index 6f44075920f2..fdee5e7bd46c 100644
--- a/drivers/gpu/drm/i915/intel_region_ttm.h
+++ b/drivers/gpu/drm/i915/intel_region_ttm.h
@@ -20,10 +20,11 @@ void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv);
int intel_region_ttm_init(struct intel_memory_region *mem);
-void intel_region_ttm_fini(struct intel_memory_region *mem);
+int intel_region_ttm_fini(struct intel_memory_region *mem);
-struct sg_table *intel_region_ttm_resource_to_st(struct intel_memory_region *mem,
- struct ttm_resource *res);
+struct i915_refct_sgt *
+intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
+ struct ttm_resource *res);
void intel_region_ttm_resource_free(struct intel_memory_region *mem,
struct ttm_resource *res);
diff --git a/drivers/gpu/drm/i915/intel_step.c b/drivers/gpu/drm/i915/intel_step.c
index 6cf967631395..a4b16b9e2e55 100644
--- a/drivers/gpu/drm/i915/intel_step.c
+++ b/drivers/gpu/drm/i915/intel_step.c
@@ -23,7 +23,8 @@
* use a macro to define these to make it easier to identify the platforms
* where the two steppings can deviate.
*/
-#define COMMON_STEP(x) .gt_step = STEP_##x, .display_step = STEP_##x
+#define COMMON_STEP(x) .graphics_step = STEP_##x, .display_step = STEP_##x, .media_step = STEP_##x
+#define COMMON_GT_MEDIA_STEP(x) .graphics_step = STEP_##x, .media_step = STEP_##x
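For clarity, the new helper sets the GT and media steppings from a single token; for example:

	/* COMMON_GT_MEDIA_STEP(B0) expands to: */
	.graphics_step = STEP_B0, .media_step = STEP_B0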
static const struct intel_step_info skl_revids[] = {
[0x6] = { COMMON_STEP(G0) },
@@ -33,13 +34,13 @@ static const struct intel_step_info skl_revids[] = {
};
static const struct intel_step_info kbl_revids[] = {
- [1] = { .gt_step = STEP_B0, .display_step = STEP_B0 },
- [2] = { .gt_step = STEP_C0, .display_step = STEP_B0 },
- [3] = { .gt_step = STEP_D0, .display_step = STEP_B0 },
- [4] = { .gt_step = STEP_F0, .display_step = STEP_C0 },
- [5] = { .gt_step = STEP_C0, .display_step = STEP_B1 },
- [6] = { .gt_step = STEP_D1, .display_step = STEP_B1 },
- [7] = { .gt_step = STEP_G0, .display_step = STEP_C0 },
+ [1] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 },
+ [2] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_B0 },
+ [3] = { COMMON_GT_MEDIA_STEP(D0), .display_step = STEP_B0 },
+ [4] = { COMMON_GT_MEDIA_STEP(F0), .display_step = STEP_C0 },
+ [5] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_B1 },
+ [6] = { COMMON_GT_MEDIA_STEP(D1), .display_step = STEP_B1 },
+ [7] = { COMMON_GT_MEDIA_STEP(G0), .display_step = STEP_C0 },
};
static const struct intel_step_info bxt_revids[] = {
@@ -63,16 +64,16 @@ static const struct intel_step_info jsl_ehl_revids[] = {
};
static const struct intel_step_info tgl_uy_revids[] = {
- [0] = { .gt_step = STEP_A0, .display_step = STEP_A0 },
- [1] = { .gt_step = STEP_B0, .display_step = STEP_C0 },
- [2] = { .gt_step = STEP_B1, .display_step = STEP_C0 },
- [3] = { .gt_step = STEP_C0, .display_step = STEP_D0 },
+ [0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 },
+ [1] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_C0 },
+ [2] = { COMMON_GT_MEDIA_STEP(B1), .display_step = STEP_C0 },
+ [3] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_D0 },
};
/* Same GT stepping between tgl_uy_revids and tgl_revids don't mean the same HW */
static const struct intel_step_info tgl_revids[] = {
- [0] = { .gt_step = STEP_A0, .display_step = STEP_B0 },
- [1] = { .gt_step = STEP_B0, .display_step = STEP_D0 },
+ [0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_B0 },
+ [1] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_D0 },
};
static const struct intel_step_info rkl_revids[] = {
@@ -87,38 +88,38 @@ static const struct intel_step_info dg1_revids[] = {
};
static const struct intel_step_info adls_revids[] = {
- [0x0] = { .gt_step = STEP_A0, .display_step = STEP_A0 },
- [0x1] = { .gt_step = STEP_A0, .display_step = STEP_A2 },
- [0x4] = { .gt_step = STEP_B0, .display_step = STEP_B0 },
- [0x8] = { .gt_step = STEP_C0, .display_step = STEP_B0 },
- [0xC] = { .gt_step = STEP_D0, .display_step = STEP_C0 },
+ [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 },
+ [0x1] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A2 },
+ [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 },
+ [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_B0 },
+ [0xC] = { COMMON_GT_MEDIA_STEP(D0), .display_step = STEP_C0 },
};
static const struct intel_step_info adlp_revids[] = {
- [0x0] = { .gt_step = STEP_A0, .display_step = STEP_A0 },
- [0x4] = { .gt_step = STEP_B0, .display_step = STEP_B0 },
- [0x8] = { .gt_step = STEP_C0, .display_step = STEP_C0 },
- [0xC] = { .gt_step = STEP_C0, .display_step = STEP_D0 },
+ [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 },
+ [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 },
+ [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_C0 },
+ [0xC] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_D0 },
};
static const struct intel_step_info xehpsdv_revids[] = {
- [0x0] = { .gt_step = STEP_A0 },
- [0x1] = { .gt_step = STEP_A1 },
- [0x4] = { .gt_step = STEP_B0 },
- [0x8] = { .gt_step = STEP_C0 },
+ [0x0] = { COMMON_GT_MEDIA_STEP(A0) },
+ [0x1] = { COMMON_GT_MEDIA_STEP(A1) },
+ [0x4] = { COMMON_GT_MEDIA_STEP(B0) },
+ [0x8] = { COMMON_GT_MEDIA_STEP(C0) },
};
static const struct intel_step_info dg2_g10_revid_step_tbl[] = {
- [0x0] = { .gt_step = STEP_A0, .display_step = STEP_A0 },
- [0x1] = { .gt_step = STEP_A1, .display_step = STEP_A0 },
- [0x4] = { .gt_step = STEP_B0, .display_step = STEP_B0 },
- [0x8] = { .gt_step = STEP_C0, .display_step = STEP_C0 },
+ [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 },
+ [0x1] = { COMMON_GT_MEDIA_STEP(A1), .display_step = STEP_A0 },
+ [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 },
+ [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_C0 },
};
static const struct intel_step_info dg2_g11_revid_step_tbl[] = {
- [0x0] = { .gt_step = STEP_A0, .display_step = STEP_B0 },
- [0x4] = { .gt_step = STEP_B0, .display_step = STEP_C0 },
- [0x5] = { .gt_step = STEP_B1, .display_step = STEP_C0 },
+ [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_B0 },
+ [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_C0 },
+ [0x5] = { COMMON_GT_MEDIA_STEP(B1), .display_step = STEP_C0 },
};
void intel_step_init(struct drm_i915_private *i915)
@@ -179,7 +180,7 @@ void intel_step_init(struct drm_i915_private *i915)
if (!revids)
return;
- if (revid < size && revids[revid].gt_step != STEP_NONE) {
+ if (revid < size && revids[revid].graphics_step != STEP_NONE) {
step = revids[revid];
} else {
drm_warn(&i915->drm, "Unknown revid 0x%02x\n", revid);
@@ -192,7 +193,7 @@ void intel_step_init(struct drm_i915_private *i915)
* steppings in the array are not monotonically increasing, but
* it's better than defaulting to 0.
*/
- while (revid < size && revids[revid].gt_step == STEP_NONE)
+ while (revid < size && revids[revid].graphics_step == STEP_NONE)
revid++;
if (revid < size) {
@@ -201,12 +202,12 @@ void intel_step_init(struct drm_i915_private *i915)
step = revids[revid];
} else {
drm_dbg(&i915->drm, "Using future steppings\n");
- step.gt_step = STEP_FUTURE;
+ step.graphics_step = STEP_FUTURE;
step.display_step = STEP_FUTURE;
}
}
- if (drm_WARN_ON(&i915->drm, step.gt_step == STEP_NONE))
+ if (drm_WARN_ON(&i915->drm, step.graphics_step == STEP_NONE))
return;
RUNTIME_INFO(i915)->step = step;
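
For orientation, COMMON_GT_MEDIA_STEP() is the helper the tables above now use so a single revid entry initializes both the graphics and media steppings. Its definition is not part of this hunk; a minimal sketch, assuming it expands to a pair of designated initializers for the renamed fields:

	/*
	 * Hypothetical expansion of COMMON_GT_MEDIA_STEP(); the real macro
	 * lives in intel_step.c and may differ in detail.  It relies on
	 * struct intel_step_info and the STEP_* enum from intel_step.h.
	 */
	#define COMMON_GT_MEDIA_STEP(x_)		\
		.graphics_step = STEP_##x_,		\
		.media_step = STEP_##x_

	/* Example entry, equivalent to the adls_revids[0x4] initializer above. */
	static const struct intel_step_info example_entry = {
		COMMON_GT_MEDIA_STEP(B0),
		.display_step = STEP_B0,
	};
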
diff --git a/drivers/gpu/drm/i915/intel_step.h b/drivers/gpu/drm/i915/intel_step.h
index f6641e2a3c77..d71a99bd5179 100644
--- a/drivers/gpu/drm/i915/intel_step.h
+++ b/drivers/gpu/drm/i915/intel_step.h
@@ -11,8 +11,9 @@
struct drm_i915_private;
struct intel_step_info {
- u8 gt_step;
+ u8 graphics_step;
u8 display_step;
+ u8 media_step;
};
#define STEP_ENUM_VAL(name) STEP_##name,
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 722910d02b5f..fc25ebf1a593 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -2020,7 +2020,7 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
-static int uncore_mmio_setup(struct intel_uncore *uncore)
+int intel_uncore_setup_mmio(struct intel_uncore *uncore)
{
struct drm_i915_private *i915 = uncore->i915;
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
@@ -2053,7 +2053,7 @@ static int uncore_mmio_setup(struct intel_uncore *uncore)
return 0;
}
-static void uncore_mmio_cleanup(struct intel_uncore *uncore)
+void intel_uncore_cleanup_mmio(struct intel_uncore *uncore)
{
struct pci_dev *pdev = to_pci_dev(uncore->i915->drm.dev);
@@ -2061,12 +2061,13 @@ static void uncore_mmio_cleanup(struct intel_uncore *uncore)
}
void intel_uncore_init_early(struct intel_uncore *uncore,
- struct drm_i915_private *i915)
+ struct intel_gt *gt)
{
spin_lock_init(&uncore->lock);
- uncore->i915 = i915;
- uncore->rpm = &i915->runtime_pm;
- uncore->debug = &i915->mmio_debug;
+ uncore->i915 = gt->i915;
+ uncore->gt = gt;
+ uncore->rpm = &gt->i915->runtime_pm;
+ uncore->debug = &gt->i915->mmio_debug;
}
static void uncore_raw_init(struct intel_uncore *uncore)
@@ -2146,10 +2147,6 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
struct drm_i915_private *i915 = uncore->i915;
int ret;
- ret = uncore_mmio_setup(uncore);
- if (ret)
- return ret;
-
/*
* The boot firmware initializes local memory and assesses its health.
* If memory training fails, the punit will have been instructed to
@@ -2170,7 +2167,7 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
} else {
ret = uncore_forcewake_init(uncore);
if (ret)
- goto out_mmio_cleanup;
+ return ret;
}
/* make sure fw funcs are set if and only if we have fw*/
@@ -2192,11 +2189,6 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");
return 0;
-
-out_mmio_cleanup:
- uncore_mmio_cleanup(uncore);
-
- return ret;
}
/*
@@ -2261,8 +2253,6 @@ void intel_uncore_fini_mmio(struct intel_uncore *uncore)
intel_uncore_fw_domains_fini(uncore);
iosf_mbi_punit_release();
}
-
- uncore_mmio_cleanup(uncore);
}
static const struct reg_whitelist {
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 3248e4e2c540..210fe2a71612 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -129,6 +129,7 @@ struct intel_uncore {
void __iomem *regs;
struct drm_i915_private *i915;
+ struct intel_gt *gt;
struct intel_runtime_pm *rpm;
spinlock_t lock; /** lock is also taken in irq contexts. */
@@ -217,12 +218,14 @@ u32 intel_uncore_read_with_mcr_steering(struct intel_uncore *uncore,
void
intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug);
void intel_uncore_init_early(struct intel_uncore *uncore,
- struct drm_i915_private *i915);
+ struct intel_gt *gt);
+int intel_uncore_setup_mmio(struct intel_uncore *uncore);
int intel_uncore_init_mmio(struct intel_uncore *uncore);
void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
struct intel_gt *gt);
bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore);
+void intel_uncore_cleanup_mmio(struct intel_uncore *uncore);
void intel_uncore_fini_mmio(struct intel_uncore *uncore);
void intel_uncore_suspend(struct intel_uncore *uncore);
void intel_uncore_resume_early(struct intel_uncore *uncore);
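
With uncore_mmio_setup()/uncore_mmio_cleanup() promoted to the exported intel_uncore_setup_mmio()/intel_uncore_cleanup_mmio(), the MMIO mapping is managed by the caller rather than inside intel_uncore_init_mmio(). A rough sketch of the resulting call order; the probe function name is illustrative, only the intel_uncore_* calls come from the header above:

	/* Illustrative pairing of the new setup/cleanup exports. */
	static int example_mmio_probe(struct intel_uncore *uncore)
	{
		int err;

		err = intel_uncore_setup_mmio(uncore);	/* ioremap the register BAR */
		if (err)
			return err;

		err = intel_uncore_init_mmio(uncore);	/* forcewake domains etc. */
		if (err)
			goto err_mmio;

		return 0;

	err_mmio:
		intel_uncore_cleanup_mmio(uncore);	/* iounmap on the error path */
		return err;
	}
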
diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c
index 5e511bb891f9..f06d21005106 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.c
+++ b/drivers/gpu/drm/i915/intel_wopcm.c
@@ -220,7 +220,7 @@ static bool __wopcm_regs_locked(struct intel_uncore *uncore,
void intel_wopcm_init(struct intel_wopcm *wopcm)
{
struct drm_i915_private *i915 = wopcm_to_i915(wopcm);
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
u32 guc_fw_size = intel_uc_fw_get_upload_size(&gt->uc.guc.fw);
u32 huc_fw_size = intel_uc_fw_get_upload_size(&gt->uc.huc.fw);
u32 ctx_rsvd = context_reserved_size(i915);
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
index 23fd86de5a24..6a7d4e2ee138 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
@@ -7,26 +7,29 @@
#include "intel_pxp_irq.h"
#include "intel_pxp_pm.h"
#include "intel_pxp_session.h"
+#include "i915_drv.h"
-void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime)
+void intel_pxp_suspend_prepare(struct intel_pxp *pxp)
{
if (!intel_pxp_is_enabled(pxp))
return;
pxp->arb_is_valid = false;
- /*
- * Contexts using protected objects keep a runtime PM reference, so we
- * can only runtime suspend when all of them have been either closed
- * or banned. Therefore, there is no need to invalidate in that
- * scenario.
- */
- if (!runtime)
- intel_pxp_invalidate(pxp);
+ intel_pxp_invalidate(pxp);
+}
- intel_pxp_fini_hw(pxp);
+void intel_pxp_suspend(struct intel_pxp *pxp)
+{
+ intel_wakeref_t wakeref;
- pxp->hw_state_invalidated = false;
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ with_intel_runtime_pm(&pxp_to_gt(pxp)->i915->runtime_pm, wakeref) {
+ intel_pxp_fini_hw(pxp);
+ pxp->hw_state_invalidated = false;
+ }
}
void intel_pxp_resume(struct intel_pxp *pxp)
@@ -44,3 +47,15 @@ void intel_pxp_resume(struct intel_pxp *pxp)
intel_pxp_init_hw(pxp);
}
+
+void intel_pxp_runtime_suspend(struct intel_pxp *pxp)
+{
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ pxp->arb_is_valid = false;
+
+ intel_pxp_fini_hw(pxp);
+
+ pxp->hw_state_invalidated = false;
+}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
index c89e97a0c3d0..16990a3f2f85 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
@@ -9,16 +9,29 @@
#include "intel_pxp_types.h"
#ifdef CONFIG_DRM_I915_PXP
-void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime);
+void intel_pxp_suspend_prepare(struct intel_pxp *pxp);
+void intel_pxp_suspend(struct intel_pxp *pxp);
void intel_pxp_resume(struct intel_pxp *pxp);
+void intel_pxp_runtime_suspend(struct intel_pxp *pxp);
#else
-static inline void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime)
+static inline void intel_pxp_suspend_prepare(struct intel_pxp *pxp)
+{
+}
+
+static inline void intel_pxp_suspend(struct intel_pxp *pxp)
{
}
static inline void intel_pxp_resume(struct intel_pxp *pxp)
{
}
-#endif
+static inline void intel_pxp_runtime_suspend(struct intel_pxp *pxp)
+{
+}
+#endif
+static inline void intel_pxp_runtime_resume(struct intel_pxp *pxp)
+{
+ intel_pxp_resume(pxp);
+}
#endif /* __INTEL_PXP_PM_H__ */
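
intel_pxp_suspend() above wraps the hardware teardown in with_intel_runtime_pm(), i915's scoped-wakeref helper. Roughly, it behaves like the following for-loop macro; this is a paraphrase of the runtime-PM helper, not part of this patch, and the real definition in intel_runtime_pm.h may differ in detail:

	/* Approximate shape of the scoped runtime-PM helper. */
	#define with_intel_runtime_pm(rpm, wf)					\
		for ((wf) = intel_runtime_pm_get(rpm); (wf);			\
		     intel_runtime_pm_put((rpm), (wf)), (wf) = 0)

	/* Usage mirrors the suspend path: hold a wakeref only around HW access. */
	static void example_scoped_hw_access(struct intel_pxp *pxp)
	{
		intel_wakeref_t wakeref;

		with_intel_runtime_pm(&pxp_to_gt(pxp)->i915->runtime_pm, wakeref)
			intel_pxp_fini_hw(pxp);
	}
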
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
index 5d169624ad60..195b2323ec00 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
@@ -16,7 +16,9 @@
static inline struct intel_pxp *i915_dev_to_pxp(struct device *i915_kdev)
{
- return &kdev_to_i915(i915_kdev)->gt.pxp;
+ struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
+
+ return &to_gt(i915)->pxp;
}
static int intel_pxp_tee_io_message(struct intel_pxp *pxp,
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 61bf4560d8af..2dac9be1de58 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -254,7 +254,7 @@ int i915_active_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_active_barrier),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
return i915_subtests(tests, i915);
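
The selftest conversions from &i915->gt to to_gt(i915) lean on a small accessor introduced elsewhere in this series. It is presumably a trivial inline along these lines, shown here only for orientation; the exact definition is not part of this hunk:

	/*
	 * Orientation only: a to_gt() accessor returning the primary GT lets
	 * existing &i915->gt users be converted mechanically before the GT
	 * storage itself is reorganized.
	 */
	static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
	{
		return &i915->gt;
	}
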
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 152d9ab135b1..b5576888cd78 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -248,7 +248,7 @@ int i915_gem_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_gem_ww_ctx),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
return i915_live_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index f99bb0113726..75b709c26dd3 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -442,6 +442,7 @@ static int igt_evict_contexts(void *arg)
/* Overfill the GGTT with context objects and so try to evict one. */
for_each_engine(engine, gt, id) {
struct i915_sw_fence fence;
+ struct i915_request *last = NULL;
count = 0;
onstack_fence_init(&fence);
@@ -479,6 +480,9 @@ static int igt_evict_contexts(void *arg)
i915_request_add(rq);
count++;
+ if (last)
+ i915_request_put(last);
+ last = i915_request_get(rq);
err = 0;
} while(1);
onstack_fence_fini(&fence);
@@ -486,6 +490,21 @@ static int igt_evict_contexts(void *arg)
count, engine->name);
if (err)
break;
+ if (last) {
+ if (i915_request_wait(last, 0, HZ) < 0) {
+ err = -EIO;
+ i915_request_put(last);
+ pr_err("Failed waiting for last request (on %s)",
+ engine->name);
+ break;
+ }
+ i915_request_put(last);
+ }
+ err = intel_gt_wait_for_idle(engine->gt, HZ * 3);
+ if (err) {
+ pr_err("Failed to idle GT (on %s)", engine->name);
+ break;
+ }
}
mutex_lock(&ggtt->vm.mutex);
@@ -526,7 +545,7 @@ int i915_gem_evict_mock_selftests(void)
return -ENOMEM;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- err = i915_subtests(tests, &i915->gt);
+ err = i915_subtests(tests, to_gt(i915));
mock_destroy_device(i915);
return err;
@@ -538,8 +557,8 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_evict_contexts),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
- return intel_gt_live_subtests(tests, &i915->gt);
+ return intel_gt_live_subtests(tests, to_gt(i915));
}
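
The igt_evict_contexts() change keeps a reference to the most recent request so the loop can wait on it, and then for GT idle, before moving to the next engine. The get/put discipline it adds boils down to the sketch below; more_work() and submit_next() are stand-ins for the selftest's submission loop, not real helpers:

	static int example_track_last_request(void)
	{
		struct i915_request *last = NULL, *rq;

		while (more_work()) {
			rq = submit_next();
			if (last)
				i915_request_put(last);	/* drop the previous reference */
			last = i915_request_get(rq);	/* keep only the newest */
		}

		if (!last)
			return 0;

		if (i915_request_wait(last, 0, HZ) < 0) {	/* bounded wait */
			i915_request_put(last);
			return -EIO;
		}
		i915_request_put(last);
		return 0;
	}
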
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 46f4236039a9..575705c3bce9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -155,7 +155,7 @@ static int igt_ppgtt_alloc(void *arg)
if (!HAS_PPGTT(dev_priv))
return 0;
- ppgtt = i915_ppgtt_create(&dev_priv->gt, 0);
+ ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
@@ -1053,7 +1053,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
if (IS_ERR(file))
return PTR_ERR(file);
- ppgtt = i915_ppgtt_create(&dev_priv->gt, 0);
+ ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_free;
@@ -1275,7 +1275,7 @@ static void track_vma_bind(struct i915_vma *vma)
__i915_gem_object_pin_pages(obj);
- GEM_BUG_ON(vma->pages);
+ GEM_BUG_ON(atomic_read(&vma->pages_count));
atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
__i915_gem_object_pin_pages(obj);
vma->pages = obj->mm.pages;
@@ -1953,7 +1953,9 @@ static int igt_cs_tlb(void *arg)
goto end;
}
- err = vma->ops->set_pages(vma);
+ i915_gem_object_lock(bbe, NULL);
+ err = i915_vma_get_pages(vma);
+ i915_gem_object_unlock(bbe);
if (err)
goto end;
@@ -1994,7 +1996,7 @@ end_ww:
i915_request_put(rq);
}
- vma->ops->clear_pages(vma);
+ i915_vma_put_pages(vma);
err = context_sync(ce);
if (err) {
@@ -2009,7 +2011,9 @@ end_ww:
goto end;
}
- err = vma->ops->set_pages(vma);
+ i915_gem_object_lock(act, NULL);
+ err = i915_vma_get_pages(vma);
+ i915_gem_object_unlock(act);
if (err)
goto end;
@@ -2047,7 +2051,7 @@ end_ww:
}
end_spin(batch, count - 1);
- vma->ops->clear_pages(vma);
+ i915_vma_put_pages(vma);
err = context_sync(ce);
if (err) {
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
index 9e9a6cb1d9e5..88db2e3d81d0 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -424,7 +424,7 @@ int i915_perf_live_selftests(struct drm_i915_private *i915)
if (!perf->metrics_kobj || !perf->ops.enable_metric_set)
return 0;
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
err = alloc_empty_config(&i915->perf);
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index d67710d10615..92a859b34190 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -209,6 +209,10 @@ static int igt_request_rewind(void *arg)
int err = -EINVAL;
ctx[0] = mock_context(i915, "A");
+ if (!ctx[0]) {
+ err = -ENOMEM;
+ goto err_ctx_0;
+ }
ce = i915_gem_context_get_engine(ctx[0], RCS0);
GEM_BUG_ON(IS_ERR(ce));
@@ -223,6 +227,10 @@ static int igt_request_rewind(void *arg)
i915_request_add(request);
ctx[1] = mock_context(i915, "B");
+ if (!ctx[1]) {
+ err = -ENOMEM;
+ goto err_ctx_1;
+ }
ce = i915_gem_context_get_engine(ctx[1], RCS0);
GEM_BUG_ON(IS_ERR(ce));
@@ -261,9 +269,11 @@ err:
i915_request_put(vip);
err_context_1:
mock_context_close(ctx[1]);
+err_ctx_1:
i915_request_put(request);
err_context_0:
mock_context_close(ctx[0]);
+err_ctx_0:
mock_device_flush(i915);
return err;
}
@@ -831,7 +841,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- intel_gt_chipset_flush(&i915->gt);
+ intel_gt_chipset_flush(to_gt(i915));
vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
@@ -972,7 +982,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
if (IS_ERR(obj))
return ERR_CAST(obj);
- vma = i915_vma_instance(obj, i915->gt.vm, NULL);
+ vma = i915_vma_instance(obj, to_gt(i915)->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -1004,7 +1014,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- intel_gt_chipset_flush(&i915->gt);
+ intel_gt_chipset_flush(to_gt(i915));
return vma;
@@ -1690,7 +1700,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_breadcrumbs_smoketest),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
return i915_subtests(tests, i915);
@@ -2805,7 +2815,7 @@ static int p_sync0(void *arg)
i915_request_add(rq);
err = 0;
- if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ if (i915_request_wait(rq, 0, HZ) < 0)
err = -ETIME;
i915_request_put(rq);
if (err)
@@ -2876,7 +2886,7 @@ static int p_sync1(void *arg)
i915_request_add(rq);
err = 0;
- if (prev && i915_request_wait(prev, 0, HZ / 5) < 0)
+ if (prev && i915_request_wait(prev, 0, HZ) < 0)
err = -ETIME;
i915_request_put(prev);
prev = rq;
@@ -3081,7 +3091,7 @@ int i915_request_perf_selftests(struct drm_i915_private *i915)
SUBTEST(perf_parallel_engines),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
return i915_subtests(tests, i915);
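
igt_request_rewind() gains NULL checks for mock_context(), and the new err_ctx_0/err_ctx_1 labels preserve the usual kernel unwind ordering: each acquisition gets a label that releases only what was obtained before it. A generic, compressed view of that pattern; acquire_a/b and release_a are hypothetical helpers used purely for illustration:

	static int example_unwind(void)
	{
		void *a, *b;
		int err;

		a = acquire_a();
		if (!a)
			return -ENOMEM;		/* nothing to unwind yet */

		b = acquire_b();
		if (!b) {
			err = -ENOMEM;
			goto err_a;		/* release only what we already hold */
		}

		return 0;

	err_a:
		release_a(a);
		return err;
	}
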
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index 484759c9409c..2d6d7bd13c3c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -298,10 +298,10 @@ int __i915_live_setup(void *data)
struct drm_i915_private *i915 = data;
/* The selftests expect an idle system */
- if (intel_gt_pm_wait_for_idle(&i915->gt))
+ if (intel_gt_pm_wait_for_idle(to_gt(i915)))
return -EIO;
- return intel_gt_terminally_wedged(&i915->gt);
+ return intel_gt_terminally_wedged(to_gt(i915));
}
int __i915_live_teardown(int err, void *data)
diff --git a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
index cbf45d85cbff..daa985e5a19b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
@@ -28,7 +28,7 @@
#include "../i915_selftest.h"
-static int __i915_sw_fence_call
+static int
fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
switch (state) {
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index 1f10fe36619b..5c5809dfe9b2 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -691,7 +691,11 @@ static int igt_vma_rotate_remap(void *arg)
}
i915_vma_unpin(vma);
-
+ err = i915_vma_unbind(vma);
+ if (err) {
+ pr_err("Unbinding returned %i\n", err);
+ goto out_object;
+ }
cond_resched();
}
}
@@ -848,6 +852,11 @@ static int igt_vma_partial(void *arg)
i915_vma_unpin(vma);
nvma++;
+ err = i915_vma_unbind(vma);
+ if (err) {
+ pr_err("Unbinding returned %i\n", err);
+ goto out_object;
+ }
cond_resched();
}
@@ -882,6 +891,12 @@ static int igt_vma_partial(void *arg)
i915_vma_unpin(vma);
+ err = i915_vma_unbind(vma);
+ if (err) {
+ pr_err("Unbinding returned %i\n", err);
+ goto out_object;
+ }
+
count = 0;
list_for_each_entry(vma, &obj->vma.list, obj_link)
count++;
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index a6c71fca61aa..b84594601d30 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -14,7 +14,7 @@
int igt_flush_test(struct drm_i915_private *i915)
{
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
int ret = intel_gt_is_wedged(gt) ? -EIO : 0;
cond_resched();
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.c b/drivers/gpu/drm/i915/selftests/igt_live_test.c
index 1c721542e277..72b58b66692a 100644
--- a/drivers/gpu/drm/i915/selftests/igt_live_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_live_test.c
@@ -16,7 +16,7 @@ int igt_live_test_begin(struct igt_live_test *t,
const char *func,
const char *name)
{
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err;
@@ -57,7 +57,7 @@ int igt_live_test_end(struct igt_live_test *t)
return -EIO;
}
- for_each_engine(engine, &i915->gt, id) {
+ for_each_engine(engine, to_gt(i915), id) {
if (t->reset_engine[id] ==
i915_reset_engine_count(&i915->gpu_error, engine))
continue;
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c
index 9f8590b868a9..a2838c65f8a5 100644
--- a/drivers/gpu/drm/i915/selftests/igt_reset.c
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.c
@@ -36,7 +36,7 @@ void igt_global_reset_unlock(struct intel_gt *gt)
enum intel_engine_id id;
for_each_engine(engine, gt, id)
- clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
clear_bit(I915_RESET_BACKOFF, &gt->reset.flags);
wake_up_all(&gt->reset.queue);
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 418caae84759..8255561ff853 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -225,7 +225,7 @@ static int igt_mock_reserve(void *arg)
out_close:
close_objects(mem, &objects);
- intel_memory_region_put(mem);
+ intel_memory_region_destroy(mem);
out_free_order:
kfree(order);
return err;
@@ -439,7 +439,7 @@ static int igt_mock_splintered_region(void *arg)
out_close:
close_objects(mem, &objects);
out_put:
- intel_memory_region_put(mem);
+ intel_memory_region_destroy(mem);
return err;
}
@@ -507,7 +507,7 @@ static int igt_mock_max_segment(void *arg)
out_close:
close_objects(mem, &objects);
out_put:
- intel_memory_region_put(mem);
+ intel_memory_region_destroy(mem);
return err;
}
@@ -1196,7 +1196,7 @@ int intel_memory_region_mock_selftests(void)
err = i915_subtests(tests, mem);
- intel_memory_region_put(mem);
+ intel_memory_region_destroy(mem);
out_unref:
mock_destroy_device(i915);
return err;
@@ -1217,7 +1217,7 @@ int intel_memory_region_live_selftests(struct drm_i915_private *i915)
return 0;
}
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
return i915_live_subtests(tests, i915);
@@ -1229,7 +1229,7 @@ int intel_memory_region_perf_selftests(struct drm_i915_private *i915)
SUBTEST(perf_memcpy),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
return i915_live_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index bc8128170a99..cdd196783535 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -344,5 +344,5 @@ int intel_uncore_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_forcewake_domains),
};
- return intel_gt_live_subtests(tests, &i915->gt);
+ return intel_gt_live_subtests(tests, to_gt(i915));
}
diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
index 080b90b63d16..bf2752cc1e0b 100644
--- a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
@@ -26,7 +26,7 @@
/* Small library of different fence types useful for writing tests */
-static int __i915_sw_fence_call
+static int
nop_fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
return NOTIFY_DONE;
@@ -41,12 +41,12 @@ void __onstack_fence_init(struct i915_sw_fence *fence,
__init_waitqueue_head(&fence->wait, name, key);
atomic_set(&fence->pending, 1);
fence->error = 0;
- fence->flags = (unsigned long)nop_fence_notify;
+ fence->fn = nop_fence_notify;
}
void onstack_fence_fini(struct i915_sw_fence *fence)
{
- if (!fence->flags)
+ if (!fence->fn)
return;
i915_sw_fence_commit(fence);
@@ -89,7 +89,7 @@ struct heap_fence {
};
};
-static int __i915_sw_fence_call
+static int
heap_fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
struct heap_fence *h = container_of(fence, typeof(*h), fence);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 4f8180146888..8aa7b1d33865 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -45,7 +45,7 @@
void mock_device_flush(struct drm_i915_private *i915)
{
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -64,7 +64,7 @@ static void mock_device_release(struct drm_device *dev)
goto out;
mock_device_flush(i915);
- intel_gt_driver_remove(&i915->gt);
+ intel_gt_driver_remove(to_gt(i915));
i915_gem_drain_workqueue(i915);
i915_gem_drain_freed_objects(i915);
@@ -73,7 +73,7 @@ static void mock_device_release(struct drm_device *dev)
destroy_workqueue(i915->wq);
intel_region_ttm_device_fini(i915);
- intel_gt_driver_late_release(&i915->gt);
+ intel_gt_driver_late_release(to_gt(i915));
intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
@@ -165,7 +165,7 @@ struct drm_i915_private *mock_gem_device(void)
/* Using the global GTT may ask questions about KMS users, so prepare */
drm_mode_config_init(&i915->drm);
- mkwrite_device_info(i915)->graphics_ver = -1;
+ mkwrite_device_info(i915)->graphics.ver = -1;
mkwrite_device_info(i915)->page_sizes =
I915_GTT_PAGE_SIZE_4K |
@@ -175,12 +175,14 @@ struct drm_i915_private *mock_gem_device(void)
mkwrite_device_info(i915)->memory_regions = REGION_SMEM;
intel_memory_regions_hw_probe(i915);
- mock_uncore_init(&i915->uncore, i915);
+ spin_lock_init(&i915->gpu_error.lock);
i915_gem_init__mm(i915);
- intel_gt_init_early(&i915->gt, i915);
- atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */
- i915->gt.awake = -ENODEV;
+ intel_gt_init_early(to_gt(i915), i915);
+ __intel_gt_init_early(to_gt(i915), i915);
+ mock_uncore_init(&i915->uncore, i915);
+ atomic_inc(&to_gt(i915)->wakeref.count); /* disable; no hw support */
+ to_gt(i915)->awake = -ENODEV;
ret = intel_region_ttm_device_init(i915);
if (ret)
@@ -193,19 +195,19 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_contexts(i915);
mock_init_ggtt(i915, &i915->ggtt);
- i915->gt.vm = i915_vm_get(&i915->ggtt.vm);
+ to_gt(i915)->vm = i915_vm_get(&i915->ggtt.vm);
mkwrite_device_info(i915)->platform_engine_mask = BIT(0);
- i915->gt.info.engine_mask = BIT(0);
+ to_gt(i915)->info.engine_mask = BIT(0);
- i915->gt.engine[RCS0] = mock_engine(i915, "mock", RCS0);
- if (!i915->gt.engine[RCS0])
+ to_gt(i915)->engine[RCS0] = mock_engine(i915, "mock", RCS0);
+ if (!to_gt(i915)->engine[RCS0])
goto err_unlock;
- if (mock_engine_init(i915->gt.engine[RCS0]))
+ if (mock_engine_init(to_gt(i915)->engine[RCS0]))
goto err_context;
- __clear_bit(I915_WEDGED, &i915->gt.reset.flags);
+ __clear_bit(I915_WEDGED, &to_gt(i915)->reset.flags);
intel_engines_driver_register(i915);
i915->do_release = true;
@@ -214,13 +216,13 @@ struct drm_i915_private *mock_gem_device(void)
return i915;
err_context:
- intel_gt_driver_remove(&i915->gt);
+ intel_gt_driver_remove(to_gt(i915));
err_unlock:
destroy_workqueue(i915->wq);
err_drv:
intel_region_ttm_device_fini(i915);
err_ttm:
- intel_gt_driver_late_release(&i915->gt);
+ intel_gt_driver_late_release(to_gt(i915));
intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
mock_destroy_device(i915);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index cc047ec594f9..1802baf80a17 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -70,7 +70,7 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
if (!ppgtt)
return NULL;
- ppgtt->vm.gt = &i915->gt;
+ ppgtt->vm.gt = to_gt(i915);
ppgtt->vm.i915 = i915;
ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
ppgtt->vm.dma = i915->drm.dev;
@@ -78,6 +78,7 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
ppgtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ppgtt->vm.alloc_scratch_dma = alloc_pt_dma;
ppgtt->vm.clear_range = mock_clear_range;
ppgtt->vm.insert_page = mock_insert_page;
@@ -86,8 +87,6 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
ppgtt->vm.vma_ops.bind_vma = mock_bind_ppgtt;
ppgtt->vm.vma_ops.unbind_vma = mock_unbind_ppgtt;
- ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
- ppgtt->vm.vma_ops.clear_pages = clear_pages;
return ppgtt;
}
@@ -109,7 +108,7 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
{
memset(ggtt, 0, sizeof(*ggtt));
- ggtt->vm.gt = &i915->gt;
+ ggtt->vm.gt = to_gt(i915);
ggtt->vm.i915 = i915;
ggtt->vm.is_ggtt = true;
@@ -118,6 +117,7 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
ggtt->vm.total = 4096 * PAGE_SIZE;
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
ggtt->vm.clear_range = mock_clear_range;
ggtt->vm.insert_page = mock_insert_page;
@@ -126,11 +126,9 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.bind_vma = mock_bind_ggtt;
ggtt->vm.vma_ops.unbind_vma = mock_unbind_ggtt;
- ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
- ggtt->vm.vma_ops.clear_pages = clear_pages;
i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
- i915->gt.ggtt = ggtt;
+ to_gt(i915)->ggtt = ggtt;
}
void mock_fini_ggtt(struct i915_ggtt *ggtt)
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index 75793008c4ef..19bff8afcaaa 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -15,9 +15,9 @@
static void mock_region_put_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
+ i915_refct_sgt_put(obj->mm.rsgt);
+ obj->mm.rsgt = NULL;
intel_region_ttm_resource_free(obj->mm.region, obj->mm.res);
- sg_free_table(pages);
- kfree(pages);
}
static int mock_region_get_pages(struct drm_i915_gem_object *obj)
@@ -36,12 +36,14 @@ static int mock_region_get_pages(struct drm_i915_gem_object *obj)
if (IS_ERR(obj->mm.res))
return PTR_ERR(obj->mm.res);
- pages = intel_region_ttm_resource_to_st(obj->mm.region, obj->mm.res);
- if (IS_ERR(pages)) {
- err = PTR_ERR(pages);
+ obj->mm.rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region,
+ obj->mm.res);
+ if (IS_ERR(obj->mm.rsgt)) {
+ err = PTR_ERR(obj->mm.rsgt);
goto err_free_resource;
}
+ pages = &obj->mm.rsgt->table;
__i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl));
return 0;
@@ -82,13 +84,16 @@ static int mock_object_init(struct intel_memory_region *mem,
return 0;
}
-static void mock_region_fini(struct intel_memory_region *mem)
+static int mock_region_fini(struct intel_memory_region *mem)
{
struct drm_i915_private *i915 = mem->i915;
int instance = mem->instance;
+ int ret;
- intel_region_ttm_fini(mem);
+ ret = intel_region_ttm_fini(mem);
ida_free(&i915->selftest.mock_region_instances, instance);
+
+ return ret;
}
static const struct intel_memory_region_ops mock_region_ops = {
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.c b/drivers/gpu/drm/i915/selftests/mock_uncore.c
index ca57e4008701..f2d6be5e1230 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.c
@@ -42,7 +42,7 @@ __nop_read(64)
void mock_uncore_init(struct intel_uncore *uncore,
struct drm_i915_private *i915)
{
- intel_uncore_init_early(uncore, i915);
+ intel_uncore_init_early(uncore, to_gt(i915));
ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, nop);
ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, nop);
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index b5fa0e45a839..bb9738c7c825 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -4,7 +4,7 @@ config DRM_IMX
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
select DRM_GEM_CMA_HELPER
- select DRM_KMS_CMA_HELPER
+ select DRM_KMS_HELPER
depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM || COMPILE_TEST)
depends on IMX_IPUV3_CORE
help
diff --git a/drivers/gpu/drm/imx/dcss/Kconfig b/drivers/gpu/drm/imx/dcss/Kconfig
index 2b17a964ff05..7374f1952762 100644
--- a/drivers/gpu/drm/imx/dcss/Kconfig
+++ b/drivers/gpu/drm/imx/dcss/Kconfig
@@ -1,7 +1,7 @@
config DRM_IMX_DCSS
tristate "i.MX8MQ DCSS"
select IMX_IRQSTEER
- select DRM_KMS_CMA_HELPER
+ select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
depends on DRM && ARCH_MXC && ARM64
help
diff --git a/drivers/gpu/drm/ingenic/Kconfig b/drivers/gpu/drm/ingenic/Kconfig
index 3b57f8be007c..001f59fb06d5 100644
--- a/drivers/gpu/drm/ingenic/Kconfig
+++ b/drivers/gpu/drm/ingenic/Kconfig
@@ -8,7 +8,6 @@ config DRM_INGENIC
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
help
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index b4943a56be09..542c4af70661 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -6,6 +6,7 @@
#include "ingenic-drm.h"
+#include <linux/bitfield.h>
#include <linux/component.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
@@ -49,6 +50,11 @@ struct ingenic_dma_hwdesc {
u32 addr;
u32 id;
u32 cmd;
+ /* extended hw descriptor for jz4780 */
+ u32 offsize;
+ u32 pagewidth;
+ u32 cpos;
+ u32 dessize;
} __aligned(16);
struct ingenic_dma_hwdescs {
@@ -60,6 +66,7 @@ struct jz_soc_info {
bool needs_dev_clk;
bool has_osd;
bool map_noncoherent;
+ bool use_extended_hwdesc;
unsigned int max_width, max_height;
const u32 *formats_f0, *formats_f1;
unsigned int num_formats_f0, num_formats_f1;
@@ -173,7 +180,6 @@ static const struct regmap_config ingenic_drm_regmap_config = {
.val_bits = 32,
.reg_stride = 4,
- .max_register = JZ_REG_LCD_SIZE1,
.writeable_reg = ingenic_drm_writeable_reg,
};
@@ -447,6 +453,9 @@ static int ingenic_drm_plane_atomic_check(struct drm_plane *plane,
if (!crtc)
return 0;
+ if (plane == &priv->f0)
+ return -EINVAL;
+
crtc_state = drm_atomic_get_existing_crtc_state(state,
crtc);
if (WARN_ON(!crtc_state))
@@ -663,6 +672,33 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
hwdesc->cmd = JZ_LCD_CMD_EOF_IRQ | (width * height * cpp / 4);
hwdesc->next = dma_hwdesc_addr(priv, next_id);
+ if (priv->soc_info->use_extended_hwdesc) {
+ hwdesc->cmd |= JZ_LCD_CMD_FRM_ENABLE;
+
+ /* Extended 8-byte descriptor */
+ hwdesc->cpos = 0;
+ hwdesc->offsize = 0;
+ hwdesc->pagewidth = 0;
+
+ switch (newstate->fb->format->format) {
+ case DRM_FORMAT_XRGB1555:
+ hwdesc->cpos |= JZ_LCD_CPOS_RGB555;
+ fallthrough;
+ case DRM_FORMAT_RGB565:
+ hwdesc->cpos |= JZ_LCD_CPOS_BPP_15_16;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ hwdesc->cpos |= JZ_LCD_CPOS_BPP_18_24;
+ break;
+ }
+ hwdesc->cpos |= (JZ_LCD_CPOS_COEFFICIENT_1 <<
+ JZ_LCD_CPOS_COEFFICIENT_OFFSET);
+ hwdesc->dessize =
+ (0xff << JZ_LCD_DESSIZE_ALPHA_OFFSET) |
+ FIELD_PREP(JZ_LCD_DESSIZE_HEIGHT_MASK, height - 1) |
+ FIELD_PREP(JZ_LCD_DESSIZE_WIDTH_MASK, width - 1);
+ }
+
if (drm_atomic_crtc_needs_modeset(crtc_state)) {
fourcc = newstate->fb->format->format;
@@ -694,6 +730,9 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
| JZ_LCD_CFG_SPL_DISABLE | JZ_LCD_CFG_REV_DISABLE;
}
+ if (priv->soc_info->use_extended_hwdesc)
+ cfg |= JZ_LCD_CFG_DESCRIPTOR_8;
+
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
cfg |= JZ_LCD_CFG_HSYNC_ACTIVE_LOW;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
@@ -1011,6 +1050,8 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
struct ingenic_drm_bridge *ib;
struct drm_device *drm;
void __iomem *base;
+ struct resource *res;
+ struct regmap_config regmap_config;
long parent_rate;
unsigned int i, clone_mask = 0;
int ret, irq;
@@ -1056,14 +1097,16 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
drm->mode_config.funcs = &ingenic_drm_mode_config_funcs;
drm->mode_config.helper_private = &ingenic_drm_mode_config_helpers;
- base = devm_platform_ioremap_resource(pdev, 0);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base)) {
dev_err(dev, "Failed to get memory resource\n");
return PTR_ERR(base);
}
+ regmap_config = ingenic_drm_regmap_config;
+ regmap_config.max_register = res->end - res->start;
priv->map = devm_regmap_init_mmio(dev, base,
- &ingenic_drm_regmap_config);
+ &regmap_config);
if (IS_ERR(priv->map)) {
dev_err(dev, "Failed to create regmap\n");
return PTR_ERR(priv->map);
@@ -1465,10 +1508,23 @@ static const struct jz_soc_info jz4770_soc_info = {
.num_formats_f0 = ARRAY_SIZE(jz4770_formats_f0),
};
+static const struct jz_soc_info jz4780_soc_info = {
+ .needs_dev_clk = true,
+ .has_osd = true,
+ .use_extended_hwdesc = true,
+ .max_width = 4096,
+ .max_height = 2048,
+ .formats_f1 = jz4770_formats_f1,
+ .num_formats_f1 = ARRAY_SIZE(jz4770_formats_f1),
+ .formats_f0 = jz4770_formats_f0,
+ .num_formats_f0 = ARRAY_SIZE(jz4770_formats_f0),
+};
+
static const struct of_device_id ingenic_drm_of_match[] = {
{ .compatible = "ingenic,jz4740-lcd", .data = &jz4740_soc_info },
{ .compatible = "ingenic,jz4725b-lcd", .data = &jz4725b_soc_info },
{ .compatible = "ingenic,jz4770-lcd", .data = &jz4770_soc_info },
+ { .compatible = "ingenic,jz4780-lcd", .data = &jz4780_soc_info },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, ingenic_drm_of_match);
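
The regmap change above sizes max_register from the actual memory resource instead of hard-coding JZ_REG_LCD_SIZE1, so the larger jz4780 register window is covered; max_register is the last valid offset, hence res->end - res->start rather than resource_size(res). A minimal sketch of that setup, assuming a platform-device probe context and with base_config standing in for ingenic_drm_regmap_config:

	#include <linux/platform_device.h>
	#include <linux/regmap.h>

	static struct regmap *example_init_regmap(struct platform_device *pdev,
						  const struct regmap_config *base_config)
	{
		struct regmap_config cfg = *base_config;	/* copy, then specialize */
		struct resource *res;
		void __iomem *base;

		base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
		if (IS_ERR(base))
			return ERR_PTR(PTR_ERR(base));

		/* max_register is an offset, so use (size - 1), i.e. end - start. */
		cfg.max_register = res->end - res->start;

		return devm_regmap_init_mmio(&pdev->dev, base, &cfg);
	}
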
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.h b/drivers/gpu/drm/ingenic/ingenic-drm.h
index 22654ac1dde1..cb1d09b62588 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm.h
+++ b/drivers/gpu/drm/ingenic/ingenic-drm.h
@@ -44,8 +44,11 @@
#define JZ_REG_LCD_XYP1 0x124
#define JZ_REG_LCD_SIZE0 0x128
#define JZ_REG_LCD_SIZE1 0x12c
+#define JZ_REG_LCD_PCFG 0x2c0
#define JZ_LCD_CFG_SLCD BIT(31)
+#define JZ_LCD_CFG_DESCRIPTOR_8 BIT(28)
+#define JZ_LCD_CFG_RECOVER_FIFO_UNDERRUN BIT(25)
#define JZ_LCD_CFG_PS_DISABLE BIT(23)
#define JZ_LCD_CFG_CLS_DISABLE BIT(22)
#define JZ_LCD_CFG_SPL_DISABLE BIT(21)
@@ -63,6 +66,7 @@
#define JZ_LCD_CFG_DE_ACTIVE_LOW BIT(9)
#define JZ_LCD_CFG_VSYNC_ACTIVE_LOW BIT(8)
#define JZ_LCD_CFG_18_BIT BIT(7)
+#define JZ_LCD_CFG_24_BIT BIT(6)
#define JZ_LCD_CFG_PDW (BIT(5) | BIT(4))
#define JZ_LCD_CFG_MODE_GENERIC_16BIT 0
@@ -132,6 +136,7 @@
#define JZ_LCD_CMD_SOF_IRQ BIT(31)
#define JZ_LCD_CMD_EOF_IRQ BIT(30)
#define JZ_LCD_CMD_ENABLE_PAL BIT(28)
+#define JZ_LCD_CMD_FRM_ENABLE BIT(26)
#define JZ_LCD_SYNC_MASK 0x3ff
@@ -153,6 +158,7 @@
#define JZ_LCD_RGBC_EVEN_BGR (0x5 << 0)
#define JZ_LCD_OSDC_OSDEN BIT(0)
+#define JZ_LCD_OSDC_ALPHAEN BIT(2)
#define JZ_LCD_OSDC_F0EN BIT(3)
#define JZ_LCD_OSDC_F1EN BIT(4)
@@ -176,6 +182,38 @@
#define JZ_LCD_SIZE01_WIDTH_LSB 0
#define JZ_LCD_SIZE01_HEIGHT_LSB 16
+#define JZ_LCD_DESSIZE_ALPHA_OFFSET 24
+#define JZ_LCD_DESSIZE_HEIGHT_MASK GENMASK(23, 12)
+#define JZ_LCD_DESSIZE_WIDTH_MASK GENMASK(11, 0)
+
+#define JZ_LCD_CPOS_BPP_15_16 (4 << 27)
+#define JZ_LCD_CPOS_BPP_18_24 (5 << 27)
+#define JZ_LCD_CPOS_BPP_30 (7 << 27)
+#define JZ_LCD_CPOS_RGB555 BIT(30)
+#define JZ_LCD_CPOS_PREMULTIPLY_LCD BIT(26)
+#define JZ_LCD_CPOS_COEFFICIENT_OFFSET 24
+#define JZ_LCD_CPOS_COEFFICIENT_0 0
+#define JZ_LCD_CPOS_COEFFICIENT_1 1
+#define JZ_LCD_CPOS_COEFFICIENT_ALPHA1 2
+#define JZ_LCD_CPOS_COEFFICIENT_1_ALPHA1 3
+
+#define JZ_LCD_RGBC_RGB_PADDING BIT(15)
+#define JZ_LCD_RGBC_RGB_PADDING_FIRST BIT(14)
+#define JZ_LCD_RGBC_422 BIT(8)
+#define JZ_LCD_RGBC_RGB_FORMAT_ENABLE BIT(7)
+
+#define JZ_LCD_PCFG_PRI_MODE BIT(31)
+#define JZ_LCD_PCFG_HP_BST_4 (0 << 28)
+#define JZ_LCD_PCFG_HP_BST_8 (1 << 28)
+#define JZ_LCD_PCFG_HP_BST_16 (2 << 28)
+#define JZ_LCD_PCFG_HP_BST_32 (3 << 28)
+#define JZ_LCD_PCFG_HP_BST_64 (4 << 28)
+#define JZ_LCD_PCFG_HP_BST_16_CONT (5 << 28)
+#define JZ_LCD_PCFG_HP_BST_DISABLE (7 << 28)
+#define JZ_LCD_PCFG_THRESHOLD2_OFFSET 18
+#define JZ_LCD_PCFG_THRESHOLD1_OFFSET 9
+#define JZ_LCD_PCFG_THRESHOLD0_OFFSET 0
+
struct device;
struct drm_plane;
struct drm_plane_state;
diff --git a/drivers/gpu/drm/kmb/Kconfig b/drivers/gpu/drm/kmb/Kconfig
index bc4cb5e1cd8a..5fdd43dad507 100644
--- a/drivers/gpu/drm/kmb/Kconfig
+++ b/drivers/gpu/drm/kmb/Kconfig
@@ -3,7 +3,6 @@ config DRM_KMB_DISPLAY
depends on DRM
depends on ARCH_KEEMBAY || COMPILE_TEST
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select DRM_MIPI_DSI
help
diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c
index 36c990589427..02cef0cea657 100644
--- a/drivers/gpu/drm/lima/lima_device.c
+++ b/drivers/gpu/drm/lima/lima_device.c
@@ -4,6 +4,7 @@
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/clk.h>
+#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 2723d333c608..f9a9198ef198 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -221,7 +221,7 @@ struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t siz
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo)
- return NULL;
+ return ERR_PTR(-ENOMEM);
mutex_init(&bo->lock);
INIT_LIST_HEAD(&bo->va);
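
Returning ERR_PTR(-ENOMEM) rather than NULL matters here because callers of the gem create_object() hook decode failures with IS_ERR()/PTR_ERR(); a bare NULL would be treated as success. The encode/decode pairing, shown with a stand-in type so the sketch is self-contained:

	#include <linux/err.h>
	#include <linux/slab.h>

	struct example_bo { int dummy; };	/* stand-in for struct lima_bo */

	static struct example_bo *example_alloc(void)
	{
		struct example_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);

		if (!bo)
			return ERR_PTR(-ENOMEM);	/* encode errno in the pointer */
		return bo;
	}

	static int example_caller(void)
	{
		struct example_bo *bo = example_alloc();

		if (IS_ERR(bo))
			return PTR_ERR(bo);		/* decode back to -ENOMEM */
		kfree(bo);
		return 0;
	}
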
diff --git a/drivers/gpu/drm/mcde/Kconfig b/drivers/gpu/drm/mcde/Kconfig
index 71c689b573c9..d0bf1bc8da3f 100644
--- a/drivers/gpu/drm/mcde/Kconfig
+++ b/drivers/gpu/drm/mcde/Kconfig
@@ -10,7 +10,6 @@ config DRM_MCDE
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
help
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
index 141cb36b9c07..3a53ebc4e172 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
@@ -205,9 +205,15 @@ static const struct mtk_disp_ccorr_data mt8183_ccorr_driver_data = {
.matrix_bits = 10,
};
+static const struct mtk_disp_ccorr_data mt8192_ccorr_driver_data = {
+ .matrix_bits = 11,
+};
+
static const struct of_device_id mtk_disp_ccorr_driver_dt_match[] = {
{ .compatible = "mediatek,mt8183-disp-ccorr",
.data = &mt8183_ccorr_driver_data},
+ { .compatible = "mediatek,mt8192-disp-ccorr",
+ .data = &mt8192_ccorr_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_ccorr_driver_dt_match);
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 5326989d5206..2146299e5f52 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -456,6 +456,22 @@ static const struct mtk_disp_ovl_data mt8183_ovl_2l_driver_data = {
.fmt_rgb565_is_0 = true,
};
+static const struct mtk_disp_ovl_data mt8192_ovl_driver_data = {
+ .addr = DISP_REG_OVL_ADDR_MT8173,
+ .gmc_bits = 10,
+ .layer_nr = 4,
+ .fmt_rgb565_is_0 = true,
+ .smi_id_en = true,
+};
+
+static const struct mtk_disp_ovl_data mt8192_ovl_2l_driver_data = {
+ .addr = DISP_REG_OVL_ADDR_MT8173,
+ .gmc_bits = 10,
+ .layer_nr = 2,
+ .fmt_rgb565_is_0 = true,
+ .smi_id_en = true,
+};
+
static const struct of_device_id mtk_disp_ovl_driver_dt_match[] = {
{ .compatible = "mediatek,mt2701-disp-ovl",
.data = &mt2701_ovl_driver_data},
@@ -465,6 +481,10 @@ static const struct of_device_id mtk_disp_ovl_driver_dt_match[] = {
.data = &mt8183_ovl_driver_data},
{ .compatible = "mediatek,mt8183-disp-ovl-2l",
.data = &mt8183_ovl_2l_driver_data},
+ { .compatible = "mediatek,mt8192-disp-ovl",
+ .data = &mt8192_ovl_driver_data},
+ { .compatible = "mediatek,mt8192-disp-ovl-2l",
+ .data = &mt8192_ovl_2l_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_ovl_driver_dt_match);
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 75d7f45579e2..d41a3970b944 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -353,6 +353,10 @@ static const struct mtk_disp_rdma_data mt8183_rdma_driver_data = {
.fifo_size = 5 * SZ_1K,
};
+static const struct mtk_disp_rdma_data mt8192_rdma_driver_data = {
+ .fifo_size = 5 * SZ_1K,
+};
+
static const struct of_device_id mtk_disp_rdma_driver_dt_match[] = {
{ .compatible = "mediatek,mt2701-disp-rdma",
.data = &mt2701_rdma_driver_data},
@@ -360,6 +364,8 @@ static const struct of_device_id mtk_disp_rdma_driver_dt_match[] = {
.data = &mt8173_rdma_driver_data},
{ .compatible = "mediatek,mt8183-disp-rdma",
.data = &mt8183_rdma_driver_data},
+ { .compatible = "mediatek,mt8192-disp-rdma",
+ .data = &mt8192_rdma_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_rdma_driver_dt_match);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index a4e80e499674..d661edf7e0fe 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -4,6 +4,8 @@
*/
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/mailbox_controller.h>
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
@@ -50,8 +52,10 @@ struct mtk_drm_crtc {
bool pending_async_planes;
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- struct cmdq_client *cmdq_client;
+ struct cmdq_client cmdq_client;
+ struct cmdq_pkt cmdq_handle;
u32 cmdq_event;
+ u32 cmdq_vblank_cnt;
#endif
struct device *mmsys_dev;
@@ -104,12 +108,60 @@ static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
}
}
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt,
+ size_t size)
+{
+ struct device *dev;
+ dma_addr_t dma_addr;
+
+ pkt->va_base = kzalloc(size, GFP_KERNEL);
+ if (!pkt->va_base) {
+ kfree(pkt);
+ return -ENOMEM;
+ }
+ pkt->buf_size = size;
+ pkt->cl = (void *)client;
+
+ dev = client->chan->mbox->dev;
+ dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
+ kfree(pkt->va_base);
+ kfree(pkt);
+ return -ENOMEM;
+ }
+
+ pkt->pa_base = dma_addr;
+
+ return 0;
+}
+
+static void mtk_drm_cmdq_pkt_destroy(struct cmdq_pkt *pkt)
+{
+ struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
+
+ dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
+ DMA_TO_DEVICE);
+ kfree(pkt->va_base);
+ kfree(pkt);
+}
+#endif
+
static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
mtk_mutex_put(mtk_crtc->mutex);
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ mtk_drm_cmdq_pkt_destroy(&mtk_crtc->cmdq_handle);
+ if (mtk_crtc->cmdq_client.chan) {
+ mbox_free_channel(mtk_crtc->cmdq_client.chan);
+ mtk_crtc->cmdq_client.chan = NULL;
+ }
+#endif
drm_crtc_cleanup(crtc);
}
@@ -222,9 +274,46 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
-static void ddp_cmdq_cb(struct cmdq_cb_data data)
+static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
{
- cmdq_pkt_destroy(data.data);
+ struct cmdq_cb_data *data = mssg;
+ struct cmdq_client *cmdq_cl = container_of(cl, struct cmdq_client, client);
+ struct mtk_drm_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_drm_crtc, cmdq_client);
+ struct mtk_crtc_state *state;
+ unsigned int i;
+
+ if (data->sta < 0)
+ return;
+
+ state = to_mtk_crtc_state(mtk_crtc->base.state);
+
+ state->pending_config = false;
+
+ if (mtk_crtc->pending_planes) {
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
+ struct drm_plane *plane = &mtk_crtc->planes[i];
+ struct mtk_plane_state *plane_state;
+
+ plane_state = to_mtk_plane_state(plane->state);
+
+ plane_state->pending.config = false;
+ }
+ mtk_crtc->pending_planes = false;
+ }
+
+ if (mtk_crtc->pending_async_planes) {
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
+ struct drm_plane *plane = &mtk_crtc->planes[i];
+ struct mtk_plane_state *plane_state;
+
+ plane_state = to_mtk_plane_state(plane->state);
+
+ plane_state->pending.async_config = false;
+ }
+ mtk_crtc->pending_async_planes = false;
+ }
+
+ mtk_crtc->cmdq_vblank_cnt = 0;
}
#endif
@@ -378,7 +467,8 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
state->pending_vrefresh, 0,
cmdq_handle);
- state->pending_config = false;
+ if (!cmdq_handle)
+ state->pending_config = false;
}
if (mtk_crtc->pending_planes) {
@@ -398,9 +488,12 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
mtk_ddp_comp_layer_config(comp, local_layer,
plane_state,
cmdq_handle);
- plane_state->pending.config = false;
+ if (!cmdq_handle)
+ plane_state->pending.config = false;
}
- mtk_crtc->pending_planes = false;
+
+ if (!cmdq_handle)
+ mtk_crtc->pending_planes = false;
}
if (mtk_crtc->pending_async_planes) {
@@ -420,9 +513,12 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
mtk_ddp_comp_layer_config(comp, local_layer,
plane_state,
cmdq_handle);
- plane_state->pending.async_config = false;
+ if (!cmdq_handle)
+ plane_state->pending.async_config = false;
}
- mtk_crtc->pending_async_planes = false;
+
+ if (!cmdq_handle)
+ mtk_crtc->pending_async_planes = false;
}
}
@@ -430,7 +526,7 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
bool needs_vblank)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- struct cmdq_pkt *cmdq_handle;
+ struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle;
#endif
struct drm_crtc *crtc = &mtk_crtc->base;
struct mtk_drm_private *priv = crtc->dev->dev_private;
@@ -468,14 +564,28 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
mtk_mutex_release(mtk_crtc->mutex);
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- if (mtk_crtc->cmdq_client) {
- mbox_flush(mtk_crtc->cmdq_client->chan, 2000);
- cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
+ if (mtk_crtc->cmdq_client.chan) {
+ mbox_flush(mtk_crtc->cmdq_client.chan, 2000);
+ cmdq_handle->cmd_buf_size = 0;
cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
mtk_crtc_ddp_config(crtc, cmdq_handle);
cmdq_pkt_finalize(cmdq_handle);
- cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
+ dma_sync_single_for_device(mtk_crtc->cmdq_client.chan->mbox->dev,
+ cmdq_handle->pa_base,
+ cmdq_handle->cmd_buf_size,
+ DMA_TO_DEVICE);
+ /*
+ * A CMDQ command should execute within the next 3 vblanks:
+ * occasionally one vblank interrupt fires before the message is
+ * sent and another fires after the CMDQ work is done, so the
+ * timeout is set to 3 vblank interrupts. If the command has not
+ * executed within 3 vblanks, a timeout is reported.
+ */
+ mtk_crtc->cmdq_vblank_cnt = 3;
+
+ mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
+ mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
}
#endif
mtk_crtc->config_updating = false;
@@ -489,12 +599,15 @@ static void mtk_crtc_ddp_irq(void *data)
struct mtk_drm_private *priv = crtc->dev->dev_private;
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- if (!priv->data->shadow_register && !mtk_crtc->cmdq_client)
+ if (!priv->data->shadow_register && !mtk_crtc->cmdq_client.chan)
+ mtk_crtc_ddp_config(crtc, NULL);
+ else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0)
+ DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n",
+ drm_crtc_index(&mtk_crtc->base));
#else
if (!priv->data->shadow_register)
-#endif
mtk_crtc_ddp_config(crtc, NULL);
-
+#endif
mtk_drm_finish_page_flip(mtk_crtc);
}
@@ -829,16 +942,20 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
mutex_init(&mtk_crtc->hw_lock);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- mtk_crtc->cmdq_client =
- cmdq_mbox_create(mtk_crtc->mmsys_dev,
- drm_crtc_index(&mtk_crtc->base));
- if (IS_ERR(mtk_crtc->cmdq_client)) {
+ mtk_crtc->cmdq_client.client.dev = mtk_crtc->mmsys_dev;
+ mtk_crtc->cmdq_client.client.tx_block = false;
+ mtk_crtc->cmdq_client.client.knows_txdone = true;
+ mtk_crtc->cmdq_client.client.rx_callback = ddp_cmdq_cb;
+ mtk_crtc->cmdq_client.chan =
+ mbox_request_channel(&mtk_crtc->cmdq_client.client,
+ drm_crtc_index(&mtk_crtc->base));
+ if (IS_ERR(mtk_crtc->cmdq_client.chan)) {
dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
drm_crtc_index(&mtk_crtc->base));
- mtk_crtc->cmdq_client = NULL;
+ mtk_crtc->cmdq_client.chan = NULL;
}
- if (mtk_crtc->cmdq_client) {
+ if (mtk_crtc->cmdq_client.chan) {
ret = of_property_read_u32_index(priv->mutex_node,
"mediatek,gce-events",
drm_crtc_index(&mtk_crtc->base),
@@ -846,8 +963,18 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
if (ret) {
dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
drm_crtc_index(&mtk_crtc->base));
- cmdq_mbox_destroy(mtk_crtc->cmdq_client);
- mtk_crtc->cmdq_client = NULL;
+ mbox_free_channel(mtk_crtc->cmdq_client.chan);
+ mtk_crtc->cmdq_client.chan = NULL;
+ } else {
+ ret = mtk_drm_cmdq_pkt_create(&mtk_crtc->cmdq_client,
+ &mtk_crtc->cmdq_handle,
+ PAGE_SIZE);
+ if (ret) {
+ dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n",
+ drm_crtc_index(&mtk_crtc->base));
+ mbox_free_channel(mtk_crtc->cmdq_client.chan);
+ mtk_crtc->cmdq_client.chan = NULL;
+ }
}
}
#endif
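
With the cmdq_pkt now embedded in struct mtk_drm_crtc and sent straight through the mailbox API, its command buffer is streaming-DMA mapped once at creation, synced to the device before every mbox_send_message(), and unmapped at destroy time. The lifecycle reduced to its DMA calls; dev is the mailbox controller's device (client->chan->mbox->dev) and the function names are illustrative:

	#include <linux/dma-mapping.h>
	#include <linux/slab.h>

	static int example_pkt_map(struct device *dev, void **va, dma_addr_t *pa,
				   size_t size)
	{
		*va = kzalloc(size, GFP_KERNEL);
		if (!*va)
			return -ENOMEM;

		*pa = dma_map_single(dev, *va, size, DMA_TO_DEVICE);	/* map once */
		if (dma_mapping_error(dev, *pa)) {
			kfree(*va);
			return -ENOMEM;
		}
		return 0;
	}

	static void example_pkt_send(struct device *dev, dma_addr_t pa, size_t len)
	{
		/* CPU wrote the commands: hand the buffer back to the device. */
		dma_sync_single_for_device(dev, pa, len, DMA_TO_DEVICE);
		/* ...followed by mbox_send_message()/mbox_client_txdone() above. */
	}

	static void example_pkt_unmap(struct device *dev, void *va, dma_addr_t pa,
				      size_t size)
	{
		dma_unmap_single(dev, pa, size, DMA_TO_DEVICE);	/* unmap at destroy */
		kfree(va);
	}
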
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 99cbf44463e4..b4b682bc1991 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -20,45 +20,39 @@
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_crtc.h"
-#define DISP_OD_EN 0x0000
-#define DISP_OD_INTEN 0x0008
-#define DISP_OD_INTSTA 0x000c
-#define DISP_OD_CFG 0x0020
-#define DISP_OD_SIZE 0x0030
-#define DISP_DITHER_5 0x0114
-#define DISP_DITHER_7 0x011c
-#define DISP_DITHER_15 0x013c
-#define DISP_DITHER_16 0x0140
-#define DISP_REG_UFO_START 0x0000
-
-#define DISP_DITHER_EN 0x0000
+#define DISP_REG_DITHER_EN 0x0000
#define DITHER_EN BIT(0)
-#define DISP_DITHER_CFG 0x0020
+#define DISP_REG_DITHER_CFG 0x0020
#define DITHER_RELAY_MODE BIT(0)
#define DITHER_ENGINE_EN BIT(1)
-#define DISP_DITHER_SIZE 0x0030
-
-#define LUT_10BIT_MASK 0x03ff
-
-#define OD_RELAYMODE BIT(0)
-
-#define UFO_BYPASS BIT(2)
-
#define DISP_DITHERING BIT(2)
+#define DISP_REG_DITHER_SIZE 0x0030
+#define DISP_REG_DITHER_5 0x0114
+#define DISP_REG_DITHER_7 0x011c
+#define DISP_REG_DITHER_15 0x013c
#define DITHER_LSB_ERR_SHIFT_R(x) (((x) & 0x7) << 28)
-#define DITHER_OVFLW_BIT_R(x) (((x) & 0x7) << 24)
#define DITHER_ADD_LSHIFT_R(x) (((x) & 0x7) << 20)
-#define DITHER_ADD_RSHIFT_R(x) (((x) & 0x7) << 16)
#define DITHER_NEW_BIT_MODE BIT(0)
+#define DISP_REG_DITHER_16 0x0140
#define DITHER_LSB_ERR_SHIFT_B(x) (((x) & 0x7) << 28)
-#define DITHER_OVFLW_BIT_B(x) (((x) & 0x7) << 24)
#define DITHER_ADD_LSHIFT_B(x) (((x) & 0x7) << 20)
-#define DITHER_ADD_RSHIFT_B(x) (((x) & 0x7) << 16)
#define DITHER_LSB_ERR_SHIFT_G(x) (((x) & 0x7) << 12)
-#define DITHER_OVFLW_BIT_G(x) (((x) & 0x7) << 8)
#define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4)
-#define DITHER_ADD_RSHIFT_G(x) (((x) & 0x7) << 0)
+
+#define DISP_REG_OD_EN 0x0000
+#define DISP_REG_OD_CFG 0x0020
+#define OD_RELAYMODE BIT(0)
+#define DISP_REG_OD_SIZE 0x0030
+
+#define DISP_REG_POSTMASK_EN 0x0000
+#define POSTMASK_EN BIT(0)
+#define DISP_REG_POSTMASK_CFG 0x0020
+#define POSTMASK_RELAY_MODE BIT(0)
+#define DISP_REG_POSTMASK_SIZE 0x0030
+
+#define DISP_REG_UFO_START 0x0000
+#define UFO_BYPASS BIT(2)
struct mtk_ddp_comp_dev {
struct clk *clk;
@@ -134,25 +128,52 @@ void mtk_dither_set_common(void __iomem *regs, struct cmdq_client_reg *cmdq_reg,
return;
if (bpc >= MTK_MIN_BPC) {
- mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_DITHER_5);
- mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_DITHER_7);
+ mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_REG_DITHER_5);
+ mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_REG_DITHER_7);
mtk_ddp_write(cmdq_pkt,
DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) |
DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) |
DITHER_NEW_BIT_MODE,
- cmdq_reg, regs, DISP_DITHER_15);
+ cmdq_reg, regs, DISP_REG_DITHER_15);
mtk_ddp_write(cmdq_pkt,
DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) |
DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) |
DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) |
DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc),
- cmdq_reg, regs, DISP_DITHER_16);
+ cmdq_reg, regs, DISP_REG_DITHER_16);
mtk_ddp_write(cmdq_pkt, dither_en, cmdq_reg, regs, cfg);
}
}
+static void mtk_dither_config(struct device *dev, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+{
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
+
+ mtk_ddp_write(cmdq_pkt, h << 16 | w, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE);
+ mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, &priv->cmdq_reg, priv->regs,
+ DISP_REG_DITHER_CFG);
+ mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, DISP_REG_DITHER_CFG,
+ DITHER_ENGINE_EN, cmdq_pkt);
+}
+
+static void mtk_dither_start(struct device *dev)
+{
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
+
+ writel(DITHER_EN, priv->regs + DISP_REG_DITHER_EN);
+}
+
+static void mtk_dither_stop(struct device *dev)
+{
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
+
+ writel_relaxed(0x0, priv->regs + DISP_REG_DITHER_EN);
+}
+
static void mtk_dither_set(struct device *dev, unsigned int bpc,
- unsigned int cfg, struct cmdq_pkt *cmdq_pkt)
+ unsigned int cfg, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
@@ -166,49 +187,49 @@ static void mtk_od_config(struct device *dev, unsigned int w,
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
- mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_OD_SIZE);
- mtk_ddp_write(cmdq_pkt, OD_RELAYMODE, &priv->cmdq_reg, priv->regs, DISP_OD_CFG);
- mtk_dither_set(dev, bpc, DISP_OD_CFG, cmdq_pkt);
+ mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_REG_OD_SIZE);
+ mtk_ddp_write(cmdq_pkt, OD_RELAYMODE, &priv->cmdq_reg, priv->regs, DISP_REG_OD_CFG);
+ mtk_dither_set(dev, bpc, DISP_REG_OD_CFG, cmdq_pkt);
}
static void mtk_od_start(struct device *dev)
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
- writel(1, priv->regs + DISP_OD_EN);
+ writel(1, priv->regs + DISP_REG_OD_EN);
}
-static void mtk_ufoe_start(struct device *dev)
+static void mtk_postmask_config(struct device *dev, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
- writel(UFO_BYPASS, priv->regs + DISP_REG_UFO_START);
+ mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs,
+ DISP_REG_POSTMASK_SIZE);
+ mtk_ddp_write(cmdq_pkt, POSTMASK_RELAY_MODE, &priv->cmdq_reg,
+ priv->regs, DISP_REG_POSTMASK_CFG);
}
-static void mtk_dither_config(struct device *dev, unsigned int w,
- unsigned int h, unsigned int vrefresh,
- unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+static void mtk_postmask_start(struct device *dev)
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
- mtk_ddp_write(cmdq_pkt, h << 16 | w, &priv->cmdq_reg, priv->regs, DISP_DITHER_SIZE);
- mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, &priv->cmdq_reg, priv->regs, DISP_DITHER_CFG);
- mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, DISP_DITHER_CFG,
- DITHER_ENGINE_EN, cmdq_pkt);
+ writel(POSTMASK_EN, priv->regs + DISP_REG_POSTMASK_EN);
}
-static void mtk_dither_start(struct device *dev)
+static void mtk_postmask_stop(struct device *dev)
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
- writel(DITHER_EN, priv->regs + DISP_DITHER_EN);
+ writel_relaxed(0x0, priv->regs + DISP_REG_POSTMASK_EN);
}
-static void mtk_dither_stop(struct device *dev)
+static void mtk_ufoe_start(struct device *dev)
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
- writel_relaxed(0x0, priv->regs + DISP_DITHER_EN);
+ writel(UFO_BYPASS, priv->regs + DISP_REG_UFO_START);
}
static const struct mtk_ddp_comp_funcs ddp_aal = {
@@ -286,6 +307,14 @@ static const struct mtk_ddp_comp_funcs ddp_ovl = {
.bgclr_in_off = mtk_ovl_bgclr_in_off,
};
+static const struct mtk_ddp_comp_funcs ddp_postmask = {
+ .clk_enable = mtk_ddp_clk_enable,
+ .clk_disable = mtk_ddp_clk_disable,
+ .config = mtk_postmask_config,
+ .start = mtk_postmask_start,
+ .stop = mtk_postmask_stop,
+};
+
static const struct mtk_ddp_comp_funcs ddp_rdma = {
.clk_enable = mtk_rdma_clk_enable,
.clk_disable = mtk_rdma_clk_disable,
@@ -305,22 +334,23 @@ static const struct mtk_ddp_comp_funcs ddp_ufoe = {
};
static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = {
+ [MTK_DISP_AAL] = "aal",
+ [MTK_DISP_BLS] = "bls",
+ [MTK_DISP_CCORR] = "ccorr",
+ [MTK_DISP_COLOR] = "color",
+ [MTK_DISP_DITHER] = "dither",
+ [MTK_DISP_GAMMA] = "gamma",
+ [MTK_DISP_MUTEX] = "mutex",
+ [MTK_DISP_OD] = "od",
[MTK_DISP_OVL] = "ovl",
[MTK_DISP_OVL_2L] = "ovl-2l",
+ [MTK_DISP_POSTMASK] = "postmask",
+ [MTK_DISP_PWM] = "pwm",
[MTK_DISP_RDMA] = "rdma",
- [MTK_DISP_WDMA] = "wdma",
- [MTK_DISP_COLOR] = "color",
- [MTK_DISP_CCORR] = "ccorr",
- [MTK_DISP_AAL] = "aal",
- [MTK_DISP_GAMMA] = "gamma",
- [MTK_DISP_DITHER] = "dither",
[MTK_DISP_UFOE] = "ufoe",
- [MTK_DSI] = "dsi",
+ [MTK_DISP_WDMA] = "wdma",
[MTK_DPI] = "dpi",
- [MTK_DISP_PWM] = "pwm",
- [MTK_DISP_MUTEX] = "mutex",
- [MTK_DISP_OD] = "od",
- [MTK_DISP_BLS] = "bls",
+ [MTK_DSI] = "dsi",
};
struct mtk_ddp_comp_match {
@@ -330,35 +360,38 @@ struct mtk_ddp_comp_match {
};
static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
- [DDP_COMPONENT_AAL0] = { MTK_DISP_AAL, 0, &ddp_aal },
- [DDP_COMPONENT_AAL1] = { MTK_DISP_AAL, 1, &ddp_aal },
- [DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL },
- [DDP_COMPONENT_CCORR] = { MTK_DISP_CCORR, 0, &ddp_ccorr },
- [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color },
- [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color },
- [DDP_COMPONENT_DITHER] = { MTK_DISP_DITHER, 0, &ddp_dither },
- [DDP_COMPONENT_DPI0] = { MTK_DPI, 0, &ddp_dpi },
- [DDP_COMPONENT_DPI1] = { MTK_DPI, 1, &ddp_dpi },
- [DDP_COMPONENT_DSI0] = { MTK_DSI, 0, &ddp_dsi },
- [DDP_COMPONENT_DSI1] = { MTK_DSI, 1, &ddp_dsi },
- [DDP_COMPONENT_DSI2] = { MTK_DSI, 2, &ddp_dsi },
- [DDP_COMPONENT_DSI3] = { MTK_DSI, 3, &ddp_dsi },
- [DDP_COMPONENT_GAMMA] = { MTK_DISP_GAMMA, 0, &ddp_gamma },
- [DDP_COMPONENT_OD0] = { MTK_DISP_OD, 0, &ddp_od },
- [DDP_COMPONENT_OD1] = { MTK_DISP_OD, 1, &ddp_od },
- [DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, &ddp_ovl },
- [DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, &ddp_ovl },
- [DDP_COMPONENT_OVL_2L0] = { MTK_DISP_OVL_2L, 0, &ddp_ovl },
- [DDP_COMPONENT_OVL_2L1] = { MTK_DISP_OVL_2L, 1, &ddp_ovl },
- [DDP_COMPONENT_PWM0] = { MTK_DISP_PWM, 0, NULL },
- [DDP_COMPONENT_PWM1] = { MTK_DISP_PWM, 1, NULL },
- [DDP_COMPONENT_PWM2] = { MTK_DISP_PWM, 2, NULL },
- [DDP_COMPONENT_RDMA0] = { MTK_DISP_RDMA, 0, &ddp_rdma },
- [DDP_COMPONENT_RDMA1] = { MTK_DISP_RDMA, 1, &ddp_rdma },
- [DDP_COMPONENT_RDMA2] = { MTK_DISP_RDMA, 2, &ddp_rdma },
- [DDP_COMPONENT_UFOE] = { MTK_DISP_UFOE, 0, &ddp_ufoe },
- [DDP_COMPONENT_WDMA0] = { MTK_DISP_WDMA, 0, NULL },
- [DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL },
+ [DDP_COMPONENT_AAL0] = { MTK_DISP_AAL, 0, &ddp_aal },
+ [DDP_COMPONENT_AAL1] = { MTK_DISP_AAL, 1, &ddp_aal },
+ [DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL },
+ [DDP_COMPONENT_CCORR] = { MTK_DISP_CCORR, 0, &ddp_ccorr },
+ [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color },
+ [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color },
+ [DDP_COMPONENT_DITHER] = { MTK_DISP_DITHER, 0, &ddp_dither },
+ [DDP_COMPONENT_DPI0] = { MTK_DPI, 0, &ddp_dpi },
+ [DDP_COMPONENT_DPI1] = { MTK_DPI, 1, &ddp_dpi },
+ [DDP_COMPONENT_DSI0] = { MTK_DSI, 0, &ddp_dsi },
+ [DDP_COMPONENT_DSI1] = { MTK_DSI, 1, &ddp_dsi },
+ [DDP_COMPONENT_DSI2] = { MTK_DSI, 2, &ddp_dsi },
+ [DDP_COMPONENT_DSI3] = { MTK_DSI, 3, &ddp_dsi },
+ [DDP_COMPONENT_GAMMA] = { MTK_DISP_GAMMA, 0, &ddp_gamma },
+ [DDP_COMPONENT_OD0] = { MTK_DISP_OD, 0, &ddp_od },
+ [DDP_COMPONENT_OD1] = { MTK_DISP_OD, 1, &ddp_od },
+ [DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, &ddp_ovl },
+ [DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, &ddp_ovl },
+ [DDP_COMPONENT_OVL_2L0] = { MTK_DISP_OVL_2L, 0, &ddp_ovl },
+ [DDP_COMPONENT_OVL_2L1] = { MTK_DISP_OVL_2L, 1, &ddp_ovl },
+ [DDP_COMPONENT_OVL_2L2] = { MTK_DISP_OVL_2L, 2, &ddp_ovl },
+ [DDP_COMPONENT_POSTMASK0] = { MTK_DISP_POSTMASK, 0, &ddp_postmask },
+ [DDP_COMPONENT_PWM0] = { MTK_DISP_PWM, 0, NULL },
+ [DDP_COMPONENT_PWM1] = { MTK_DISP_PWM, 1, NULL },
+ [DDP_COMPONENT_PWM2] = { MTK_DISP_PWM, 2, NULL },
+ [DDP_COMPONENT_RDMA0] = { MTK_DISP_RDMA, 0, &ddp_rdma },
+ [DDP_COMPONENT_RDMA1] = { MTK_DISP_RDMA, 1, &ddp_rdma },
+ [DDP_COMPONENT_RDMA2] = { MTK_DISP_RDMA, 2, &ddp_rdma },
+ [DDP_COMPONENT_RDMA4] = { MTK_DISP_RDMA, 4, &ddp_rdma },
+ [DDP_COMPONENT_UFOE] = { MTK_DISP_UFOE, 0, &ddp_ufoe },
+ [DDP_COMPONENT_WDMA0] = { MTK_DISP_WDMA, 0, NULL },
+ [DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL },
};
static bool mtk_drm_find_comp_in_ddp(struct device *dev,
@@ -475,12 +508,12 @@ int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp,
type == MTK_DISP_CCORR ||
type == MTK_DISP_COLOR ||
type == MTK_DISP_GAMMA ||
- type == MTK_DPI ||
- type == MTK_DSI ||
type == MTK_DISP_OVL ||
type == MTK_DISP_OVL_2L ||
type == MTK_DISP_PWM ||
- type == MTK_DISP_RDMA)
+ type == MTK_DISP_RDMA ||
+ type == MTK_DPI ||
+ type == MTK_DSI)
return 0;
priv = devm_kzalloc(comp->dev, sizeof(*priv), GFP_KERNEL);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index bb914d976cf5..4c6a98662305 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -18,22 +18,23 @@ struct mtk_plane_state;
struct drm_crtc_state;
enum mtk_ddp_comp_type {
- MTK_DISP_OVL,
- MTK_DISP_OVL_2L,
- MTK_DISP_RDMA,
- MTK_DISP_WDMA,
- MTK_DISP_COLOR,
+ MTK_DISP_AAL,
+ MTK_DISP_BLS,
MTK_DISP_CCORR,
+ MTK_DISP_COLOR,
MTK_DISP_DITHER,
- MTK_DISP_AAL,
MTK_DISP_GAMMA,
- MTK_DISP_UFOE,
- MTK_DSI,
- MTK_DPI,
- MTK_DISP_PWM,
MTK_DISP_MUTEX,
MTK_DISP_OD,
- MTK_DISP_BLS,
+ MTK_DISP_OVL,
+ MTK_DISP_OVL_2L,
+ MTK_DISP_POSTMASK,
+ MTK_DISP_PWM,
+ MTK_DISP_RDMA,
+ MTK_DISP_UFOE,
+ MTK_DISP_WDMA,
+ MTK_DPI,
+ MTK_DSI,
MTK_DDP_COMP_TYPE_MAX,
};
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index aec39724ebeb..56ff8c57ef8f 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -158,6 +158,25 @@ static const enum mtk_ddp_comp_id mt8183_mtk_ddp_ext[] = {
DDP_COMPONENT_DPI0,
};
+static const enum mtk_ddp_comp_id mt8192_mtk_ddp_main[] = {
+ DDP_COMPONENT_OVL0,
+ DDP_COMPONENT_OVL_2L0,
+ DDP_COMPONENT_RDMA0,
+ DDP_COMPONENT_COLOR0,
+ DDP_COMPONENT_CCORR,
+ DDP_COMPONENT_AAL0,
+ DDP_COMPONENT_GAMMA,
+ DDP_COMPONENT_POSTMASK0,
+ DDP_COMPONENT_DITHER,
+ DDP_COMPONENT_DSI0,
+};
+
+static const enum mtk_ddp_comp_id mt8192_mtk_ddp_ext[] = {
+ DDP_COMPONENT_OVL_2L2,
+ DDP_COMPONENT_RDMA4,
+ DDP_COMPONENT_DPI0,
+};
+
static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
.main_path = mt2701_mtk_ddp_main,
.main_len = ARRAY_SIZE(mt2701_mtk_ddp_main),
@@ -202,6 +221,13 @@ static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = {
.ext_len = ARRAY_SIZE(mt8183_mtk_ddp_ext),
};
+static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = {
+ .main_path = mt8192_mtk_ddp_main,
+ .main_len = ARRAY_SIZE(mt8192_mtk_ddp_main),
+ .ext_path = mt8192_mtk_ddp_ext,
+ .ext_len = ARRAY_SIZE(mt8192_mtk_ddp_ext),
+};
+
static int mtk_drm_kms_init(struct drm_device *drm)
{
struct mtk_drm_private *private = drm->dev_private;
@@ -397,68 +423,36 @@ static const struct component_master_ops mtk_drm_ops = {
};
static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
- { .compatible = "mediatek,mt2701-disp-ovl",
- .data = (void *)MTK_DISP_OVL },
- { .compatible = "mediatek,mt8167-disp-ovl",
- .data = (void *)MTK_DISP_OVL },
- { .compatible = "mediatek,mt8173-disp-ovl",
- .data = (void *)MTK_DISP_OVL },
- { .compatible = "mediatek,mt8183-disp-ovl",
- .data = (void *)MTK_DISP_OVL },
- { .compatible = "mediatek,mt8183-disp-ovl-2l",
- .data = (void *)MTK_DISP_OVL_2L },
- { .compatible = "mediatek,mt2701-disp-rdma",
- .data = (void *)MTK_DISP_RDMA },
- { .compatible = "mediatek,mt8167-disp-rdma",
- .data = (void *)MTK_DISP_RDMA },
- { .compatible = "mediatek,mt8173-disp-rdma",
- .data = (void *)MTK_DISP_RDMA },
- { .compatible = "mediatek,mt8183-disp-rdma",
- .data = (void *)MTK_DISP_RDMA },
- { .compatible = "mediatek,mt8173-disp-wdma",
- .data = (void *)MTK_DISP_WDMA },
+ { .compatible = "mediatek,mt8167-disp-aal",
+ .data = (void *)MTK_DISP_AAL},
+ { .compatible = "mediatek,mt8173-disp-aal",
+ .data = (void *)MTK_DISP_AAL},
+ { .compatible = "mediatek,mt8183-disp-aal",
+ .data = (void *)MTK_DISP_AAL},
+ { .compatible = "mediatek,mt8192-disp-aal",
+ .data = (void *)MTK_DISP_AAL},
{ .compatible = "mediatek,mt8167-disp-ccorr",
.data = (void *)MTK_DISP_CCORR },
{ .compatible = "mediatek,mt8183-disp-ccorr",
.data = (void *)MTK_DISP_CCORR },
+ { .compatible = "mediatek,mt8192-disp-ccorr",
+ .data = (void *)MTK_DISP_CCORR },
{ .compatible = "mediatek,mt2701-disp-color",
.data = (void *)MTK_DISP_COLOR },
{ .compatible = "mediatek,mt8167-disp-color",
.data = (void *)MTK_DISP_COLOR },
{ .compatible = "mediatek,mt8173-disp-color",
.data = (void *)MTK_DISP_COLOR },
- { .compatible = "mediatek,mt8167-disp-aal",
- .data = (void *)MTK_DISP_AAL},
- { .compatible = "mediatek,mt8173-disp-aal",
- .data = (void *)MTK_DISP_AAL},
- { .compatible = "mediatek,mt8183-disp-aal",
- .data = (void *)MTK_DISP_AAL},
+ { .compatible = "mediatek,mt8167-disp-dither",
+ .data = (void *)MTK_DISP_DITHER },
+ { .compatible = "mediatek,mt8183-disp-dither",
+ .data = (void *)MTK_DISP_DITHER },
{ .compatible = "mediatek,mt8167-disp-gamma",
.data = (void *)MTK_DISP_GAMMA, },
{ .compatible = "mediatek,mt8173-disp-gamma",
.data = (void *)MTK_DISP_GAMMA, },
{ .compatible = "mediatek,mt8183-disp-gamma",
.data = (void *)MTK_DISP_GAMMA, },
- { .compatible = "mediatek,mt8167-disp-dither",
- .data = (void *)MTK_DISP_DITHER },
- { .compatible = "mediatek,mt8183-disp-dither",
- .data = (void *)MTK_DISP_DITHER },
- { .compatible = "mediatek,mt8173-disp-ufoe",
- .data = (void *)MTK_DISP_UFOE },
- { .compatible = "mediatek,mt2701-dsi",
- .data = (void *)MTK_DSI },
- { .compatible = "mediatek,mt8167-dsi",
- .data = (void *)MTK_DSI },
- { .compatible = "mediatek,mt8173-dsi",
- .data = (void *)MTK_DSI },
- { .compatible = "mediatek,mt8183-dsi",
- .data = (void *)MTK_DSI },
- { .compatible = "mediatek,mt2701-dpi",
- .data = (void *)MTK_DPI },
- { .compatible = "mediatek,mt8173-dpi",
- .data = (void *)MTK_DPI },
- { .compatible = "mediatek,mt8183-dpi",
- .data = (void *)MTK_DPI },
{ .compatible = "mediatek,mt2701-disp-mutex",
.data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt2712-disp-mutex",
@@ -469,14 +463,60 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
.data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt8183-disp-mutex",
.data = (void *)MTK_DISP_MUTEX },
+ { .compatible = "mediatek,mt8192-disp-mutex",
+ .data = (void *)MTK_DISP_MUTEX },
+ { .compatible = "mediatek,mt8173-disp-od",
+ .data = (void *)MTK_DISP_OD },
+ { .compatible = "mediatek,mt2701-disp-ovl",
+ .data = (void *)MTK_DISP_OVL },
+ { .compatible = "mediatek,mt8167-disp-ovl",
+ .data = (void *)MTK_DISP_OVL },
+ { .compatible = "mediatek,mt8173-disp-ovl",
+ .data = (void *)MTK_DISP_OVL },
+ { .compatible = "mediatek,mt8183-disp-ovl",
+ .data = (void *)MTK_DISP_OVL },
+ { .compatible = "mediatek,mt8192-disp-ovl",
+ .data = (void *)MTK_DISP_OVL },
+ { .compatible = "mediatek,mt8183-disp-ovl-2l",
+ .data = (void *)MTK_DISP_OVL_2L },
+ { .compatible = "mediatek,mt8192-disp-ovl-2l",
+ .data = (void *)MTK_DISP_OVL_2L },
+ { .compatible = "mediatek,mt8192-disp-postmask",
+ .data = (void *)MTK_DISP_POSTMASK },
{ .compatible = "mediatek,mt2701-disp-pwm",
.data = (void *)MTK_DISP_BLS },
{ .compatible = "mediatek,mt8167-disp-pwm",
.data = (void *)MTK_DISP_PWM },
{ .compatible = "mediatek,mt8173-disp-pwm",
.data = (void *)MTK_DISP_PWM },
- { .compatible = "mediatek,mt8173-disp-od",
- .data = (void *)MTK_DISP_OD },
+ { .compatible = "mediatek,mt2701-disp-rdma",
+ .data = (void *)MTK_DISP_RDMA },
+ { .compatible = "mediatek,mt8167-disp-rdma",
+ .data = (void *)MTK_DISP_RDMA },
+ { .compatible = "mediatek,mt8173-disp-rdma",
+ .data = (void *)MTK_DISP_RDMA },
+ { .compatible = "mediatek,mt8183-disp-rdma",
+ .data = (void *)MTK_DISP_RDMA },
+ { .compatible = "mediatek,mt8192-disp-rdma",
+ .data = (void *)MTK_DISP_RDMA },
+ { .compatible = "mediatek,mt8173-disp-ufoe",
+ .data = (void *)MTK_DISP_UFOE },
+ { .compatible = "mediatek,mt8173-disp-wdma",
+ .data = (void *)MTK_DISP_WDMA },
+ { .compatible = "mediatek,mt2701-dpi",
+ .data = (void *)MTK_DPI },
+ { .compatible = "mediatek,mt8167-dsi",
+ .data = (void *)MTK_DSI },
+ { .compatible = "mediatek,mt8173-dpi",
+ .data = (void *)MTK_DPI },
+ { .compatible = "mediatek,mt8183-dpi",
+ .data = (void *)MTK_DPI },
+ { .compatible = "mediatek,mt2701-dsi",
+ .data = (void *)MTK_DSI },
+ { .compatible = "mediatek,mt8173-dsi",
+ .data = (void *)MTK_DSI },
+ { .compatible = "mediatek,mt8183-dsi",
+ .data = (void *)MTK_DSI },
{ }
};
@@ -493,6 +533,8 @@ static const struct of_device_id mtk_drm_of_ids[] = {
.data = &mt8173_mmsys_driver_data},
{ .compatible = "mediatek,mt8183-mmsys",
.data = &mt8183_mmsys_driver_data},
+ { .compatible = "mediatek,mt8192-mmsys",
+ .data = &mt8192_mmsys_driver_data},
{ }
};
MODULE_DEVICE_TABLE(of, mtk_drm_of_ids);
@@ -568,8 +610,8 @@ static int mtk_drm_probe(struct platform_device *pdev)
comp_type == MTK_DISP_OVL ||
comp_type == MTK_DISP_OVL_2L ||
comp_type == MTK_DISP_RDMA ||
- comp_type == MTK_DSI ||
- comp_type == MTK_DPI) {
+ comp_type == MTK_DPI ||
+ comp_type == MTK_DSI) {
dev_info(dev, "Adding component match for %pOF\n",
node);
drm_of_component_match_add(dev, &match, compare_of,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index 734a1fb052df..075747a6d4aa 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -44,9 +44,10 @@ static void mtk_plane_reset(struct drm_plane *plane)
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return;
- plane->state = &state->base;
}
+ __drm_atomic_helper_plane_reset(plane, &state->base);
+
state->base.plane = plane;
state->pending.format = DRM_FORMAT_RGB565;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 5838c44cbf6f..3196189429bc 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1224,12 +1224,14 @@ static int mtk_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_BAD;
}
- if (hdmi->conf->cea_modes_only && !drm_match_cea_mode(mode))
- return MODE_BAD;
+ if (hdmi->conf) {
+ if (hdmi->conf->cea_modes_only && !drm_match_cea_mode(mode))
+ return MODE_BAD;
- if (hdmi->conf->max_mode_clock &&
- mode->clock > hdmi->conf->max_mode_clock)
- return MODE_CLOCK_HIGH;
+ if (hdmi->conf->max_mode_clock &&
+ mode->clock > hdmi->conf->max_mode_clock)
+ return MODE_CLOCK_HIGH;
+ }
if (mode->clock < 27000)
return MODE_CLOCK_LOW;
diff --git a/drivers/gpu/drm/meson/Kconfig b/drivers/gpu/drm/meson/Kconfig
index a4e1ed96e5e8..6c70fc3214af 100644
--- a/drivers/gpu/drm/meson/Kconfig
+++ b/drivers/gpu/drm/meson/Kconfig
@@ -4,7 +4,6 @@ config DRM_MESON
depends on DRM && OF && (ARM || ARM64)
depends on ARCH_MESON || COMPILE_TEST
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select DRM_DISPLAY_CONNECTOR
select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 80f1d439841a..26aeaf0ab86e 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -302,42 +302,42 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
if (priv->afbcd.ops) {
ret = priv->afbcd.ops->init(priv);
if (ret)
- return ret;
+ goto free_drm;
}
/* Encoder Initialization */
ret = meson_encoder_cvbs_init(priv);
if (ret)
- goto free_drm;
+ goto exit_afbcd;
if (has_components) {
ret = component_bind_all(drm->dev, drm);
if (ret) {
dev_err(drm->dev, "Couldn't bind all components\n");
- goto free_drm;
+ goto exit_afbcd;
}
}
ret = meson_encoder_hdmi_init(priv);
if (ret)
- goto free_drm;
+ goto exit_afbcd;
ret = meson_plane_create(priv);
if (ret)
- goto free_drm;
+ goto exit_afbcd;
ret = meson_overlay_create(priv);
if (ret)
- goto free_drm;
+ goto exit_afbcd;
ret = meson_crtc_create(priv);
if (ret)
- goto free_drm;
+ goto exit_afbcd;
ret = request_irq(priv->vsync_irq, meson_irq, 0, drm->driver->name, drm);
if (ret)
- goto free_drm;
+ goto exit_afbcd;
drm_mode_config_reset(drm);
@@ -355,6 +355,9 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
uninstall_irq:
free_irq(priv->vsync_irq, drm);
+exit_afbcd:
+ if (priv->afbcd.ops)
+ priv->afbcd.ops->exit(priv);
free_drm:
drm_dev_put(drm);
@@ -385,10 +388,8 @@ static void meson_drv_unbind(struct device *dev)
free_irq(priv->vsync_irq, drm);
drm_dev_put(drm);
- if (priv->afbcd.ops) {
- priv->afbcd.ops->reset(priv);
- meson_rdma_free(priv);
- }
+ if (priv->afbcd.ops)
+ priv->afbcd.ops->exit(priv);
}
static const struct component_master_ops meson_drv_master_ops = {
diff --git a/drivers/gpu/drm/meson/meson_osd_afbcd.c b/drivers/gpu/drm/meson/meson_osd_afbcd.c
index ffc6b584dbf8..0cdbe899402f 100644
--- a/drivers/gpu/drm/meson/meson_osd_afbcd.c
+++ b/drivers/gpu/drm/meson/meson_osd_afbcd.c
@@ -79,11 +79,6 @@ static bool meson_gxm_afbcd_supported_fmt(u64 modifier, uint32_t format)
return meson_gxm_afbcd_pixel_fmt(modifier, format) >= 0;
}
-static int meson_gxm_afbcd_init(struct meson_drm *priv)
-{
- return 0;
-}
-
static int meson_gxm_afbcd_reset(struct meson_drm *priv)
{
writel_relaxed(VIU_SW_RESET_OSD1_AFBCD,
@@ -93,6 +88,16 @@ static int meson_gxm_afbcd_reset(struct meson_drm *priv)
return 0;
}
+static int meson_gxm_afbcd_init(struct meson_drm *priv)
+{
+ return 0;
+}
+
+static void meson_gxm_afbcd_exit(struct meson_drm *priv)
+{
+ meson_gxm_afbcd_reset(priv);
+}
+
static int meson_gxm_afbcd_enable(struct meson_drm *priv)
{
writel_relaxed(FIELD_PREP(OSD1_AFBCD_ID_FIFO_THRD, 0x40) |
@@ -172,6 +177,7 @@ static int meson_gxm_afbcd_setup(struct meson_drm *priv)
struct meson_afbcd_ops meson_afbcd_gxm_ops = {
.init = meson_gxm_afbcd_init,
+ .exit = meson_gxm_afbcd_exit,
.reset = meson_gxm_afbcd_reset,
.enable = meson_gxm_afbcd_enable,
.disable = meson_gxm_afbcd_disable,
@@ -269,6 +275,18 @@ static bool meson_g12a_afbcd_supported_fmt(u64 modifier, uint32_t format)
return meson_g12a_afbcd_pixel_fmt(modifier, format) >= 0;
}
+static int meson_g12a_afbcd_reset(struct meson_drm *priv)
+{
+ meson_rdma_reset(priv);
+
+ meson_rdma_writel_sync(priv, VIU_SW_RESET_G12A_AFBC_ARB |
+ VIU_SW_RESET_G12A_OSD1_AFBCD,
+ VIU_SW_RESET);
+ meson_rdma_writel_sync(priv, 0, VIU_SW_RESET);
+
+ return 0;
+}
+
static int meson_g12a_afbcd_init(struct meson_drm *priv)
{
int ret;
@@ -286,16 +304,10 @@ static int meson_g12a_afbcd_init(struct meson_drm *priv)
return 0;
}
-static int meson_g12a_afbcd_reset(struct meson_drm *priv)
+static void meson_g12a_afbcd_exit(struct meson_drm *priv)
{
- meson_rdma_reset(priv);
-
- meson_rdma_writel_sync(priv, VIU_SW_RESET_G12A_AFBC_ARB |
- VIU_SW_RESET_G12A_OSD1_AFBCD,
- VIU_SW_RESET);
- meson_rdma_writel_sync(priv, 0, VIU_SW_RESET);
-
- return 0;
+ meson_g12a_afbcd_reset(priv);
+ meson_rdma_free(priv);
}
static int meson_g12a_afbcd_enable(struct meson_drm *priv)
@@ -380,6 +392,7 @@ static int meson_g12a_afbcd_setup(struct meson_drm *priv)
struct meson_afbcd_ops meson_afbcd_g12a_ops = {
.init = meson_g12a_afbcd_init,
+ .exit = meson_g12a_afbcd_exit,
.reset = meson_g12a_afbcd_reset,
.enable = meson_g12a_afbcd_enable,
.disable = meson_g12a_afbcd_disable,
diff --git a/drivers/gpu/drm/meson/meson_osd_afbcd.h b/drivers/gpu/drm/meson/meson_osd_afbcd.h
index 5e5523304f42..e77ddeb6416f 100644
--- a/drivers/gpu/drm/meson/meson_osd_afbcd.h
+++ b/drivers/gpu/drm/meson/meson_osd_afbcd.h
@@ -14,6 +14,7 @@
struct meson_afbcd_ops {
int (*init)(struct meson_drm *priv);
+ void (*exit)(struct meson_drm *priv);
int (*reset)(struct meson_drm *priv);
int (*enable)(struct meson_drm *priv);
int (*disable)(struct meson_drm *priv);
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index ae11061727ff..1eae5a9645f4 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -4,8 +4,8 @@ config DRM_MSM
tristate "MSM DRM"
depends on DRM
depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST
+ depends on COMMON_CLK
depends on IOMMU_SUPPORT
- depends on (OF && COMMON_CLK) || COMPILE_TEST
depends on QCOM_OCMEM || QCOM_OCMEM=n
depends on QCOM_LLCC || QCOM_LLCC=n
depends on QCOM_COMMAND_DB || QCOM_COMMAND_DB=n
@@ -65,6 +65,7 @@ config DRM_MSM_HDMI_HDCP
config DRM_MSM_DP
bool "Enable DisplayPort support in MSM DRM driver"
depends on DRM_MSM
+ select RATIONAL
default y
help
Compile in support for DP driver in MSM DRM driver. DP external
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 40577f8856d8..03ab55c37beb 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -19,24 +19,21 @@ msm-y := \
hdmi/hdmi.o \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
- hdmi/hdmi_connector.o \
+ hdmi/hdmi_hpd.o \
hdmi/hdmi_i2c.o \
hdmi/hdmi_phy.o \
hdmi/hdmi_phy_8960.o \
+ hdmi/hdmi_phy_8996.o \
hdmi/hdmi_phy_8x60.o \
hdmi/hdmi_phy_8x74.o \
- edp/edp.o \
- edp/edp_aux.o \
- edp/edp_bridge.o \
- edp/edp_connector.o \
- edp/edp_ctrl.o \
- edp/edp_phy.o \
+ hdmi/hdmi_pll_8960.o \
disp/mdp_format.o \
disp/mdp_kms.o \
disp/mdp4/mdp4_crtc.o \
disp/mdp4/mdp4_dtv_encoder.o \
disp/mdp4/mdp4_lcdc_encoder.o \
disp/mdp4/mdp4_lvds_connector.o \
+ disp/mdp4/mdp4_lvds_pll.o \
disp/mdp4/mdp4_irq.o \
disp/mdp4/mdp4_kms.o \
disp/mdp4/mdp4_plane.o \
@@ -116,9 +113,6 @@ msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \
dp/dp_audio.o
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
-msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
-msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
-msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o
msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index bdc989183c64..22e8295a5e2b 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -12,7 +12,6 @@ static bool a2xx_idle(struct msm_gpu *gpu);
static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
- struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = submit->ring;
unsigned int i;
@@ -23,7 +22,7 @@ static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
/* ignore if there has not been a ctx switch: */
- if (priv->lastctx == submit->queue->ctx)
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 8fb847c174ff..2e481e2692ba 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -30,7 +30,6 @@ static bool a3xx_idle(struct msm_gpu *gpu);
static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
- struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = submit->ring;
unsigned int i;
@@ -41,7 +40,7 @@ static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
/* ignore if there has not been a ctx switch: */
- if (priv->lastctx == submit->queue->ctx)
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index a96ee79cc5e0..c5524d6e8705 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -24,7 +24,6 @@ static bool a4xx_idle(struct msm_gpu *gpu);
static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
- struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = submit->ring;
unsigned int i;
@@ -35,7 +34,7 @@ static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
/* ignore if there has not been a ctx switch: */
- if (priv->lastctx == submit->queue->ctx)
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
index dd593ec2bc56..6bd397a85834 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -107,7 +107,7 @@ reset_set(void *data, u64 val)
* try to reset an active GPU.
*/
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&gpu->lock);
release_firmware(adreno_gpu->fw[ADRENO_FW_PM4]);
adreno_gpu->fw[ADRENO_FW_PM4] = NULL;
@@ -133,7 +133,7 @@ reset_set(void *data, u64 val)
gpu->funcs->recover(gpu);
pm_runtime_put_sync(&gpu->pdev->dev);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
return 0;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 5e2750eb3810..3d28fcf841a6 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -65,7 +65,6 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
- struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = submit->ring;
struct msm_gem_object *obj;
uint32_t *ptr, dwords;
@@ -76,7 +75,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- if (priv->lastctx == submit->queue->ctx)
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -126,12 +125,11 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
- struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = submit->ring;
unsigned int i, ibs = 0;
if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
- priv->lastctx = NULL;
+ gpu->cur_ctx_seqno = 0;
a5xx_submit_in_rb(gpu, submit);
return;
}
@@ -166,7 +164,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- if (priv->lastctx == submit->queue->ctx)
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -441,7 +439,7 @@ void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
const struct adreno_five_hwcg_regs *regs;
unsigned int i, sz;
- if (adreno_is_a508(adreno_gpu)) {
+ if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu)) {
regs = a50x_hwcg;
sz = ARRAY_SIZE(a50x_hwcg);
} else if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu)) {
@@ -485,7 +483,7 @@ static int a5xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
/* Specify workarounds for various microcode issues */
- if (adreno_is_a530(adreno_gpu)) {
+ if (adreno_is_a506(adreno_gpu) || adreno_is_a530(adreno_gpu)) {
/* Workaround for token end syncs
* Force a WFI after every direct-render 3D mode draw and every
* 2D mode 3 draw
@@ -620,8 +618,16 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int ret;
+ /*
+ * Adreno 506 has the CPZ retention feature and doesn't require
+ * the zap shader to be resumed
+ */
+ if (adreno_is_a506(adreno_gpu))
+ return 0;
+
ret = qcom_scm_set_remote_state(SCM_GPU_ZAP_SHADER_RESUME, GPU_PAS_ID);
if (ret)
DRM_ERROR("%s: zap-shader resume failed: %d\n",
@@ -733,9 +739,10 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
0x00100000 + adreno_gpu->gmem - 1);
gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);
- if (adreno_is_a508(adreno_gpu) || adreno_is_a510(adreno_gpu)) {
+ if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
+ adreno_is_a510(adreno_gpu)) {
gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x20);
- if (adreno_is_a508(adreno_gpu))
+ if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu))
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
else
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20);
@@ -751,7 +758,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
}
- if (adreno_is_a508(adreno_gpu))
+ if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu))
gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
(0x100 << 11 | 0x100 << 22));
else if (adreno_is_a509(adreno_gpu) || adreno_is_a510(adreno_gpu) ||
@@ -769,8 +776,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
* Disable the RB sampler datapath DP2 clock gating optimization
* for 1-SP GPUs, as it is enabled by default.
*/
- if (adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) ||
- adreno_is_a512(adreno_gpu))
+ if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
+ adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu))
gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, 0, (1 << 9));
/* Disable UCHE global filter as SP can invalidate/flush independently */
@@ -851,10 +858,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
/* UCHE */
gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));
- if (adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) ||
- adreno_is_a510(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
- adreno_is_a530(adreno_gpu))
- gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
+ /* SMMU */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
ADRENO_PROTECT_RW(0x10000, 0x8000));
gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0);
@@ -895,8 +900,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
- if (!(adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) ||
- adreno_is_a510(adreno_gpu) || adreno_is_a512(adreno_gpu)))
+ if (adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))
a5xx_gpmu_ucode_init(gpu);
ret = a5xx_ucode_init(gpu);
@@ -927,6 +931,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
if (IS_ERR(a5xx_gpu->shadow))
return PTR_ERR(a5xx_gpu->shadow);
+
+ msm_gem_object_set_name(a5xx_gpu->shadow_bo, "shadow");
}
gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR,
@@ -1254,6 +1260,7 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
/*
@@ -1263,6 +1270,11 @@ static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
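+ /*
+ * Presumably a debugging aid: when error interrupts are disabled,
+ * keep only the retirement (CACHE_FLUSH_TS) and CP_SW interrupts.
+ */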
+ if (priv->disable_err_irq) {
+ status &= A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS |
+ A5XX_RBBM_INT_0_MASK_CP_SW;
+ }
+
/* Pass status to a5xx_rbbm_err_irq because we've already cleared it */
if (status & RBBM_ERROR_MASK)
a5xx_rbbm_err_irq(gpu, status);
@@ -1338,7 +1350,7 @@ static int a5xx_pm_resume(struct msm_gpu *gpu)
if (ret)
return ret;
- /* Adreno 508, 509, 510, 512 needs manual RBBM sus/res control */
+ /* Adreno 506, 508, 509, 510, 512 need manual RBBM sus/res control */
if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))) {
/* Halt the sp_input_clk at HM level */
gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0x00000055);
@@ -1381,8 +1393,9 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
u32 mask = 0xf;
int i, ret;
- /* A508, A510 have 3 XIN ports in VBIF */
- if (adreno_is_a508(adreno_gpu) || adreno_is_a510(adreno_gpu))
+ /* A506, A508, A510 have 3 XIN ports in VBIF */
+ if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
+ adreno_is_a510(adreno_gpu))
mask = 0x7;
/* Clear the VBIF pipe before shutting down */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 71e52b2b2025..3e325e2a2b1b 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1146,7 +1146,7 @@ static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
}
static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
- size_t size, u64 iova)
+ size_t size, u64 iova, const char *name)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct drm_device *dev = a6xx_gpu->base.base.dev;
@@ -1181,6 +1181,8 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
bo->virt = msm_gem_get_vaddr(bo->obj);
bo->size = size;
+ msm_gem_object_set_name(bo->obj, name);
+
return 0;
}
@@ -1515,7 +1517,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
*/
gmu->dummy.size = SZ_4K;
if (adreno_is_a660_family(adreno_gpu)) {
- ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7, 0x60400000);
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7,
+ 0x60400000, "debug");
if (ret)
goto err_memory;
@@ -1523,42 +1526,46 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
}
/* Allocate memory for the GMU dummy page */
- ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size, 0x60000000);
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size,
+ 0x60000000, "dummy");
if (ret)
goto err_memory;
+ /* Note that the a650 family also includes the a660 family: */
if (adreno_is_a650_family(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
- SZ_16M - SZ_16K, 0x04000);
+ SZ_16M - SZ_16K, 0x04000, "icache");
if (ret)
goto err_memory;
} else if (adreno_is_a640_family(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
- SZ_256K - SZ_16K, 0x04000);
+ SZ_256K - SZ_16K, 0x04000, "icache");
if (ret)
goto err_memory;
ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache,
- SZ_256K - SZ_16K, 0x44000);
+ SZ_256K - SZ_16K, 0x44000, "dcache");
if (ret)
goto err_memory;
} else {
+ BUG_ON(adreno_is_a660_family(adreno_gpu));
+
/* HFI v1, has sptprac */
gmu->legacy = true;
/* Allocate memory for the GMU debug region */
- ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0);
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0, "debug");
if (ret)
goto err_memory;
}
/* Allocate memory for the HFI queues */
- ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0);
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi");
if (ret)
goto err_memory;
/* Allocate memory for the GMU log region */
- ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0);
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0, "log");
if (ret)
goto err_memory;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 267a880811d6..51b83776951b 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -106,7 +106,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
u32 asid;
u64 memptr = rbmemptr(ring, ttbr0);
- if (ctx->seqno == a6xx_gpu->cur_ctx_seqno)
+ if (ctx->seqno == a6xx_gpu->base.base.cur_ctx_seqno)
return;
if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
@@ -138,14 +138,11 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, 0x31);
-
- a6xx_gpu->cur_ctx_seqno = ctx->seqno;
}
static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
- struct msm_drm_private *priv = gpu->dev->dev_private;
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = submit->ring;
@@ -177,7 +174,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- if (priv->lastctx == submit->queue->ctx)
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -1071,6 +1068,8 @@ static int hw_init(struct msm_gpu *gpu)
if (IS_ERR(a6xx_gpu->shadow))
return PTR_ERR(a6xx_gpu->shadow);
+
+ msm_gem_object_set_name(a6xx_gpu->shadow_bo, "shadow");
}
gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR_LO,
@@ -1081,7 +1080,7 @@ static int hw_init(struct msm_gpu *gpu)
/* Always come up on rb 0 */
a6xx_gpu->cur_ring = gpu->rb[0];
- a6xx_gpu->cur_ctx_seqno = 0;
+ gpu->cur_ctx_seqno = 0;
/* Enable the SQE to start the CP engine */
gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
@@ -1376,10 +1375,14 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS);
gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status);
+ if (priv->disable_err_irq)
+ status &= A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS;
+
if (status & A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT)
a6xx_fault_detect_irq(gpu);
@@ -1424,17 +1427,24 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
{
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
- u32 gpu_scid, cntl1_regval = 0;
+ u32 cntl1_regval = 0;
if (IS_ERR(a6xx_gpu->llc_mmio))
return;
if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
- gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
+ u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
gpu_scid &= 0x1f;
cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) |
(gpu_scid << 15) | (gpu_scid << 20);
+
+ /* On A660, the SCID programming for UCHE traffic is done in
+ * A6XX_GBIF_SCACHE_CNTL0[14:10]
+ */
+ if (adreno_is_a660_family(adreno_gpu))
+ gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
+ (1 << 8), (gpu_scid << 10) | (1 << 8));
}
/*
@@ -1471,13 +1481,6 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
}
gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval);
-
- /* On A660, the SCID programming for UCHE traffic is done in
- * A6XX_GBIF_SCACHE_CNTL0[14:10]
- */
- if (adreno_is_a660_family(adreno_gpu))
- gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
- (1 << 8), (gpu_scid << 10) | (1 << 8));
}
static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
@@ -1640,7 +1643,7 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
return (unsigned long)busy_time;
}
-void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
+static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 8e5527c881b1..86e0a7c3fe6d 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -20,16 +20,6 @@ struct a6xx_gpu {
struct msm_ringbuffer *cur_ring;
- /**
- * cur_ctx_seqno:
- *
- * The ctx->seqno value of the context with current pgtables
- * installed. Tracked by seqno rather than pointer value to
- * avoid dangling pointers, and cases where a ctx can be freed
- * and a new one created with the same address.
- */
- int cur_ctx_seqno;
-
struct a6xx_gmu gmu;
struct drm_gem_object *shadow_bo;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index 7501849ed15d..55f443328d8e 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -42,7 +42,15 @@ struct a6xx_gpu_state {
struct a6xx_gpu_state_obj *cx_debugbus;
int nr_cx_debugbus;
+ struct msm_gpu_state_bo *gmu_log;
+ struct msm_gpu_state_bo *gmu_hfi;
+ struct msm_gpu_state_bo *gmu_debug;
+
+ s32 hfi_queue_history[2][HFI_HISTORY_SZ];
+
struct list_head objs;
+
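+ /* Presumably: whether the GPU had completed hw_init when this state was captured */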
+ bool gpu_initialized;
};
static inline int CRASHDUMP_WRITE(u64 *in, u32 reg, u32 val)
@@ -777,12 +785,12 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
a6xx_state->gmu_registers = state_kcalloc(a6xx_state,
- 2, sizeof(*a6xx_state->gmu_registers));
+ 3, sizeof(*a6xx_state->gmu_registers));
if (!a6xx_state->gmu_registers)
return;
- a6xx_state->nr_gmu_registers = 2;
+ a6xx_state->nr_gmu_registers = 3;
/* Get the CX GMU registers from AHB */
_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0],
@@ -800,6 +808,45 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
&a6xx_state->gmu_registers[2], false);
}
+static struct msm_gpu_state_bo *a6xx_snapshot_gmu_bo(
+ struct a6xx_gpu_state *a6xx_state, struct a6xx_gmu_bo *bo)
+{
+ struct msm_gpu_state_bo *snapshot;
+
+ snapshot = state_kcalloc(a6xx_state, 1, sizeof(*snapshot));
+ if (!snapshot)
+ return NULL;
+
+ snapshot->iova = bo->iova;
+ snapshot->size = bo->size;
+ snapshot->data = kvzalloc(snapshot->size, GFP_KERNEL);
+ if (!snapshot->data)
+ return NULL;
+
+ memcpy(snapshot->data, bo->virt, bo->size);
+
+ return snapshot;
+}
+
+static void a6xx_snapshot_gmu_hfi_history(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ unsigned i, j;
+
+ BUILD_BUG_ON(ARRAY_SIZE(gmu->queues) != ARRAY_SIZE(a6xx_state->hfi_queue_history));
+
+ for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
+ struct a6xx_hfi_queue *queue = &gmu->queues[i];
+ for (j = 0; j < HFI_HISTORY_SZ; j++) {
+ unsigned idx = (j + queue->history_idx) % HFI_HISTORY_SZ;
+ a6xx_state->hfi_queue_history[i][j] = queue->history[idx];
+ }
+ }
+}
+
#define A6XX_GBIF_REGLIST_SIZE 1
static void a6xx_get_registers(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
@@ -937,6 +984,12 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
a6xx_get_gmu_registers(gpu, a6xx_state);
+ a6xx_state->gmu_log = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.log);
+ a6xx_state->gmu_hfi = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.hfi);
+ a6xx_state->gmu_debug = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.debug);
+
+ a6xx_snapshot_gmu_hfi_history(gpu, a6xx_state);
+
/* If GX isn't on the rest of the data isn't going to be accessible */
if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
return &a6xx_state->base;
@@ -950,7 +1003,8 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
* write out GPU state, so we need to skip this when the SMMU is
* stalled in response to an iova fault
*/
- if (!stalled && !a6xx_crashdumper_init(gpu, &_dumper)) {
+ if (!stalled && !gpu->needs_hw_init &&
+ !a6xx_crashdumper_init(gpu, &_dumper)) {
dumper = &_dumper;
}
@@ -967,6 +1021,8 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
if (snapshot_debugbus)
a6xx_get_debugbus(gpu, a6xx_state);
+ a6xx_state->gpu_initialized = !gpu->needs_hw_init;
+
return &a6xx_state->base;
}
@@ -978,6 +1034,12 @@ static void a6xx_gpu_state_destroy(struct kref *kref)
struct a6xx_gpu_state *a6xx_state = container_of(state,
struct a6xx_gpu_state, base);
+ if (a6xx_state->gmu_log)
+ kvfree(a6xx_state->gmu_log->data);
+
+ if (a6xx_state->gmu_hfi)
+ kvfree(a6xx_state->gmu_hfi->data);
+
list_for_each_entry_safe(obj, tmp, &a6xx_state->objs, node)
kfree(obj);
@@ -1189,8 +1251,48 @@ void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
if (IS_ERR_OR_NULL(state))
return;
+ drm_printf(p, "gpu-initialized: %d\n", a6xx_state->gpu_initialized);
+
adreno_show(gpu, state, p);
+ drm_puts(p, "gmu-log:\n");
+ if (a6xx_state->gmu_log) {
+ struct msm_gpu_state_bo *gmu_log = a6xx_state->gmu_log;
+
+ drm_printf(p, " iova: 0x%016llx\n", gmu_log->iova);
+ drm_printf(p, " size: %zu\n", gmu_log->size);
+ adreno_show_object(p, &gmu_log->data, gmu_log->size,
+ &gmu_log->encoded);
+ }
+
+ drm_puts(p, "gmu-hfi:\n");
+ if (a6xx_state->gmu_hfi) {
+ struct msm_gpu_state_bo *gmu_hfi = a6xx_state->gmu_hfi;
+ unsigned i, j;
+
+ drm_printf(p, " iova: 0x%016llx\n", gmu_hfi->iova);
+ drm_printf(p, " size: %zu\n", gmu_hfi->size);
+ for (i = 0; i < ARRAY_SIZE(a6xx_state->hfi_queue_history); i++) {
+ drm_printf(p, " queue-history[%u]:", i);
+ for (j = 0; j < HFI_HISTORY_SZ; j++) {
+ drm_printf(p, " %d", a6xx_state->hfi_queue_history[i][j]);
+ }
+ drm_printf(p, "\n");
+ }
+ adreno_show_object(p, &gmu_hfi->data, gmu_hfi->size,
+ &gmu_hfi->encoded);
+ }
+
+ drm_puts(p, "gmu-debug:\n");
+ if (a6xx_state->gmu_debug) {
+ struct msm_gpu_state_bo *gmu_debug = a6xx_state->gmu_debug;
+
+ drm_printf(p, " iova: 0x%016llx\n", gmu_debug->iova);
+ drm_printf(p, " size: %zu\n", gmu_debug->size);
+ adreno_show_object(p, &gmu_debug->data, gmu_debug->size,
+ &gmu_debug->encoded);
+ }
+
drm_puts(p, "registers:\n");
for (i = 0; i < a6xx_state->nr_registers; i++) {
struct a6xx_gpu_state_obj *obj = &a6xx_state->registers[i];
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index d4c65bf0a1b7..d73fce5fdf1f 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -36,6 +36,8 @@ static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu,
hdr = queue->data[index];
+ queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index;
+
/*
* If we are to assume that the GMU firmware is in fact a rational actor
* and is programmed to not send us a larger response than we expect
@@ -75,6 +77,8 @@ static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
return -ENOSPC;
}
+ queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index;
+
for (i = 0; i < dwords; i++) {
queue->data[index] = data[i];
index = (index + 1) % header->size;
@@ -600,6 +604,9 @@ void a6xx_hfi_stop(struct a6xx_gmu *gmu)
queue->header->read_index = 0;
queue->header->write_index = 0;
+
+ memset(&queue->history, 0xff, sizeof(queue->history));
+ queue->history_idx = 0;
}
}
@@ -612,6 +619,9 @@ static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
queue->data = virt;
atomic_set(&queue->seqnum, 0);
+ memset(&queue->history, 0xff, sizeof(queue->history));
+ queue->history_idx = 0;
+
/* Set up the shared memory header */
header->iova = iova;
header->type = 10 << 8 | id;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
index 2bd670ca42d6..528110169398 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
@@ -33,6 +33,17 @@ struct a6xx_hfi_queue {
spinlock_t lock;
u32 *data;
atomic_t seqnum;
+
+ /*
+ * Tracking for the start index of the last N messages in the
+ * queue, for the benefit of devcore dump / crashdec (since
+ * parsing in the reverse direction to decode the last N
+ * messages is difficult to do and would rely on heuristics
+ * which are not guaranteed to be correct)
+ */
+#define HFI_HISTORY_SZ 8
+ s32 history[HFI_HISTORY_SZ];
+ u8 history_idx;
};
/* This is the outgoing queue to the GMU */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 2a6ce76656aa..93005839b5da 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -132,6 +132,24 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
+ .rev = ADRENO_REV(5, 0, 6, ANY_ID),
+ .revn = 506,
+ .name = "A506",
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ },
+ .gmem = (SZ_128K + SZ_8K),
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC, which appears to make it grumpy
+ */
+ .inactive_period = 250,
+ .quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
+ ADRENO_QUIRK_LMLOADKILL_DISABLE,
+ .init = a5xx_gpu_init,
+ .zapfw = "a506_zap.mdt",
+ }, {
.rev = ADRENO_REV(5, 0, 8, ANY_ID),
.revn = 508,
.name = "A508",
@@ -408,9 +426,9 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
return NULL;
}
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&gpu->lock);
ret = msm_gpu_hw_init(gpu);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
pm_runtime_put_autosuspend(&pdev->dev);
if (ret) {
DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
@@ -427,13 +445,6 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
return gpu;
}
-static void set_gpu_pdev(struct drm_device *dev,
- struct platform_device *pdev)
-{
- struct msm_drm_private *priv = dev->dev_private;
- priv->gpu_pdev = pdev;
-}
-
static int find_chipid(struct device *dev, struct adreno_rev *rev)
{
struct device_node *node = dev->of_node;
@@ -482,8 +493,8 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
{
static struct adreno_platform_config config = {};
const struct adreno_info *info;
- struct drm_device *drm = dev_get_drvdata(master);
- struct msm_drm_private *priv = drm->dev_private;
+ struct msm_drm_private *priv = dev_get_drvdata(master);
+ struct drm_device *drm = priv->dev;
struct msm_gpu *gpu;
int ret;
@@ -492,7 +503,7 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
return ret;
dev->platform_data = &config;
- set_gpu_pdev(drm, to_platform_device(dev));
+ priv->gpu_pdev = to_platform_device(dev);
info = adreno_info(config.rev);
@@ -521,12 +532,13 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
static void adreno_unbind(struct device *dev, struct device *master,
void *data)
{
+ struct msm_drm_private *priv = dev_get_drvdata(master);
struct msm_gpu *gpu = dev_to_gpu(dev);
pm_runtime_force_suspend(dev);
gpu->funcs->destroy(gpu);
- set_gpu_pdev(dev_get_drvdata(master), NULL);
+ priv->gpu_pdev = NULL;
}
static const struct component_ops a3xx_ops = {
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 748665232d29..f33cfa4ef1c8 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -504,6 +504,8 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int i, count = 0;
+ WARN_ON(!mutex_is_locked(&gpu->lock));
+
kref_init(&state->ref);
ktime_get_real_ts64(&state->time);
@@ -630,7 +632,7 @@ static char *adreno_gpu_ascii85_encode(u32 *src, size_t len)
}
/* len is expected to be in bytes */
-static void adreno_show_object(struct drm_printer *p, void **ptr, int len,
+void adreno_show_object(struct drm_printer *p, void **ptr, int len,
bool *encoded)
{
if (!*ptr || !len)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 225c277a6223..cffabe7d33c1 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -201,6 +201,11 @@ static inline int adreno_is_a430(struct adreno_gpu *gpu)
return gpu->revn == 430;
}
+static inline int adreno_is_a506(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 506;
+}
+
static inline int adreno_is_a508(struct adreno_gpu *gpu)
{
return gpu->revn == 508;
@@ -306,6 +311,8 @@ void adreno_gpu_state_destroy(struct msm_gpu_state *state);
int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
int adreno_gpu_state_put(struct msm_gpu_state *state);
+void adreno_show_object(struct drm_printer *p, void **ptr, int len,
+ bool *encoded);
/*
* Common helper function to initialize the default address space for arm-smmu
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 967245b8cc02..e7c9fe1a250f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -337,7 +337,8 @@ static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
}
static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
- struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
+ struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer,
+ struct dpu_hw_stage_cfg *stage_cfg)
{
struct drm_plane *plane;
struct drm_framebuffer *fb;
@@ -346,7 +347,6 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
struct dpu_plane_state *pstate = NULL;
struct dpu_format *format;
struct dpu_hw_ctl *ctl = mixer->lm_ctl;
- struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;
u32 flush_mask;
uint32_t stage_idx, lm_idx;
@@ -422,6 +422,7 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
struct dpu_crtc_mixer *mixer = cstate->mixers;
struct dpu_hw_ctl *ctl;
struct dpu_hw_mixer *lm;
+ struct dpu_hw_stage_cfg stage_cfg;
int i;
DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name);
@@ -435,9 +436,9 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
}
/* initialize stage cfg */
- memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));
+ memset(&stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));
- _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);
+ _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer, &stage_cfg);
for (i = 0; i < cstate->num_mixers; i++) {
ctl = mixer[i].lm_ctl;
@@ -458,7 +459,7 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
mixer[i].flush_mask);
ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
- &dpu_crtc->stage_cfg);
+ &stage_cfg);
}
}
@@ -923,6 +924,20 @@ static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
return &cstate->base;
}
+static void dpu_crtc_atomic_print_state(struct drm_printer *p,
+ const struct drm_crtc_state *state)
+{
+ const struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
+ int i;
+
+ for (i = 0; i < cstate->num_mixers; i++) {
+ drm_printf(p, "\tlm[%d]=%d\n", i, cstate->mixers[i].hw_lm->idx - LM_0);
+ drm_printf(p, "\tctl[%d]=%d\n", i, cstate->mixers[i].lm_ctl->idx - CTL_0);
+ if (cstate->mixers[i].hw_dspp)
+ drm_printf(p, "\tdspp[%d]=%d\n", i, cstate->mixers[i].hw_dspp->idx - DSPP_0);
+ }
+}
+
static void dpu_crtc_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@@ -1423,15 +1438,16 @@ DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state);
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dentry *debugfs_root;
- dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
+ debugfs_root = debugfs_create_dir(dpu_crtc->name,
crtc->dev->primary->debugfs_root);
debugfs_create_file("status", 0400,
- dpu_crtc->debugfs_root,
+ debugfs_root,
dpu_crtc, &_dpu_debugfs_status_fops);
debugfs_create_file("state", 0600,
- dpu_crtc->debugfs_root,
+ debugfs_root,
&dpu_crtc->base,
&dpu_crtc_debugfs_state_fops);
@@ -1449,13 +1465,6 @@ static int dpu_crtc_late_register(struct drm_crtc *crtc)
return _dpu_crtc_init_debugfs(crtc);
}
-static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
-{
- struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
-
- debugfs_remove_recursive(dpu_crtc->debugfs_root);
-}
-
static const struct drm_crtc_funcs dpu_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = dpu_crtc_destroy,
@@ -1463,8 +1472,8 @@ static const struct drm_crtc_funcs dpu_crtc_funcs = {
.reset = dpu_crtc_reset,
.atomic_duplicate_state = dpu_crtc_duplicate_state,
.atomic_destroy_state = dpu_crtc_destroy_state,
+ .atomic_print_state = dpu_crtc_atomic_print_state,
.late_register = dpu_crtc_late_register,
- .early_unregister = dpu_crtc_early_unregister,
.verify_crc_source = dpu_crtc_verify_crc_source,
.set_crc_source = dpu_crtc_set_crc_source,
.enable_vblank = msm_crtc_enable_vblank,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index ae9546ca1359..b8785c394fcc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -129,8 +129,6 @@ struct dpu_crtc_frame_event {
* @drm_requested_vblank : Whether vblanks have been enabled in the encoder
* @property_info : Opaque structure for generic property support
* @property_defaults : Array of default values for generic property support
- * @stage_cfg : H/w mixer stage configuration
- * @debugfs_root : Parent of debugfs node
* @vblank_cb_count : count of vblank callback since last reset
* @play_count : frame count between crtc enable and disable
* @vblank_cb_time : ktime at vblank count reset
@@ -161,9 +159,6 @@ struct dpu_crtc {
struct drm_pending_vblank_event *event;
u32 vsync_count;
- struct dpu_hw_stage_cfg stage_cfg;
- struct dentry *debugfs_root;
-
u32 vblank_cb_count;
u64 play_count;
ktime_t vblank_cb_time;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index e7ee4cfb8461..1e648db439f9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -995,9 +995,6 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
trace_dpu_enc_mode_set(DRMID(drm_enc));
- if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS)
- msm_dp_display_mode_set(dpu_enc->dp, drm_enc, mode, adj_mode);
-
list_for_each_entry(conn_iter, connector_list, head)
if (conn_iter->encoder == drm_enc)
conn = conn_iter;
@@ -1148,10 +1145,6 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
struct msm_drm_private *priv;
struct drm_display_mode *cur_mode = NULL;
- if (!drm_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
dpu_enc = to_dpu_encoder_virt(drm_enc);
mutex_lock(&dpu_enc->enc_lock);
@@ -1177,14 +1170,6 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
_dpu_encoder_virt_enable_helper(drm_enc);
- if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) {
- ret = msm_dp_display_enable(dpu_enc->dp, drm_enc);
- if (ret) {
- DPU_ERROR_ENC(dpu_enc, "dp display enable failed: %d\n",
- ret);
- goto out;
- }
- }
dpu_enc->enabled = true;
out:
@@ -1197,14 +1182,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
struct msm_drm_private *priv;
int i = 0;
- if (!drm_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- } else if (!drm_enc->dev) {
- DPU_ERROR("invalid dev\n");
- return;
- }
-
dpu_enc = to_dpu_encoder_virt(drm_enc);
DPU_DEBUG_ENC(dpu_enc, "\n");
@@ -1218,11 +1195,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
/* wait for idle */
dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
- if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) {
- if (msm_dp_display_pre_disable(dpu_enc->dp, drm_enc))
- DPU_ERROR_ENC(dpu_enc, "dp display push idle failed\n");
- }
-
dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
@@ -1247,11 +1219,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
- if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) {
- if (msm_dp_display_disable(dpu_enc->dp, drm_enc))
- DPU_ERROR_ENC(dpu_enc, "dp display disable failed\n");
- }
-
mutex_unlock(&dpu_enc->enc_lock);
}
@@ -2128,11 +2095,8 @@ static void dpu_encoder_frame_done_timeout(struct timer_list *t)
static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
.mode_set = dpu_encoder_virt_mode_set,
.disable = dpu_encoder_virt_disable,
- .enable = dpu_kms_encoder_enable,
+ .enable = dpu_encoder_virt_enable,
.atomic_check = dpu_encoder_virt_atomic_check,
-
- /* This is called by dpu_kms_encoder_enable */
- .commit = dpu_encoder_virt_enable,
};
static const struct drm_encoder_funcs dpu_encoder_funcs = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index 185379b18572..ddd9d89cd456 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -698,17 +698,17 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
{
struct dpu_encoder_phys *phys_enc = NULL;
struct dpu_encoder_irq *irq;
- int i, ret = 0;
+ int i;
if (!p) {
- ret = -EINVAL;
- goto fail;
+ DPU_ERROR("failed to create encoder due to invalid parameter\n");
+ return ERR_PTR(-EINVAL);
}
phys_enc = kzalloc(sizeof(*phys_enc), GFP_KERNEL);
if (!phys_enc) {
- ret = -ENOMEM;
- goto fail;
+ DPU_ERROR("failed to create encoder due to memory allocation error\n");
+ return ERR_PTR(-ENOMEM);
}
phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
@@ -748,11 +748,4 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
DPU_DEBUG_VIDENC(phys_enc, "created intf idx:%d\n", p->intf_idx);
return phys_enc;
-
-fail:
- DPU_ERROR("failed to create encoder\n");
- if (phys_enc)
- dpu_encoder_phys_vid_destroy(phys_enc);
-
- return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index ce6f32a919e5..aa75991903a6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -45,7 +45,7 @@
(PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))
#define CTL_SC7280_MASK \
- (BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_FETCH_ACTIVE))
+ (BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_FETCH_ACTIVE) | BIT(DPU_CTL_VM_CFG))
#define MERGE_3D_SM8150_MASK (0)
@@ -856,9 +856,9 @@ static const struct dpu_intf_cfg sm8150_intf[] = {
};
static const struct dpu_intf_cfg sc7280_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x34000, INTF_DP, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
+ INTF_BLK("intf_0", INTF_0, 0x34000, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
INTF_BLK("intf_1", INTF_1, 0x35000, INTF_DSI, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
- INTF_BLK("intf_5", INTF_5, 0x39000, INTF_EDP, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 22, 23),
+ INTF_BLK("intf_5", INTF_5, 0x39000, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 22, 23),
};
/*************************************************************
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index 4ade44bbd37e..31af04afda7d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -179,13 +179,16 @@ enum {
/**
* CTL sub-blocks
- * @DPU_CTL_SPLIT_DISPLAY CTL supports video mode split display
+ * @DPU_CTL_SPLIT_DISPLAY: CTL supports video mode split display
+ * @DPU_CTL_FETCH_ACTIVE: Active CTL for fetch HW (SSPPs)
+ * @DPU_CTL_VM_CFG: CTL config to support multiple VMs
* @DPU_CTL_MAX
*/
enum {
DPU_CTL_SPLIT_DISPLAY = 0x1,
DPU_CTL_ACTIVE_CFG,
DPU_CTL_FETCH_ACTIVE,
+ DPU_CTL_VM_CFG,
DPU_CTL_MAX
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 64740ddb983e..02da9ecf71f1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -36,6 +36,7 @@
#define MERGE_3D_IDX 23
#define INTF_IDX 31
#define CTL_INVALID_BIT 0xffff
+#define CTL_DEFAULT_GROUP_ID 0xf
static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
@@ -498,6 +499,13 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
u32 intf_active = 0;
u32 mode_sel = 0;
+ /* CTL_TOP[31:28] carries group_id to collate CTL paths
+ * per VM. Explicitly disable it until VM support is
+ * added in SW. The power-on reset value does not disable it.
+ */
+ if ((test_bit(DPU_CTL_VM_CFG, &ctx->caps->features)))
+ mode_sel = CTL_DEFAULT_GROUP_ID << 28;
+
if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
mode_sel |= BIT(17);
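
As a worked example (not part of the patch, and assuming only the two fields visible in this hunk), the value that ends up in mode_sel for a command-mode interface on hardware with DPU_CTL_VM_CFG set would be:

	u32 mode_sel = CTL_DEFAULT_GROUP_ID << 28;	/* 0xf0000000: default group_id in CTL_TOP[31:28] */

	mode_sel |= BIT(17);				/* command-mode interface select */
	/* mode_sel == 0xf0020000 before any other fields are OR'ed in */
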
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index d2b6dca487e3..a77a5eaa78ad 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -30,6 +30,9 @@
#define MDP_AD4_INTR_STATUS_OFF 0x420
#define MDP_INTF_0_OFF_REV_7xxx 0x34000
#define MDP_INTF_1_OFF_REV_7xxx 0x35000
+#define MDP_INTF_2_OFF_REV_7xxx 0x36000
+#define MDP_INTF_3_OFF_REV_7xxx 0x37000
+#define MDP_INTF_4_OFF_REV_7xxx 0x38000
#define MDP_INTF_5_OFF_REV_7xxx 0x39000
/**
@@ -111,6 +114,21 @@ static const struct dpu_intr_reg dpu_intr_set[] = {
MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_STATUS
},
{
+ MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_CLEAR,
+ MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_EN,
+ MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_CLEAR,
+ MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_EN,
+ MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_CLEAR,
+ MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_EN,
+ MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_STATUS
+ },
+ {
MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_CLEAR,
MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_EN,
MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_STATUS
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
index d50e78c9f148..1ab75cccd145 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -26,6 +26,9 @@ enum dpu_hw_intr_reg {
MDP_AD4_1_INTR,
MDP_INTF0_7xxx_INTR,
MDP_INTF1_7xxx_INTR,
+ MDP_INTF2_7xxx_INTR,
+ MDP_INTF3_7xxx_INTR,
+ MDP_INTF4_7xxx_INTR,
MDP_INTF5_7xxx_INTR,
MDP_INTR_MAX,
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index f9460672176a..09cdc3576653 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -8,6 +8,8 @@
#include "dpu_hw_sspp.h"
#include "dpu_kms.h"
+#include <drm/drm_file.h>
+
#define DPU_FETCH_CONFIG_RESET_VALUE 0x00000087
/* DPU_SSPP_SRC */
@@ -75,6 +77,7 @@
#define SSPP_TRAFFIC_SHAPER 0x130
#define SSPP_CDP_CNTL 0x134
#define SSPP_UBWC_ERROR_STATUS 0x138
+#define SSPP_CDP_CNTL_REC1 0x13c
#define SSPP_TRAFFIC_SHAPER_PREFILL 0x150
#define SSPP_TRAFFIC_SHAPER_REC1_PREFILL 0x154
#define SSPP_TRAFFIC_SHAPER_REC1 0x158
@@ -413,13 +416,11 @@ static void dpu_hw_sspp_setup_pe_config(struct dpu_hw_pipe *ctx,
static void _dpu_hw_sspp_setup_scaler3(struct dpu_hw_pipe *ctx,
struct dpu_hw_pipe_cfg *sspp,
- struct dpu_hw_pixel_ext *pe,
void *scaler_cfg)
{
u32 idx;
struct dpu_hw_scaler3_cfg *scaler3_cfg = scaler_cfg;
- (void)pe;
if (_sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx) || !sspp
|| !scaler3_cfg)
return;
@@ -539,7 +540,7 @@ static void dpu_hw_sspp_setup_sourceaddress(struct dpu_hw_pipe *ctx,
}
static void dpu_hw_sspp_setup_csc(struct dpu_hw_pipe *ctx,
- struct dpu_csc_cfg *data)
+ const struct dpu_csc_cfg *data)
{
u32 idx;
bool csc10 = false;
@@ -571,19 +572,20 @@ static void dpu_hw_sspp_setup_solidfill(struct dpu_hw_pipe *ctx, u32 color, enum
}
static void dpu_hw_sspp_setup_danger_safe_lut(struct dpu_hw_pipe *ctx,
- struct dpu_hw_pipe_qos_cfg *cfg)
+ u32 danger_lut,
+ u32 safe_lut)
{
u32 idx;
if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
return;
- DPU_REG_WRITE(&ctx->hw, SSPP_DANGER_LUT + idx, cfg->danger_lut);
- DPU_REG_WRITE(&ctx->hw, SSPP_SAFE_LUT + idx, cfg->safe_lut);
+ DPU_REG_WRITE(&ctx->hw, SSPP_DANGER_LUT + idx, danger_lut);
+ DPU_REG_WRITE(&ctx->hw, SSPP_SAFE_LUT + idx, safe_lut);
}
static void dpu_hw_sspp_setup_creq_lut(struct dpu_hw_pipe *ctx,
- struct dpu_hw_pipe_qos_cfg *cfg)
+ u64 creq_lut)
{
u32 idx;
@@ -591,11 +593,11 @@ static void dpu_hw_sspp_setup_creq_lut(struct dpu_hw_pipe *ctx,
return;
if (ctx->cap && test_bit(DPU_SSPP_QOS_8LVL, &ctx->cap->features)) {
- DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_0 + idx, cfg->creq_lut);
+ DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_0 + idx, creq_lut);
DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_1 + idx,
- cfg->creq_lut >> 32);
+ creq_lut >> 32);
} else {
- DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + idx, cfg->creq_lut);
+ DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + idx, creq_lut);
}
}
@@ -625,10 +627,12 @@ static void dpu_hw_sspp_setup_qos_ctrl(struct dpu_hw_pipe *ctx,
}
static void dpu_hw_sspp_setup_cdp(struct dpu_hw_pipe *ctx,
- struct dpu_hw_pipe_cdp_cfg *cfg)
+ struct dpu_hw_pipe_cdp_cfg *cfg,
+ enum dpu_sspp_multirect_index index)
{
u32 idx;
u32 cdp_cntl = 0;
+ u32 cdp_cntl_offset = 0;
if (!ctx || !cfg)
return;
@@ -636,6 +640,11 @@ static void dpu_hw_sspp_setup_cdp(struct dpu_hw_pipe *ctx,
if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
return;
+ if (index == DPU_SSPP_RECT_SOLO || index == DPU_SSPP_RECT_0)
+ cdp_cntl_offset = SSPP_CDP_CNTL;
+ else
+ cdp_cntl_offset = SSPP_CDP_CNTL_REC1;
+
if (cfg->enable)
cdp_cntl |= BIT(0);
if (cfg->ubwc_meta_enable)
@@ -645,7 +654,7 @@ static void dpu_hw_sspp_setup_cdp(struct dpu_hw_pipe *ctx,
if (cfg->preload_ahead == DPU_SSPP_CDP_PRELOAD_AHEAD_64)
cdp_cntl |= BIT(3);
- DPU_REG_WRITE(&ctx->hw, SSPP_CDP_CNTL, cdp_cntl);
+ DPU_REG_WRITE(&ctx->hw, cdp_cntl_offset, cdp_cntl);
}
static void _setup_layer_ops(struct dpu_hw_pipe *c,
@@ -685,6 +694,71 @@ static void _setup_layer_ops(struct dpu_hw_pipe *c,
c->ops.setup_cdp = dpu_hw_sspp_setup_cdp;
}
+#ifdef CONFIG_DEBUG_FS
+int _dpu_hw_sspp_init_debugfs(struct dpu_hw_pipe *hw_pipe, struct dpu_kms *kms, struct dentry *entry)
+{
+ const struct dpu_sspp_cfg *cfg = hw_pipe->cap;
+ const struct dpu_sspp_sub_blks *sblk = cfg->sblk;
+ struct dentry *debugfs_root;
+ char sspp_name[32];
+
+ snprintf(sspp_name, sizeof(sspp_name), "%d", hw_pipe->idx);
+
+ /* create overall sub-directory for the pipe */
+ debugfs_root =
+ debugfs_create_dir(sspp_name, entry);
+
+ /* don't error check these */
+ debugfs_create_xul("features", 0600,
+ debugfs_root, (unsigned long *)&hw_pipe->cap->features);
+
+ /* add register dump support */
+ dpu_debugfs_create_regset32("src_blk", 0400,
+ debugfs_root,
+ sblk->src_blk.base + cfg->base,
+ sblk->src_blk.len,
+ kms);
+
+ if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
+ cfg->features & BIT(DPU_SSPP_SCALER_QSEED3LITE) ||
+ cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) ||
+ cfg->features & BIT(DPU_SSPP_SCALER_QSEED4))
+ dpu_debugfs_create_regset32("scaler_blk", 0400,
+ debugfs_root,
+ sblk->scaler_blk.base + cfg->base,
+ sblk->scaler_blk.len,
+ kms);
+
+ if (cfg->features & BIT(DPU_SSPP_CSC) ||
+ cfg->features & BIT(DPU_SSPP_CSC_10BIT))
+ dpu_debugfs_create_regset32("csc_blk", 0400,
+ debugfs_root,
+ sblk->csc_blk.base + cfg->base,
+ sblk->csc_blk.len,
+ kms);
+
+ debugfs_create_u32("xin_id",
+ 0400,
+ debugfs_root,
+ (u32 *) &cfg->xin_id);
+ debugfs_create_u32("clk_ctrl",
+ 0400,
+ debugfs_root,
+ (u32 *) &cfg->clk_ctrl);
+ debugfs_create_x32("creq_vblank",
+ 0600,
+ debugfs_root,
+ (u32 *) &sblk->creq_vblank);
+ debugfs_create_x32("danger_vblank",
+ 0600,
+ debugfs_root,
+ (u32 *) &sblk->danger_vblank);
+
+ return 0;
+}
+#endif
+
+
static const struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
void __iomem *addr,
struct dpu_mdss_cfg *catalog,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index fdfd4b46e2c6..92b071b78fdb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -25,11 +25,17 @@ struct dpu_hw_pipe;
/**
* Define all scaler feature bits in catalog
*/
-#define DPU_SSPP_SCALER ((1UL << DPU_SSPP_SCALER_RGB) | \
- (1UL << DPU_SSPP_SCALER_QSEED2) | \
- (1UL << DPU_SSPP_SCALER_QSEED3) | \
- (1UL << DPU_SSPP_SCALER_QSEED3LITE) | \
- (1UL << DPU_SSPP_SCALER_QSEED4))
+#define DPU_SSPP_SCALER (BIT(DPU_SSPP_SCALER_RGB) | \
+ BIT(DPU_SSPP_SCALER_QSEED2) | \
+ BIT(DPU_SSPP_SCALER_QSEED3) | \
+ BIT(DPU_SSPP_SCALER_QSEED3LITE) | \
+ BIT(DPU_SSPP_SCALER_QSEED4))
+
+/*
+ * Define all CSC feature bits in catalog
+ */
+#define DPU_SSPP_CSC_ANY (BIT(DPU_SSPP_CSC) | \
+ BIT(DPU_SSPP_CSC_10BIT))
/**
* Component indices
@@ -166,18 +172,12 @@ struct dpu_hw_pipe_cfg {
/**
* struct dpu_hw_pipe_qos_cfg : Source pipe QoS configuration
- * @danger_lut: LUT for generate danger level based on fill level
- * @safe_lut: LUT for generate safe level based on fill level
- * @creq_lut: LUT for generate creq level based on fill level
* @creq_vblank: creq value generated to vbif during vertical blanking
* @danger_vblank: danger value generated during vertical blanking
* @vblank_en: enable creq_vblank and danger_vblank during vblank
* @danger_safe_en: enable danger safe generation
*/
struct dpu_hw_pipe_qos_cfg {
- u32 danger_lut;
- u32 safe_lut;
- u64 creq_lut;
u32 creq_vblank;
u32 danger_vblank;
bool vblank_en;
@@ -268,7 +268,7 @@ struct dpu_hw_sspp_ops {
* @ctx: Pointer to pipe context
* @data: Pointer to config structure
*/
- void (*setup_csc)(struct dpu_hw_pipe *ctx, struct dpu_csc_cfg *data);
+ void (*setup_csc)(struct dpu_hw_pipe *ctx, const struct dpu_csc_cfg *data);
/**
* setup_solidfill - enable/disable colorfill
@@ -302,20 +302,22 @@ struct dpu_hw_sspp_ops {
/**
* setup_danger_safe_lut - setup danger safe LUTs
* @ctx: Pointer to pipe context
- * @cfg: Pointer to pipe QoS configuration
+ * @danger_lut: LUT for generating the danger level based on fill level
+ * @safe_lut: LUT for generating the safe level based on fill level
*
*/
void (*setup_danger_safe_lut)(struct dpu_hw_pipe *ctx,
- struct dpu_hw_pipe_qos_cfg *cfg);
+ u32 danger_lut,
+ u32 safe_lut);
/**
* setup_creq_lut - setup CREQ LUT
* @ctx: Pointer to pipe context
- * @cfg: Pointer to pipe QoS configuration
+ * @creq_lut: LUT for generating the creq level based on fill level
*
*/
void (*setup_creq_lut)(struct dpu_hw_pipe *ctx,
- struct dpu_hw_pipe_qos_cfg *cfg);
+ u64 creq_lut);
/**
* setup_qos_ctrl - setup QoS control
@@ -338,12 +340,10 @@ struct dpu_hw_sspp_ops {
* setup_scaler - setup scaler
* @ctx: Pointer to pipe context
* @pipe_cfg: Pointer to pipe configuration
- * @pe_cfg: Pointer to pixel extension configuration
* @scaler_cfg: Pointer to scaler configuration
*/
void (*setup_scaler)(struct dpu_hw_pipe *ctx,
struct dpu_hw_pipe_cfg *pipe_cfg,
- struct dpu_hw_pixel_ext *pe_cfg,
void *scaler_cfg);
/**
@@ -356,9 +356,11 @@ struct dpu_hw_sspp_ops {
* setup_cdp - setup client driven prefetch
* @ctx: Pointer to pipe context
* @cfg: Pointer to cdp configuration
+ * @index: rectangle index in multirect
*/
void (*setup_cdp)(struct dpu_hw_pipe *ctx,
- struct dpu_hw_pipe_cdp_cfg *cfg);
+ struct dpu_hw_pipe_cdp_cfg *cfg,
+ enum dpu_sspp_multirect_index index);
};
/**
@@ -385,6 +387,7 @@ struct dpu_hw_pipe {
struct dpu_hw_sspp_ops ops;
};
+struct dpu_kms;
/**
* dpu_hw_sspp_init - initializes the sspp hw driver object.
* Should be called once before accessing every pipe.
@@ -404,5 +407,8 @@ struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
*/
void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx);
+void dpu_debugfs_sspp_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
+int _dpu_hw_sspp_init_debugfs(struct dpu_hw_pipe *hw_pipe, struct dpu_kms *kms, struct dentry *entry);
+
#endif /*_DPU_HW_SSPP_H */
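
The two QoS LUT ops now take the LUT words directly instead of a dpu_hw_pipe_qos_cfg pointer. A minimal caller-side sketch, mirroring how the dpu_plane.c hunks below use them (the helper name and the guard checks are illustrative, not from the patch):

	static void example_setup_qos_luts(struct dpu_hw_pipe *pipe_hw,
					   u32 danger_lut, u32 safe_lut,
					   u64 creq_lut)
	{
		/* danger/safe levels are programmed as a pair of 32-bit LUTs */
		if (pipe_hw->ops.setup_danger_safe_lut)
			pipe_hw->ops.setup_danger_safe_lut(pipe_hw, danger_lut, safe_lut);

		/* the creq LUT is a single 64-bit word, split across two
		 * registers on DPU_SSPP_QOS_8LVL hardware */
		if (pipe_hw->ops.setup_creq_lut)
			pipe_hw->ops.setup_creq_lut(pipe_hw, creq_lut);
	}
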
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
index f94584c982cd..aad85116b0a0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
@@ -374,7 +374,7 @@ u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
u32 csc_reg_off,
- struct dpu_csc_cfg *data, bool csc10)
+ const struct dpu_csc_cfg *data, bool csc10)
{
static const u32 matrix_shift = 7;
u32 clamp_shift = csc10 ? 16 : 8;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
index 6d4911957e33..39134754579e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -322,6 +322,6 @@ u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
u32 csc_reg_off,
- struct dpu_csc_cfg *data, bool csc10);
+ const struct dpu_csc_cfg *data, bool csc10);
#endif /* _DPU_HW_UTIL_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index a15b26428280..47fe11a84a77 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -21,14 +21,14 @@
#include "msm_gem.h"
#include "disp/msm_disp_snapshot.h"
-#include "dpu_kms.h"
#include "dpu_core_irq.h"
+#include "dpu_crtc.h"
+#include "dpu_encoder.h"
#include "dpu_formats.h"
#include "dpu_hw_vbif.h"
-#include "dpu_vbif.h"
-#include "dpu_encoder.h"
+#include "dpu_kms.h"
#include "dpu_plane.h"
-#include "dpu_crtc.h"
+#include "dpu_vbif.h"
#define CREATE_TRACE_POINTS
#include "dpu_trace.h"
@@ -73,8 +73,8 @@ static int _dpu_danger_signal_status(struct seq_file *s,
&status);
} else {
seq_puts(s, "\nSafe signal status:\n");
- if (kms->hw_mdp->ops.get_danger_status)
- kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
+ if (kms->hw_mdp->ops.get_safe_status)
+ kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
&status);
}
pm_runtime_put_sync(&kms->pdev->dev);
@@ -82,7 +82,7 @@ static int _dpu_danger_signal_status(struct seq_file *s,
seq_printf(s, "MDP : 0x%x\n", status.mdp);
for (i = SSPP_VIG0; i < SSPP_MAX; i++)
- seq_printf(s, "SSPP%d : 0x%x \t", i - SSPP_VIG0,
+ seq_printf(s, "SSPP%d : 0x%x \n", i - SSPP_VIG0,
status.sspp[i]);
seq_puts(s, "\n");
@@ -101,6 +101,73 @@ static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
}
DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_safe_stats);
+static ssize_t _dpu_plane_danger_read(struct file *file,
+ char __user *buff, size_t count, loff_t *ppos)
+{
+ struct dpu_kms *kms = file->private_data;
+ int len;
+ char buf[40];
+
+ len = scnprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
+
+ return simple_read_from_buffer(buff, count, ppos, buf, len);
+}
+
+static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
+{
+ struct drm_plane *plane;
+
+ drm_for_each_plane(plane, kms->dev) {
+ if (plane->fb && plane->state) {
+ dpu_plane_danger_signal_ctrl(plane, enable);
+ DPU_DEBUG("plane:%d img:%dx%d ",
+ plane->base.id, plane->fb->width,
+ plane->fb->height);
+ DPU_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ plane->state->crtc_x, plane->state->crtc_y,
+ plane->state->crtc_w, plane->state->crtc_h);
+ } else {
+ DPU_DEBUG("Inactive plane:%d\n", plane->base.id);
+ }
+ }
+}
+
+static ssize_t _dpu_plane_danger_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct dpu_kms *kms = file->private_data;
+ int disable_panic;
+ int ret;
+
+ ret = kstrtouint_from_user(user_buf, count, 0, &disable_panic);
+ if (ret)
+ return ret;
+
+ if (disable_panic) {
+ /* Disable panic signal for all active pipes */
+ DPU_DEBUG("Disabling danger:\n");
+ _dpu_plane_set_danger_state(kms, false);
+ kms->has_danger_ctrl = false;
+ } else {
+ /* Enable panic signal for all active pipes */
+ DPU_DEBUG("Enabling danger:\n");
+ kms->has_danger_ctrl = true;
+ _dpu_plane_set_danger_state(kms, true);
+ }
+
+ return count;
+}
+
+static const struct file_operations dpu_plane_danger_enable = {
+ .open = simple_open,
+ .read = _dpu_plane_danger_read,
+ .write = _dpu_plane_danger_write,
+};
+
static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
struct dentry *parent)
{
@@ -110,8 +177,20 @@ static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
dpu_kms, &dpu_debugfs_danger_stats_fops);
debugfs_create_file("safe_status", 0600, entry,
dpu_kms, &dpu_debugfs_safe_stats_fops);
+ debugfs_create_file("disable_danger", 0600, entry,
+ dpu_kms, &dpu_plane_danger_enable);
+
}
+/*
+ * Companion structure for dpu_debugfs_create_regset32.
+ */
+struct dpu_debugfs_regset32 {
+ uint32_t offset;
+ uint32_t blk_len;
+ struct dpu_kms *dpu_kms;
+};
+
static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
{
struct dpu_debugfs_regset32 *regset = s->private;
@@ -159,24 +238,23 @@ static const struct file_operations dpu_fops_regset32 = {
.release = single_release,
};
-void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
+void dpu_debugfs_create_regset32(const char *name, umode_t mode,
+ void *parent,
uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
{
- if (regset) {
- regset->offset = offset;
- regset->blk_len = length;
- regset->dpu_kms = dpu_kms;
- }
-}
+ struct dpu_debugfs_regset32 *regset;
-void dpu_debugfs_create_regset32(const char *name, umode_t mode,
- void *parent, struct dpu_debugfs_regset32 *regset)
-{
- if (!name || !regset || !regset->dpu_kms || !regset->blk_len)
+ if (WARN_ON(!name || !dpu_kms || !length))
+ return;
+
+ regset = devm_kzalloc(&dpu_kms->pdev->dev, sizeof(*regset), GFP_KERNEL);
+ if (!regset)
return;
/* make sure offset is a multiple of 4 */
- regset->offset = round_down(regset->offset, 4);
+ regset->offset = round_down(offset, 4);
+ regset->blk_len = length;
+ regset->dpu_kms = dpu_kms;
debugfs_create_file(name, mode, parent, regset, &dpu_fops_regset32);
}
@@ -203,6 +281,7 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
dpu_debugfs_danger_init(dpu_kms, entry);
dpu_debugfs_vbif_init(dpu_kms, entry);
dpu_debugfs_core_irq_init(dpu_kms, entry);
+ dpu_debugfs_sspp_init(dpu_kms, entry);
for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
if (priv->dp[i])
@@ -384,28 +463,6 @@ static void dpu_kms_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
}
}
-/*
- * Override the encoder enable since we need to setup the inline rotator and do
- * some crtc magic before enabling any bridge that might be present.
- */
-void dpu_kms_encoder_enable(struct drm_encoder *encoder)
-{
- const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;
- struct drm_device *dev = encoder->dev;
- struct drm_crtc *crtc;
-
- /* Forward this enable call to the commit hook */
- if (funcs && funcs->commit)
- funcs->commit(encoder);
-
- drm_for_each_crtc(crtc, dev) {
- if (!(crtc->state->encoder_mask & drm_encoder_mask(encoder)))
- continue;
-
- trace_dpu_kms_enc_enable(DRMID(crtc));
- }
-}
-
static void dpu_kms_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
@@ -863,6 +920,11 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
msm_disp_snapshot_add_block(disp_state, cat->sspp[i].len,
dpu_kms->mmio + cat->sspp[i].base, "sspp_%d", i);
+ /* dump LM sub-blocks HW regs info */
+ for (i = 0; i < cat->mixer_count; i++)
+ msm_disp_snapshot_add_block(disp_state, cat->mixer[i].len,
+ dpu_kms->mmio + cat->mixer[i].base, "lm_%d", i);
+
msm_disp_snapshot_add_block(disp_state, top->hw.length,
dpu_kms->mmio + top->hw.blk_off, "top");
@@ -1153,9 +1215,9 @@ struct msm_kms *dpu_kms_init(struct drm_device *dev)
static int dpu_bind(struct device *dev, struct device *master, void *data)
{
- struct drm_device *ddev = dev_get_drvdata(master);
+ struct msm_drm_private *priv = dev_get_drvdata(master);
struct platform_device *pdev = to_platform_device(dev);
- struct msm_drm_private *priv = ddev->dev_private;
+ struct drm_device *ddev = priv->dev;
struct dpu_kms *dpu_kms;
struct dss_module_power *mp;
int ret = 0;
@@ -1285,7 +1347,7 @@ static const struct dev_pm_ops dpu_pm_ops = {
pm_runtime_force_resume)
};
-static const struct of_device_id dpu_dt_match[] = {
+const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,sdm845-dpu", },
{ .compatible = "qcom,sc7180-dpu", },
{ .compatible = "qcom,sc7280-dpu", },
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 775bcbda860f..2d385b4b7f5e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -160,34 +160,10 @@ struct dpu_global_state
*
* Documentation/filesystems/debugfs.rst
*
- * @dpu_debugfs_setup_regset32: Initialize data for dpu_debugfs_create_regset32
* @dpu_debugfs_create_regset32: Create 32-bit register dump file
- * @dpu_debugfs_get_root: Get root dentry for DPU_KMS's debugfs node
*/
/**
- * Companion structure for dpu_debugfs_create_regset32. Do not initialize the
- * members of this structure explicitly; use dpu_debugfs_setup_regset32 instead.
- */
-struct dpu_debugfs_regset32 {
- uint32_t offset;
- uint32_t blk_len;
- struct dpu_kms *dpu_kms;
-};
-
-/**
- * dpu_debugfs_setup_regset32 - Initialize register block definition for debugfs
- * This function is meant to initialize dpu_debugfs_regset32 structures for use
- * with dpu_debugfs_create_regset32.
- * @regset: opaque register definition structure
- * @offset: sub-block offset
- * @length: sub-block length, in bytes
- * @dpu_kms: pointer to dpu kms structure
- */
-void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
- uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms);
-
-/**
* dpu_debugfs_create_regset32 - Create register read back file for debugfs
*
* This function is almost identical to the standard debugfs_create_regset32()
@@ -195,20 +171,16 @@ void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
* names/offsets do not need to be provided. The 'read' function simply outputs
* sequential register values over a specified range.
*
- * Similar to the related debugfs_create_regset32 API, the structure pointed to
- * by regset needs to persist for the lifetime of the created file. The calling
- * code is responsible for initialization/management of this structure.
- *
- * The structure pointed to by regset is meant to be opaque. Please use
- * dpu_debugfs_setup_regset32 to initialize it.
- *
* @name: File name within debugfs
* @mode: File mode within debugfs
* @parent: Parent directory entry within debugfs, can be NULL
- * @regset: Pointer to persistent register block definition
+ * @offset: sub-block offset
+ * @length: sub-block length, in bytes
+ * @dpu_kms: pointer to dpu kms structure
*/
void dpu_debugfs_create_regset32(const char *name, umode_t mode,
- void *parent, struct dpu_debugfs_regset32 *regset);
+ void *parent,
+ uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms);
/**
* dpu_debugfs_get_root - Return root directory entry for KMS's debugfs
@@ -235,8 +207,6 @@ void *dpu_debugfs_get_root(struct dpu_kms *dpu_kms);
int dpu_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
void dpu_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
-void dpu_kms_encoder_enable(struct drm_encoder *encoder);
-
/**
* dpu_kms_get_clk_rate() - get the clock rate
* @dpu_kms: pointer to dpu_kms structure
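
With the setup/create split removed, dpu_debugfs_create_regset32() allocates its dpu_debugfs_regset32 companion internally (devm-managed against the DPU platform device), so a caller only supplies the register block's offset and length. A sketch of a call site, lifted from the dpu_hw_sspp.c hunk earlier in this patch:

	/* dump the SSPP source block registers under <debugfs>/.../src_blk */
	dpu_debugfs_create_regset32("src_blk", 0400, debugfs_root,
				    sblk->src_blk.base + cfg->base,
				    sblk->src_blk.len, kms);
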
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
index b466784d9822..131c1f1a869c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -111,7 +111,7 @@ static int _dpu_mdss_irq_domain_add(struct dpu_mdss *dpu_mdss)
struct device *dev;
struct irq_domain *domain;
- dev = dpu_mdss->base.dev->dev;
+ dev = dpu_mdss->base.dev;
domain = irq_domain_add_linear(dev->of_node, 32,
&dpu_mdss_irqdomain_ops, dpu_mdss);
@@ -184,16 +184,15 @@ static int dpu_mdss_disable(struct msm_mdss *mdss)
return ret;
}
-static void dpu_mdss_destroy(struct drm_device *dev)
+static void dpu_mdss_destroy(struct msm_mdss *mdss)
{
- struct platform_device *pdev = to_platform_device(dev->dev);
- struct msm_drm_private *priv = dev->dev_private;
- struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
+ struct platform_device *pdev = to_platform_device(mdss->dev);
+ struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
struct dss_module_power *mp = &dpu_mdss->mp;
int irq;
- pm_runtime_suspend(dev->dev);
- pm_runtime_disable(dev->dev);
+ pm_runtime_suspend(mdss->dev);
+ pm_runtime_disable(mdss->dev);
_dpu_mdss_irq_domain_fini(dpu_mdss);
irq = platform_get_irq(pdev, 0);
irq_set_chained_handler_and_data(irq, NULL, NULL);
@@ -203,7 +202,6 @@ static void dpu_mdss_destroy(struct drm_device *dev)
if (dpu_mdss->mmio)
devm_iounmap(&pdev->dev, dpu_mdss->mmio);
dpu_mdss->mmio = NULL;
- priv->mdss = NULL;
}
static const struct msm_mdss_funcs mdss_funcs = {
@@ -212,16 +210,15 @@ static const struct msm_mdss_funcs mdss_funcs = {
.destroy = dpu_mdss_destroy,
};
-int dpu_mdss_init(struct drm_device *dev)
+int dpu_mdss_init(struct platform_device *pdev)
{
- struct platform_device *pdev = to_platform_device(dev->dev);
- struct msm_drm_private *priv = dev->dev_private;
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
struct dpu_mdss *dpu_mdss;
struct dss_module_power *mp;
int ret;
int irq;
- dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
+ dpu_mdss = devm_kzalloc(&pdev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
if (!dpu_mdss)
return -ENOMEM;
@@ -238,7 +235,7 @@ int dpu_mdss_init(struct drm_device *dev)
goto clk_parse_err;
}
- dpu_mdss->base.dev = dev;
+ dpu_mdss->base.dev = &pdev->dev;
dpu_mdss->base.funcs = &mdss_funcs;
ret = _dpu_mdss_irq_domain_add(dpu_mdss);
@@ -256,7 +253,7 @@ int dpu_mdss_init(struct drm_device *dev)
priv->mdss = &dpu_mdss->base;
- pm_runtime_enable(dev->dev);
+ pm_runtime_enable(&pdev->dev);
return 0;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index a3e3b9d1b82e..ca75089c9d61 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -13,7 +13,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
-#include <drm/drm_file.h>
#include <drm/drm_gem_atomic_helper.h>
#include "msm_drv.h"
@@ -90,7 +89,6 @@ enum dpu_plane_qos {
/*
* struct dpu_plane - local dpu plane structure
* @aspace: address space pointer
- * @csc_ptr: Points to dpu_csc_cfg structure to use for current
* @mplane_list: List of multirect planes of the same pipe
* @catalog: Points to dpu catalog structure
* @revalidate: force revalidation of all the plane properties
@@ -101,29 +99,14 @@ struct dpu_plane {
struct mutex lock;
enum dpu_sspp pipe;
- uint32_t features; /* capabilities from catalog */
struct dpu_hw_pipe *pipe_hw;
- struct dpu_hw_pipe_cfg pipe_cfg;
- struct dpu_hw_pipe_qos_cfg pipe_qos_cfg;
uint32_t color_fill;
bool is_error;
bool is_rt_pipe;
bool is_virtual;
struct list_head mplane_list;
struct dpu_mdss_cfg *catalog;
-
- struct dpu_csc_cfg *csc_ptr;
-
- const struct dpu_sspp_sub_blks *pipe_sblk;
- char pipe_name[DPU_NAME_SIZE];
-
- /* debugfs related stuff */
- struct dentry *debugfs_root;
- struct dpu_debugfs_regset32 debugfs_src;
- struct dpu_debugfs_regset32 debugfs_scaler;
- struct dpu_debugfs_regset32 debugfs_csc;
- bool debugfs_default_scale;
};
static const uint64_t supported_format_modifiers[] = {
@@ -145,14 +128,15 @@ static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
* _dpu_plane_calc_bw - calculate bandwidth required for a plane
* @plane: Pointer to drm plane.
* @fb: Pointer to framebuffer associated with the given plane
+ * @pipe_cfg: Pointer to pipe configuration
* Result: Updates calculated bandwidth in the plane state.
* BW Equation: src_w * src_h * bpp * fps * (v_total / v_dest)
* Prefill BW Equation: line src bytes * line_time
*/
static void _dpu_plane_calc_bw(struct drm_plane *plane,
- struct drm_framebuffer *fb)
+ struct drm_framebuffer *fb,
+ struct dpu_hw_pipe_cfg *pipe_cfg)
{
- struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_plane_state *pstate;
struct drm_display_mode *mode;
const struct dpu_format *fmt = NULL;
@@ -169,9 +153,9 @@ static void _dpu_plane_calc_bw(struct drm_plane *plane,
fmt = dpu_get_dpu_format_ext(fb->format->format, fb->modifier);
- src_width = drm_rect_width(&pdpu->pipe_cfg.src_rect);
- src_height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
- dst_height = drm_rect_height(&pdpu->pipe_cfg.dst_rect);
+ src_width = drm_rect_width(&pipe_cfg->src_rect);
+ src_height = drm_rect_height(&pipe_cfg->src_rect);
+ dst_height = drm_rect_height(&pipe_cfg->dst_rect);
fps = drm_mode_vrefresh(mode);
vbp = mode->vtotal - mode->vsync_end;
vpw = mode->vsync_end - mode->vsync_start;
@@ -202,12 +186,12 @@ static void _dpu_plane_calc_bw(struct drm_plane *plane,
/**
* _dpu_plane_calc_clk - calculate clock required for a plane
* @plane: Pointer to drm plane.
+ * @pipe_cfg: Pointer to pipe configuration
* Result: Updates calculated clock in the plane state.
* Clock equation: dst_w * v_total * fps * (src_h / dst_h)
*/
-static void _dpu_plane_calc_clk(struct drm_plane *plane)
+static void _dpu_plane_calc_clk(struct drm_plane *plane, struct dpu_hw_pipe_cfg *pipe_cfg)
{
- struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_plane_state *pstate;
struct drm_display_mode *mode;
int dst_width, src_height, dst_height, fps;
@@ -215,9 +199,9 @@ static void _dpu_plane_calc_clk(struct drm_plane *plane)
pstate = to_dpu_plane_state(plane->state);
mode = &plane->state->crtc->mode;
- src_height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
- dst_width = drm_rect_width(&pdpu->pipe_cfg.dst_rect);
- dst_height = drm_rect_height(&pdpu->pipe_cfg.dst_rect);
+ src_height = drm_rect_height(&pipe_cfg->src_rect);
+ dst_width = drm_rect_width(&pipe_cfg->dst_rect);
+ dst_height = drm_rect_height(&pipe_cfg->dst_rect);
fps = drm_mode_vrefresh(mode);
pstate->plane_clk =
@@ -254,14 +238,17 @@ static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
fixed_buff_size = pdpu->catalog->caps->pixel_ram_size;
list_for_each_entry(tmp, &pdpu->mplane_list, mplane_list) {
+ u32 tmp_width;
+
if (!tmp->base.state->visible)
continue;
+ tmp_width = drm_rect_width(&tmp->base.state->src) >> 16;
DPU_DEBUG("plane%d/%d src_width:%d/%d\n",
pdpu->base.base.id, tmp->base.base.id,
src_width,
- drm_rect_width(&tmp->pipe_cfg.src_rect));
+ tmp_width);
src_width = max_t(u32, src_width,
- drm_rect_width(&tmp->pipe_cfg.src_rect));
+ tmp_width);
}
if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
@@ -321,9 +308,10 @@ static u64 _dpu_plane_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
* _dpu_plane_set_qos_lut - set QoS LUT of the given plane
* @plane: Pointer to drm plane
* @fb: Pointer to framebuffer associated with the given plane
+ * @pipe_cfg: Pointer to pipe configuration
*/
static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
- struct drm_framebuffer *fb)
+ struct drm_framebuffer *fb, struct dpu_hw_pipe_cfg *pipe_cfg)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
const struct dpu_format *fmt = NULL;
@@ -337,7 +325,7 @@ static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
fb->format->format,
fb->modifier);
total_fl = _dpu_plane_calc_fill_level(plane, fmt,
- drm_rect_width(&pdpu->pipe_cfg.src_rect));
+ drm_rect_width(&pipe_cfg->src_rect));
if (fmt && DPU_FORMAT_IS_LINEAR(fmt))
lut_usage = DPU_QOS_LUT_USAGE_LINEAR;
@@ -348,8 +336,6 @@ static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
qos_lut = _dpu_plane_get_qos_lut(
&pdpu->catalog->perf.qos_lut_tbl[lut_usage], total_fl);
- pdpu->pipe_qos_cfg.creq_lut = qos_lut;
-
trace_dpu_perf_set_qos_luts(pdpu->pipe - SSPP_VIG0,
(fmt) ? fmt->base.pixel_format : 0,
pdpu->is_rt_pipe, total_fl, qos_lut, lut_usage);
@@ -359,7 +345,7 @@ static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
fmt ? (char *)&fmt->base.pixel_format : NULL,
pdpu->is_rt_pipe, total_fl, qos_lut);
- pdpu->pipe_hw->ops.setup_creq_lut(pdpu->pipe_hw, &pdpu->pipe_qos_cfg);
+ pdpu->pipe_hw->ops.setup_creq_lut(pdpu->pipe_hw, qos_lut);
}
/**
@@ -397,24 +383,21 @@ static void _dpu_plane_set_danger_lut(struct drm_plane *plane,
}
}
- pdpu->pipe_qos_cfg.danger_lut = danger_lut;
- pdpu->pipe_qos_cfg.safe_lut = safe_lut;
-
trace_dpu_perf_set_danger_luts(pdpu->pipe - SSPP_VIG0,
(fmt) ? fmt->base.pixel_format : 0,
(fmt) ? fmt->fetch_mode : 0,
- pdpu->pipe_qos_cfg.danger_lut,
- pdpu->pipe_qos_cfg.safe_lut);
+ danger_lut,
+ safe_lut);
DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %4.4s mode:%d luts[0x%x, 0x%x]\n",
pdpu->pipe - SSPP_VIG0,
fmt ? (char *)&fmt->base.pixel_format : NULL,
fmt ? fmt->fetch_mode : -1,
- pdpu->pipe_qos_cfg.danger_lut,
- pdpu->pipe_qos_cfg.safe_lut);
+ danger_lut,
+ safe_lut);
pdpu->pipe_hw->ops.setup_danger_safe_lut(pdpu->pipe_hw,
- &pdpu->pipe_qos_cfg);
+ danger_lut, safe_lut);
}
/**
@@ -427,47 +410,51 @@ static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
bool enable, u32 flags)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_hw_pipe_qos_cfg pipe_qos_cfg;
+
+ memset(&pipe_qos_cfg, 0, sizeof(pipe_qos_cfg));
if (flags & DPU_PLANE_QOS_VBLANK_CTRL) {
- pdpu->pipe_qos_cfg.creq_vblank = pdpu->pipe_sblk->creq_vblank;
- pdpu->pipe_qos_cfg.danger_vblank =
- pdpu->pipe_sblk->danger_vblank;
- pdpu->pipe_qos_cfg.vblank_en = enable;
+ pipe_qos_cfg.creq_vblank = pdpu->pipe_hw->cap->sblk->creq_vblank;
+ pipe_qos_cfg.danger_vblank =
+ pdpu->pipe_hw->cap->sblk->danger_vblank;
+ pipe_qos_cfg.vblank_en = enable;
}
if (flags & DPU_PLANE_QOS_VBLANK_AMORTIZE) {
/* this feature overrules previous VBLANK_CTRL */
- pdpu->pipe_qos_cfg.vblank_en = false;
- pdpu->pipe_qos_cfg.creq_vblank = 0; /* clear vblank bits */
+ pipe_qos_cfg.vblank_en = false;
+ pipe_qos_cfg.creq_vblank = 0; /* clear vblank bits */
}
if (flags & DPU_PLANE_QOS_PANIC_CTRL)
- pdpu->pipe_qos_cfg.danger_safe_en = enable;
+ pipe_qos_cfg.danger_safe_en = enable;
if (!pdpu->is_rt_pipe) {
- pdpu->pipe_qos_cfg.vblank_en = false;
- pdpu->pipe_qos_cfg.danger_safe_en = false;
+ pipe_qos_cfg.vblank_en = false;
+ pipe_qos_cfg.danger_safe_en = false;
}
DPU_DEBUG_PLANE(pdpu, "pnum:%d ds:%d vb:%d pri[0x%x, 0x%x] is_rt:%d\n",
pdpu->pipe - SSPP_VIG0,
- pdpu->pipe_qos_cfg.danger_safe_en,
- pdpu->pipe_qos_cfg.vblank_en,
- pdpu->pipe_qos_cfg.creq_vblank,
- pdpu->pipe_qos_cfg.danger_vblank,
+ pipe_qos_cfg.danger_safe_en,
+ pipe_qos_cfg.vblank_en,
+ pipe_qos_cfg.creq_vblank,
+ pipe_qos_cfg.danger_vblank,
pdpu->is_rt_pipe);
pdpu->pipe_hw->ops.setup_qos_ctrl(pdpu->pipe_hw,
- &pdpu->pipe_qos_cfg);
+ &pipe_qos_cfg);
}
/**
* _dpu_plane_set_ot_limit - set OT limit for the given plane
* @plane: Pointer to drm plane
* @crtc: Pointer to drm crtc
+ * @pipe_cfg: Pointer to pipe configuration
*/
static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
- struct drm_crtc *crtc)
+ struct drm_crtc *crtc, struct dpu_hw_pipe_cfg *pipe_cfg)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_vbif_set_ot_params ot_params;
@@ -476,8 +463,8 @@ static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
memset(&ot_params, 0, sizeof(ot_params));
ot_params.xin_id = pdpu->pipe_hw->cap->xin_id;
ot_params.num = pdpu->pipe_hw->idx - SSPP_NONE;
- ot_params.width = drm_rect_width(&pdpu->pipe_cfg.src_rect);
- ot_params.height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
+ ot_params.width = drm_rect_width(&pipe_cfg->src_rect);
+ ot_params.height = drm_rect_height(&pipe_cfg->src_rect);
ot_params.is_wfd = !pdpu->is_rt_pipe;
ot_params.frame_rate = drm_mode_vrefresh(&crtc->mode);
ot_params.vbif_idx = VBIF_RT;
@@ -541,14 +528,12 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
struct dpu_plane_state *pstate,
uint32_t src_w, uint32_t src_h, uint32_t dst_w, uint32_t dst_h,
struct dpu_hw_scaler3_cfg *scale_cfg,
+ struct dpu_hw_pixel_ext *pixel_ext,
const struct dpu_format *fmt,
uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v)
{
uint32_t i;
- memset(scale_cfg, 0, sizeof(*scale_cfg));
- memset(&pstate->pixel_ext, 0, sizeof(struct dpu_hw_pixel_ext));
-
scale_cfg->phase_step_x[DPU_SSPP_COMP_0] =
mult_frac((1 << PHASE_STEP_SHIFT), src_w, dst_w);
scale_cfg->phase_step_y[DPU_SSPP_COMP_0] =
@@ -587,9 +572,9 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V;
}
- pstate->pixel_ext.num_ext_pxls_top[i] =
+ pixel_ext->num_ext_pxls_top[i] =
scale_cfg->src_height[i];
- pstate->pixel_ext.num_ext_pxls_left[i] =
+ pixel_ext->num_ext_pxls_left[i] =
scale_cfg->src_width[i];
}
if (!(DPU_FORMAT_IS_YUV(fmt)) && (src_h == dst_h)
@@ -606,68 +591,97 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
scale_cfg->enable = 1;
}
-static void _dpu_plane_setup_csc(struct dpu_plane *pdpu)
-{
- static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
- {
- /* S15.16 format */
- 0x00012A00, 0x00000000, 0x00019880,
- 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
- 0x00012A00, 0x00020480, 0x00000000,
+static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
+ {
+ /* S15.16 format */
+ 0x00012A00, 0x00000000, 0x00019880,
+ 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+ 0x00012A00, 0x00020480, 0x00000000,
+ },
+ /* signed bias */
+ { 0xfff0, 0xff80, 0xff80,},
+ { 0x0, 0x0, 0x0,},
+ /* unsigned clamp */
+ { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
+ { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,},
+};
+
+static const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = {
+ {
+ /* S15.16 format */
+ 0x00012A00, 0x00000000, 0x00019880,
+ 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+ 0x00012A00, 0x00020480, 0x00000000,
},
- /* signed bias */
- { 0xfff0, 0xff80, 0xff80,},
- { 0x0, 0x0, 0x0,},
- /* unsigned clamp */
- { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
- { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,},
- };
- static const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = {
- {
- /* S15.16 format */
- 0x00012A00, 0x00000000, 0x00019880,
- 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
- 0x00012A00, 0x00020480, 0x00000000,
- },
- /* signed bias */
- { 0xffc0, 0xfe00, 0xfe00,},
- { 0x0, 0x0, 0x0,},
- /* unsigned clamp */
- { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
- { 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,},
- };
+ /* signed bias */
+ { 0xffc0, 0xfe00, 0xfe00,},
+ { 0x0, 0x0, 0x0,},
+ /* unsigned clamp */
+ { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
+ { 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,},
+};
+
+static const struct dpu_csc_cfg *_dpu_plane_get_csc(struct dpu_plane *pdpu, const struct dpu_format *fmt)
+{
+ const struct dpu_csc_cfg *csc_ptr;
if (!pdpu) {
DPU_ERROR("invalid plane\n");
- return;
+ return NULL;
}
- if (BIT(DPU_SSPP_CSC_10BIT) & pdpu->features)
- pdpu->csc_ptr = (struct dpu_csc_cfg *)&dpu_csc10_YUV2RGB_601L;
+ if (!DPU_FORMAT_IS_YUV(fmt))
+ return NULL;
+
+ if (BIT(DPU_SSPP_CSC_10BIT) & pdpu->pipe_hw->cap->features)
+ csc_ptr = &dpu_csc10_YUV2RGB_601L;
else
- pdpu->csc_ptr = (struct dpu_csc_cfg *)&dpu_csc_YUV2RGB_601L;
+ csc_ptr = &dpu_csc_YUV2RGB_601L;
DPU_DEBUG_PLANE(pdpu, "using 0x%X 0x%X 0x%X...\n",
- pdpu->csc_ptr->csc_mv[0],
- pdpu->csc_ptr->csc_mv[1],
- pdpu->csc_ptr->csc_mv[2]);
+ csc_ptr->csc_mv[0],
+ csc_ptr->csc_mv[1],
+ csc_ptr->csc_mv[2]);
+
+ return csc_ptr;
}
static void _dpu_plane_setup_scaler(struct dpu_plane *pdpu,
struct dpu_plane_state *pstate,
- const struct dpu_format *fmt, bool color_fill)
+ const struct dpu_format *fmt, bool color_fill,
+ struct dpu_hw_pipe_cfg *pipe_cfg)
{
const struct drm_format_info *info = drm_format_info(fmt->base.pixel_format);
+ struct dpu_hw_scaler3_cfg scaler3_cfg;
+ struct dpu_hw_pixel_ext pixel_ext;
+
+ memset(&scaler3_cfg, 0, sizeof(scaler3_cfg));
+ memset(&pixel_ext, 0, sizeof(pixel_ext));
/* don't chroma subsample if decimating */
/* update scaler. calculate default config for QSEED3 */
_dpu_plane_setup_scaler3(pdpu, pstate,
- drm_rect_width(&pdpu->pipe_cfg.src_rect),
- drm_rect_height(&pdpu->pipe_cfg.src_rect),
- drm_rect_width(&pdpu->pipe_cfg.dst_rect),
- drm_rect_height(&pdpu->pipe_cfg.dst_rect),
- &pstate->scaler3_cfg, fmt,
+ drm_rect_width(&pipe_cfg->src_rect),
+ drm_rect_height(&pipe_cfg->src_rect),
+ drm_rect_width(&pipe_cfg->dst_rect),
+ drm_rect_height(&pipe_cfg->dst_rect),
+ &scaler3_cfg, &pixel_ext, fmt,
info->hsub, info->vsub);
+
+ if (pdpu->pipe_hw->ops.setup_pe)
+ pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw,
+ &pixel_ext);
+
+ /**
+ * When programmed in multirect mode, the scaler block will be
+ * bypassed. Still we need to update alpha and bitwidth
+ * ONLY for RECT0.
+ */
+ if (pdpu->pipe_hw->ops.setup_scaler &&
+ pstate->multirect_index != DPU_SSPP_RECT_1)
+ pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw,
+ pipe_cfg,
+ &scaler3_cfg);
}
/**
@@ -683,6 +697,7 @@ static int _dpu_plane_color_fill(struct dpu_plane *pdpu,
const struct dpu_format *fmt;
const struct drm_plane *plane = &pdpu->base;
struct dpu_plane_state *pstate = to_dpu_plane_state(plane->state);
+ struct dpu_hw_pipe_cfg pipe_cfg;
DPU_DEBUG_PLANE(pdpu, "\n");
@@ -699,13 +714,14 @@ static int _dpu_plane_color_fill(struct dpu_plane *pdpu,
pstate->multirect_index);
/* override scaler/decimation if solid fill */
- pdpu->pipe_cfg.src_rect.x1 = 0;
- pdpu->pipe_cfg.src_rect.y1 = 0;
- pdpu->pipe_cfg.src_rect.x2 =
- drm_rect_width(&pdpu->pipe_cfg.dst_rect);
- pdpu->pipe_cfg.src_rect.y2 =
- drm_rect_height(&pdpu->pipe_cfg.dst_rect);
- _dpu_plane_setup_scaler(pdpu, pstate, fmt, true);
+ pipe_cfg.dst_rect = pstate->base.dst;
+
+ pipe_cfg.src_rect.x1 = 0;
+ pipe_cfg.src_rect.y1 = 0;
+ pipe_cfg.src_rect.x2 =
+ drm_rect_width(&pipe_cfg.dst_rect);
+ pipe_cfg.src_rect.y2 =
+ drm_rect_height(&pipe_cfg.dst_rect);
if (pdpu->pipe_hw->ops.setup_format)
pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw,
@@ -714,18 +730,10 @@ static int _dpu_plane_color_fill(struct dpu_plane *pdpu,
if (pdpu->pipe_hw->ops.setup_rects)
pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw,
- &pdpu->pipe_cfg,
+ &pipe_cfg,
pstate->multirect_index);
- if (pdpu->pipe_hw->ops.setup_pe)
- pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw,
- &pstate->pixel_ext);
-
- if (pdpu->pipe_hw->ops.setup_scaler &&
- pstate->multirect_index != DPU_SSPP_RECT_1)
- pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw,
- &pdpu->pipe_cfg, &pstate->pixel_ext,
- &pstate->scaler3_cfg);
+ _dpu_plane_setup_scaler(pdpu, pstate, fmt, true, &pipe_cfg);
}
return 0;
@@ -964,10 +972,10 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
crtc_state = drm_atomic_get_new_crtc_state(state,
new_plane_state->crtc);
- min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxupscale);
+ min_scale = FRAC_16_16(1, pdpu->pipe_hw->cap->sblk->maxupscale);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
min_scale,
- pdpu->pipe_sblk->maxdwnscale << 16,
+ pdpu->pipe_hw->cap->sblk->maxdwnscale << 16,
true, true);
if (ret) {
DPU_DEBUG_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
@@ -993,9 +1001,8 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
if (DPU_FORMAT_IS_YUV(fmt) &&
- (!(pdpu->features & DPU_SSPP_SCALER) ||
- !(pdpu->features & (BIT(DPU_SSPP_CSC)
- | BIT(DPU_SSPP_CSC_10BIT))))) {
+ (!(pdpu->pipe_hw->cap->features & DPU_SSPP_SCALER) ||
+ !(pdpu->pipe_hw->cap->features & DPU_SSPP_CSC_ANY))) {
DPU_DEBUG_PLANE(pdpu,
"plane doesn't have scaler/csc for yuv\n");
return -EINVAL;
@@ -1056,8 +1063,13 @@ void dpu_plane_flush(struct drm_plane *plane)
else if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG)
/* force 100% alpha */
_dpu_plane_color_fill(pdpu, pdpu->color_fill, 0xFF);
- else if (pdpu->pipe_hw && pdpu->csc_ptr && pdpu->pipe_hw->ops.setup_csc)
- pdpu->pipe_hw->ops.setup_csc(pdpu->pipe_hw, pdpu->csc_ptr);
+ else if (pdpu->pipe_hw && pdpu->pipe_hw->ops.setup_csc) {
+ const struct dpu_format *fmt = to_dpu_format(msm_framebuffer_format(plane->state->fb));
+ const struct dpu_csc_cfg *csc_ptr = _dpu_plane_get_csc(pdpu, fmt);
+
+ if (csc_ptr)
+ pdpu->pipe_hw->ops.setup_csc(pdpu->pipe_hw, csc_ptr);
+ }
/* flag h/w flush complete */
if (plane->state)
@@ -1091,10 +1103,11 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
bool is_rt_pipe, update_qos_remap;
const struct dpu_format *fmt =
to_dpu_format(msm_framebuffer_format(fb));
+ struct dpu_hw_pipe_cfg pipe_cfg;
- memset(&(pdpu->pipe_cfg), 0, sizeof(struct dpu_hw_pipe_cfg));
+ memset(&pipe_cfg, 0, sizeof(struct dpu_hw_pipe_cfg));
- _dpu_plane_set_scanout(plane, pstate, &pdpu->pipe_cfg, fb);
+ _dpu_plane_set_scanout(plane, pstate, &pipe_cfg, fb);
pstate->pending = true;
@@ -1106,17 +1119,15 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
crtc->base.id, DRM_RECT_ARG(&state->dst),
(char *)&fmt->base.pixel_format, DPU_FORMAT_IS_UBWC(fmt));
- pdpu->pipe_cfg.src_rect = state->src;
+ pipe_cfg.src_rect = state->src;
/* state->src is 16.16, src_rect is not */
- pdpu->pipe_cfg.src_rect.x1 >>= 16;
- pdpu->pipe_cfg.src_rect.x2 >>= 16;
- pdpu->pipe_cfg.src_rect.y1 >>= 16;
- pdpu->pipe_cfg.src_rect.y2 >>= 16;
-
- pdpu->pipe_cfg.dst_rect = state->dst;
+ pipe_cfg.src_rect.x1 >>= 16;
+ pipe_cfg.src_rect.x2 >>= 16;
+ pipe_cfg.src_rect.y1 >>= 16;
+ pipe_cfg.src_rect.y2 >>= 16;
- _dpu_plane_setup_scaler(pdpu, pstate, fmt, false);
+ pipe_cfg.dst_rect = state->dst;
/* override for color fill */
if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG) {
@@ -1126,25 +1137,11 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
if (pdpu->pipe_hw->ops.setup_rects) {
pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw,
- &pdpu->pipe_cfg,
+ &pipe_cfg,
pstate->multirect_index);
}
- if (pdpu->pipe_hw->ops.setup_pe &&
- (pstate->multirect_index != DPU_SSPP_RECT_1))
- pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw,
- &pstate->pixel_ext);
-
- /**
- * when programmed in multirect mode, scalar block will be
- * bypassed. Still we need to update alpha and bitwidth
- * ONLY for RECT0
- */
- if (pdpu->pipe_hw->ops.setup_scaler &&
- pstate->multirect_index != DPU_SSPP_RECT_1)
- pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw,
- &pdpu->pipe_cfg, &pstate->pixel_ext,
- &pstate->scaler3_cfg);
+ _dpu_plane_setup_scaler(pdpu, pstate, fmt, false, &pipe_cfg);
if (pdpu->pipe_hw->ops.setup_multirect)
pdpu->pipe_hw->ops.setup_multirect(
@@ -1173,35 +1170,29 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
pstate->multirect_index);
if (pdpu->pipe_hw->ops.setup_cdp) {
- struct dpu_hw_pipe_cdp_cfg *cdp_cfg = &pstate->cdp_cfg;
+ struct dpu_hw_pipe_cdp_cfg cdp_cfg;
- memset(cdp_cfg, 0, sizeof(struct dpu_hw_pipe_cdp_cfg));
+ memset(&cdp_cfg, 0, sizeof(struct dpu_hw_pipe_cdp_cfg));
- cdp_cfg->enable = pdpu->catalog->perf.cdp_cfg
+ cdp_cfg.enable = pdpu->catalog->perf.cdp_cfg
[DPU_PERF_CDP_USAGE_RT].rd_enable;
- cdp_cfg->ubwc_meta_enable =
+ cdp_cfg.ubwc_meta_enable =
DPU_FORMAT_IS_UBWC(fmt);
- cdp_cfg->tile_amortize_enable =
+ cdp_cfg.tile_amortize_enable =
DPU_FORMAT_IS_UBWC(fmt) ||
DPU_FORMAT_IS_TILE(fmt);
- cdp_cfg->preload_ahead = DPU_SSPP_CDP_PRELOAD_AHEAD_64;
+ cdp_cfg.preload_ahead = DPU_SSPP_CDP_PRELOAD_AHEAD_64;
- pdpu->pipe_hw->ops.setup_cdp(pdpu->pipe_hw, cdp_cfg);
+ pdpu->pipe_hw->ops.setup_cdp(pdpu->pipe_hw, &cdp_cfg, pstate->multirect_index);
}
-
- /* update csc */
- if (DPU_FORMAT_IS_YUV(fmt))
- _dpu_plane_setup_csc(pdpu);
- else
- pdpu->csc_ptr = NULL;
}
- _dpu_plane_set_qos_lut(plane, fb);
+ _dpu_plane_set_qos_lut(plane, fb, &pipe_cfg);
_dpu_plane_set_danger_lut(plane, fb);
if (plane->type != DRM_PLANE_TYPE_CURSOR) {
_dpu_plane_set_qos_ctrl(plane, true, DPU_PLANE_QOS_PANIC_CTRL);
- _dpu_plane_set_ot_limit(plane, crtc);
+ _dpu_plane_set_ot_limit(plane, crtc, &pipe_cfg);
}
update_qos_remap = (is_rt_pipe != pdpu->is_rt_pipe) ||
@@ -1215,9 +1206,9 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
_dpu_plane_set_qos_remap(plane);
}
- _dpu_plane_calc_bw(plane, fb);
+ _dpu_plane_calc_bw(plane, fb, &pipe_cfg);
- _dpu_plane_calc_clk(plane);
+ _dpu_plane_calc_clk(plane, &pipe_cfg);
}
static void _dpu_plane_atomic_disable(struct drm_plane *plane)
@@ -1314,6 +1305,46 @@ dpu_plane_duplicate_state(struct drm_plane *plane)
return &pstate->base;
}
+static const char * const multirect_mode_name[] = {
+ [DPU_SSPP_MULTIRECT_NONE] = "none",
+ [DPU_SSPP_MULTIRECT_PARALLEL] = "parallel",
+ [DPU_SSPP_MULTIRECT_TIME_MX] = "time_mx",
+};
+
+static const char * const multirect_index_name[] = {
+ [DPU_SSPP_RECT_SOLO] = "solo",
+ [DPU_SSPP_RECT_0] = "rect_0",
+ [DPU_SSPP_RECT_1] = "rect_1",
+};
+
+static const char *dpu_get_multirect_mode(enum dpu_sspp_multirect_mode mode)
+{
+ if (WARN_ON(mode >= ARRAY_SIZE(multirect_mode_name)))
+ return "unknown";
+
+ return multirect_mode_name[mode];
+}
+
+static const char *dpu_get_multirect_index(enum dpu_sspp_multirect_index index)
+{
+ if (WARN_ON(index >= ARRAY_SIZE(multirect_index_name)))
+ return "unknown";
+
+ return multirect_index_name[index];
+}
+
+static void dpu_plane_atomic_print_state(struct drm_printer *p,
+ const struct drm_plane_state *state)
+{
+ const struct dpu_plane_state *pstate = to_dpu_plane_state(state);
+ const struct dpu_plane *pdpu = to_dpu_plane(state->plane);
+
+ drm_printf(p, "\tstage=%d\n", pstate->stage);
+ drm_printf(p, "\tsspp=%s\n", pdpu->pipe_hw->cap->name);
+ drm_printf(p, "\tmultirect_mode=%s\n", dpu_get_multirect_mode(pstate->multirect_mode));
+ drm_printf(p, "\tmultirect_index=%s\n", dpu_get_multirect_index(pstate->multirect_index));
+}
+
static void dpu_plane_reset(struct drm_plane *plane)
{
struct dpu_plane *pdpu;
@@ -1343,7 +1374,7 @@ static void dpu_plane_reset(struct drm_plane *plane)
}
#ifdef CONFIG_DEBUG_FS
-static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
+void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
@@ -1356,167 +1387,23 @@ static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
pm_runtime_put_sync(&dpu_kms->pdev->dev);
}
-static ssize_t _dpu_plane_danger_read(struct file *file,
- char __user *buff, size_t count, loff_t *ppos)
-{
- struct dpu_kms *kms = file->private_data;
- int len;
- char buf[40];
-
- len = scnprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
-
- return simple_read_from_buffer(buff, count, ppos, buf, len);
-}
-
-static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
+/* SSPPs live inside dpu_plane private data only. Enumerate them here. */
+void dpu_debugfs_sspp_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
{
struct drm_plane *plane;
+ struct dentry *entry = debugfs_create_dir("sspp", debugfs_root);
- drm_for_each_plane(plane, kms->dev) {
- if (plane->fb && plane->state) {
- dpu_plane_danger_signal_ctrl(plane, enable);
- DPU_DEBUG("plane:%d img:%dx%d ",
- plane->base.id, plane->fb->width,
- plane->fb->height);
- DPU_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
- plane->state->src_x >> 16,
- plane->state->src_y >> 16,
- plane->state->src_w >> 16,
- plane->state->src_h >> 16,
- plane->state->crtc_x, plane->state->crtc_y,
- plane->state->crtc_w, plane->state->crtc_h);
- } else {
- DPU_DEBUG("Inactive plane:%d\n", plane->base.id);
- }
- }
-}
-
-static ssize_t _dpu_plane_danger_write(struct file *file,
- const char __user *user_buf, size_t count, loff_t *ppos)
-{
- struct dpu_kms *kms = file->private_data;
- int disable_panic;
- int ret;
-
- ret = kstrtouint_from_user(user_buf, count, 0, &disable_panic);
- if (ret)
- return ret;
-
- if (disable_panic) {
- /* Disable panic signal for all active pipes */
- DPU_DEBUG("Disabling danger:\n");
- _dpu_plane_set_danger_state(kms, false);
- kms->has_danger_ctrl = false;
- } else {
- /* Enable panic signal for all active pipes */
- DPU_DEBUG("Enabling danger:\n");
- kms->has_danger_ctrl = true;
- _dpu_plane_set_danger_state(kms, true);
- }
-
- return count;
-}
+ if (IS_ERR(entry))
+ return;
-static const struct file_operations dpu_plane_danger_enable = {
- .open = simple_open,
- .read = _dpu_plane_danger_read,
- .write = _dpu_plane_danger_write,
-};
+ drm_for_each_plane(plane, dpu_kms->dev) {
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
-static int _dpu_plane_init_debugfs(struct drm_plane *plane)
-{
- struct dpu_plane *pdpu = to_dpu_plane(plane);
- struct dpu_kms *kms = _dpu_plane_get_kms(plane);
- const struct dpu_sspp_cfg *cfg = pdpu->pipe_hw->cap;
- const struct dpu_sspp_sub_blks *sblk = cfg->sblk;
-
- /* create overall sub-directory for the pipe */
- pdpu->debugfs_root =
- debugfs_create_dir(pdpu->pipe_name,
- plane->dev->primary->debugfs_root);
-
- /* don't error check these */
- debugfs_create_x32("features", 0600,
- pdpu->debugfs_root, &pdpu->features);
-
- /* add register dump support */
- dpu_debugfs_setup_regset32(&pdpu->debugfs_src,
- sblk->src_blk.base + cfg->base,
- sblk->src_blk.len,
- kms);
- dpu_debugfs_create_regset32("src_blk", 0400,
- pdpu->debugfs_root, &pdpu->debugfs_src);
-
- if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
- cfg->features & BIT(DPU_SSPP_SCALER_QSEED3LITE) ||
- cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) ||
- cfg->features & BIT(DPU_SSPP_SCALER_QSEED4)) {
- dpu_debugfs_setup_regset32(&pdpu->debugfs_scaler,
- sblk->scaler_blk.base + cfg->base,
- sblk->scaler_blk.len,
- kms);
- dpu_debugfs_create_regset32("scaler_blk", 0400,
- pdpu->debugfs_root,
- &pdpu->debugfs_scaler);
- debugfs_create_bool("default_scaling",
- 0600,
- pdpu->debugfs_root,
- &pdpu->debugfs_default_scale);
- }
-
- if (cfg->features & BIT(DPU_SSPP_CSC) ||
- cfg->features & BIT(DPU_SSPP_CSC_10BIT)) {
- dpu_debugfs_setup_regset32(&pdpu->debugfs_csc,
- sblk->csc_blk.base + cfg->base,
- sblk->csc_blk.len,
- kms);
- dpu_debugfs_create_regset32("csc_blk", 0400,
- pdpu->debugfs_root, &pdpu->debugfs_csc);
+ _dpu_hw_sspp_init_debugfs(pdpu->pipe_hw, dpu_kms, entry);
}
-
- debugfs_create_u32("xin_id",
- 0400,
- pdpu->debugfs_root,
- (u32 *) &cfg->xin_id);
- debugfs_create_u32("clk_ctrl",
- 0400,
- pdpu->debugfs_root,
- (u32 *) &cfg->clk_ctrl);
- debugfs_create_x32("creq_vblank",
- 0600,
- pdpu->debugfs_root,
- (u32 *) &sblk->creq_vblank);
- debugfs_create_x32("danger_vblank",
- 0600,
- pdpu->debugfs_root,
- (u32 *) &sblk->danger_vblank);
-
- debugfs_create_file("disable_danger",
- 0600,
- pdpu->debugfs_root,
- kms, &dpu_plane_danger_enable);
-
- return 0;
-}
-#else
-static int _dpu_plane_init_debugfs(struct drm_plane *plane)
-{
- return 0;
}
#endif
-static int dpu_plane_late_register(struct drm_plane *plane)
-{
- return _dpu_plane_init_debugfs(plane);
-}
-
-static void dpu_plane_early_unregister(struct drm_plane *plane)
-{
- struct dpu_plane *pdpu = to_dpu_plane(plane);
-
- debugfs_remove_recursive(pdpu->debugfs_root);
-}
-
static bool dpu_plane_format_mod_supported(struct drm_plane *plane,
uint32_t format, uint64_t modifier)
{
@@ -1541,8 +1428,7 @@ static const struct drm_plane_funcs dpu_plane_funcs = {
.reset = dpu_plane_reset,
.atomic_duplicate_state = dpu_plane_duplicate_state,
.atomic_destroy_state = dpu_plane_destroy_state,
- .late_register = dpu_plane_late_register,
- .early_unregister = dpu_plane_early_unregister,
+ .atomic_print_state = dpu_plane_atomic_print_state,
.format_mod_supported = dpu_plane_format_mod_supported,
};
@@ -1609,21 +1495,13 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
goto clean_sspp;
}
- /* cache features mask for later */
- pdpu->features = pdpu->pipe_hw->cap->features;
- pdpu->pipe_sblk = pdpu->pipe_hw->cap->sblk;
- if (!pdpu->pipe_sblk) {
- DPU_ERROR("[%u]invalid sblk\n", pipe);
- goto clean_sspp;
- }
-
if (pdpu->is_virtual) {
- format_list = pdpu->pipe_sblk->virt_format_list;
- num_formats = pdpu->pipe_sblk->virt_num_formats;
+ format_list = pdpu->pipe_hw->cap->sblk->virt_format_list;
+ num_formats = pdpu->pipe_hw->cap->sblk->virt_num_formats;
}
else {
- format_list = pdpu->pipe_sblk->format_list;
- num_formats = pdpu->pipe_sblk->num_formats;
+ format_list = pdpu->pipe_hw->cap->sblk->format_list;
+ num_formats = pdpu->pipe_hw->cap->sblk->num_formats;
}
ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
@@ -1663,12 +1541,9 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
/* success! finalize initialization */
drm_plane_helper_add(plane, &dpu_plane_helper_funcs);
- /* save user friendly pipe name for later */
- snprintf(pdpu->pipe_name, DPU_NAME_SIZE, "plane%u", plane->base.id);
-
mutex_init(&pdpu->lock);
- DPU_DEBUG("%s created for pipe:%u id:%u virtual:%u\n", pdpu->pipe_name,
+ DPU_DEBUG("%s created for pipe:%u id:%u virtual:%u\n", plane->name,
pipe, plane->base.id, master_plane_id);
return plane;
@@ -1676,6 +1551,7 @@ clean_sspp:
if (pdpu && pdpu->pipe_hw)
dpu_hw_sspp_destroy(pdpu->pipe_hw);
clean_plane:
+ list_del(&pdpu->mplane_list);
kfree(pdpu);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
index 34e03ac05f4a..9d51dad5c6a5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -23,9 +23,6 @@
* @multirect_index: index of the rectangle of SSPP
* @multirect_mode: parallel or time multiplex multirect mode
* @pending: whether the current update is still pending
- * @scaler3_cfg: configuration data for scaler3
- * @pixel_ext: configuration data for pixel extensions
- * @cdp_cfg: CDP configuration
* @plane_fetch_bw: calculated BW per plane
* @plane_clk: calculated clk per plane
*/
@@ -38,11 +35,6 @@ struct dpu_plane_state {
uint32_t multirect_mode;
bool pending;
- /* scaler configuration */
- struct dpu_hw_scaler3_cfg scaler3_cfg;
- struct dpu_hw_pixel_ext pixel_ext;
-
- struct dpu_hw_pipe_cdp_cfg cdp_cfg;
u64 plane_fetch_bw;
u64 plane_clk;
};
@@ -134,4 +126,10 @@ void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state);
int dpu_plane_color_fill(struct drm_plane *plane,
uint32_t color, uint32_t alpha);
+#ifdef CONFIG_DEBUG_FS
+void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable);
+#else
+static inline void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable) {}
+#endif
+
#endif /* _DPU_PLANE_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
index 37bba57675a8..54d74341e690 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -266,10 +266,6 @@ DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_complete_commit,
TP_PROTO(uint32_t drm_id),
TP_ARGS(drm_id)
);
-DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_enc_enable,
- TP_PROTO(uint32_t drm_id),
- TP_ARGS(drm_id)
-);
DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_commit,
TP_PROTO(uint32_t drm_id),
TP_ARGS(drm_id)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 7b242246d4e7..12a5f81e402b 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -370,22 +370,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
switch (intf->type) {
case INTF_eDP:
- if (!priv->edp)
- break;
-
- ctl = mdp5_ctlm_request(ctlm, intf->num);
- if (!ctl) {
- ret = -EINVAL;
- break;
- }
-
- encoder = construct_encoder(mdp5_kms, intf, ctl);
- if (IS_ERR(encoder)) {
- ret = PTR_ERR(encoder);
- break;
- }
-
- ret = msm_edp_modeset_init(priv->edp, dev, encoder);
+ DRM_DEV_INFO(dev->dev, "Skipping eDP interface %d\n", intf->num);
break;
case INTF_HDMI:
if (!priv->hdmi)
@@ -936,7 +921,8 @@ fail:
static int mdp5_bind(struct device *dev, struct device *master, void *data)
{
- struct drm_device *ddev = dev_get_drvdata(master);
+ struct msm_drm_private *priv = dev_get_drvdata(master);
+ struct drm_device *ddev = priv->dev;
struct platform_device *pdev = to_platform_device(dev);
DBG("");
@@ -1031,7 +1017,7 @@ static const struct dev_pm_ops mdp5_pm_ops = {
SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL)
};
-static const struct of_device_id mdp5_dt_match[] = {
+const struct of_device_id mdp5_dt_match[] = {
{ .compatible = "qcom,mdp5", },
/* to support downstream DT files */
{ .compatible = "qcom,mdss_mdp", },
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
index 0ea53420bc40..b3f79c2277e9 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
@@ -16,8 +16,6 @@ struct mdp5_mdss {
void __iomem *mmio, *vbif;
- struct regulator *vdd;
-
struct clk *ahb_clk;
struct clk *axi_clk;
struct clk *vsync_clk;
@@ -114,7 +112,7 @@ static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
static int mdss_irq_domain_init(struct mdp5_mdss *mdp5_mdss)
{
- struct device *dev = mdp5_mdss->base.dev->dev;
+ struct device *dev = mdp5_mdss->base.dev;
struct irq_domain *d;
d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
@@ -157,7 +155,7 @@ static int mdp5_mdss_disable(struct msm_mdss *mdss)
static int msm_mdss_get_clocks(struct mdp5_mdss *mdp5_mdss)
{
struct platform_device *pdev =
- to_platform_device(mdp5_mdss->base.dev->dev);
+ to_platform_device(mdp5_mdss->base.dev);
mdp5_mdss->ahb_clk = msm_clk_get(pdev, "iface");
if (IS_ERR(mdp5_mdss->ahb_clk))
@@ -174,10 +172,9 @@ static int msm_mdss_get_clocks(struct mdp5_mdss *mdp5_mdss)
return 0;
}
-static void mdp5_mdss_destroy(struct drm_device *dev)
+static void mdp5_mdss_destroy(struct msm_mdss *mdss)
{
- struct msm_drm_private *priv = dev->dev_private;
- struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(priv->mdss);
+ struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
if (!mdp5_mdss)
return;
@@ -185,9 +182,7 @@ static void mdp5_mdss_destroy(struct drm_device *dev)
irq_domain_remove(mdp5_mdss->irqcontroller.domain);
mdp5_mdss->irqcontroller.domain = NULL;
- regulator_disable(mdp5_mdss->vdd);
-
- pm_runtime_disable(dev->dev);
+ pm_runtime_disable(mdss->dev);
}
static const struct msm_mdss_funcs mdss_funcs = {
@@ -196,25 +191,24 @@ static const struct msm_mdss_funcs mdss_funcs = {
.destroy = mdp5_mdss_destroy,
};
-int mdp5_mdss_init(struct drm_device *dev)
+int mdp5_mdss_init(struct platform_device *pdev)
{
- struct platform_device *pdev = to_platform_device(dev->dev);
- struct msm_drm_private *priv = dev->dev_private;
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
struct mdp5_mdss *mdp5_mdss;
int ret;
DBG("");
- if (!of_device_is_compatible(dev->dev->of_node, "qcom,mdss"))
+ if (!of_device_is_compatible(pdev->dev.of_node, "qcom,mdss"))
return 0;
- mdp5_mdss = devm_kzalloc(dev->dev, sizeof(*mdp5_mdss), GFP_KERNEL);
+ mdp5_mdss = devm_kzalloc(&pdev->dev, sizeof(*mdp5_mdss), GFP_KERNEL);
if (!mdp5_mdss) {
ret = -ENOMEM;
goto fail;
}
- mdp5_mdss->base.dev = dev;
+ mdp5_mdss->base.dev = &pdev->dev;
mdp5_mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
if (IS_ERR(mdp5_mdss->mmio)) {
@@ -230,45 +224,29 @@ int mdp5_mdss_init(struct drm_device *dev)
ret = msm_mdss_get_clocks(mdp5_mdss);
if (ret) {
- DRM_DEV_ERROR(dev->dev, "failed to get clocks: %d\n", ret);
- goto fail;
- }
-
- /* Regulator to enable GDSCs in downstream kernels */
- mdp5_mdss->vdd = devm_regulator_get(dev->dev, "vdd");
- if (IS_ERR(mdp5_mdss->vdd)) {
- ret = PTR_ERR(mdp5_mdss->vdd);
- goto fail;
- }
-
- ret = regulator_enable(mdp5_mdss->vdd);
- if (ret) {
- DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n",
- ret);
+ DRM_DEV_ERROR(&pdev->dev, "failed to get clocks: %d\n", ret);
goto fail;
}
- ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
+ ret = devm_request_irq(&pdev->dev, platform_get_irq(pdev, 0),
mdss_irq, 0, "mdss_isr", mdp5_mdss);
if (ret) {
- DRM_DEV_ERROR(dev->dev, "failed to init irq: %d\n", ret);
- goto fail_irq;
+ DRM_DEV_ERROR(&pdev->dev, "failed to init irq: %d\n", ret);
+ goto fail;
}
ret = mdss_irq_domain_init(mdp5_mdss);
if (ret) {
- DRM_DEV_ERROR(dev->dev, "failed to init sub-block irqs: %d\n", ret);
- goto fail_irq;
+ DRM_DEV_ERROR(&pdev->dev, "failed to init sub-block irqs: %d\n", ret);
+ goto fail;
}
mdp5_mdss->base.funcs = &mdss_funcs;
priv->mdss = &mdp5_mdss->base;
- pm_runtime_enable(dev->dev);
+ pm_runtime_enable(&pdev->dev);
return 0;
-fail_irq:
- regulator_disable(mdp5_mdss->vdd);
fail:
return ret;
}
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
index a4a7cb06bc87..e75b97127c0d 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
@@ -28,29 +28,42 @@ static ssize_t __maybe_unused disp_devcoredump_read(char *buffer, loff_t offset,
return count - iter.remain;
}
-static void _msm_disp_snapshot_work(struct kthread_work *work)
+struct msm_disp_state *
+msm_disp_snapshot_state_sync(struct msm_kms *kms)
{
- struct msm_kms *kms = container_of(work, struct msm_kms, dump_work);
struct drm_device *drm_dev = kms->dev;
struct msm_disp_state *disp_state;
- struct drm_printer p;
+
+ WARN_ON(!mutex_is_locked(&kms->dump_mutex));
disp_state = kzalloc(sizeof(struct msm_disp_state), GFP_KERNEL);
if (!disp_state)
- return;
+ return ERR_PTR(-ENOMEM);
disp_state->dev = drm_dev->dev;
disp_state->drm_dev = drm_dev;
INIT_LIST_HEAD(&disp_state->blocks);
- /* Serialize dumping here */
- mutex_lock(&kms->dump_mutex);
-
msm_disp_snapshot_capture_state(disp_state);
+ return disp_state;
+}
+
+static void _msm_disp_snapshot_work(struct kthread_work *work)
+{
+ struct msm_kms *kms = container_of(work, struct msm_kms, dump_work);
+ struct msm_disp_state *disp_state;
+ struct drm_printer p;
+
+ /* Serialize dumping here */
+ mutex_lock(&kms->dump_mutex);
+ disp_state = msm_disp_snapshot_state_sync(kms);
mutex_unlock(&kms->dump_mutex);
+ if (IS_ERR(disp_state))
+ return;
+
if (MSM_DISP_SNAPSHOT_DUMP_IN_CONSOLE) {
p = drm_info_printer(disp_state->drm_dev->dev);
msm_disp_state_print(disp_state, &p);
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
index 4c619307612c..b5f452bd7ada 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
@@ -39,7 +39,7 @@
* @dev: device pointer
* @drm_dev: drm device pointer
* @atomic_state: atomic state duplicated at the time of the error
- * @timestamp: timestamp at which the coredump was captured
+ * @time: timestamp at which the coredump was captured
*/
struct msm_disp_state {
struct device *dev;
@@ -49,7 +49,7 @@ struct msm_disp_state {
struct drm_atomic_state *atomic_state;
- ktime_t timestamp;
+ struct timespec64 time;
};
/**
@@ -85,6 +85,16 @@ int msm_disp_snapshot_init(struct drm_device *drm_dev);
void msm_disp_snapshot_destroy(struct drm_device *drm_dev);
/**
+ * msm_disp_snapshot_state_sync - synchronously snapshot display state
+ * @kms: the kms object
+ *
+ * Returns the snapshot state on success or an ERR_PTR() on failure
+ *
+ * Must be called with &kms->dump_mutex held
+ */
+struct msm_disp_state *msm_disp_snapshot_state_sync(struct msm_kms *kms);
+
+/**
* msm_disp_snapshot_state - trigger to dump the display snapshot
* @drm_dev: handle to drm device
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
index 2e1acb1bc390..5d2ff6791058 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
@@ -5,6 +5,8 @@
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <generated/utsrelease.h>
+
#include "msm_disp_snapshot.h"
static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *base_addr)
@@ -79,10 +81,11 @@ void msm_disp_state_print(struct msm_disp_state *state, struct drm_printer *p)
}
drm_printf(p, "---\n");
-
+ drm_printf(p, "kernel: " UTS_RELEASE "\n");
drm_printf(p, "module: " KBUILD_MODNAME "\n");
drm_printf(p, "dpu devcoredump\n");
- drm_printf(p, "timestamp %lld\n", ktime_to_ns(state->timestamp));
+ drm_printf(p, "time: %lld.%09ld\n",
+ state->time.tv_sec, state->time.tv_nsec);
list_for_each_entry_safe(block, tmp, &state->blocks, node) {
drm_printf(p, "====================%s================\n", block->name);
@@ -100,7 +103,7 @@ static void msm_disp_capture_atomic_state(struct msm_disp_state *disp_state)
struct drm_device *ddev;
struct drm_modeset_acquire_ctx ctx;
- disp_state->timestamp = ktime_get();
+ ktime_get_real_ts64(&disp_state->time);
ddev = disp_state->drm_dev;
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index eb40d8413bca..6d36f63c3338 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -33,6 +33,7 @@ struct dp_aux_private {
bool read;
bool no_send_addr;
bool no_send_stop;
+ bool initted;
u32 offset;
u32 segment;
@@ -331,6 +332,10 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
}
mutex_lock(&aux->mutex);
+ if (!aux->initted) {
+ ret = -EIO;
+ goto exit;
+ }
dp_aux_update_offset_and_segment(aux, msg);
dp_aux_transfer_helper(aux, msg, true);
@@ -380,6 +385,8 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
}
aux->cmd_busy = false;
+
+exit:
mutex_unlock(&aux->mutex);
return ret;
@@ -431,8 +438,13 @@ void dp_aux_init(struct drm_dp_aux *dp_aux)
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ mutex_lock(&aux->mutex);
+
dp_catalog_aux_enable(aux->catalog, true);
aux->retry_cnt = 0;
+ aux->initted = true;
+
+ mutex_unlock(&aux->mutex);
}
void dp_aux_deinit(struct drm_dp_aux *dp_aux)
@@ -441,7 +453,12 @@ void dp_aux_deinit(struct drm_dp_aux *dp_aux)
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ mutex_lock(&aux->mutex);
+
+ aux->initted = false;
dp_catalog_aux_enable(aux->catalog, false);
+
+ mutex_unlock(&aux->mutex);
}
int dp_aux_register(struct drm_dp_aux *dp_aux)
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 62e75dc8afc6..c724cb0bde9d 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -119,13 +119,13 @@ void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl)
static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
{
u32 config = 0, tbd;
- u8 *dpcd = ctrl->panel->dpcd;
+ const u8 *dpcd = ctrl->panel->dpcd;
/* Default-> LSCLK DIV: 1/4 LCLK */
config |= (2 << DP_CONFIGURATION_CTRL_LSCLK_DIV_SHIFT);
/* Scrambler reset enable */
- if (dpcd[DP_EDP_CONFIGURATION_CAP] & DP_ALTERNATE_SCRAMBLER_RESET_CAP)
+ if (drm_dp_alternate_scrambler_reset_cap(dpcd))
config |= DP_CONFIGURATION_CTRL_ASSR;
tbd = dp_link_get_test_bits_depth(ctrl->link,
@@ -1228,7 +1228,10 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
int *training_step)
{
int ret = 0;
+ const u8 *dpcd = ctrl->panel->dpcd;
u8 encoding = DP_SET_ANSI_8B10B;
+ u8 ssc;
+ u8 assr;
struct dp_link_info link_info = {0};
dp_ctrl_config_ctrl(ctrl);
@@ -1238,9 +1241,21 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
link_info.capabilities = DP_LINK_CAP_ENHANCED_FRAMING;
dp_aux_link_configure(ctrl->aux, &link_info);
+
+ if (drm_dp_max_downspread(dpcd)) {
+ ssc = DP_SPREAD_AMP_0_5;
+ drm_dp_dpcd_write(ctrl->aux, DP_DOWNSPREAD_CTRL, &ssc, 1);
+ }
+
drm_dp_dpcd_write(ctrl->aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
&encoding, 1);
+ if (drm_dp_alternate_scrambler_reset_cap(dpcd)) {
+ assr = DP_ALTERNATE_SCRAMBLER_RESET_ENABLE;
+ drm_dp_dpcd_write(ctrl->aux, DP_EDP_CONFIGURATION_SET,
+ &assr, 1);
+ }
+
ret = dp_ctrl_link_train_1(ctrl, training_step);
if (ret) {
DRM_ERROR("link training #1 failed. ret=%d\n", ret);
@@ -1312,9 +1327,11 @@ static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl)
struct dp_io *dp_io = &ctrl->parser->io;
struct phy *phy = dp_io->phy;
struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp;
+ const u8 *dpcd = ctrl->panel->dpcd;
opts_dp->lanes = ctrl->link->link_params.num_lanes;
opts_dp->link_rate = ctrl->link->link_params.rate / 100;
+ opts_dp->ssc = drm_dp_max_downspread(dpcd);
dp_ctrl_set_clock_rate(ctrl, DP_CTRL_PM, "ctrl_link",
ctrl->link->link_params.rate * 1000);
@@ -1406,7 +1423,7 @@ void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl)
static bool dp_ctrl_use_fixed_nvid(struct dp_ctrl_private *ctrl)
{
- u8 *dpcd = ctrl->panel->dpcd;
+ const u8 *dpcd = ctrl->panel->dpcd;
/*
* For better interop experience, used a fixed NVID=0x8000
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index aba8aa47ed76..7cc4d21f2091 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -135,8 +135,18 @@ static const struct msm_dp_config sc7180_dp_cfg = {
.num_descs = 1,
};
+static const struct msm_dp_config sc7280_dp_cfg = {
+ .descs = (const struct msm_dp_desc[]) {
+ [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
+ [MSM_DP_CONTROLLER_1] = { .io_start = 0x0aea0000, .connector_type = DRM_MODE_CONNECTOR_eDP },
+ },
+ .num_descs = 2,
+};
+
static const struct of_device_id dp_dt_match[] = {
{ .compatible = "qcom,sc7180-dp", .data = &sc7180_dp_cfg },
+ { .compatible = "qcom,sc7280-dp", .data = &sc7280_dp_cfg },
+ { .compatible = "qcom,sc7280-edp", .data = &sc7280_dp_cfg },
{}
};
@@ -224,13 +234,10 @@ static int dp_display_bind(struct device *dev, struct device *master,
{
int rc = 0;
struct dp_display_private *dp = dev_get_dp_display_private(dev);
- struct msm_drm_private *priv;
- struct drm_device *drm;
-
- drm = dev_get_drvdata(master);
+ struct msm_drm_private *priv = dev_get_drvdata(master);
+ struct drm_device *drm = priv->dev;
dp->dp_display.drm_dev = drm;
- priv = drm->dev_private;
priv->dp[dp->id] = &dp->dp_display;
rc = dp->parser->parse(dp->parser, dp->dp_display.connector_type);
@@ -266,8 +273,7 @@ static void dp_display_unbind(struct device *dev, struct device *master,
void *data)
{
struct dp_display_private *dp = dev_get_dp_display_private(dev);
- struct drm_device *drm = dev_get_drvdata(master);
- struct msm_drm_private *priv = drm->dev_private;
+ struct msm_drm_private *priv = dev_get_drvdata(master);
dp_power_client_deinit(dp->power);
dp_aux_unregister(dp->aux);
@@ -410,12 +416,11 @@ static int dp_display_usbpd_configure_cb(struct device *dev)
static int dp_display_usbpd_disconnect_cb(struct device *dev)
{
- int rc = 0;
struct dp_display_private *dp = dev_get_dp_display_private(dev);
dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
- return rc;
+ return 0;
}
static void dp_display_handle_video_request(struct dp_display_private *dp)
@@ -522,11 +527,8 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
dp->hpd_state = ST_CONNECT_PENDING;
- hpd->hpd_high = 1;
-
ret = dp_display_usbpd_configure_cb(&dp->pdev->dev);
if (ret) { /* link train failed */
- hpd->hpd_high = 0;
dp->hpd_state = ST_DISCONNECTED;
if (ret == -ECONNRESET) { /* cable unplugged */
@@ -603,7 +605,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
/* triggered by irq_hdp with sink_count = 0 */
if (dp->link->sink_count == 0) {
dp_ctrl_off_phy(dp->ctrl);
- hpd->hpd_high = 0;
dp->core_initialized = false;
}
mutex_unlock(&dp->event_mutex);
@@ -627,8 +628,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
/* disable HPD plug interrupts */
dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, false);
- hpd->hpd_high = 0;
-
/*
* We don't need separate work for disconnect as
* connect/attention interrupts are disabled
@@ -693,9 +692,15 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
return 0;
}
- ret = dp_display_usbpd_attention_cb(&dp->pdev->dev);
- if (ret == -ECONNRESET) { /* cable unplugged */
- dp->core_initialized = false;
+ /*
+ * dp core (ahb/aux clks) must be initialized before
+	 * irq_hpd can be handled
+ */
+ if (dp->core_initialized) {
+ ret = dp_display_usbpd_attention_cb(&dp->pdev->dev);
+ if (ret == -ECONNRESET) { /* cable unplugged */
+ dp->core_initialized = false;
+ }
}
DRM_DEBUG_DP("hpd_state=%d\n", state);
@@ -707,9 +712,9 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
static void dp_display_deinit_sub_modules(struct dp_display_private *dp)
{
dp_debug_put(dp->debug);
+ dp_audio_put(dp->audio);
dp_panel_put(dp->panel);
dp_aux_put(dp->aux);
- dp_audio_put(dp->audio);
}
static int dp_init_sub_modules(struct dp_display_private *dp)
@@ -1481,6 +1486,18 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
}
priv->connectors[priv->num_connectors++] = dp_display->connector;
+
+ dp_display->bridge = msm_dp_bridge_init(dp_display, dev, encoder);
+ if (IS_ERR(dp_display->bridge)) {
+ ret = PTR_ERR(dp_display->bridge);
+ DRM_DEV_ERROR(dev->dev,
+ "failed to create dp bridge: %d\n", ret);
+ dp_display->bridge = NULL;
+ return ret;
+ }
+
+ priv->bridges[priv->num_bridges++] = dp_display->bridge;
+
return 0;
}
@@ -1584,8 +1601,8 @@ int msm_dp_display_disable(struct msm_dp *dp, struct drm_encoder *encoder)
}
void msm_dp_display_mode_set(struct msm_dp *dp, struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
{
struct dp_display_private *dp_display;
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index 8e80e3bac394..e3adcd578a90 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -13,6 +13,7 @@
struct msm_dp {
struct drm_device *drm_dev;
struct device *codec_dev;
+ struct drm_bridge *bridge;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_bridge *panel_bridge;
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 76856c4ee1d6..d4d360d19eba 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -12,6 +12,14 @@
#include "msm_kms.h"
#include "dp_drm.h"
+
+struct msm_dp_bridge {
+ struct drm_bridge bridge;
+ struct msm_dp *dp_display;
+};
+
+#define to_dp_display(x) container_of((x), struct msm_dp_bridge, bridge)
+
struct dp_connector {
struct drm_connector base;
struct msm_dp *dp_display;
@@ -173,3 +181,70 @@ struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display)
return connector;
}
+
+static void dp_bridge_mode_set(struct drm_bridge *drm_bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct msm_dp_bridge *dp_bridge = to_dp_display(drm_bridge);
+ struct msm_dp *dp_display = dp_bridge->dp_display;
+
+ msm_dp_display_mode_set(dp_display, drm_bridge->encoder, mode, adjusted_mode);
+}
+
+static void dp_bridge_enable(struct drm_bridge *drm_bridge)
+{
+ struct msm_dp_bridge *dp_bridge = to_dp_display(drm_bridge);
+ struct msm_dp *dp_display = dp_bridge->dp_display;
+
+ msm_dp_display_enable(dp_display, drm_bridge->encoder);
+}
+
+static void dp_bridge_disable(struct drm_bridge *drm_bridge)
+{
+ struct msm_dp_bridge *dp_bridge = to_dp_display(drm_bridge);
+ struct msm_dp *dp_display = dp_bridge->dp_display;
+
+ msm_dp_display_pre_disable(dp_display, drm_bridge->encoder);
+}
+
+static void dp_bridge_post_disable(struct drm_bridge *drm_bridge)
+{
+ struct msm_dp_bridge *dp_bridge = to_dp_display(drm_bridge);
+ struct msm_dp *dp_display = dp_bridge->dp_display;
+
+ msm_dp_display_disable(dp_display, drm_bridge->encoder);
+}
+
+static const struct drm_bridge_funcs dp_bridge_ops = {
+ .enable = dp_bridge_enable,
+ .disable = dp_bridge_disable,
+ .post_disable = dp_bridge_post_disable,
+ .mode_set = dp_bridge_mode_set,
+};
+
+struct drm_bridge *msm_dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
+ struct drm_encoder *encoder)
+{
+ int rc;
+ struct msm_dp_bridge *dp_bridge;
+ struct drm_bridge *bridge;
+
+ dp_bridge = devm_kzalloc(dev->dev, sizeof(*dp_bridge), GFP_KERNEL);
+ if (!dp_bridge)
+ return ERR_PTR(-ENOMEM);
+
+ dp_bridge->dp_display = dp_display;
+
+ bridge = &dp_bridge->bridge;
+ bridge->funcs = &dp_bridge_ops;
+ bridge->encoder = encoder;
+
+ rc = drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (rc) {
+ DRM_ERROR("failed to attach bridge, rc=%d\n", rc);
+ return ERR_PTR(rc);
+ }
+
+ return bridge;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.c b/drivers/gpu/drm/msm/dp/dp_hpd.c
index e1c90fa47411..db98a1d431eb 100644
--- a/drivers/gpu/drm/msm/dp/dp_hpd.c
+++ b/drivers/gpu/drm/msm/dp/dp_hpd.c
@@ -32,8 +32,6 @@ int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd)
hpd_priv = container_of(dp_usbpd, struct dp_hpd_private,
dp_usbpd);
- dp_usbpd->hpd_high = hpd;
-
if (!hpd_priv->dp_cb || !hpd_priv->dp_cb->configure
|| !hpd_priv->dp_cb->disconnect) {
pr_err("hpd dp_cb not initialized\n");
diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.h b/drivers/gpu/drm/msm/dp/dp_hpd.h
index 5bc5bb64680f..8feec5aa5027 100644
--- a/drivers/gpu/drm/msm/dp/dp_hpd.h
+++ b/drivers/gpu/drm/msm/dp/dp_hpd.h
@@ -26,7 +26,6 @@ enum plug_orientation {
* @multi_func: multi-function preferred
* @usb_config_req: request to switch to usb
* @exit_dp_mode: request exit from displayport mode
- * @hpd_high: Hot Plug Detect signal is high.
* @hpd_irq: Change in the status since last message
* @alt_mode_cfg_done: bool to specify alt mode status
* @debug_en: bool to specify debug mode
@@ -39,7 +38,6 @@ struct dp_usbpd {
bool multi_func;
bool usb_config_req;
bool exit_dp_mode;
- bool hpd_high;
bool hpd_irq;
bool alt_mode_cfg_done;
bool debug_en;
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index a5bdfc5029de..d4d31e5bda07 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -737,18 +737,25 @@ static int dp_link_parse_sink_count(struct dp_link *dp_link)
return 0;
}
-static void dp_link_parse_sink_status_field(struct dp_link_private *link)
+static int dp_link_parse_sink_status_field(struct dp_link_private *link)
{
int len = 0;
link->prev_sink_count = link->dp_link.sink_count;
- dp_link_parse_sink_count(&link->dp_link);
+ len = dp_link_parse_sink_count(&link->dp_link);
+ if (len < 0) {
+ DRM_ERROR("DP parse sink count failed\n");
+ return len;
+ }
len = drm_dp_dpcd_read_link_status(link->aux,
link->link_status);
- if (len < DP_LINK_STATUS_SIZE)
+ if (len < DP_LINK_STATUS_SIZE) {
DRM_ERROR("DP link status read failed\n");
- dp_link_parse_request(link);
+ return len;
+ }
+
+ return dp_link_parse_request(link);
}
/**
@@ -1023,7 +1030,9 @@ int dp_link_process_request(struct dp_link *dp_link)
dp_link_reset_data(link);
- dp_link_parse_sink_status_field(link);
+ ret = dp_link_parse_sink_status_field(link);
+ if (ret)
+ return ret;
if (link->request.test_requested == DP_TEST_LINK_EDID_READ) {
dp_link->sink_request |= DP_TEST_LINK_EDID_READ;
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 5cd230a5d5d3..052548883d27 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -110,8 +110,7 @@ destroy_dsi:
static int dsi_bind(struct device *dev, struct device *master, void *data)
{
- struct drm_device *drm = dev_get_drvdata(master);
- struct msm_drm_private *priv = drm->dev_private;
+ struct msm_drm_private *priv = dev_get_drvdata(master);
struct msm_dsi *msm_dsi = dev_get_drvdata(dev);
priv->dsi[msm_dsi->id] = msm_dsi;
@@ -122,8 +121,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
static void dsi_unbind(struct device *dev, struct device *master,
void *data)
{
- struct drm_device *drm = dev_get_drvdata(master);
- struct msm_drm_private *priv = drm->dev_private;
+ struct msm_drm_private *priv = dev_get_drvdata(master);
struct msm_dsi *msm_dsi = dev_get_drvdata(dev);
priv->dsi[msm_dsi->id] = NULL;
@@ -225,9 +223,13 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
goto fail;
}
- if (!msm_dsi_manager_validate_current_config(msm_dsi->id)) {
- ret = -EINVAL;
- goto fail;
+ if (msm_dsi_is_bonded_dsi(msm_dsi) &&
+ !msm_dsi_is_master_dsi(msm_dsi)) {
+ /*
+		 * Do not return an error here,
+		 * just skip creating encoder/connector for the slave-DSI.
+ */
+ return 0;
}
msm_dsi->encoder = encoder;
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index bb39e7ca802d..c8dedc95428c 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -82,7 +82,6 @@ int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
-bool msm_dsi_manager_validate_current_config(u8 id);
void msm_dsi_manager_tpg_enable(void);
/* msm dsi */
@@ -120,6 +119,8 @@ unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host);
struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host);
int msm_dsi_host_register(struct mipi_dsi_host *host);
void msm_dsi_host_unregister(struct mipi_dsi_host *host);
+void msm_dsi_host_set_phy_mode(struct mipi_dsi_host *host,
+ struct msm_dsi_phy *src_phy);
int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
struct msm_dsi_phy *src_phy);
void msm_dsi_host_reset_phy(struct mipi_dsi_host *host);
@@ -173,8 +174,6 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
void msm_dsi_phy_disable(struct msm_dsi_phy *phy);
void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
enum msm_dsi_phy_usecase uc);
-int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy,
- struct clk **byte_clk_provider, struct clk **pixel_clk_provider);
void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy);
int msm_dsi_phy_pll_restore_state(struct msm_dsi_phy *phy);
void msm_dsi_phy_snapshot(struct msm_disp_state *disp_state, struct msm_dsi_phy *phy);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 4c7b6944fc0d..6b3ced4aaaf5 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1664,6 +1664,8 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
if (!prop) {
DRM_DEV_DEBUG(dev,
"failed to find data lane mapping, using default\n");
+		/* Set the number of data lanes to 4 by default. */
+ msm_host->num_data_lanes = 4;
return 0;
}
@@ -2018,7 +2020,7 @@ void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
/* TODO: unvote for bus bandwidth */
cfg_hnd->ops->link_clk_disable(msm_host);
- pm_runtime_put_autosuspend(&msm_host->pdev->dev);
+ pm_runtime_put(&msm_host->pdev->dev);
}
int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
@@ -2177,57 +2179,12 @@ void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base,
wmb();
}
-int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
+void msm_dsi_host_set_phy_mode(struct mipi_dsi_host *host,
struct msm_dsi_phy *src_phy)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
- struct clk *byte_clk_provider, *pixel_clk_provider;
- int ret;
msm_host->cphy_mode = src_phy->cphy_mode;
-
- ret = msm_dsi_phy_get_clk_provider(src_phy,
- &byte_clk_provider, &pixel_clk_provider);
- if (ret) {
- pr_info("%s: can't get provider from pll, don't set parent\n",
- __func__);
- return 0;
- }
-
- ret = clk_set_parent(msm_host->byte_clk_src, byte_clk_provider);
- if (ret) {
- pr_err("%s: can't set parent to byte_clk_src. ret=%d\n",
- __func__, ret);
- goto exit;
- }
-
- ret = clk_set_parent(msm_host->pixel_clk_src, pixel_clk_provider);
- if (ret) {
- pr_err("%s: can't set parent to pixel_clk_src. ret=%d\n",
- __func__, ret);
- goto exit;
- }
-
- if (msm_host->dsi_clk_src) {
- ret = clk_set_parent(msm_host->dsi_clk_src, pixel_clk_provider);
- if (ret) {
- pr_err("%s: can't set parent to dsi_clk_src. ret=%d\n",
- __func__, ret);
- goto exit;
- }
- }
-
- if (msm_host->esc_clk_src) {
- ret = clk_set_parent(msm_host->esc_clk_src, byte_clk_provider);
- if (ret) {
- pr_err("%s: can't set parent to esc_clk_src. ret=%d\n",
- __func__, ret);
- goto exit;
- }
- }
-
-exit:
- return ret;
}
void msm_dsi_host_reset_phy(struct mipi_dsi_host *host)
@@ -2295,7 +2252,7 @@ int msm_dsi_host_enable(struct mipi_dsi_host *host)
*/
/* if (msm_panel->mode == MSM_DSI_CMD_MODE) {
* dsi_link_clk_disable(msm_host);
- * pm_runtime_put_autosuspend(&msm_host->pdev->dev);
+ * pm_runtime_put(&msm_host->pdev->dev);
* }
*/
msm_host->enabled = true;
@@ -2387,7 +2344,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
fail_disable_clk:
cfg_hnd->ops->link_clk_disable(msm_host);
- pm_runtime_put_autosuspend(&msm_host->pdev->dev);
+ pm_runtime_put(&msm_host->pdev->dev);
fail_disable_reg:
dsi_host_regulator_disable(msm_host);
unlock_ret:
@@ -2414,7 +2371,7 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
cfg_hnd->ops->link_clk_disable(msm_host);
- pm_runtime_put_autosuspend(&msm_host->pdev->dev);
+ pm_runtime_put(&msm_host->pdev->dev);
dsi_host_regulator_disable(msm_host);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 01bf8d907933..f19bae475c96 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -79,10 +79,8 @@ static int dsi_mgr_setup_components(int id)
return ret;
msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE);
- ret = msm_dsi_host_set_src_pll(msm_dsi->host, msm_dsi->phy);
- } else if (!other_dsi) {
- ret = 0;
- } else {
+ msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
+ } else if (other_dsi) {
struct msm_dsi *master_link_dsi = IS_MASTER_DSI_LINK(id) ?
msm_dsi : other_dsi;
struct msm_dsi *slave_link_dsi = IS_MASTER_DSI_LINK(id) ?
@@ -106,13 +104,11 @@ static int dsi_mgr_setup_components(int id)
MSM_DSI_PHY_MASTER);
msm_dsi_phy_set_usecase(clk_slave_dsi->phy,
MSM_DSI_PHY_SLAVE);
- ret = msm_dsi_host_set_src_pll(msm_dsi->host, clk_master_dsi->phy);
- if (ret)
- return ret;
- ret = msm_dsi_host_set_src_pll(other_dsi->host, clk_master_dsi->phy);
+ msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
+ msm_dsi_host_set_phy_mode(other_dsi->host, other_dsi->phy);
}
- return ret;
+ return 0;
}
static int enable_phy(struct msm_dsi *msm_dsi,
@@ -649,23 +645,6 @@ fail:
return ERR_PTR(ret);
}
-bool msm_dsi_manager_validate_current_config(u8 id)
-{
- bool is_bonded_dsi = IS_BONDED_DSI();
-
- /*
- * For bonded DSI, we only have one drm panel. For this
- * use case, we register only one bridge/connector.
- * Skip bridge/connector initialisation if it is
- * slave-DSI for bonded DSI configuration.
- */
- if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id)) {
- DBG("Skip bridge registration for slave DSI->id: %d\n", id);
- return false;
- }
- return true;
-}
-
/* initialize bridge */
struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
{
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 9842e04b5858..c2ed177717c7 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -602,7 +602,7 @@ static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
{
clk_disable_unprepare(phy->ahb_clk);
- pm_runtime_put_autosuspend(&phy->pdev->dev);
+ pm_runtime_put(&phy->pdev->dev);
}
static const struct of_device_id dsi_phy_dt_match[] = {
@@ -892,17 +892,6 @@ bool msm_dsi_phy_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)
return phy->cfg->ops.set_continuous_clock(phy, enable);
}
-int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy,
- struct clk **byte_clk_provider, struct clk **pixel_clk_provider)
-{
- if (byte_clk_provider)
- *byte_clk_provider = phy->provided_clocks->hws[DSI_BYTE_PLL_CLK]->clk;
- if (pixel_clk_provider)
- *pixel_clk_provider = phy->provided_clocks->hws[DSI_PIXEL_PLL_CLK]->clk;
-
- return 0;
-}
-
void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy)
{
if (phy->cfg->ops.save_pll_state) {
diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c
deleted file mode 100644
index 106a67473af5..000000000000
--- a/drivers/gpu/drm/msm/edp/edp.c
+++ /dev/null
@@ -1,198 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/of_irq.h>
-#include "edp.h"
-
-static irqreturn_t edp_irq(int irq, void *dev_id)
-{
- struct msm_edp *edp = dev_id;
-
- /* Process eDP irq */
- return msm_edp_ctrl_irq(edp->ctrl);
-}
-
-static void edp_destroy(struct platform_device *pdev)
-{
- struct msm_edp *edp = platform_get_drvdata(pdev);
-
- if (!edp)
- return;
-
- if (edp->ctrl) {
- msm_edp_ctrl_destroy(edp->ctrl);
- edp->ctrl = NULL;
- }
-
- platform_set_drvdata(pdev, NULL);
-}
-
-/* construct eDP at bind/probe time, grab all the resources. */
-static struct msm_edp *edp_init(struct platform_device *pdev)
-{
- struct msm_edp *edp = NULL;
- int ret;
-
- if (!pdev) {
- pr_err("no eDP device\n");
- ret = -ENXIO;
- goto fail;
- }
-
- edp = devm_kzalloc(&pdev->dev, sizeof(*edp), GFP_KERNEL);
- if (!edp) {
- ret = -ENOMEM;
- goto fail;
- }
- DBG("eDP probed=%p", edp);
-
- edp->pdev = pdev;
- platform_set_drvdata(pdev, edp);
-
- ret = msm_edp_ctrl_init(edp);
- if (ret)
- goto fail;
-
- return edp;
-
-fail:
- if (edp)
- edp_destroy(pdev);
-
- return ERR_PTR(ret);
-}
-
-static int edp_bind(struct device *dev, struct device *master, void *data)
-{
- struct drm_device *drm = dev_get_drvdata(master);
- struct msm_drm_private *priv = drm->dev_private;
- struct msm_edp *edp;
-
- DBG("");
- edp = edp_init(to_platform_device(dev));
- if (IS_ERR(edp))
- return PTR_ERR(edp);
- priv->edp = edp;
-
- return 0;
-}
-
-static void edp_unbind(struct device *dev, struct device *master, void *data)
-{
- struct drm_device *drm = dev_get_drvdata(master);
- struct msm_drm_private *priv = drm->dev_private;
-
- DBG("");
- if (priv->edp) {
- edp_destroy(to_platform_device(dev));
- priv->edp = NULL;
- }
-}
-
-static const struct component_ops edp_ops = {
- .bind = edp_bind,
- .unbind = edp_unbind,
-};
-
-static int edp_dev_probe(struct platform_device *pdev)
-{
- DBG("");
- return component_add(&pdev->dev, &edp_ops);
-}
-
-static int edp_dev_remove(struct platform_device *pdev)
-{
- DBG("");
- component_del(&pdev->dev, &edp_ops);
- return 0;
-}
-
-static const struct of_device_id dt_match[] = {
- { .compatible = "qcom,mdss-edp" },
- {}
-};
-
-static struct platform_driver edp_driver = {
- .probe = edp_dev_probe,
- .remove = edp_dev_remove,
- .driver = {
- .name = "msm_edp",
- .of_match_table = dt_match,
- },
-};
-
-void __init msm_edp_register(void)
-{
- DBG("");
- platform_driver_register(&edp_driver);
-}
-
-void __exit msm_edp_unregister(void)
-{
- DBG("");
- platform_driver_unregister(&edp_driver);
-}
-
-/* Second part of initialization, the drm/kms level modeset_init */
-int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
- struct drm_encoder *encoder)
-{
- struct platform_device *pdev = edp->pdev;
- struct msm_drm_private *priv = dev->dev_private;
- int ret;
-
- edp->encoder = encoder;
- edp->dev = dev;
-
- edp->bridge = msm_edp_bridge_init(edp);
- if (IS_ERR(edp->bridge)) {
- ret = PTR_ERR(edp->bridge);
- DRM_DEV_ERROR(dev->dev, "failed to create eDP bridge: %d\n", ret);
- edp->bridge = NULL;
- goto fail;
- }
-
- edp->connector = msm_edp_connector_init(edp);
- if (IS_ERR(edp->connector)) {
- ret = PTR_ERR(edp->connector);
- DRM_DEV_ERROR(dev->dev, "failed to create eDP connector: %d\n", ret);
- edp->connector = NULL;
- goto fail;
- }
-
- edp->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
- if (edp->irq < 0) {
- ret = edp->irq;
- DRM_DEV_ERROR(dev->dev, "failed to get IRQ: %d\n", ret);
- goto fail;
- }
-
- ret = devm_request_irq(&pdev->dev, edp->irq,
- edp_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
- "edp_isr", edp);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n",
- edp->irq, ret);
- goto fail;
- }
-
- priv->bridges[priv->num_bridges++] = edp->bridge;
- priv->connectors[priv->num_connectors++] = edp->connector;
-
- return 0;
-
-fail:
- /* bridge/connector are normally destroyed by drm */
- if (edp->bridge) {
- edp_bridge_destroy(edp->bridge);
- edp->bridge = NULL;
- }
- if (edp->connector) {
- edp->connector->funcs->destroy(edp->connector);
- edp->connector = NULL;
- }
-
- return ret;
-}
diff --git a/drivers/gpu/drm/msm/edp/edp.h b/drivers/gpu/drm/msm/edp/edp.h
deleted file mode 100644
index 8590f2ce274d..000000000000
--- a/drivers/gpu/drm/msm/edp/edp.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __EDP_CONNECTOR_H__
-#define __EDP_CONNECTOR_H__
-
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <drm/drm_bridge.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_dp_helper.h>
-
-#include "msm_drv.h"
-
-#define edp_read(offset) msm_readl((offset))
-#define edp_write(offset, data) msm_writel((data), (offset))
-
-struct edp_ctrl;
-struct edp_aux;
-struct edp_phy;
-
-struct msm_edp {
- struct drm_device *dev;
- struct platform_device *pdev;
-
- struct drm_connector *connector;
- struct drm_bridge *bridge;
-
- /* the encoder we are hooked to (outside of eDP block) */
- struct drm_encoder *encoder;
-
- struct edp_ctrl *ctrl;
-
- int irq;
-};
-
-/* eDP bridge */
-struct drm_bridge *msm_edp_bridge_init(struct msm_edp *edp);
-void edp_bridge_destroy(struct drm_bridge *bridge);
-
-/* eDP connector */
-struct drm_connector *msm_edp_connector_init(struct msm_edp *edp);
-
-/* AUX */
-void *msm_edp_aux_init(struct msm_edp *edp, void __iomem *regbase, struct drm_dp_aux **drm_aux);
-void msm_edp_aux_destroy(struct device *dev, struct edp_aux *aux);
-irqreturn_t msm_edp_aux_irq(struct edp_aux *aux, u32 isr);
-void msm_edp_aux_ctrl(struct edp_aux *aux, int enable);
-
-/* Phy */
-bool msm_edp_phy_ready(struct edp_phy *phy);
-void msm_edp_phy_ctrl(struct edp_phy *phy, int enable);
-void msm_edp_phy_vm_pe_init(struct edp_phy *phy);
-void msm_edp_phy_vm_pe_cfg(struct edp_phy *phy, u32 v0, u32 v1);
-void msm_edp_phy_lane_power_ctrl(struct edp_phy *phy, bool up, u32 max_lane);
-void *msm_edp_phy_init(struct device *dev, void __iomem *regbase);
-
-/* Ctrl */
-irqreturn_t msm_edp_ctrl_irq(struct edp_ctrl *ctrl);
-void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on);
-int msm_edp_ctrl_init(struct msm_edp *edp);
-void msm_edp_ctrl_destroy(struct edp_ctrl *ctrl);
-bool msm_edp_ctrl_panel_connected(struct edp_ctrl *ctrl);
-int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl,
- struct drm_connector *connector, struct edid **edid);
-int msm_edp_ctrl_timing_cfg(struct edp_ctrl *ctrl,
- const struct drm_display_mode *mode,
- const struct drm_display_info *info);
-/* @pixel_rate is in kHz */
-bool msm_edp_ctrl_pixel_clock_valid(struct edp_ctrl *ctrl,
- u32 pixel_rate, u32 *pm, u32 *pn);
-
-#endif /* __EDP_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h
deleted file mode 100644
index 7907e0f5988f..000000000000
--- a/drivers/gpu/drm/msm/edp/edp.xml.h
+++ /dev/null
@@ -1,388 +0,0 @@
-#ifndef EDP_XML
-#define EDP_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://github.com/freedreno/envytools/
-git clone https://github.com/freedreno/envytools.git
-
-The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 981 bytes, from 2021-06-05 21:37:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2021-02-18 16:45:44)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-02-18 16:45:44)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-02-18 16:45:44)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-02-18 16:45:44)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 15291 bytes, from 2021-06-15 22:36:13)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-06-05 21:37:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-05-21 19:18:08)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-05-21 19:18:08)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-05-21 19:18:08)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-05-21 19:18:08)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-05-21 19:18:08)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 10953 bytes, from 2021-05-21 19:18:08)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_5nm.xml ( 10900 bytes, from 2021-05-21 19:18:08)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-02-18 16:45:44)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-02-18 16:45:44)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-02-18 16:45:44)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-02-18 16:45:44)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-02-18 16:45:44)
-
-Copyright (C) 2013-2021 by the following authors:
-- Rob Clark <robdclark@gmail.com> (robclark)
-- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/
-
-
-enum edp_color_depth {
- EDP_6BIT = 0,
- EDP_8BIT = 1,
- EDP_10BIT = 2,
- EDP_12BIT = 3,
- EDP_16BIT = 4,
-};
-
-enum edp_component_format {
- EDP_RGB = 0,
- EDP_YUV422 = 1,
- EDP_YUV444 = 2,
-};
-
-#define REG_EDP_MAINLINK_CTRL 0x00000004
-#define EDP_MAINLINK_CTRL_ENABLE 0x00000001
-#define EDP_MAINLINK_CTRL_RESET 0x00000002
-
-#define REG_EDP_STATE_CTRL 0x00000008
-#define EDP_STATE_CTRL_TRAIN_PATTERN_1 0x00000001
-#define EDP_STATE_CTRL_TRAIN_PATTERN_2 0x00000002
-#define EDP_STATE_CTRL_TRAIN_PATTERN_3 0x00000004
-#define EDP_STATE_CTRL_SYMBOL_ERR_RATE_MEAS 0x00000008
-#define EDP_STATE_CTRL_PRBS7 0x00000010
-#define EDP_STATE_CTRL_CUSTOM_80_BIT_PATTERN 0x00000020
-#define EDP_STATE_CTRL_SEND_VIDEO 0x00000040
-#define EDP_STATE_CTRL_PUSH_IDLE 0x00000080
-
-#define REG_EDP_CONFIGURATION_CTRL 0x0000000c
-#define EDP_CONFIGURATION_CTRL_SYNC_CLK 0x00000001
-#define EDP_CONFIGURATION_CTRL_STATIC_MVID 0x00000002
-#define EDP_CONFIGURATION_CTRL_PROGRESSIVE 0x00000004
-#define EDP_CONFIGURATION_CTRL_LANES__MASK 0x00000030
-#define EDP_CONFIGURATION_CTRL_LANES__SHIFT 4
-static inline uint32_t EDP_CONFIGURATION_CTRL_LANES(uint32_t val)
-{
- return ((val) << EDP_CONFIGURATION_CTRL_LANES__SHIFT) & EDP_CONFIGURATION_CTRL_LANES__MASK;
-}
-#define EDP_CONFIGURATION_CTRL_ENHANCED_FRAMING 0x00000040
-#define EDP_CONFIGURATION_CTRL_COLOR__MASK 0x00000100
-#define EDP_CONFIGURATION_CTRL_COLOR__SHIFT 8
-static inline uint32_t EDP_CONFIGURATION_CTRL_COLOR(enum edp_color_depth val)
-{
- return ((val) << EDP_CONFIGURATION_CTRL_COLOR__SHIFT) & EDP_CONFIGURATION_CTRL_COLOR__MASK;
-}
-
-#define REG_EDP_SOFTWARE_MVID 0x00000014
-
-#define REG_EDP_SOFTWARE_NVID 0x00000018
-
-#define REG_EDP_TOTAL_HOR_VER 0x0000001c
-#define EDP_TOTAL_HOR_VER_HORIZ__MASK 0x0000ffff
-#define EDP_TOTAL_HOR_VER_HORIZ__SHIFT 0
-static inline uint32_t EDP_TOTAL_HOR_VER_HORIZ(uint32_t val)
-{
- return ((val) << EDP_TOTAL_HOR_VER_HORIZ__SHIFT) & EDP_TOTAL_HOR_VER_HORIZ__MASK;
-}
-#define EDP_TOTAL_HOR_VER_VERT__MASK 0xffff0000
-#define EDP_TOTAL_HOR_VER_VERT__SHIFT 16
-static inline uint32_t EDP_TOTAL_HOR_VER_VERT(uint32_t val)
-{
- return ((val) << EDP_TOTAL_HOR_VER_VERT__SHIFT) & EDP_TOTAL_HOR_VER_VERT__MASK;
-}
-
-#define REG_EDP_START_HOR_VER_FROM_SYNC 0x00000020
-#define EDP_START_HOR_VER_FROM_SYNC_HORIZ__MASK 0x0000ffff
-#define EDP_START_HOR_VER_FROM_SYNC_HORIZ__SHIFT 0
-static inline uint32_t EDP_START_HOR_VER_FROM_SYNC_HORIZ(uint32_t val)
-{
- return ((val) << EDP_START_HOR_VER_FROM_SYNC_HORIZ__SHIFT) & EDP_START_HOR_VER_FROM_SYNC_HORIZ__MASK;
-}
-#define EDP_START_HOR_VER_FROM_SYNC_VERT__MASK 0xffff0000
-#define EDP_START_HOR_VER_FROM_SYNC_VERT__SHIFT 16
-static inline uint32_t EDP_START_HOR_VER_FROM_SYNC_VERT(uint32_t val)
-{
- return ((val) << EDP_START_HOR_VER_FROM_SYNC_VERT__SHIFT) & EDP_START_HOR_VER_FROM_SYNC_VERT__MASK;
-}
-
-#define REG_EDP_HSYNC_VSYNC_WIDTH_POLARITY 0x00000024
-#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ__MASK 0x00007fff
-#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ__SHIFT 0
-static inline uint32_t EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ(uint32_t val)
-{
- return ((val) << EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ__SHIFT) & EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ__MASK;
-}
-#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_NHSYNC 0x00008000
-#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT__MASK 0x7fff0000
-#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT__SHIFT 16
-static inline uint32_t EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT(uint32_t val)
-{
- return ((val) << EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT__SHIFT) & EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT__MASK;
-}
-#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_NVSYNC 0x80000000
-
-#define REG_EDP_ACTIVE_HOR_VER 0x00000028
-#define EDP_ACTIVE_HOR_VER_HORIZ__MASK 0x0000ffff
-#define EDP_ACTIVE_HOR_VER_HORIZ__SHIFT 0
-static inline uint32_t EDP_ACTIVE_HOR_VER_HORIZ(uint32_t val)
-{
- return ((val) << EDP_ACTIVE_HOR_VER_HORIZ__SHIFT) & EDP_ACTIVE_HOR_VER_HORIZ__MASK;
-}
-#define EDP_ACTIVE_HOR_VER_VERT__MASK 0xffff0000
-#define EDP_ACTIVE_HOR_VER_VERT__SHIFT 16
-static inline uint32_t EDP_ACTIVE_HOR_VER_VERT(uint32_t val)
-{
- return ((val) << EDP_ACTIVE_HOR_VER_VERT__SHIFT) & EDP_ACTIVE_HOR_VER_VERT__MASK;
-}
-
-#define REG_EDP_MISC1_MISC0 0x0000002c
-#define EDP_MISC1_MISC0_MISC0__MASK 0x000000ff
-#define EDP_MISC1_MISC0_MISC0__SHIFT 0
-static inline uint32_t EDP_MISC1_MISC0_MISC0(uint32_t val)
-{
- return ((val) << EDP_MISC1_MISC0_MISC0__SHIFT) & EDP_MISC1_MISC0_MISC0__MASK;
-}
-#define EDP_MISC1_MISC0_SYNC 0x00000001
-#define EDP_MISC1_MISC0_COMPONENT_FORMAT__MASK 0x00000006
-#define EDP_MISC1_MISC0_COMPONENT_FORMAT__SHIFT 1
-static inline uint32_t EDP_MISC1_MISC0_COMPONENT_FORMAT(enum edp_component_format val)
-{
- return ((val) << EDP_MISC1_MISC0_COMPONENT_FORMAT__SHIFT) & EDP_MISC1_MISC0_COMPONENT_FORMAT__MASK;
-}
-#define EDP_MISC1_MISC0_CEA 0x00000008
-#define EDP_MISC1_MISC0_BT709_5 0x00000010
-#define EDP_MISC1_MISC0_COLOR__MASK 0x000000e0
-#define EDP_MISC1_MISC0_COLOR__SHIFT 5
-static inline uint32_t EDP_MISC1_MISC0_COLOR(enum edp_color_depth val)
-{
- return ((val) << EDP_MISC1_MISC0_COLOR__SHIFT) & EDP_MISC1_MISC0_COLOR__MASK;
-}
-#define EDP_MISC1_MISC0_MISC1__MASK 0x0000ff00
-#define EDP_MISC1_MISC0_MISC1__SHIFT 8
-static inline uint32_t EDP_MISC1_MISC0_MISC1(uint32_t val)
-{
- return ((val) << EDP_MISC1_MISC0_MISC1__SHIFT) & EDP_MISC1_MISC0_MISC1__MASK;
-}
-#define EDP_MISC1_MISC0_INTERLACED_ODD 0x00000100
-#define EDP_MISC1_MISC0_STEREO__MASK 0x00000600
-#define EDP_MISC1_MISC0_STEREO__SHIFT 9
-static inline uint32_t EDP_MISC1_MISC0_STEREO(uint32_t val)
-{
- return ((val) << EDP_MISC1_MISC0_STEREO__SHIFT) & EDP_MISC1_MISC0_STEREO__MASK;
-}
-
-#define REG_EDP_PHY_CTRL 0x00000074
-#define EDP_PHY_CTRL_SW_RESET_PLL 0x00000001
-#define EDP_PHY_CTRL_SW_RESET 0x00000004
-
-#define REG_EDP_MAINLINK_READY 0x00000084
-#define EDP_MAINLINK_READY_TRAIN_PATTERN_1_READY 0x00000008
-#define EDP_MAINLINK_READY_TRAIN_PATTERN_2_READY 0x00000010
-#define EDP_MAINLINK_READY_TRAIN_PATTERN_3_READY 0x00000020
-
-#define REG_EDP_AUX_CTRL 0x00000300
-#define EDP_AUX_CTRL_ENABLE 0x00000001
-#define EDP_AUX_CTRL_RESET 0x00000002
-
-#define REG_EDP_INTERRUPT_REG_1 0x00000308
-#define EDP_INTERRUPT_REG_1_HPD 0x00000001
-#define EDP_INTERRUPT_REG_1_HPD_ACK 0x00000002
-#define EDP_INTERRUPT_REG_1_HPD_EN 0x00000004
-#define EDP_INTERRUPT_REG_1_AUX_I2C_DONE 0x00000008
-#define EDP_INTERRUPT_REG_1_AUX_I2C_DONE_ACK 0x00000010
-#define EDP_INTERRUPT_REG_1_AUX_I2C_DONE_EN 0x00000020
-#define EDP_INTERRUPT_REG_1_WRONG_ADDR 0x00000040
-#define EDP_INTERRUPT_REG_1_WRONG_ADDR_ACK 0x00000080
-#define EDP_INTERRUPT_REG_1_WRONG_ADDR_EN 0x00000100
-#define EDP_INTERRUPT_REG_1_TIMEOUT 0x00000200
-#define EDP_INTERRUPT_REG_1_TIMEOUT_ACK 0x00000400
-#define EDP_INTERRUPT_REG_1_TIMEOUT_EN 0x00000800
-#define EDP_INTERRUPT_REG_1_NACK_DEFER 0x00001000
-#define EDP_INTERRUPT_REG_1_NACK_DEFER_ACK 0x00002000
-#define EDP_INTERRUPT_REG_1_NACK_DEFER_EN 0x00004000
-#define EDP_INTERRUPT_REG_1_WRONG_DATA_CNT 0x00008000
-#define EDP_INTERRUPT_REG_1_WRONG_DATA_CNT_ACK 0x00010000
-#define EDP_INTERRUPT_REG_1_WRONG_DATA_CNT_EN 0x00020000
-#define EDP_INTERRUPT_REG_1_I2C_NACK 0x00040000
-#define EDP_INTERRUPT_REG_1_I2C_NACK_ACK 0x00080000
-#define EDP_INTERRUPT_REG_1_I2C_NACK_EN 0x00100000
-#define EDP_INTERRUPT_REG_1_I2C_DEFER 0x00200000
-#define EDP_INTERRUPT_REG_1_I2C_DEFER_ACK 0x00400000
-#define EDP_INTERRUPT_REG_1_I2C_DEFER_EN 0x00800000
-#define EDP_INTERRUPT_REG_1_PLL_UNLOCK 0x01000000
-#define EDP_INTERRUPT_REG_1_PLL_UNLOCK_ACK 0x02000000
-#define EDP_INTERRUPT_REG_1_PLL_UNLOCK_EN 0x04000000
-#define EDP_INTERRUPT_REG_1_AUX_ERROR 0x08000000
-#define EDP_INTERRUPT_REG_1_AUX_ERROR_ACK 0x10000000
-#define EDP_INTERRUPT_REG_1_AUX_ERROR_EN 0x20000000
-
-#define REG_EDP_INTERRUPT_REG_2 0x0000030c
-#define EDP_INTERRUPT_REG_2_READY_FOR_VIDEO 0x00000001
-#define EDP_INTERRUPT_REG_2_READY_FOR_VIDEO_ACK 0x00000002
-#define EDP_INTERRUPT_REG_2_READY_FOR_VIDEO_EN 0x00000004
-#define EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT 0x00000008
-#define EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT_ACK 0x00000010
-#define EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT_EN 0x00000020
-#define EDP_INTERRUPT_REG_2_FRAME_END 0x00000040
-#define EDP_INTERRUPT_REG_2_FRAME_END_ACK 0x00000080
-#define EDP_INTERRUPT_REG_2_FRAME_END_EN 0x00000100
-#define EDP_INTERRUPT_REG_2_CRC_UPDATED 0x00000200
-#define EDP_INTERRUPT_REG_2_CRC_UPDATED_ACK 0x00000400
-#define EDP_INTERRUPT_REG_2_CRC_UPDATED_EN 0x00000800
-
-#define REG_EDP_INTERRUPT_TRANS_NUM 0x00000310
-
-#define REG_EDP_AUX_DATA 0x00000314
-#define EDP_AUX_DATA_READ 0x00000001
-#define EDP_AUX_DATA_DATA__MASK 0x0000ff00
-#define EDP_AUX_DATA_DATA__SHIFT 8
-static inline uint32_t EDP_AUX_DATA_DATA(uint32_t val)
-{
- return ((val) << EDP_AUX_DATA_DATA__SHIFT) & EDP_AUX_DATA_DATA__MASK;
-}
-#define EDP_AUX_DATA_INDEX__MASK 0x00ff0000
-#define EDP_AUX_DATA_INDEX__SHIFT 16
-static inline uint32_t EDP_AUX_DATA_INDEX(uint32_t val)
-{
- return ((val) << EDP_AUX_DATA_INDEX__SHIFT) & EDP_AUX_DATA_INDEX__MASK;
-}
-#define EDP_AUX_DATA_INDEX_WRITE 0x80000000
-
-#define REG_EDP_AUX_TRANS_CTRL 0x00000318
-#define EDP_AUX_TRANS_CTRL_I2C 0x00000100
-#define EDP_AUX_TRANS_CTRL_GO 0x00000200
-
-#define REG_EDP_AUX_STATUS 0x00000324
-
-static inline uint32_t REG_EDP_PHY_LN(uint32_t i0) { return 0x00000400 + 0x40*i0; }
-
-static inline uint32_t REG_EDP_PHY_LN_PD_CTL(uint32_t i0) { return 0x00000404 + 0x40*i0; }
-
-#define REG_EDP_PHY_GLB_VM_CFG0 0x00000510
-
-#define REG_EDP_PHY_GLB_VM_CFG1 0x00000514
-
-#define REG_EDP_PHY_GLB_MISC9 0x00000518
-
-#define REG_EDP_PHY_GLB_CFG 0x00000528
-
-#define REG_EDP_PHY_GLB_PD_CTL 0x0000052c
-
-#define REG_EDP_PHY_GLB_PHY_STATUS 0x00000598
-
-#define REG_EDP_28nm_PHY_PLL_REFCLK_CFG 0x00000000
-
-#define REG_EDP_28nm_PHY_PLL_POSTDIV1_CFG 0x00000004
-
-#define REG_EDP_28nm_PHY_PLL_CHGPUMP_CFG 0x00000008
-
-#define REG_EDP_28nm_PHY_PLL_VCOLPF_CFG 0x0000000c
-
-#define REG_EDP_28nm_PHY_PLL_VREG_CFG 0x00000010
-
-#define REG_EDP_28nm_PHY_PLL_PWRGEN_CFG 0x00000014
-
-#define REG_EDP_28nm_PHY_PLL_DMUX_CFG 0x00000018
-
-#define REG_EDP_28nm_PHY_PLL_AMUX_CFG 0x0000001c
-
-#define REG_EDP_28nm_PHY_PLL_GLB_CFG 0x00000020
-#define EDP_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B 0x00000001
-#define EDP_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B 0x00000002
-#define EDP_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B 0x00000004
-#define EDP_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE 0x00000008
-
-#define REG_EDP_28nm_PHY_PLL_POSTDIV2_CFG 0x00000024
-
-#define REG_EDP_28nm_PHY_PLL_POSTDIV3_CFG 0x00000028
-
-#define REG_EDP_28nm_PHY_PLL_LPFR_CFG 0x0000002c
-
-#define REG_EDP_28nm_PHY_PLL_LPFC1_CFG 0x00000030
-
-#define REG_EDP_28nm_PHY_PLL_LPFC2_CFG 0x00000034
-
-#define REG_EDP_28nm_PHY_PLL_SDM_CFG0 0x00000038
-
-#define REG_EDP_28nm_PHY_PLL_SDM_CFG1 0x0000003c
-
-#define REG_EDP_28nm_PHY_PLL_SDM_CFG2 0x00000040
-
-#define REG_EDP_28nm_PHY_PLL_SDM_CFG3 0x00000044
-
-#define REG_EDP_28nm_PHY_PLL_SDM_CFG4 0x00000048
-
-#define REG_EDP_28nm_PHY_PLL_SSC_CFG0 0x0000004c
-
-#define REG_EDP_28nm_PHY_PLL_SSC_CFG1 0x00000050
-
-#define REG_EDP_28nm_PHY_PLL_SSC_CFG2 0x00000054
-
-#define REG_EDP_28nm_PHY_PLL_SSC_CFG3 0x00000058
-
-#define REG_EDP_28nm_PHY_PLL_LKDET_CFG0 0x0000005c
-
-#define REG_EDP_28nm_PHY_PLL_LKDET_CFG1 0x00000060
-
-#define REG_EDP_28nm_PHY_PLL_LKDET_CFG2 0x00000064
-
-#define REG_EDP_28nm_PHY_PLL_TEST_CFG 0x00000068
-#define EDP_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET 0x00000001
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG0 0x0000006c
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG1 0x00000070
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG2 0x00000074
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG3 0x00000078
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG4 0x0000007c
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG5 0x00000080
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG6 0x00000084
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG7 0x00000088
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG8 0x0000008c
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG9 0x00000090
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG10 0x00000094
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG11 0x00000098
-
-#define REG_EDP_28nm_PHY_PLL_EFUSE_CFG 0x0000009c
-
-#define REG_EDP_28nm_PHY_PLL_DEBUG_BUS_SEL 0x000000a0
-
-
-#endif /* EDP_XML */
diff --git a/drivers/gpu/drm/msm/edp/edp_aux.c b/drivers/gpu/drm/msm/edp/edp_aux.c
deleted file mode 100644
index e3d85c622cfb..000000000000
--- a/drivers/gpu/drm/msm/edp/edp_aux.c
+++ /dev/null
@@ -1,265 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- */
-
-#include "edp.h"
-#include "edp.xml.h"
-
-#define AUX_CMD_FIFO_LEN 144
-#define AUX_CMD_NATIVE_MAX 16
-#define AUX_CMD_I2C_MAX 128
-
-#define EDP_INTR_AUX_I2C_ERR \
- (EDP_INTERRUPT_REG_1_WRONG_ADDR | EDP_INTERRUPT_REG_1_TIMEOUT | \
- EDP_INTERRUPT_REG_1_NACK_DEFER | EDP_INTERRUPT_REG_1_WRONG_DATA_CNT | \
- EDP_INTERRUPT_REG_1_I2C_NACK | EDP_INTERRUPT_REG_1_I2C_DEFER)
-#define EDP_INTR_TRANS_STATUS \
- (EDP_INTERRUPT_REG_1_AUX_I2C_DONE | EDP_INTR_AUX_I2C_ERR)
-
-struct edp_aux {
- void __iomem *base;
- bool msg_err;
-
- struct completion msg_comp;
-
- /* Prevent the message transaction routine from being re-entered. */
- struct mutex msg_mutex;
-
- struct drm_dp_aux drm_aux;
-};
-#define to_edp_aux(x) container_of(x, struct edp_aux, drm_aux)
-
-static int edp_msg_fifo_tx(struct edp_aux *aux, struct drm_dp_aux_msg *msg)
-{
- u32 data[4];
- u32 reg, len;
- bool native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
- bool read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
- u8 *msgdata = msg->buffer;
- int i;
-
- if (read)
- len = 4;
- else
- len = msg->size + 4;
-
- /*
- * cmd fifo only has depth of 144 bytes
- */
- if (len > AUX_CMD_FIFO_LEN)
- return -EINVAL;
-
- /* Pack cmd and write to HW */
- data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */
- if (read)
- data[0] |= BIT(4); /* R/W */
-
- data[1] = (msg->address >> 8) & 0xff; /* addr[15:8] */
- data[2] = msg->address & 0xff; /* addr[7:0] */
- data[3] = (msg->size - 1) & 0xff; /* len[7:0] */
-
- for (i = 0; i < len; i++) {
- reg = (i < 4) ? data[i] : msgdata[i - 4];
- reg = EDP_AUX_DATA_DATA(reg); /* index = 0, write */
- if (i == 0)
- reg |= EDP_AUX_DATA_INDEX_WRITE;
- edp_write(aux->base + REG_EDP_AUX_DATA, reg);
- }
-
- reg = 0; /* Transaction number is always 1 */
- if (!native) /* i2c */
- reg |= EDP_AUX_TRANS_CTRL_I2C;
-
- reg |= EDP_AUX_TRANS_CTRL_GO;
- edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, reg);
-
- return 0;
-}
-
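For reference, a minimal standalone sketch (plain C, not kernel code; the address, size and read flag below are made-up example inputs) of the 4-byte AUX command header packing performed by edp_msg_fifo_tx() above:

/*
 * Pack the AUX command header the same way as the deleted function:
 * addr[19:16] plus the read bit, addr[15:8], addr[7:0], and (len - 1).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t address = 0x000100;	/* assumed example DPCD address */
	uint32_t size = 6;		/* assumed example payload length */
	int read = 1;
	uint8_t hdr[4];

	hdr[0] = (address >> 16) & 0xf;		/* addr[19:16] */
	if (read)
		hdr[0] |= 1 << 4;		/* R/W bit */
	hdr[1] = (address >> 8) & 0xff;		/* addr[15:8] */
	hdr[2] = address & 0xff;		/* addr[7:0] */
	hdr[3] = (size - 1) & 0xff;		/* len[7:0] */

	printf("%02x %02x %02x %02x\n", hdr[0], hdr[1], hdr[2], hdr[3]);
	return 0;
}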
-static int edp_msg_fifo_rx(struct edp_aux *aux, struct drm_dp_aux_msg *msg)
-{
- u32 data;
- u8 *dp;
- int i;
- u32 len = msg->size;
-
- edp_write(aux->base + REG_EDP_AUX_DATA,
- EDP_AUX_DATA_INDEX_WRITE | EDP_AUX_DATA_READ); /* index = 0 */
-
- dp = msg->buffer;
-
- /* discard first byte */
- data = edp_read(aux->base + REG_EDP_AUX_DATA);
- for (i = 0; i < len; i++) {
- data = edp_read(aux->base + REG_EDP_AUX_DATA);
- dp[i] = (u8)((data >> 8) & 0xff);
- }
-
- return 0;
-}
-
-/*
- * This function does the real work of processing an AUX transaction.
- * It calls msm_edp_aux_ctrl() to reset the AUX channel if the wait
- * times out.
- * The caller that triggers the transaction must ensure that
- * msm_edp_aux_ctrl() is not running concurrently in another thread,
- * i.e. only start a transaction when the AUX channel is fully enabled.
- */
-static ssize_t edp_aux_transfer(struct drm_dp_aux *drm_aux,
- struct drm_dp_aux_msg *msg)
-{
- struct edp_aux *aux = to_edp_aux(drm_aux);
- ssize_t ret;
- unsigned long time_left;
- bool native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
- bool read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
-
- /* Ignore address only message */
- if ((msg->size == 0) || (msg->buffer == NULL)) {
- msg->reply = native ?
- DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
- return msg->size;
- }
-
- /* msg sanity check */
- if ((native && (msg->size > AUX_CMD_NATIVE_MAX)) ||
- (msg->size > AUX_CMD_I2C_MAX)) {
- pr_err("%s: invalid msg: size(%zu), request(%x)\n",
- __func__, msg->size, msg->request);
- return -EINVAL;
- }
-
- mutex_lock(&aux->msg_mutex);
-
- aux->msg_err = false;
- reinit_completion(&aux->msg_comp);
-
- ret = edp_msg_fifo_tx(aux, msg);
- if (ret < 0)
- goto unlock_exit;
-
- DBG("wait_for_completion");
- time_left = wait_for_completion_timeout(&aux->msg_comp,
- msecs_to_jiffies(300));
- if (!time_left) {
- /*
- * Clear GO and reset AUX channel
- * to cancel the current transaction.
- */
- edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, 0);
- msm_edp_aux_ctrl(aux, 1);
- pr_err("%s: aux timeout,\n", __func__);
- ret = -ETIMEDOUT;
- goto unlock_exit;
- }
- DBG("completion");
-
- if (!aux->msg_err) {
- if (read) {
- ret = edp_msg_fifo_rx(aux, msg);
- if (ret < 0)
- goto unlock_exit;
- }
-
- msg->reply = native ?
- DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
- } else {
- /* Reply defer to retry */
- msg->reply = native ?
- DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER;
- /*
- * The sleep time in the caller is not long enough to ensure
- * our H/W completes the transaction. Add more defer time here.
- */
- msleep(100);
- }
-
- /* Return requested size for success or retry */
- ret = msg->size;
-
-unlock_exit:
- mutex_unlock(&aux->msg_mutex);
- return ret;
-}
-
-void *msm_edp_aux_init(struct msm_edp *edp, void __iomem *regbase, struct drm_dp_aux **drm_aux)
-{
- struct device *dev = &edp->pdev->dev;
- struct edp_aux *aux = NULL;
- int ret;
-
- DBG("");
- aux = devm_kzalloc(dev, sizeof(*aux), GFP_KERNEL);
- if (!aux)
- return NULL;
-
- aux->base = regbase;
- mutex_init(&aux->msg_mutex);
- init_completion(&aux->msg_comp);
-
- aux->drm_aux.name = "msm_edp_aux";
- aux->drm_aux.dev = dev;
- aux->drm_aux.drm_dev = edp->dev;
- aux->drm_aux.transfer = edp_aux_transfer;
- ret = drm_dp_aux_register(&aux->drm_aux);
- if (ret) {
- pr_err("%s: failed to register drm aux: %d\n", __func__, ret);
- mutex_destroy(&aux->msg_mutex);
- }
-
- if (drm_aux && aux)
- *drm_aux = &aux->drm_aux;
-
- return aux;
-}
-
-void msm_edp_aux_destroy(struct device *dev, struct edp_aux *aux)
-{
- if (aux) {
- drm_dp_aux_unregister(&aux->drm_aux);
- mutex_destroy(&aux->msg_mutex);
- }
-}
-
-irqreturn_t msm_edp_aux_irq(struct edp_aux *aux, u32 isr)
-{
- if (isr & EDP_INTR_TRANS_STATUS) {
- DBG("isr=%x", isr);
- edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, 0);
-
- if (isr & EDP_INTR_AUX_I2C_ERR)
- aux->msg_err = true;
- else
- aux->msg_err = false;
-
- complete(&aux->msg_comp);
- }
-
- return IRQ_HANDLED;
-}
-
-void msm_edp_aux_ctrl(struct edp_aux *aux, int enable)
-{
- u32 data;
-
- DBG("enable=%d", enable);
- data = edp_read(aux->base + REG_EDP_AUX_CTRL);
-
- if (enable) {
- data |= EDP_AUX_CTRL_RESET;
- edp_write(aux->base + REG_EDP_AUX_CTRL, data);
- /* Make sure full reset */
- wmb();
- usleep_range(500, 1000);
-
- data &= ~EDP_AUX_CTRL_RESET;
- data |= EDP_AUX_CTRL_ENABLE;
- edp_write(aux->base + REG_EDP_AUX_CTRL, data);
- } else {
- data &= ~EDP_AUX_CTRL_ENABLE;
- edp_write(aux->base + REG_EDP_AUX_CTRL, data);
- }
-}
-
diff --git a/drivers/gpu/drm/msm/edp/edp_bridge.c b/drivers/gpu/drm/msm/edp/edp_bridge.c
deleted file mode 100644
index c69a37e0c708..000000000000
--- a/drivers/gpu/drm/msm/edp/edp_bridge.c
+++ /dev/null
@@ -1,111 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- */
-
-#include "edp.h"
-
-struct edp_bridge {
- struct drm_bridge base;
- struct msm_edp *edp;
-};
-#define to_edp_bridge(x) container_of(x, struct edp_bridge, base)
-
-void edp_bridge_destroy(struct drm_bridge *bridge)
-{
-}
-
-static void edp_bridge_pre_enable(struct drm_bridge *bridge)
-{
- struct edp_bridge *edp_bridge = to_edp_bridge(bridge);
- struct msm_edp *edp = edp_bridge->edp;
-
- DBG("");
- msm_edp_ctrl_power(edp->ctrl, true);
-}
-
-static void edp_bridge_enable(struct drm_bridge *bridge)
-{
- DBG("");
-}
-
-static void edp_bridge_disable(struct drm_bridge *bridge)
-{
- DBG("");
-}
-
-static void edp_bridge_post_disable(struct drm_bridge *bridge)
-{
- struct edp_bridge *edp_bridge = to_edp_bridge(bridge);
- struct msm_edp *edp = edp_bridge->edp;
-
- DBG("");
- msm_edp_ctrl_power(edp->ctrl, false);
-}
-
-static void edp_bridge_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adjusted_mode)
-{
- struct drm_device *dev = bridge->dev;
- struct drm_connector *connector;
- struct edp_bridge *edp_bridge = to_edp_bridge(bridge);
- struct msm_edp *edp = edp_bridge->edp;
-
- DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct drm_encoder *encoder = connector->encoder;
- struct drm_bridge *first_bridge;
-
- if (!connector->encoder)
- continue;
-
- first_bridge = drm_bridge_chain_get_first_bridge(encoder);
- if (bridge == first_bridge) {
- msm_edp_ctrl_timing_cfg(edp->ctrl,
- adjusted_mode, &connector->display_info);
- break;
- }
- }
-}
-
-static const struct drm_bridge_funcs edp_bridge_funcs = {
- .pre_enable = edp_bridge_pre_enable,
- .enable = edp_bridge_enable,
- .disable = edp_bridge_disable,
- .post_disable = edp_bridge_post_disable,
- .mode_set = edp_bridge_mode_set,
-};
-
-/* initialize bridge */
-struct drm_bridge *msm_edp_bridge_init(struct msm_edp *edp)
-{
- struct drm_bridge *bridge = NULL;
- struct edp_bridge *edp_bridge;
- int ret;
-
- edp_bridge = devm_kzalloc(edp->dev->dev,
- sizeof(*edp_bridge), GFP_KERNEL);
- if (!edp_bridge) {
- ret = -ENOMEM;
- goto fail;
- }
-
- edp_bridge->edp = edp;
-
- bridge = &edp_bridge->base;
- bridge->funcs = &edp_bridge_funcs;
-
- ret = drm_bridge_attach(edp->encoder, bridge, NULL, 0);
- if (ret)
- goto fail;
-
- return bridge;
-
-fail:
- if (bridge)
- edp_bridge_destroy(bridge);
-
- return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c
deleted file mode 100644
index 73cb5fd97a5a..000000000000
--- a/drivers/gpu/drm/msm/edp/edp_connector.c
+++ /dev/null
@@ -1,132 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- */
-
-#include "drm/drm_edid.h"
-#include "msm_kms.h"
-#include "edp.h"
-
-struct edp_connector {
- struct drm_connector base;
- struct msm_edp *edp;
-};
-#define to_edp_connector(x) container_of(x, struct edp_connector, base)
-
-static enum drm_connector_status edp_connector_detect(
- struct drm_connector *connector, bool force)
-{
- struct edp_connector *edp_connector = to_edp_connector(connector);
- struct msm_edp *edp = edp_connector->edp;
-
- DBG("");
- return msm_edp_ctrl_panel_connected(edp->ctrl) ?
- connector_status_connected : connector_status_disconnected;
-}
-
-static void edp_connector_destroy(struct drm_connector *connector)
-{
- struct edp_connector *edp_connector = to_edp_connector(connector);
-
- DBG("");
-
- drm_connector_cleanup(connector);
-
- kfree(edp_connector);
-}
-
-static int edp_connector_get_modes(struct drm_connector *connector)
-{
- struct edp_connector *edp_connector = to_edp_connector(connector);
- struct msm_edp *edp = edp_connector->edp;
-
- struct edid *drm_edid = NULL;
- int ret = 0;
-
- DBG("");
- ret = msm_edp_ctrl_get_panel_info(edp->ctrl, connector, &drm_edid);
- if (ret)
- return ret;
-
- drm_connector_update_edid_property(connector, drm_edid);
- if (drm_edid)
- ret = drm_add_edid_modes(connector, drm_edid);
-
- return ret;
-}
-
-static int edp_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct edp_connector *edp_connector = to_edp_connector(connector);
- struct msm_edp *edp = edp_connector->edp;
- struct msm_drm_private *priv = connector->dev->dev_private;
- struct msm_kms *kms = priv->kms;
- long actual, requested;
-
- requested = 1000 * mode->clock;
- actual = kms->funcs->round_pixclk(kms,
- requested, edp_connector->edp->encoder);
-
- DBG("requested=%ld, actual=%ld", requested, actual);
- if (actual != requested)
- return MODE_CLOCK_RANGE;
-
- if (!msm_edp_ctrl_pixel_clock_valid(
- edp->ctrl, mode->clock, NULL, NULL))
- return MODE_CLOCK_RANGE;
-
- /* Invalidate all modes if color format is not supported */
- if (connector->display_info.bpc > 8)
- return MODE_BAD;
-
- return MODE_OK;
-}
-
-static const struct drm_connector_funcs edp_connector_funcs = {
- .detect = edp_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = edp_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const struct drm_connector_helper_funcs edp_connector_helper_funcs = {
- .get_modes = edp_connector_get_modes,
- .mode_valid = edp_connector_mode_valid,
-};
-
-/* initialize connector */
-struct drm_connector *msm_edp_connector_init(struct msm_edp *edp)
-{
- struct drm_connector *connector = NULL;
- struct edp_connector *edp_connector;
- int ret;
-
- edp_connector = kzalloc(sizeof(*edp_connector), GFP_KERNEL);
- if (!edp_connector)
- return ERR_PTR(-ENOMEM);
-
- edp_connector->edp = edp;
-
- connector = &edp_connector->base;
-
- ret = drm_connector_init(edp->dev, connector, &edp_connector_funcs,
- DRM_MODE_CONNECTOR_eDP);
- if (ret)
- return ERR_PTR(ret);
-
- drm_connector_helper_add(connector, &edp_connector_helper_funcs);
-
- /* We don't support HPD, so only poll status until connected. */
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
-
- /* The display driver doesn't support interlacing yet. */
- connector->interlace_allowed = false;
- connector->doublescan_allowed = false;
-
- drm_connector_attach_encoder(connector, edp->encoder);
-
- return connector;
-}
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
deleted file mode 100644
index a68a4a1867c1..000000000000
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ /dev/null
@@ -1,1373 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/clk.h>
-#include <linux/gpio/consumer.h>
-#include <linux/regulator/consumer.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_dp_helper.h>
-#include <drm/drm_edid.h>
-
-#include "edp.h"
-#include "edp.xml.h"
-
-#define VDDA_UA_ON_LOAD 100000 /* uA units */
-#define VDDA_UA_OFF_LOAD 100 /* uA units */
-
-#define DPCD_LINK_VOLTAGE_MAX 4
-#define DPCD_LINK_PRE_EMPHASIS_MAX 4
-
-#define EDP_LINK_BW_MAX DP_LINK_BW_2_7
-
-/* Link training return value */
-#define EDP_TRAIN_FAIL -1
-#define EDP_TRAIN_SUCCESS 0
-#define EDP_TRAIN_RECONFIG 1
-
-#define EDP_CLK_MASK_AHB BIT(0)
-#define EDP_CLK_MASK_AUX BIT(1)
-#define EDP_CLK_MASK_LINK BIT(2)
-#define EDP_CLK_MASK_PIXEL BIT(3)
-#define EDP_CLK_MASK_MDP_CORE BIT(4)
-#define EDP_CLK_MASK_LINK_CHAN (EDP_CLK_MASK_LINK | EDP_CLK_MASK_PIXEL)
-#define EDP_CLK_MASK_AUX_CHAN \
- (EDP_CLK_MASK_AHB | EDP_CLK_MASK_AUX | EDP_CLK_MASK_MDP_CORE)
-#define EDP_CLK_MASK_ALL (EDP_CLK_MASK_AUX_CHAN | EDP_CLK_MASK_LINK_CHAN)
-
-#define EDP_BACKLIGHT_MAX 255
-
-#define EDP_INTR_STATUS1 \
- (EDP_INTERRUPT_REG_1_HPD | EDP_INTERRUPT_REG_1_AUX_I2C_DONE | \
- EDP_INTERRUPT_REG_1_WRONG_ADDR | EDP_INTERRUPT_REG_1_TIMEOUT | \
- EDP_INTERRUPT_REG_1_NACK_DEFER | EDP_INTERRUPT_REG_1_WRONG_DATA_CNT | \
- EDP_INTERRUPT_REG_1_I2C_NACK | EDP_INTERRUPT_REG_1_I2C_DEFER | \
- EDP_INTERRUPT_REG_1_PLL_UNLOCK | EDP_INTERRUPT_REG_1_AUX_ERROR)
-#define EDP_INTR_MASK1 (EDP_INTR_STATUS1 << 2)
-#define EDP_INTR_STATUS2 \
- (EDP_INTERRUPT_REG_2_READY_FOR_VIDEO | \
- EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT | \
- EDP_INTERRUPT_REG_2_FRAME_END | EDP_INTERRUPT_REG_2_CRC_UPDATED)
-#define EDP_INTR_MASK2 (EDP_INTR_STATUS2 << 2)
-
-struct edp_ctrl {
- struct platform_device *pdev;
-
- void __iomem *base;
-
- /* regulators */
- struct regulator *vdda_vreg; /* 1.8 V */
- struct regulator *lvl_vreg;
-
- /* clocks */
- struct clk *aux_clk;
- struct clk *pixel_clk;
- struct clk *ahb_clk;
- struct clk *link_clk;
- struct clk *mdp_core_clk;
-
- /* gpios */
- struct gpio_desc *panel_en_gpio;
- struct gpio_desc *panel_hpd_gpio;
-
- /* completion and mutex */
- struct completion idle_comp;
- struct mutex dev_mutex; /* To protect device power status */
-
- /* work queue */
- struct work_struct on_work;
- struct work_struct off_work;
- struct workqueue_struct *workqueue;
-
- /* Interrupt register lock */
- spinlock_t irq_lock;
-
- bool edp_connected;
- bool power_on;
-
- /* edid raw data */
- struct edid *edid;
-
- struct drm_dp_aux *drm_aux;
-
- /* dpcd raw data */
- u8 dpcd[DP_RECEIVER_CAP_SIZE];
-
- /* Link status */
- u8 link_rate;
- u8 lane_cnt;
- u8 v_level;
- u8 p_level;
-
- /* Timing status */
- u8 interlaced;
- u32 pixel_rate; /* in kHz */
- u32 color_depth;
-
- struct edp_aux *aux;
- struct edp_phy *phy;
-};
-
-struct edp_pixel_clk_div {
- u32 rate; /* in kHz */
- u32 m;
- u32 n;
-};
-
-#define EDP_PIXEL_CLK_NUM 8
-static const struct edp_pixel_clk_div clk_divs[2][EDP_PIXEL_CLK_NUM] = {
- { /* Link clock = 162MHz, source clock = 810MHz */
- {119000, 31, 211}, /* WSXGA+ 1680x1050@60Hz CVT */
- {130250, 32, 199}, /* UXGA 1600x1200@60Hz CVT */
- {148500, 11, 60}, /* FHD 1920x1080@60Hz */
- {154000, 50, 263}, /* WUXGA 1920x1200@60Hz CVT */
- {209250, 31, 120}, /* QXGA 2048x1536@60Hz CVT */
- {268500, 119, 359}, /* WQXGA 2560x1600@60Hz CVT */
- {138530, 33, 193}, /* AUO B116HAN03.0 Panel */
- {141400, 48, 275}, /* AUO B133HTN01.2 Panel */
- },
- { /* Link clock = 270MHz, source clock = 675MHz */
- {119000, 52, 295}, /* WSXGA+ 1680x1050@60Hz CVT */
- {130250, 11, 57}, /* UXGA 1600x1200@60Hz CVT */
- {148500, 11, 50}, /* FHD 1920x1080@60Hz */
- {154000, 47, 206}, /* WUXGA 1920x1200@60Hz CVT */
- {209250, 31, 100}, /* QXGA 2048x1536@60Hz CVT */
- {268500, 107, 269}, /* WQXGA 2560x1600@60Hz CVT */
- {138530, 63, 307}, /* AUO B116HAN03.0 Panel */
- {141400, 53, 253}, /* AUO B133HTN01.2 Panel */
- },
-};
-
-static int edp_clk_init(struct edp_ctrl *ctrl)
-{
- struct platform_device *pdev = ctrl->pdev;
- int ret;
-
- ctrl->aux_clk = msm_clk_get(pdev, "core");
- if (IS_ERR(ctrl->aux_clk)) {
- ret = PTR_ERR(ctrl->aux_clk);
- pr_err("%s: Can't find core clock, %d\n", __func__, ret);
- ctrl->aux_clk = NULL;
- return ret;
- }
-
- ctrl->pixel_clk = msm_clk_get(pdev, "pixel");
- if (IS_ERR(ctrl->pixel_clk)) {
- ret = PTR_ERR(ctrl->pixel_clk);
- pr_err("%s: Can't find pixel clock, %d\n", __func__, ret);
- ctrl->pixel_clk = NULL;
- return ret;
- }
-
- ctrl->ahb_clk = msm_clk_get(pdev, "iface");
- if (IS_ERR(ctrl->ahb_clk)) {
- ret = PTR_ERR(ctrl->ahb_clk);
- pr_err("%s: Can't find iface clock, %d\n", __func__, ret);
- ctrl->ahb_clk = NULL;
- return ret;
- }
-
- ctrl->link_clk = msm_clk_get(pdev, "link");
- if (IS_ERR(ctrl->link_clk)) {
- ret = PTR_ERR(ctrl->link_clk);
- pr_err("%s: Can't find link clock, %d\n", __func__, ret);
- ctrl->link_clk = NULL;
- return ret;
- }
-
- /* need mdp core clock to receive irq */
- ctrl->mdp_core_clk = msm_clk_get(pdev, "mdp_core");
- if (IS_ERR(ctrl->mdp_core_clk)) {
- ret = PTR_ERR(ctrl->mdp_core_clk);
- pr_err("%s: Can't find mdp_core clock, %d\n", __func__, ret);
- ctrl->mdp_core_clk = NULL;
- return ret;
- }
-
- return 0;
-}
-
-static int edp_clk_enable(struct edp_ctrl *ctrl, u32 clk_mask)
-{
- int ret;
-
- DBG("mask=%x", clk_mask);
- /* ahb_clk should be enabled first */
- if (clk_mask & EDP_CLK_MASK_AHB) {
- ret = clk_prepare_enable(ctrl->ahb_clk);
- if (ret) {
- pr_err("%s: Failed to enable ahb clk\n", __func__);
- goto f0;
- }
- }
- if (clk_mask & EDP_CLK_MASK_AUX) {
- ret = clk_set_rate(ctrl->aux_clk, 19200000);
- if (ret) {
- pr_err("%s: Failed to set rate aux clk\n", __func__);
- goto f1;
- }
- ret = clk_prepare_enable(ctrl->aux_clk);
- if (ret) {
- pr_err("%s: Failed to enable aux clk\n", __func__);
- goto f1;
- }
- }
- /* Need to set rate and enable link_clk prior to pixel_clk */
- if (clk_mask & EDP_CLK_MASK_LINK) {
- DBG("edp->link_clk, set_rate %ld",
- (unsigned long)ctrl->link_rate * 27000000);
- ret = clk_set_rate(ctrl->link_clk,
- (unsigned long)ctrl->link_rate * 27000000);
- if (ret) {
- pr_err("%s: Failed to set rate to link clk\n",
- __func__);
- goto f2;
- }
-
- ret = clk_prepare_enable(ctrl->link_clk);
- if (ret) {
- pr_err("%s: Failed to enable link clk\n", __func__);
- goto f2;
- }
- }
- if (clk_mask & EDP_CLK_MASK_PIXEL) {
- DBG("edp->pixel_clk, set_rate %ld",
- (unsigned long)ctrl->pixel_rate * 1000);
- ret = clk_set_rate(ctrl->pixel_clk,
- (unsigned long)ctrl->pixel_rate * 1000);
- if (ret) {
- pr_err("%s: Failed to set rate to pixel clk\n",
- __func__);
- goto f3;
- }
-
- ret = clk_prepare_enable(ctrl->pixel_clk);
- if (ret) {
- pr_err("%s: Failed to enable pixel clk\n", __func__);
- goto f3;
- }
- }
- if (clk_mask & EDP_CLK_MASK_MDP_CORE) {
- ret = clk_prepare_enable(ctrl->mdp_core_clk);
- if (ret) {
- pr_err("%s: Failed to enable mdp core clk\n", __func__);
- goto f4;
- }
- }
-
- return 0;
-
-f4:
- if (clk_mask & EDP_CLK_MASK_PIXEL)
- clk_disable_unprepare(ctrl->pixel_clk);
-f3:
- if (clk_mask & EDP_CLK_MASK_LINK)
- clk_disable_unprepare(ctrl->link_clk);
-f2:
- if (clk_mask & EDP_CLK_MASK_AUX)
- clk_disable_unprepare(ctrl->aux_clk);
-f1:
- if (clk_mask & EDP_CLK_MASK_AHB)
- clk_disable_unprepare(ctrl->ahb_clk);
-f0:
- return ret;
-}
-
-static void edp_clk_disable(struct edp_ctrl *ctrl, u32 clk_mask)
-{
- if (clk_mask & EDP_CLK_MASK_MDP_CORE)
- clk_disable_unprepare(ctrl->mdp_core_clk);
- if (clk_mask & EDP_CLK_MASK_PIXEL)
- clk_disable_unprepare(ctrl->pixel_clk);
- if (clk_mask & EDP_CLK_MASK_LINK)
- clk_disable_unprepare(ctrl->link_clk);
- if (clk_mask & EDP_CLK_MASK_AUX)
- clk_disable_unprepare(ctrl->aux_clk);
- if (clk_mask & EDP_CLK_MASK_AHB)
- clk_disable_unprepare(ctrl->ahb_clk);
-}
-
-static int edp_regulator_init(struct edp_ctrl *ctrl)
-{
- struct device *dev = &ctrl->pdev->dev;
- int ret;
-
- DBG("");
- ctrl->vdda_vreg = devm_regulator_get(dev, "vdda");
- ret = PTR_ERR_OR_ZERO(ctrl->vdda_vreg);
- if (ret) {
- pr_err("%s: Could not get vdda reg, ret = %d\n", __func__,
- ret);
- ctrl->vdda_vreg = NULL;
- return ret;
- }
- ctrl->lvl_vreg = devm_regulator_get(dev, "lvl-vdd");
- ret = PTR_ERR_OR_ZERO(ctrl->lvl_vreg);
- if (ret) {
- pr_err("%s: Could not get lvl-vdd reg, ret = %d\n", __func__,
- ret);
- ctrl->lvl_vreg = NULL;
- return ret;
- }
-
- return 0;
-}
-
-static int edp_regulator_enable(struct edp_ctrl *ctrl)
-{
- int ret;
-
- ret = regulator_set_load(ctrl->vdda_vreg, VDDA_UA_ON_LOAD);
- if (ret < 0) {
- pr_err("%s: vdda_vreg set regulator mode failed.\n", __func__);
- goto vdda_set_fail;
- }
-
- ret = regulator_enable(ctrl->vdda_vreg);
- if (ret) {
- pr_err("%s: Failed to enable vdda_vreg regulator.\n", __func__);
- goto vdda_enable_fail;
- }
-
- ret = regulator_enable(ctrl->lvl_vreg);
- if (ret) {
- pr_err("Failed to enable lvl-vdd reg regulator, %d", ret);
- goto lvl_enable_fail;
- }
-
- DBG("exit");
- return 0;
-
-lvl_enable_fail:
- regulator_disable(ctrl->vdda_vreg);
-vdda_enable_fail:
- regulator_set_load(ctrl->vdda_vreg, VDDA_UA_OFF_LOAD);
-vdda_set_fail:
- return ret;
-}
-
-static void edp_regulator_disable(struct edp_ctrl *ctrl)
-{
- regulator_disable(ctrl->lvl_vreg);
- regulator_disable(ctrl->vdda_vreg);
- regulator_set_load(ctrl->vdda_vreg, VDDA_UA_OFF_LOAD);
-}
-
-static int edp_gpio_config(struct edp_ctrl *ctrl)
-{
- struct device *dev = &ctrl->pdev->dev;
- int ret;
-
- ctrl->panel_hpd_gpio = devm_gpiod_get(dev, "panel-hpd", GPIOD_IN);
- if (IS_ERR(ctrl->panel_hpd_gpio)) {
- ret = PTR_ERR(ctrl->panel_hpd_gpio);
- ctrl->panel_hpd_gpio = NULL;
- pr_err("%s: cannot get panel-hpd-gpios, %d\n", __func__, ret);
- return ret;
- }
-
- ctrl->panel_en_gpio = devm_gpiod_get(dev, "panel-en", GPIOD_OUT_LOW);
- if (IS_ERR(ctrl->panel_en_gpio)) {
- ret = PTR_ERR(ctrl->panel_en_gpio);
- ctrl->panel_en_gpio = NULL;
- pr_err("%s: cannot get panel-en-gpios, %d\n", __func__, ret);
- return ret;
- }
-
- DBG("gpio on");
-
- return 0;
-}
-
-static void edp_ctrl_irq_enable(struct edp_ctrl *ctrl, int enable)
-{
- unsigned long flags;
-
- DBG("%d", enable);
- spin_lock_irqsave(&ctrl->irq_lock, flags);
- if (enable) {
- edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_1, EDP_INTR_MASK1);
- edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_2, EDP_INTR_MASK2);
- } else {
- edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_1, 0x0);
- edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_2, 0x0);
- }
- spin_unlock_irqrestore(&ctrl->irq_lock, flags);
- DBG("exit");
-}
-
-static void edp_fill_link_cfg(struct edp_ctrl *ctrl)
-{
- u32 prate;
- u32 lrate;
- u32 bpp;
- u8 max_lane = drm_dp_max_lane_count(ctrl->dpcd);
- u8 lane;
-
- prate = ctrl->pixel_rate;
- bpp = ctrl->color_depth * 3;
-
- /*
- * By default, use the maximum link rate and minimum lane count,
- * so that we can do rate down shift during link training.
- */
- ctrl->link_rate = ctrl->dpcd[DP_MAX_LINK_RATE];
-
- prate *= bpp;
- prate /= 8; /* in kByte */
-
- lrate = 270000; /* in kHz */
- lrate *= ctrl->link_rate;
- lrate /= 10; /* in kByte, 10 bits --> 8 bits */
-
- for (lane = 1; lane <= max_lane; lane <<= 1) {
- if (lrate >= prate)
- break;
- lrate <<= 1;
- }
-
- ctrl->lane_cnt = lane;
- DBG("rate=%d lane=%d", ctrl->link_rate, ctrl->lane_cnt);
-}
-
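A standalone worked example (plain C; the 1080p60 pixel clock, 8 bpc and 2.7 Gbps max link rate are assumed example inputs) of the bandwidth comparison edp_fill_link_cfg() above uses to pick the lane count:

/* Payload rate vs per-lane link capacity, both in kByte/s. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t prate = 148500;		/* pixel clock in kHz (1080p60) */
	uint32_t bpp = 8 * 3;			/* 8 bpc, RGB */
	uint8_t link_rate = 0x0a;		/* 2.7 Gbps, in 0.27 Gbps units */
	uint8_t max_lane = 4, lane;
	uint32_t lrate;

	prate = prate * bpp / 8;		/* payload in kByte/s */
	lrate = 270000 * link_rate / 10;	/* per-lane capacity in kByte/s */

	for (lane = 1; lane <= max_lane; lane <<= 1) {
		if (lrate >= prate)
			break;
		lrate <<= 1;
	}

	printf("lanes needed: %u\n", (unsigned)lane);	/* 2 for this example */
	return 0;
}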
-static void edp_config_ctrl(struct edp_ctrl *ctrl)
-{
- u32 data;
- enum edp_color_depth depth;
-
- data = EDP_CONFIGURATION_CTRL_LANES(ctrl->lane_cnt - 1);
-
- if (drm_dp_enhanced_frame_cap(ctrl->dpcd))
- data |= EDP_CONFIGURATION_CTRL_ENHANCED_FRAMING;
-
- depth = EDP_6BIT;
- if (ctrl->color_depth == 8)
- depth = EDP_8BIT;
-
- data |= EDP_CONFIGURATION_CTRL_COLOR(depth);
-
- if (!ctrl->interlaced) /* progressive */
- data |= EDP_CONFIGURATION_CTRL_PROGRESSIVE;
-
- data |= (EDP_CONFIGURATION_CTRL_SYNC_CLK |
- EDP_CONFIGURATION_CTRL_STATIC_MVID);
-
- edp_write(ctrl->base + REG_EDP_CONFIGURATION_CTRL, data);
-}
-
-static void edp_state_ctrl(struct edp_ctrl *ctrl, u32 state)
-{
- edp_write(ctrl->base + REG_EDP_STATE_CTRL, state);
- /* Make sure H/W status is set */
- wmb();
-}
-
-static int edp_lane_set_write(struct edp_ctrl *ctrl,
- u8 voltage_level, u8 pre_emphasis_level)
-{
- int i;
- u8 buf[4];
-
- if (voltage_level >= DPCD_LINK_VOLTAGE_MAX)
- voltage_level |= 0x04;
-
- if (pre_emphasis_level >= DPCD_LINK_PRE_EMPHASIS_MAX)
- pre_emphasis_level |= 0x04;
-
- pre_emphasis_level <<= 3;
-
- for (i = 0; i < 4; i++)
- buf[i] = voltage_level | pre_emphasis_level;
-
- DBG("%s: p|v=0x%x", __func__, voltage_level | pre_emphasis_level);
- if (drm_dp_dpcd_write(ctrl->drm_aux, 0x103, buf, 4) < 4) {
- pr_err("%s: Set sw/pe to panel failed\n", __func__);
- return -ENOLINK;
- }
-
- return 0;
-}
-
-static int edp_train_pattern_set_write(struct edp_ctrl *ctrl, u8 pattern)
-{
- u8 p = pattern;
-
- DBG("pattern=%x", p);
- if (drm_dp_dpcd_write(ctrl->drm_aux,
- DP_TRAINING_PATTERN_SET, &p, 1) < 1) {
- pr_err("%s: Set training pattern to panel failed\n", __func__);
- return -ENOLINK;
- }
-
- return 0;
-}
-
-static void edp_sink_train_set_adjust(struct edp_ctrl *ctrl,
- const u8 *link_status)
-{
- int i;
- u8 max = 0;
- u8 data;
-
- /* use the max level across lanes */
- for (i = 0; i < ctrl->lane_cnt; i++) {
- data = drm_dp_get_adjust_request_voltage(link_status, i);
- DBG("lane=%d req_voltage_swing=0x%x", i, data);
- if (max < data)
- max = data;
- }
-
- ctrl->v_level = max >> DP_TRAIN_VOLTAGE_SWING_SHIFT;
-
- /* use the max level across lanes */
- max = 0;
- for (i = 0; i < ctrl->lane_cnt; i++) {
- data = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
- DBG("lane=%d req_pre_emphasis=0x%x", i, data);
- if (max < data)
- max = data;
- }
-
- ctrl->p_level = max >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
- DBG("v_level=%d, p_level=%d", ctrl->v_level, ctrl->p_level);
-}
-
-static void edp_host_train_set(struct edp_ctrl *ctrl, u32 train)
-{
- int cnt = 10;
- u32 data;
- u32 shift = train - 1;
-
- DBG("train=%d", train);
-
- edp_state_ctrl(ctrl, EDP_STATE_CTRL_TRAIN_PATTERN_1 << shift);
- while (--cnt) {
- data = edp_read(ctrl->base + REG_EDP_MAINLINK_READY);
- if (data & (EDP_MAINLINK_READY_TRAIN_PATTERN_1_READY << shift))
- break;
- }
-
- if (cnt == 0)
- pr_err("%s: set link_train=%d failed\n", __func__, train);
-}
-
-static const u8 vm_pre_emphasis[4][4] = {
- {0x03, 0x06, 0x09, 0x0C}, /* pe0, 0 db */
- {0x03, 0x06, 0x09, 0xFF}, /* pe1, 3.5 db */
- {0x03, 0x06, 0xFF, 0xFF}, /* pe2, 6.0 db */
- {0x03, 0xFF, 0xFF, 0xFF} /* pe3, 9.5 db */
-};
-
-/* voltage swing, 0.2v and 1.0v are not supported */
-static const u8 vm_voltage_swing[4][4] = {
- {0x14, 0x18, 0x1A, 0x1E}, /* sw0, 0.4v */
- {0x18, 0x1A, 0x1E, 0xFF}, /* sw1, 0.6 v */
- {0x1A, 0x1E, 0xFF, 0xFF}, /* sw2, 0.8 v */
- {0x1E, 0xFF, 0xFF, 0xFF} /* sw3, 1.2 v, optional */
-};
-
-static int edp_voltage_pre_emphasise_set(struct edp_ctrl *ctrl)
-{
- u32 value0;
- u32 value1;
-
- DBG("v=%d p=%d", ctrl->v_level, ctrl->p_level);
-
- value0 = vm_pre_emphasis[(int)(ctrl->v_level)][(int)(ctrl->p_level)];
- value1 = vm_voltage_swing[(int)(ctrl->v_level)][(int)(ctrl->p_level)];
-
- /* Configure host and panel only if both values are allowed */
- if (value0 != 0xFF && value1 != 0xFF) {
- msm_edp_phy_vm_pe_cfg(ctrl->phy, value0, value1);
- return edp_lane_set_write(ctrl, ctrl->v_level, ctrl->p_level);
- }
-
- return -EINVAL;
-}
-
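A standalone sketch (plain C; the requested v_level/p_level pair is an assumed example) of the table lookup and 0xFF validity check performed by edp_voltage_pre_emphasise_set() above:

#include <stdint.h>
#include <stdio.h>

/* Same tables as above; 0xFF marks unsupported combinations. */
static const uint8_t vm_pre_emphasis[4][4] = {
	{0x03, 0x06, 0x09, 0x0C},
	{0x03, 0x06, 0x09, 0xFF},
	{0x03, 0x06, 0xFF, 0xFF},
	{0x03, 0xFF, 0xFF, 0xFF},
};

static const uint8_t vm_voltage_swing[4][4] = {
	{0x14, 0x18, 0x1A, 0x1E},
	{0x18, 0x1A, 0x1E, 0xFF},
	{0x1A, 0x1E, 0xFF, 0xFF},
	{0x1E, 0xFF, 0xFF, 0xFF},
};

int main(void)
{
	int v_level = 1, p_level = 2;	/* assumed example request from the sink */
	uint8_t pe = vm_pre_emphasis[v_level][p_level];
	uint8_t sw = vm_voltage_swing[v_level][p_level];

	if (pe != 0xFF && sw != 0xFF)
		printf("program PHY: pe=%#x sw=%#x\n", (unsigned)pe, (unsigned)sw);
	else
		printf("combination not supported\n");
	return 0;
}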
-static int edp_start_link_train_1(struct edp_ctrl *ctrl)
-{
- u8 link_status[DP_LINK_STATUS_SIZE];
- u8 old_v_level;
- int tries;
- int ret;
- int rlen;
-
- DBG("");
-
- edp_host_train_set(ctrl, DP_TRAINING_PATTERN_1);
- ret = edp_voltage_pre_emphasise_set(ctrl);
- if (ret)
- return ret;
- ret = edp_train_pattern_set_write(ctrl,
- DP_TRAINING_PATTERN_1 | DP_RECOVERED_CLOCK_OUT_EN);
- if (ret)
- return ret;
-
- tries = 0;
- old_v_level = ctrl->v_level;
- while (1) {
- drm_dp_link_train_clock_recovery_delay(ctrl->drm_aux, ctrl->dpcd);
-
- rlen = drm_dp_dpcd_read_link_status(ctrl->drm_aux, link_status);
- if (rlen < DP_LINK_STATUS_SIZE) {
- pr_err("%s: read link status failed\n", __func__);
- return -ENOLINK;
- }
- if (drm_dp_clock_recovery_ok(link_status, ctrl->lane_cnt)) {
- ret = 0;
- break;
- }
-
- if (ctrl->v_level == DPCD_LINK_VOLTAGE_MAX) {
- ret = -1;
- break;
- }
-
- if (old_v_level == ctrl->v_level) {
- tries++;
- if (tries >= 5) {
- ret = -1;
- break;
- }
- } else {
- tries = 0;
- old_v_level = ctrl->v_level;
- }
-
- edp_sink_train_set_adjust(ctrl, link_status);
- ret = edp_voltage_pre_emphasise_set(ctrl);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
-static int edp_start_link_train_2(struct edp_ctrl *ctrl)
-{
- u8 link_status[DP_LINK_STATUS_SIZE];
- int tries = 0;
- int ret;
- int rlen;
-
- DBG("");
-
- edp_host_train_set(ctrl, DP_TRAINING_PATTERN_2);
- ret = edp_voltage_pre_emphasise_set(ctrl);
- if (ret)
- return ret;
-
- ret = edp_train_pattern_set_write(ctrl,
- DP_TRAINING_PATTERN_2 | DP_RECOVERED_CLOCK_OUT_EN);
- if (ret)
- return ret;
-
- while (1) {
- drm_dp_link_train_channel_eq_delay(ctrl->drm_aux, ctrl->dpcd);
-
- rlen = drm_dp_dpcd_read_link_status(ctrl->drm_aux, link_status);
- if (rlen < DP_LINK_STATUS_SIZE) {
- pr_err("%s: read link status failed\n", __func__);
- return -ENOLINK;
- }
- if (drm_dp_channel_eq_ok(link_status, ctrl->lane_cnt)) {
- ret = 0;
- break;
- }
-
- tries++;
- if (tries > 10) {
- ret = -1;
- break;
- }
-
- edp_sink_train_set_adjust(ctrl, link_status);
- ret = edp_voltage_pre_emphasise_set(ctrl);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
-static int edp_link_rate_down_shift(struct edp_ctrl *ctrl)
-{
- u32 prate, lrate, bpp;
- u8 rate, lane, max_lane;
- int changed = 0;
-
- rate = ctrl->link_rate;
- lane = ctrl->lane_cnt;
- max_lane = drm_dp_max_lane_count(ctrl->dpcd);
-
- bpp = ctrl->color_depth * 3;
- prate = ctrl->pixel_rate;
- prate *= bpp;
- prate /= 8; /* in kByte */
-
- if (rate > DP_LINK_BW_1_62 && rate <= EDP_LINK_BW_MAX) {
- rate -= 4; /* reduce rate */
- changed++;
- }
-
- if (changed) {
- if (lane >= 1 && lane < max_lane)
- lane <<= 1; /* increase lane */
-
- lrate = 270000; /* in kHz */
- lrate *= rate;
- lrate /= 10; /* kByte, 10 bits --> 8 bits */
- lrate *= lane;
-
- DBG("new lrate=%u prate=%u(kHz) rate=%d lane=%d p=%u b=%d",
- lrate, prate, rate, lane,
- ctrl->pixel_rate,
- bpp);
-
- if (lrate > prate) {
- ctrl->link_rate = rate;
- ctrl->lane_cnt = lane;
- DBG("new rate=%d %d", rate, lane);
- return 0;
- }
- }
-
- return -EINVAL;
-}
-
-static int edp_clear_training_pattern(struct edp_ctrl *ctrl)
-{
- int ret;
-
- ret = edp_train_pattern_set_write(ctrl, 0);
-
- drm_dp_link_train_channel_eq_delay(ctrl->drm_aux, ctrl->dpcd);
-
- return ret;
-}
-
-static int edp_do_link_train(struct edp_ctrl *ctrl)
-{
- u8 values[2];
- int ret;
-
- DBG("");
- /*
- * Write the current link rate and lane count to the panel. They may
- * have been adjusted, so the values can differ from those in the DPCD caps.
- */
- values[0] = ctrl->lane_cnt;
- values[1] = ctrl->link_rate;
-
- if (drm_dp_enhanced_frame_cap(ctrl->dpcd))
- values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
-
- if (drm_dp_dpcd_write(ctrl->drm_aux, DP_LINK_BW_SET, values,
- sizeof(values)) < 0)
- return EDP_TRAIN_FAIL;
-
- ctrl->v_level = 0; /* start from default level */
- ctrl->p_level = 0;
-
- edp_state_ctrl(ctrl, 0);
- if (edp_clear_training_pattern(ctrl))
- return EDP_TRAIN_FAIL;
-
- ret = edp_start_link_train_1(ctrl);
- if (ret < 0) {
- if (edp_link_rate_down_shift(ctrl) == 0) {
- DBG("link reconfig");
- ret = EDP_TRAIN_RECONFIG;
- goto clear;
- } else {
- pr_err("%s: Training 1 failed", __func__);
- ret = EDP_TRAIN_FAIL;
- goto clear;
- }
- }
- DBG("Training 1 completed successfully");
-
- edp_state_ctrl(ctrl, 0);
- if (edp_clear_training_pattern(ctrl))
- return EDP_TRAIN_FAIL;
-
- ret = edp_start_link_train_2(ctrl);
- if (ret < 0) {
- if (edp_link_rate_down_shift(ctrl) == 0) {
- DBG("link reconfig");
- ret = EDP_TRAIN_RECONFIG;
- goto clear;
- } else {
- pr_err("%s: Training 2 failed", __func__);
- ret = EDP_TRAIN_FAIL;
- goto clear;
- }
- }
- DBG("Training 2 completed successfully");
-
- edp_state_ctrl(ctrl, EDP_STATE_CTRL_SEND_VIDEO);
-clear:
- edp_clear_training_pattern(ctrl);
-
- return ret;
-}
-
-static void edp_clock_synchrous(struct edp_ctrl *ctrl, int sync)
-{
- u32 data;
- enum edp_color_depth depth;
-
- data = edp_read(ctrl->base + REG_EDP_MISC1_MISC0);
-
- if (sync)
- data |= EDP_MISC1_MISC0_SYNC;
- else
- data &= ~EDP_MISC1_MISC0_SYNC;
-
- /* only legacy rgb mode supported */
- depth = EDP_6BIT; /* Default */
- if (ctrl->color_depth == 8)
- depth = EDP_8BIT;
- else if (ctrl->color_depth == 10)
- depth = EDP_10BIT;
- else if (ctrl->color_depth == 12)
- depth = EDP_12BIT;
- else if (ctrl->color_depth == 16)
- depth = EDP_16BIT;
-
- data |= EDP_MISC1_MISC0_COLOR(depth);
-
- edp_write(ctrl->base + REG_EDP_MISC1_MISC0, data);
-}
-
-static int edp_sw_mvid_nvid(struct edp_ctrl *ctrl, u32 m, u32 n)
-{
- u32 n_multi, m_multi = 5;
-
- if (ctrl->link_rate == DP_LINK_BW_1_62) {
- n_multi = 1;
- } else if (ctrl->link_rate == DP_LINK_BW_2_7) {
- n_multi = 2;
- } else {
- pr_err("%s: Invalid link rate, %d\n", __func__,
- ctrl->link_rate);
- return -EINVAL;
- }
-
- edp_write(ctrl->base + REG_EDP_SOFTWARE_MVID, m * m_multi);
- edp_write(ctrl->base + REG_EDP_SOFTWARE_NVID, n * n_multi);
-
- return 0;
-}
-
-static void edp_mainlink_ctrl(struct edp_ctrl *ctrl, int enable)
-{
- u32 data = 0;
-
- edp_write(ctrl->base + REG_EDP_MAINLINK_CTRL, EDP_MAINLINK_CTRL_RESET);
- /* Make sure fully reset */
- wmb();
- usleep_range(500, 1000);
-
- if (enable)
- data |= EDP_MAINLINK_CTRL_ENABLE;
-
- edp_write(ctrl->base + REG_EDP_MAINLINK_CTRL, data);
-}
-
-static void edp_ctrl_phy_aux_enable(struct edp_ctrl *ctrl, int enable)
-{
- if (enable) {
- edp_regulator_enable(ctrl);
- edp_clk_enable(ctrl, EDP_CLK_MASK_AUX_CHAN);
- msm_edp_phy_ctrl(ctrl->phy, 1);
- msm_edp_aux_ctrl(ctrl->aux, 1);
- gpiod_set_value(ctrl->panel_en_gpio, 1);
- } else {
- gpiod_set_value(ctrl->panel_en_gpio, 0);
- msm_edp_aux_ctrl(ctrl->aux, 0);
- msm_edp_phy_ctrl(ctrl->phy, 0);
- edp_clk_disable(ctrl, EDP_CLK_MASK_AUX_CHAN);
- edp_regulator_disable(ctrl);
- }
-}
-
-static void edp_ctrl_link_enable(struct edp_ctrl *ctrl, int enable)
-{
- u32 m, n;
-
- if (enable) {
- /* Enable link channel clocks */
- edp_clk_enable(ctrl, EDP_CLK_MASK_LINK_CHAN);
-
- msm_edp_phy_lane_power_ctrl(ctrl->phy, true, ctrl->lane_cnt);
-
- msm_edp_phy_vm_pe_init(ctrl->phy);
-
- /* Make sure phy is programmed */
- wmb();
- msm_edp_phy_ready(ctrl->phy);
-
- edp_config_ctrl(ctrl);
- msm_edp_ctrl_pixel_clock_valid(ctrl, ctrl->pixel_rate, &m, &n);
- edp_sw_mvid_nvid(ctrl, m, n);
- edp_mainlink_ctrl(ctrl, 1);
- } else {
- edp_mainlink_ctrl(ctrl, 0);
-
- msm_edp_phy_lane_power_ctrl(ctrl->phy, false, 0);
- edp_clk_disable(ctrl, EDP_CLK_MASK_LINK_CHAN);
- }
-}
-
-static int edp_ctrl_training(struct edp_ctrl *ctrl)
-{
- int ret;
-
- /* Do link training only when power is on */
- if (!ctrl->power_on)
- return -EINVAL;
-
-train_start:
- ret = edp_do_link_train(ctrl);
- if (ret == EDP_TRAIN_RECONFIG) {
- /* Re-configure main link */
- edp_ctrl_irq_enable(ctrl, 0);
- edp_ctrl_link_enable(ctrl, 0);
- msm_edp_phy_ctrl(ctrl->phy, 0);
-
- /* Make sure link is fully disabled */
- wmb();
- usleep_range(500, 1000);
-
- msm_edp_phy_ctrl(ctrl->phy, 1);
- edp_ctrl_link_enable(ctrl, 1);
- edp_ctrl_irq_enable(ctrl, 1);
- goto train_start;
- }
-
- return ret;
-}
-
-static void edp_ctrl_on_worker(struct work_struct *work)
-{
- struct edp_ctrl *ctrl = container_of(
- work, struct edp_ctrl, on_work);
- u8 value;
- int ret;
-
- mutex_lock(&ctrl->dev_mutex);
-
- if (ctrl->power_on) {
- DBG("already on");
- goto unlock_ret;
- }
-
- edp_ctrl_phy_aux_enable(ctrl, 1);
- edp_ctrl_link_enable(ctrl, 1);
-
- edp_ctrl_irq_enable(ctrl, 1);
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (ctrl->dpcd[DP_DPCD_REV] >= 0x11) {
- ret = drm_dp_dpcd_readb(ctrl->drm_aux, DP_SET_POWER, &value);
- if (ret < 0)
- goto fail;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D0;
-
- ret = drm_dp_dpcd_writeb(ctrl->drm_aux, DP_SET_POWER, value);
- if (ret < 0)
- goto fail;
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must
- * exit the power saving state within 1 ms" (Section 2.5.3.1,
- * Table 5-52, "Sink Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
- }
-
- ctrl->power_on = true;
-
- /* Start link training */
- ret = edp_ctrl_training(ctrl);
- if (ret != EDP_TRAIN_SUCCESS)
- goto fail;
-
- DBG("DONE");
- goto unlock_ret;
-
-fail:
- edp_ctrl_irq_enable(ctrl, 0);
- edp_ctrl_link_enable(ctrl, 0);
- edp_ctrl_phy_aux_enable(ctrl, 0);
- ctrl->power_on = false;
-unlock_ret:
- mutex_unlock(&ctrl->dev_mutex);
-}
-
-static void edp_ctrl_off_worker(struct work_struct *work)
-{
- struct edp_ctrl *ctrl = container_of(
- work, struct edp_ctrl, off_work);
- unsigned long time_left;
-
- mutex_lock(&ctrl->dev_mutex);
-
- if (!ctrl->power_on) {
- DBG("already off");
- goto unlock_ret;
- }
-
- reinit_completion(&ctrl->idle_comp);
- edp_state_ctrl(ctrl, EDP_STATE_CTRL_PUSH_IDLE);
-
- time_left = wait_for_completion_timeout(&ctrl->idle_comp,
- msecs_to_jiffies(500));
- if (!time_left)
- DBG("%s: idle pattern timedout\n", __func__);
-
- edp_state_ctrl(ctrl, 0);
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (ctrl->dpcd[DP_DPCD_REV] >= 0x11) {
- u8 value;
- int ret;
-
- ret = drm_dp_dpcd_readb(ctrl->drm_aux, DP_SET_POWER, &value);
- if (ret > 0) {
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D3;
-
- drm_dp_dpcd_writeb(ctrl->drm_aux, DP_SET_POWER, value);
- }
- }
-
- edp_ctrl_irq_enable(ctrl, 0);
-
- edp_ctrl_link_enable(ctrl, 0);
-
- edp_ctrl_phy_aux_enable(ctrl, 0);
-
- ctrl->power_on = false;
-
-unlock_ret:
- mutex_unlock(&ctrl->dev_mutex);
-}
-
-irqreturn_t msm_edp_ctrl_irq(struct edp_ctrl *ctrl)
-{
- u32 isr1, isr2, mask1, mask2;
- u32 ack;
-
- DBG("");
- spin_lock(&ctrl->irq_lock);
- isr1 = edp_read(ctrl->base + REG_EDP_INTERRUPT_REG_1);
- isr2 = edp_read(ctrl->base + REG_EDP_INTERRUPT_REG_2);
-
- mask1 = isr1 & EDP_INTR_MASK1;
- mask2 = isr2 & EDP_INTR_MASK2;
-
- isr1 &= ~mask1; /* remove masks bit */
- isr2 &= ~mask2;
-
- DBG("isr=%x mask=%x isr2=%x mask2=%x",
- isr1, mask1, isr2, mask2);
-
- ack = isr1 & EDP_INTR_STATUS1;
- ack <<= 1; /* ack bits */
- ack |= mask1;
- edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_1, ack);
-
- ack = isr2 & EDP_INTR_STATUS2;
- ack <<= 1; /* ack bits */
- ack |= mask2;
- edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_2, ack);
- spin_unlock(&ctrl->irq_lock);
-
- if (isr1 & EDP_INTERRUPT_REG_1_HPD)
- DBG("edp_hpd");
-
- if (isr2 & EDP_INTERRUPT_REG_2_READY_FOR_VIDEO)
- DBG("edp_video_ready");
-
- if (isr2 & EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT) {
- DBG("idle_patterns_sent");
- complete(&ctrl->idle_comp);
- }
-
- msm_edp_aux_irq(ctrl->aux, isr1);
-
- return IRQ_HANDLED;
-}
-
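A standalone sketch (plain C; the HPD bit is used as the example event) of the status/ack/enable bit triplet layout that msm_edp_ctrl_irq() above relies on when it masks, acks and re-arms interrupts:

/* Each event uses three consecutive bits: status, ack = status << 1, enable = status << 2. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t status_bit = 1 << 0;		/* e.g. EDP_INTERRUPT_REG_1_HPD */
	uint32_t mask = status_bit << 2;	/* enable bit kept set */
	uint32_t isr = status_bit | mask;	/* raw read: event + enable */

	uint32_t pending = isr & ~mask;			/* strip enable bits */
	uint32_t ack = ((pending & status_bit) << 1) | mask;	/* ack event, keep enabled */

	printf("pending=%#x ack write=%#x\n", pending, ack);
	return 0;
}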
-void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on)
-{
- if (on)
- queue_work(ctrl->workqueue, &ctrl->on_work);
- else
- queue_work(ctrl->workqueue, &ctrl->off_work);
-}
-
-int msm_edp_ctrl_init(struct msm_edp *edp)
-{
- struct edp_ctrl *ctrl = NULL;
- struct device *dev;
- int ret;
-
- if (!edp) {
- pr_err("%s: edp is NULL!\n", __func__);
- return -EINVAL;
- }
-
- dev = &edp->pdev->dev;
- ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
- if (!ctrl)
- return -ENOMEM;
-
- edp->ctrl = ctrl;
- ctrl->pdev = edp->pdev;
-
- ctrl->base = msm_ioremap(ctrl->pdev, "edp", "eDP");
- if (IS_ERR(ctrl->base))
- return PTR_ERR(ctrl->base);
-
- /* Get regulator, clock, gpio, pwm */
- ret = edp_regulator_init(ctrl);
- if (ret) {
- pr_err("%s:regulator init fail\n", __func__);
- return ret;
- }
- ret = edp_clk_init(ctrl);
- if (ret) {
- pr_err("%s:clk init fail\n", __func__);
- return ret;
- }
- ret = edp_gpio_config(ctrl);
- if (ret) {
- pr_err("%s:failed to configure GPIOs: %d", __func__, ret);
- return ret;
- }
-
- /* Init aux and phy */
- ctrl->aux = msm_edp_aux_init(edp, ctrl->base, &ctrl->drm_aux);
- if (!ctrl->aux || !ctrl->drm_aux) {
- pr_err("%s:failed to init aux\n", __func__);
- return -ENOMEM;
- }
-
- ctrl->phy = msm_edp_phy_init(dev, ctrl->base);
- if (!ctrl->phy) {
- pr_err("%s:failed to init phy\n", __func__);
- ret = -ENOMEM;
- goto err_destroy_aux;
- }
-
- spin_lock_init(&ctrl->irq_lock);
- mutex_init(&ctrl->dev_mutex);
- init_completion(&ctrl->idle_comp);
-
- /* setup workqueue */
- ctrl->workqueue = alloc_ordered_workqueue("edp_drm_work", 0);
- INIT_WORK(&ctrl->on_work, edp_ctrl_on_worker);
- INIT_WORK(&ctrl->off_work, edp_ctrl_off_worker);
-
- return 0;
-
-err_destroy_aux:
- msm_edp_aux_destroy(dev, ctrl->aux);
- ctrl->aux = NULL;
- return ret;
-}
-
-void msm_edp_ctrl_destroy(struct edp_ctrl *ctrl)
-{
- if (!ctrl)
- return;
-
- if (ctrl->workqueue) {
- destroy_workqueue(ctrl->workqueue);
- ctrl->workqueue = NULL;
- }
-
- if (ctrl->aux) {
- msm_edp_aux_destroy(&ctrl->pdev->dev, ctrl->aux);
- ctrl->aux = NULL;
- }
-
- kfree(ctrl->edid);
- ctrl->edid = NULL;
-
- mutex_destroy(&ctrl->dev_mutex);
-}
-
-bool msm_edp_ctrl_panel_connected(struct edp_ctrl *ctrl)
-{
- mutex_lock(&ctrl->dev_mutex);
- DBG("connect status = %d", ctrl->edp_connected);
- if (ctrl->edp_connected) {
- mutex_unlock(&ctrl->dev_mutex);
- return true;
- }
-
- if (!ctrl->power_on) {
- edp_ctrl_phy_aux_enable(ctrl, 1);
- edp_ctrl_irq_enable(ctrl, 1);
- }
-
- if (drm_dp_dpcd_read(ctrl->drm_aux, DP_DPCD_REV, ctrl->dpcd,
- DP_RECEIVER_CAP_SIZE) < DP_RECEIVER_CAP_SIZE) {
- pr_err("%s: AUX channel is NOT ready\n", __func__);
- memset(ctrl->dpcd, 0, DP_RECEIVER_CAP_SIZE);
- } else {
- ctrl->edp_connected = true;
- }
-
- if (!ctrl->power_on) {
- edp_ctrl_irq_enable(ctrl, 0);
- edp_ctrl_phy_aux_enable(ctrl, 0);
- }
-
- DBG("exit: connect status=%d", ctrl->edp_connected);
-
- mutex_unlock(&ctrl->dev_mutex);
-
- return ctrl->edp_connected;
-}
-
-int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl,
- struct drm_connector *connector, struct edid **edid)
-{
- mutex_lock(&ctrl->dev_mutex);
-
- if (ctrl->edid) {
- if (edid) {
- DBG("Just return edid buffer");
- *edid = ctrl->edid;
- }
- goto unlock_ret;
- }
-
- if (!ctrl->power_on) {
- edp_ctrl_phy_aux_enable(ctrl, 1);
- edp_ctrl_irq_enable(ctrl, 1);
- }
-
- /* Initialize link rate as panel max link rate */
- ctrl->link_rate = ctrl->dpcd[DP_MAX_LINK_RATE];
-
- ctrl->edid = drm_get_edid(connector, &ctrl->drm_aux->ddc);
- if (!ctrl->edid) {
- pr_err("%s: edid read fail\n", __func__);
- goto disable_ret;
- }
-
- if (edid)
- *edid = ctrl->edid;
-
-disable_ret:
- if (!ctrl->power_on) {
- edp_ctrl_irq_enable(ctrl, 0);
- edp_ctrl_phy_aux_enable(ctrl, 0);
- }
-unlock_ret:
- mutex_unlock(&ctrl->dev_mutex);
- return 0;
-}
-
-int msm_edp_ctrl_timing_cfg(struct edp_ctrl *ctrl,
- const struct drm_display_mode *mode,
- const struct drm_display_info *info)
-{
- u32 hstart_from_sync, vstart_from_sync;
- u32 data;
- int ret = 0;
-
- mutex_lock(&ctrl->dev_mutex);
- /*
- * Need to keep color depth, pixel rate and
- * interlaced information in ctrl context
- */
- ctrl->color_depth = info->bpc;
- ctrl->pixel_rate = mode->clock;
- ctrl->interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
-
- /* Fill initial link config based on passed in timing */
- edp_fill_link_cfg(ctrl);
-
- if (edp_clk_enable(ctrl, EDP_CLK_MASK_AHB)) {
- pr_err("%s, fail to prepare enable ahb clk\n", __func__);
- ret = -EINVAL;
- goto unlock_ret;
- }
- edp_clock_synchrous(ctrl, 1);
-
- /* Configure eDP timing to HW */
- edp_write(ctrl->base + REG_EDP_TOTAL_HOR_VER,
- EDP_TOTAL_HOR_VER_HORIZ(mode->htotal) |
- EDP_TOTAL_HOR_VER_VERT(mode->vtotal));
-
- vstart_from_sync = mode->vtotal - mode->vsync_start;
- hstart_from_sync = mode->htotal - mode->hsync_start;
- edp_write(ctrl->base + REG_EDP_START_HOR_VER_FROM_SYNC,
- EDP_START_HOR_VER_FROM_SYNC_HORIZ(hstart_from_sync) |
- EDP_START_HOR_VER_FROM_SYNC_VERT(vstart_from_sync));
-
- data = EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT(
- mode->vsync_end - mode->vsync_start);
- data |= EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ(
- mode->hsync_end - mode->hsync_start);
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- data |= EDP_HSYNC_VSYNC_WIDTH_POLARITY_NVSYNC;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- data |= EDP_HSYNC_VSYNC_WIDTH_POLARITY_NHSYNC;
- edp_write(ctrl->base + REG_EDP_HSYNC_VSYNC_WIDTH_POLARITY, data);
-
- edp_write(ctrl->base + REG_EDP_ACTIVE_HOR_VER,
- EDP_ACTIVE_HOR_VER_HORIZ(mode->hdisplay) |
- EDP_ACTIVE_HOR_VER_VERT(mode->vdisplay));
-
- edp_clk_disable(ctrl, EDP_CLK_MASK_AHB);
-
-unlock_ret:
- mutex_unlock(&ctrl->dev_mutex);
- return ret;
-}
-
-bool msm_edp_ctrl_pixel_clock_valid(struct edp_ctrl *ctrl,
- u32 pixel_rate, u32 *pm, u32 *pn)
-{
- const struct edp_pixel_clk_div *divs;
- u32 err = 1; /* 1% error tolerance */
- u32 clk_err;
- int i;
-
- if (ctrl->link_rate == DP_LINK_BW_1_62) {
- divs = clk_divs[0];
- } else if (ctrl->link_rate == DP_LINK_BW_2_7) {
- divs = clk_divs[1];
- } else {
- pr_err("%s: Invalid link rate,%d\n", __func__, ctrl->link_rate);
- return false;
- }
-
- for (i = 0; i < EDP_PIXEL_CLK_NUM; i++) {
- clk_err = abs(divs[i].rate - pixel_rate);
- if ((divs[i].rate * err / 100) >= clk_err) {
- if (pm)
- *pm = divs[i].m;
- if (pn)
- *pn = divs[i].n;
- return true;
- }
- }
-
- DBG("pixel clock %d(kHz) not supported", pixel_rate);
-
- return false;
-}
-
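A standalone sketch (plain C; the requested 148000 kHz rate is an assumed example) of the 1% tolerance match msm_edp_ctrl_pixel_clock_valid() above performs against one row of the divider table:

/* One table row: the 148500 kHz / m=11 / n=60 entry of the 162 MHz link table. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	uint32_t rate = 148500, m = 11, n = 60;
	uint32_t pixel_rate = 148000;		/* assumed requested rate in kHz */
	uint32_t err_percent = 1;

	uint32_t clk_err = abs((int)rate - (int)pixel_rate);

	if (rate * err_percent / 100 >= clk_err)
		printf("match: m=%u n=%u (err %u kHz)\n", m, n, clk_err);
	else
		printf("no match\n");
	return 0;
}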
diff --git a/drivers/gpu/drm/msm/edp/edp_phy.c b/drivers/gpu/drm/msm/edp/edp_phy.c
deleted file mode 100644
index fcaf7b7ecdd2..000000000000
--- a/drivers/gpu/drm/msm/edp/edp_phy.c
+++ /dev/null
@@ -1,98 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- */
-
-#include "edp.h"
-#include "edp.xml.h"
-
-#define EDP_MAX_LANE 4
-
-struct edp_phy {
- void __iomem *base;
-};
-
-bool msm_edp_phy_ready(struct edp_phy *phy)
-{
- u32 status;
- int cnt = 100;
-
- while (--cnt) {
- status = edp_read(phy->base +
- REG_EDP_PHY_GLB_PHY_STATUS);
- if (status & 0x01)
- break;
- usleep_range(500, 1000);
- }
-
- if (cnt == 0) {
- pr_err("%s: PHY NOT ready\n", __func__);
- return false;
- } else {
- return true;
- }
-}
-
-void msm_edp_phy_ctrl(struct edp_phy *phy, int enable)
-{
- DBG("enable=%d", enable);
- if (enable) {
- /* Reset */
- edp_write(phy->base + REG_EDP_PHY_CTRL,
- EDP_PHY_CTRL_SW_RESET | EDP_PHY_CTRL_SW_RESET_PLL);
- /* Make sure fully reset */
- wmb();
- usleep_range(500, 1000);
- edp_write(phy->base + REG_EDP_PHY_CTRL, 0x000);
- edp_write(phy->base + REG_EDP_PHY_GLB_PD_CTL, 0x3f);
- edp_write(phy->base + REG_EDP_PHY_GLB_CFG, 0x1);
- } else {
- edp_write(phy->base + REG_EDP_PHY_GLB_PD_CTL, 0xc0);
- }
-}
-
-/* voltage mode and pre emphasis cfg */
-void msm_edp_phy_vm_pe_init(struct edp_phy *phy)
-{
- edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG0, 0x3);
- edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG1, 0x64);
- edp_write(phy->base + REG_EDP_PHY_GLB_MISC9, 0x6c);
-}
-
-void msm_edp_phy_vm_pe_cfg(struct edp_phy *phy, u32 v0, u32 v1)
-{
- edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG0, v0);
- edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG1, v1);
-}
-
-void msm_edp_phy_lane_power_ctrl(struct edp_phy *phy, bool up, u32 max_lane)
-{
- u32 i;
- u32 data;
-
- if (up)
- data = 0; /* power up */
- else
- data = 0x7; /* power down */
-
- for (i = 0; i < max_lane; i++)
- edp_write(phy->base + REG_EDP_PHY_LN_PD_CTL(i) , data);
-
- /* power down unused lane */
- data = 0x7; /* power down */
- for (i = max_lane; i < EDP_MAX_LANE; i++)
- edp_write(phy->base + REG_EDP_PHY_LN_PD_CTL(i) , data);
-}
-
-void *msm_edp_phy_init(struct device *dev, void __iomem *regbase)
-{
- struct edp_phy *phy = NULL;
-
- phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
- if (!phy)
- return NULL;
-
- phy->base = regbase;
- return phy;
-}
-
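msm_edp_phy_ready() above is a bounded register poll: read REG_EDP_PHY_GLB_PHY_STATUS up to ~100 times, sleeping 500-1000us between reads, until bit 0 reports the PHY ready. A hedged sketch of the same wait expressed with the generic readx_poll_timeout() helper from <linux/iopoll.h>; it assumes edp_read() takes the register address, as in the deleted driver, and picks a 100ms total timeout to roughly match the original loop.

#include <linux/iopoll.h>

static bool edp_phy_ready(struct edp_phy *phy)
{
	u32 status;

	/* poll bit 0 every ~500us, give up after roughly 100ms */
	return readx_poll_timeout(edp_read,
				  phy->base + REG_EDP_PHY_GLB_PHY_STATUS,
				  status, status & 0x01,
				  500, 100 * 1000) == 0;
}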
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 75b64e6ae035..3acdeae25caf 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -8,6 +8,8 @@
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
+#include <drm/drm_bridge_connector.h>
+
#include <sound/hdmi-codec.h>
#include "hdmi.h"
@@ -41,7 +43,7 @@ static irqreturn_t msm_hdmi_irq(int irq, void *dev_id)
struct hdmi *hdmi = dev_id;
/* Process HPD: */
- msm_hdmi_connector_irq(hdmi->connector);
+ msm_hdmi_hpd_irq(hdmi->bridge);
/* Process DDC: */
msm_hdmi_i2c_irq(hdmi->i2c);
@@ -281,7 +283,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
- hdmi->connector = msm_hdmi_connector_init(hdmi);
+ hdmi->connector = drm_bridge_connector_init(hdmi->dev, encoder);
if (IS_ERR(hdmi->connector)) {
ret = PTR_ERR(hdmi->connector);
DRM_DEV_ERROR(dev->dev, "failed to create HDMI connector: %d\n", ret);
@@ -289,6 +291,8 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
+ drm_connector_attach_encoder(hdmi->connector, hdmi->encoder);
+
hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (hdmi->irq < 0) {
ret = hdmi->irq;
@@ -305,7 +309,9 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
- ret = msm_hdmi_hpd_enable(hdmi->connector);
+ drm_bridge_connector_enable_hpd(hdmi->connector);
+
+ ret = msm_hdmi_hpd_enable(hdmi->bridge);
if (ret < 0) {
DRM_DEV_ERROR(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
goto fail;
@@ -514,8 +520,7 @@ static int msm_hdmi_register_audio_driver(struct hdmi *hdmi, struct device *dev)
static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
{
- struct drm_device *drm = dev_get_drvdata(master);
- struct msm_drm_private *priv = drm->dev_private;
+ struct msm_drm_private *priv = dev_get_drvdata(master);
struct hdmi_platform_config *hdmi_cfg;
struct hdmi *hdmi;
struct device_node *of_node = dev->of_node;
@@ -586,8 +591,8 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
static void msm_hdmi_unbind(struct device *dev, struct device *master,
void *data)
{
- struct drm_device *drm = dev_get_drvdata(master);
- struct msm_drm_private *priv = drm->dev_private;
+ struct msm_drm_private *priv = dev_get_drvdata(master);
+
if (priv->hdmi) {
if (priv->hdmi->audio_pdev)
platform_device_unregister(priv->hdmi->audio_pdev);
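Taken together with the hdmi_bridge.c hunks below, the modeset_init changes above switch the driver from a hand-rolled drm_connector to the generic bridge-connector: the bridge advertises HPD/detect/EDID support, is attached with DRM_BRIDGE_ATTACH_NO_CONNECTOR, and the connector is then built from the bridge chain by drm_bridge_connector_init(). The sketch below distils those steps into one hypothetical helper (msm_hdmi_attach_bridge_connector() is not a real function, and error paths are trimmed).

static int msm_hdmi_attach_bridge_connector(struct hdmi *hdmi,
					    struct drm_encoder *encoder)
{
	struct drm_bridge *bridge = hdmi->bridge;
	int ret;

	bridge->funcs = &msm_hdmi_bridge_funcs;	/* supplies .detect/.get_edid */
	bridge->ddc = hdmi->i2c;
	bridge->type = DRM_MODE_CONNECTOR_HDMIA;
	bridge->ops = DRM_BRIDGE_OP_HPD |
		      DRM_BRIDGE_OP_DETECT |
		      DRM_BRIDGE_OP_EDID;

	/* the bridge no longer registers a connector of its own ... */
	ret = drm_bridge_attach(encoder, bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret)
		return ret;

	/* ... the helper creates one from the bridge chain instead */
	hdmi->connector = drm_bridge_connector_init(hdmi->dev, encoder);
	if (IS_ERR(hdmi->connector))
		return PTR_ERR(hdmi->connector);

	drm_connector_attach_encoder(hdmi->connector, encoder);
	drm_bridge_connector_enable_hpd(hdmi->connector);

	return 0;
}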
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 82261078c6b1..736f348befb3 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -114,6 +114,13 @@ struct hdmi_platform_config {
struct hdmi_gpio_data gpios[HDMI_MAX_NUM_GPIO];
};
+struct hdmi_bridge {
+ struct drm_bridge base;
+ struct hdmi *hdmi;
+ struct work_struct hpd_work;
+};
+#define to_hdmi_bridge(x) container_of(x, struct hdmi_bridge, base)
+
void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on);
static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data)
@@ -230,13 +237,11 @@ void msm_hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate);
struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi);
void msm_hdmi_bridge_destroy(struct drm_bridge *bridge);
-/*
- * hdmi connector:
- */
-
-void msm_hdmi_connector_irq(struct drm_connector *connector);
-struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi);
-int msm_hdmi_hpd_enable(struct drm_connector *connector);
+void msm_hdmi_hpd_irq(struct drm_bridge *bridge);
+enum drm_connector_status msm_hdmi_bridge_detect(
+ struct drm_bridge *bridge);
+int msm_hdmi_hpd_enable(struct drm_bridge *bridge);
+void msm_hdmi_hpd_disable(struct hdmi_bridge *hdmi_bridge);
/*
* i2c adapter for ddc:
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index f04eb4a70f0d..68fba4bf7212 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -5,17 +5,16 @@
*/
#include <linux/delay.h>
+#include <drm/drm_bridge_connector.h>
+#include "msm_kms.h"
#include "hdmi.h"
-struct hdmi_bridge {
- struct drm_bridge base;
- struct hdmi *hdmi;
-};
-#define to_hdmi_bridge(x) container_of(x, struct hdmi_bridge, base)
-
void msm_hdmi_bridge_destroy(struct drm_bridge *bridge)
{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+
+ msm_hdmi_hpd_disable(hdmi_bridge);
}
static void msm_hdmi_power_on(struct drm_bridge *bridge)
@@ -70,7 +69,7 @@ static void power_off(struct drm_bridge *bridge)
if (ret)
DRM_DEV_ERROR(dev->dev, "failed to disable pwr regulator: %d\n", ret);
- pm_runtime_put_autosuspend(&hdmi->pdev->dev);
+ pm_runtime_put(&hdmi->pdev->dev);
}
#define AVI_IFRAME_LINE_NUMBER 1
@@ -251,14 +250,76 @@ static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge,
msm_hdmi_audio_update(hdmi);
}
+static struct edid *msm_hdmi_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ struct edid *edid;
+ uint32_t hdmi_ctrl;
+
+ hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
+ hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE);
+
+ edid = drm_get_edid(connector, hdmi->i2c);
+
+ hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
+
+ hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
+
+ return edid;
+}
+
+static enum drm_mode_status msm_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
+ struct msm_drm_private *priv = bridge->dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ long actual, requested;
+
+ requested = 1000 * mode->clock;
+ actual = kms->funcs->round_pixclk(kms,
+ requested, hdmi_bridge->hdmi->encoder);
+
+ /* for mdp5/apq8074, we manage our own pixel clk (as opposed to
+ * mdp4/dtv stuff where pixel clk is assigned to mdp/encoder
+ * instead):
+ */
+ if (config->pwr_clk_cnt > 0)
+ actual = clk_round_rate(hdmi->pwr_clks[0], actual);
+
+ DBG("requested=%ld, actual=%ld", requested, actual);
+
+ if (actual != requested)
+ return MODE_CLOCK_RANGE;
+
+ return 0;
+}
+
static const struct drm_bridge_funcs msm_hdmi_bridge_funcs = {
.pre_enable = msm_hdmi_bridge_pre_enable,
.enable = msm_hdmi_bridge_enable,
.disable = msm_hdmi_bridge_disable,
.post_disable = msm_hdmi_bridge_post_disable,
.mode_set = msm_hdmi_bridge_mode_set,
+ .mode_valid = msm_hdmi_bridge_mode_valid,
+ .get_edid = msm_hdmi_bridge_get_edid,
+ .detect = msm_hdmi_bridge_detect,
};
+static void
+msm_hdmi_hotplug_work(struct work_struct *work)
+{
+ struct hdmi_bridge *hdmi_bridge =
+ container_of(work, struct hdmi_bridge, hpd_work);
+ struct drm_bridge *bridge = &hdmi_bridge->base;
+
+ drm_bridge_hpd_notify(bridge, drm_bridge_detect(bridge));
+}
/* initialize bridge */
struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi)
@@ -275,11 +336,17 @@ struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi)
}
hdmi_bridge->hdmi = hdmi;
+ INIT_WORK(&hdmi_bridge->hpd_work, msm_hdmi_hotplug_work);
bridge = &hdmi_bridge->base;
bridge->funcs = &msm_hdmi_bridge_funcs;
+ bridge->ddc = hdmi->i2c;
+ bridge->type = DRM_MODE_CONNECTOR_HDMIA;
+ bridge->ops = DRM_BRIDGE_OP_HPD |
+ DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_EDID;
- ret = drm_bridge_attach(hdmi->encoder, bridge, NULL, 0);
+ ret = drm_bridge_attach(hdmi->encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
goto fail;
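msm_hdmi_bridge_mode_valid() above only accepts modes whose pixel clock the clock tree can produce exactly: DRM keeps mode->clock in kHz, so requested = 1000 * mode->clock is the rate in Hz that round_pixclk()/clk_round_rate() must hit. A worked example in comment form (the rounded value is made up for illustration):

/*
 * 1920x1080@60 CEA mode: mode->clock == 148500 (kHz)
 *   requested = 1000 * 148500 = 148,500,000 Hz
 * If the PLL can only round that to, say, 148,351,648 Hz, then
 * actual != requested and mode_valid returns MODE_CLOCK_RANGE,
 * filtering the mode out of the connector's mode list.
 */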
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
index a7f729cdec7b..75605ddac7c4 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
@@ -11,13 +11,6 @@
#include "msm_kms.h"
#include "hdmi.h"
-struct hdmi_connector {
- struct drm_connector base;
- struct hdmi *hdmi;
- struct work_struct hpd_work;
-};
-#define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base)
-
static void msm_hdmi_phy_reset(struct hdmi *hdmi)
{
unsigned int val;
@@ -139,10 +132,10 @@ static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
}
}
-int msm_hdmi_hpd_enable(struct drm_connector *connector)
+int msm_hdmi_hpd_enable(struct drm_bridge *bridge)
{
- struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
- struct hdmi *hdmi = hdmi_connector->hdmi;
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct device *dev = &hdmi->pdev->dev;
uint32_t hpd_ctrl;
@@ -199,9 +192,9 @@ fail:
return ret;
}
-static void hdp_disable(struct hdmi_connector *hdmi_connector)
+void msm_hdmi_hpd_disable(struct hdmi_bridge *hdmi_bridge)
{
- struct hdmi *hdmi = hdmi_connector->hdmi;
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct device *dev = &hdmi->pdev->dev;
int ret;
@@ -212,7 +205,7 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector)
msm_hdmi_set_mode(hdmi, false);
enable_hpd_clocks(hdmi, false);
- pm_runtime_put_autosuspend(dev);
+ pm_runtime_put(dev);
ret = gpio_config(hdmi, false);
if (ret)
@@ -227,19 +220,10 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector)
dev_warn(dev, "failed to disable hpd regulator: %d\n", ret);
}
-static void
-msm_hdmi_hotplug_work(struct work_struct *work)
-{
- struct hdmi_connector *hdmi_connector =
- container_of(work, struct hdmi_connector, hpd_work);
- struct drm_connector *connector = &hdmi_connector->base;
- drm_helper_hpd_irq_event(connector->dev);
-}
-
-void msm_hdmi_connector_irq(struct drm_connector *connector)
+void msm_hdmi_hpd_irq(struct drm_bridge *bridge)
{
- struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
- struct hdmi *hdmi = hdmi_connector->hdmi;
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
uint32_t hpd_int_status, hpd_int_ctrl;
/* Process HPD: */
@@ -262,7 +246,7 @@ void msm_hdmi_connector_irq(struct drm_connector *connector)
hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
- queue_work(hdmi->workq, &hdmi_connector->hpd_work);
+ queue_work(hdmi->workq, &hdmi_bridge->hpd_work);
}
}
@@ -276,7 +260,7 @@ static enum drm_connector_status detect_reg(struct hdmi *hdmi)
hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
enable_hpd_clocks(hdmi, false);
- pm_runtime_put_autosuspend(&hdmi->pdev->dev);
+ pm_runtime_put(&hdmi->pdev->dev);
return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ?
connector_status_connected : connector_status_disconnected;
@@ -293,11 +277,11 @@ static enum drm_connector_status detect_gpio(struct hdmi *hdmi)
connector_status_disconnected;
}
-static enum drm_connector_status hdmi_connector_detect(
- struct drm_connector *connector, bool force)
+enum drm_connector_status msm_hdmi_bridge_detect(
+ struct drm_bridge *bridge)
{
- struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
- struct hdmi *hdmi = hdmi_connector->hdmi;
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct hdmi_gpio_data hpd_gpio = config->gpios[HPD_GPIO_INDEX];
enum drm_connector_status stat_gpio, stat_reg;
@@ -331,115 +315,3 @@ static enum drm_connector_status hdmi_connector_detect(
return stat_gpio;
}
-
-static void hdmi_connector_destroy(struct drm_connector *connector)
-{
- struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
-
- hdp_disable(hdmi_connector);
-
- drm_connector_cleanup(connector);
-
- kfree(hdmi_connector);
-}
-
-static int msm_hdmi_connector_get_modes(struct drm_connector *connector)
-{
- struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
- struct hdmi *hdmi = hdmi_connector->hdmi;
- struct edid *edid;
- uint32_t hdmi_ctrl;
- int ret = 0;
-
- hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
- hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE);
-
- edid = drm_get_edid(connector, hdmi->i2c);
-
- hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
-
- hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
- drm_connector_update_edid_property(connector, edid);
-
- if (edid) {
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
- }
-
- return ret;
-}
-
-static int msm_hdmi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
- struct hdmi *hdmi = hdmi_connector->hdmi;
- const struct hdmi_platform_config *config = hdmi->config;
- struct msm_drm_private *priv = connector->dev->dev_private;
- struct msm_kms *kms = priv->kms;
- long actual, requested;
-
- requested = 1000 * mode->clock;
- actual = kms->funcs->round_pixclk(kms,
- requested, hdmi_connector->hdmi->encoder);
-
- /* for mdp5/apq8074, we manage our own pixel clk (as opposed to
- * mdp4/dtv stuff where pixel clk is assigned to mdp/encoder
- * instead):
- */
- if (config->pwr_clk_cnt > 0)
- actual = clk_round_rate(hdmi->pwr_clks[0], actual);
-
- DBG("requested=%ld, actual=%ld", requested, actual);
-
- if (actual != requested)
- return MODE_CLOCK_RANGE;
-
- return 0;
-}
-
-static const struct drm_connector_funcs hdmi_connector_funcs = {
- .detect = hdmi_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = hdmi_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const struct drm_connector_helper_funcs msm_hdmi_connector_helper_funcs = {
- .get_modes = msm_hdmi_connector_get_modes,
- .mode_valid = msm_hdmi_connector_mode_valid,
-};
-
-/* initialize connector */
-struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
-{
- struct drm_connector *connector = NULL;
- struct hdmi_connector *hdmi_connector;
-
- hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL);
- if (!hdmi_connector)
- return ERR_PTR(-ENOMEM);
-
- hdmi_connector->hdmi = hdmi;
- INIT_WORK(&hdmi_connector->hpd_work, msm_hdmi_hotplug_work);
-
- connector = &hdmi_connector->base;
-
- drm_connector_init_with_ddc(hdmi->dev, connector,
- &hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA,
- hdmi->i2c);
- drm_connector_helper_add(connector, &msm_hdmi_connector_helper_funcs);
-
- connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
-
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
-
- drm_connector_attach_encoder(connector, hdmi->encoder);
-
- return connector;
-}
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 09d2d279c30a..0804c31e8962 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -15,6 +15,11 @@
#include "msm_gpu.h"
#include "msm_kms.h"
#include "msm_debugfs.h"
+#include "disp/msm_disp_snapshot.h"
+
+/*
+ * GPU Snapshot:
+ */
struct msm_gpu_show_priv {
struct msm_gpu_state *state;
@@ -29,14 +34,14 @@ static int msm_gpu_show(struct seq_file *m, void *arg)
struct msm_gpu *gpu = priv->gpu;
int ret;
- ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex);
+ ret = mutex_lock_interruptible(&gpu->lock);
if (ret)
return ret;
drm_printf(&p, "%s Status:\n", gpu->name);
gpu->funcs->show(gpu, show_priv->state, &p);
- mutex_unlock(&show_priv->dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
return 0;
}
@@ -48,9 +53,9 @@ static int msm_gpu_release(struct inode *inode, struct file *file)
struct msm_drm_private *priv = show_priv->dev->dev_private;
struct msm_gpu *gpu = priv->gpu;
- mutex_lock(&show_priv->dev->struct_mutex);
+ mutex_lock(&gpu->lock);
gpu->funcs->gpu_state_put(show_priv->state);
- mutex_unlock(&show_priv->dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
kfree(show_priv);
@@ -72,15 +77,16 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
if (!show_priv)
return -ENOMEM;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&gpu->lock);
if (ret)
goto free_priv;
pm_runtime_get_sync(&gpu->pdev->dev);
+ msm_gpu_hw_init(gpu);
show_priv->state = gpu->funcs->gpu_state_get(gpu);
pm_runtime_put_sync(&gpu->pdev->dev);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
if (IS_ERR(show_priv->state)) {
ret = PTR_ERR(show_priv->state);
@@ -108,6 +114,73 @@ static const struct file_operations msm_gpu_fops = {
.release = msm_gpu_release,
};
+/*
+ * Display Snapshot:
+ */
+
+static int msm_kms_show(struct seq_file *m, void *arg)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct msm_disp_state *state = m->private;
+
+ msm_disp_state_print(state, &p);
+
+ return 0;
+}
+
+static int msm_kms_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *m = file->private_data;
+ struct msm_disp_state *state = m->private;
+
+ msm_disp_state_free(state);
+
+ return single_release(inode, file);
+}
+
+static int msm_kms_open(struct inode *inode, struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_disp_state *state;
+ int ret;
+
+ if (!priv->kms)
+ return -ENODEV;
+
+ ret = mutex_lock_interruptible(&priv->kms->dump_mutex);
+ if (ret)
+ return ret;
+
+ state = msm_disp_snapshot_state_sync(priv->kms);
+
+ mutex_unlock(&priv->kms->dump_mutex);
+
+ if (IS_ERR(state)) {
+ return PTR_ERR(state);
+ }
+
+ ret = single_open(file, msm_kms_show, state);
+ if (ret) {
+ msm_disp_state_free(state);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct file_operations msm_kms_fops = {
+ .owner = THIS_MODULE,
+ .open = msm_kms_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = msm_kms_release,
+};
+
+/*
+ * Other debugfs:
+ */
+
static unsigned long last_shrink_freed;
static int
@@ -133,8 +206,10 @@ DEFINE_SIMPLE_ATTRIBUTE(shrink_fops,
"0x%08llx\n");
-static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
+static int msm_gem_show(struct seq_file *m, void *arg)
{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
struct msm_drm_private *priv = dev->dev_private;
int ret;
@@ -149,8 +224,10 @@ static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
return 0;
}
-static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
+static int msm_mm_show(struct seq_file *m, void *arg)
{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
struct drm_printer p = drm_seq_file_printer(m);
drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
@@ -158,8 +235,10 @@ static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
return 0;
}
-static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
+static int msm_fb_show(struct seq_file *m, void *arg)
{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
struct msm_drm_private *priv = dev->dev_private;
struct drm_framebuffer *fb, *fbdev_fb = NULL;
@@ -182,29 +261,10 @@ static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
return 0;
}
-static int show_locked(struct seq_file *m, void *arg)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- int (*show)(struct drm_device *dev, struct seq_file *m) =
- node->info_ent->data;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- ret = show(dev, m);
-
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
static struct drm_info_list msm_debugfs_list[] = {
- {"gem", show_locked, 0, msm_gem_show},
- { "mm", show_locked, 0, msm_mm_show },
- { "fb", show_locked, 0, msm_fb_show },
+ {"gem", msm_gem_show},
+ { "mm", msm_mm_show },
+ { "fb", msm_fb_show },
};
static int late_init_minor(struct drm_minor *minor)
@@ -251,9 +311,15 @@ void msm_debugfs_init(struct drm_minor *minor)
debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
dev, &msm_gpu_fops);
+ debugfs_create_file("kms", S_IRUSR, minor->debugfs_root,
+ dev, &msm_kms_fops);
+
debugfs_create_u32("hangcheck_period_ms", 0600, minor->debugfs_root,
&priv->hangcheck_period);
+ debugfs_create_bool("disable_err_irq", 0600, minor->debugfs_root,
+ &priv->disable_err_irq);
+
debugfs_create_file("shrink", S_IRWXU, minor->debugfs_root,
dev, &shrink_fops);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 7936e8d498dd..ad35a5d94053 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -339,10 +339,9 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
static int msm_drm_uninit(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
- struct drm_device *ddev = platform_get_drvdata(pdev);
- struct msm_drm_private *priv = ddev->dev_private;
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+ struct drm_device *ddev = priv->dev;
struct msm_kms *kms = priv->kms;
- struct msm_mdss *mdss = priv->mdss;
int i;
/*
@@ -402,14 +401,10 @@ static int msm_drm_uninit(struct device *dev)
component_unbind_all(dev, ddev);
- if (mdss && mdss->funcs)
- mdss->funcs->destroy(ddev);
-
ddev->dev_private = NULL;
drm_dev_put(ddev);
destroy_workqueue(priv->wq);
- kfree(priv);
return 0;
}
@@ -512,8 +507,8 @@ static int msm_init_vram(struct drm_device *dev)
static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
{
struct platform_device *pdev = to_platform_device(dev);
+ struct msm_drm_private *priv = dev_get_drvdata(dev);
struct drm_device *ddev;
- struct msm_drm_private *priv;
struct msm_kms *kms;
struct msm_mdss *mdss;
int ret, i;
@@ -523,32 +518,9 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
return PTR_ERR(ddev);
}
-
- platform_set_drvdata(pdev, ddev);
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- ret = -ENOMEM;
- goto err_put_drm_dev;
- }
-
ddev->dev_private = priv;
priv->dev = ddev;
- switch (get_mdp_ver(pdev)) {
- case KMS_MDP5:
- ret = mdp5_mdss_init(ddev);
- break;
- case KMS_DPU:
- ret = dpu_mdss_init(ddev);
- break;
- default:
- ret = 0;
- break;
- }
- if (ret)
- goto err_free_priv;
-
mdss = priv->mdss;
priv->wq = alloc_ordered_workqueue("msm", 0);
@@ -571,12 +543,12 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
ret = msm_init_vram(ddev);
if (ret)
- goto err_destroy_mdss;
+ return ret;
/* Bind all our sub-components: */
ret = component_bind_all(dev, ddev);
if (ret)
- goto err_destroy_mdss;
+ return ret;
dma_set_max_seg_size(dev, UINT_MAX);
@@ -682,15 +654,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
err_msm_uninit:
msm_drm_uninit(dev);
return ret;
-err_destroy_mdss:
- if (mdss && mdss->funcs)
- mdss->funcs->destroy(ddev);
-err_free_priv:
- kfree(priv);
-err_put_drm_dev:
- drm_dev_put(ddev);
- platform_set_drvdata(pdev, NULL);
- return ret;
}
/*
@@ -752,14 +715,8 @@ static void context_close(struct msm_file_private *ctx)
static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
- struct msm_drm_private *priv = dev->dev_private;
struct msm_file_private *ctx = file->driver_priv;
- mutex_lock(&dev->struct_mutex);
- if (ctx == priv->lastctx)
- priv->lastctx = NULL;
- mutex_unlock(&dev->struct_mutex);
-
context_close(ctx);
}
@@ -967,29 +924,18 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
return ret;
}
-static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
- struct drm_file *file)
+static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
+ ktime_t timeout)
{
- struct msm_drm_private *priv = dev->dev_private;
- struct drm_msm_wait_fence *args = data;
- ktime_t timeout = to_ktime(args->timeout);
- struct msm_gpu_submitqueue *queue;
- struct msm_gpu *gpu = priv->gpu;
struct dma_fence *fence;
int ret;
- if (args->pad) {
- DRM_ERROR("invalid pad: %08x\n", args->pad);
+ if (fence_after(fence_id, queue->last_fence)) {
+ DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n",
+ fence_id, queue->last_fence);
return -EINVAL;
}
- if (!gpu)
- return 0;
-
- queue = msm_submitqueue_get(file->driver_priv, args->queueid);
- if (!queue)
- return -ENOENT;
-
/*
* Map submitqueue scoped "seqno" (which is actually an idr key)
* back to underlying dma-fence
@@ -1001,7 +947,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
ret = mutex_lock_interruptible(&queue->lock);
if (ret)
return ret;
- fence = idr_find(&queue->fence_idr, args->fence);
+ fence = idr_find(&queue->fence_idr, fence_id);
if (fence)
fence = dma_fence_get_rcu(fence);
mutex_unlock(&queue->lock);
@@ -1017,6 +963,32 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
}
dma_fence_put(fence);
+
+ return ret;
+}
+
+static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_msm_wait_fence *args = data;
+ struct msm_gpu_submitqueue *queue;
+ int ret;
+
+ if (args->pad) {
+ DRM_ERROR("invalid pad: %08x\n", args->pad);
+ return -EINVAL;
+ }
+
+ if (!priv->gpu)
+ return 0;
+
+ queue = msm_submitqueue_get(file->driver_priv, args->queueid);
+ if (!queue)
+ return -ENOENT;
+
+ ret = wait_fence(queue, args->fence, to_ktime(args->timeout));
+
msm_submitqueue_put(queue);
return ret;
@@ -1127,8 +1099,7 @@ static const struct drm_driver msm_driver = {
static int __maybe_unused msm_runtime_suspend(struct device *dev)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct msm_drm_private *priv = ddev->dev_private;
+ struct msm_drm_private *priv = dev_get_drvdata(dev);
struct msm_mdss *mdss = priv->mdss;
DBG("");
@@ -1141,8 +1112,7 @@ static int __maybe_unused msm_runtime_suspend(struct device *dev)
static int __maybe_unused msm_runtime_resume(struct device *dev)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct msm_drm_private *priv = ddev->dev_private;
+ struct msm_drm_private *priv = dev_get_drvdata(dev);
struct msm_mdss *mdss = priv->mdss;
DBG("");
@@ -1172,8 +1142,8 @@ static int __maybe_unused msm_pm_resume(struct device *dev)
static int __maybe_unused msm_pm_prepare(struct device *dev)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL;
+ struct msm_drm_private *priv = dev_get_drvdata(dev);
+ struct drm_device *ddev = priv ? priv->dev : NULL;
if (!priv || !priv->kms)
return 0;
@@ -1183,8 +1153,8 @@ static int __maybe_unused msm_pm_prepare(struct device *dev)
static void __maybe_unused msm_pm_complete(struct device *dev)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL;
+ struct msm_drm_private *priv = dev_get_drvdata(dev);
+ struct drm_device *ddev = priv ? priv->dev : NULL;
if (!priv || !priv->kms)
return;
@@ -1277,9 +1247,10 @@ static int add_components_mdp(struct device *mdp_dev,
return 0;
}
-static int compare_name_mdp(struct device *dev, void *data)
+static int find_mdp_node(struct device *dev, void *data)
{
- return (strstr(dev_name(dev), "mdp") != NULL);
+ return of_match_node(dpu_dt_match, dev->of_node) ||
+ of_match_node(mdp5_dt_match, dev->of_node);
}
static int add_display_components(struct platform_device *pdev,
@@ -1304,7 +1275,7 @@ static int add_display_components(struct platform_device *pdev,
return ret;
}
- mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
+ mdp_dev = device_find_child(dev, NULL, find_mdp_node);
if (!mdp_dev) {
DRM_DEV_ERROR(dev, "failed to find MDSS MDP node\n");
of_platform_depopulate(dev);
@@ -1382,12 +1353,35 @@ static const struct component_master_ops msm_drm_ops = {
static int msm_pdev_probe(struct platform_device *pdev)
{
struct component_match *match = NULL;
+ struct msm_drm_private *priv;
int ret;
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ switch (get_mdp_ver(pdev)) {
+ case KMS_MDP5:
+ ret = mdp5_mdss_init(pdev);
+ break;
+ case KMS_DPU:
+ ret = dpu_mdss_init(pdev);
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+ if (ret) {
+ platform_set_drvdata(pdev, NULL);
+ return ret;
+ }
+
if (get_mdp_ver(pdev)) {
ret = add_display_components(pdev, &match);
if (ret)
- return ret;
+ goto fail;
}
ret = add_gpu_components(&pdev->dev, &match);
@@ -1409,21 +1403,31 @@ static int msm_pdev_probe(struct platform_device *pdev)
fail:
of_platform_depopulate(&pdev->dev);
+
+ if (priv->mdss && priv->mdss->funcs)
+ priv->mdss->funcs->destroy(priv->mdss);
+
return ret;
}
static int msm_pdev_remove(struct platform_device *pdev)
{
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+ struct msm_mdss *mdss = priv->mdss;
+
component_master_del(&pdev->dev, &msm_drm_ops);
of_platform_depopulate(&pdev->dev);
+ if (mdss && mdss->funcs)
+ mdss->funcs->destroy(mdss);
+
return 0;
}
static void msm_pdev_shutdown(struct platform_device *pdev)
{
- struct drm_device *drm = platform_get_drvdata(pdev);
- struct msm_drm_private *priv = drm ? drm->dev_private : NULL;
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+ struct drm_device *drm = priv ? priv->dev : NULL;
if (!priv || !priv->kms)
return;
@@ -1463,7 +1467,6 @@ static int __init msm_drm_register(void)
msm_mdp_register();
msm_dpu_register();
msm_dsi_register();
- msm_edp_register();
msm_hdmi_register();
msm_dp_register();
adreno_register();
@@ -1477,7 +1480,6 @@ static void __exit msm_drm_unregister(void)
msm_dp_unregister();
msm_hdmi_unregister();
adreno_unregister();
- msm_edp_unregister();
msm_dsi_unregister();
msm_mdp_unregister();
msm_dpu_unregister();
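The wait_fence() split above also gives the ioctl a cheap validity check: fence ids are idr keys scoped to the submitqueue (allocated cyclically starting at 1, see the msm_gem_submit.c hunk further down), so any id newer than queue->last_fence can be rejected immediately instead of blocking. From userspace the flow looks roughly like the comment below; the field names are the real drm_msm_* ioctl args, the surrounding code is illustrative.

/*
 *   submit.queueid = q;
 *   drmIoctl(fd, DRM_IOCTL_MSM_GEM_SUBMIT, &submit);   // returns submit.fence
 *
 *   wait.queueid = q;
 *   wait.fence   = submit.fence;   // submitqueue-scoped id, not a global seqno
 *   wait.timeout = ...;
 *   drmIoctl(fd, DRM_IOCTL_MSM_WAIT_FENCE, &wait);
 *
 * An id larger than queue->last_fence was never handed out, so the wait
 * fails fast with -EINVAL rather than stalling until the timeout.
 */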
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index eb984d925f4d..d7574e6bd4e4 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -151,12 +151,6 @@ struct msm_drm_private {
*/
struct hdmi *hdmi;
- /* eDP is for mdp5 only, but kms has not been created
- * when edp_bind() and edp_init() are called. Here is the only
- * place to keep the edp instance.
- */
- struct msm_edp *edp;
-
/* DSI is shared by mdp4 and mdp5 */
struct msm_dsi *dsi[2];
@@ -164,7 +158,7 @@ struct msm_drm_private {
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
- struct msm_file_private *lastctx;
+
/* gpu is only set on open(), but we need this info earlier */
bool is_a2xx;
bool has_cached_coherent;
@@ -246,6 +240,15 @@ struct msm_drm_private {
/* For hang detection, in ms */
unsigned int hangcheck_period;
+
+ /**
+ * disable_err_irq:
+ *
+ * Disable handling of GPU hw error interrupts, to force fallback to
+ * sw hangcheck timer. Written (via debugfs) by igt tests to test
+ * the sw hangcheck mechanism.
+ */
+ bool disable_err_irq;
};
struct msm_format {
@@ -335,12 +338,6 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
void __init msm_hdmi_register(void);
void __exit msm_hdmi_unregister(void);
-struct msm_edp;
-void __init msm_edp_register(void);
-void __exit msm_edp_unregister(void);
-int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
- struct drm_encoder *encoder);
-
struct msm_dsi;
#ifdef CONFIG_DRM_MSM_DSI
int dsi_dev_attach(struct platform_device *pdev);
@@ -392,8 +389,12 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder);
int msm_dp_display_disable(struct msm_dp *dp, struct drm_encoder *encoder);
int msm_dp_display_pre_disable(struct msm_dp *dp, struct drm_encoder *encoder);
void msm_dp_display_mode_set(struct msm_dp *dp, struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode);
+
+struct drm_bridge *msm_dp_bridge_init(struct msm_dp *dp_display,
+ struct drm_device *dev,
+ struct drm_encoder *encoder);
void msm_dp_irq_postinstall(struct msm_dp *dp_display);
void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display);
@@ -430,8 +431,8 @@ static inline int msm_dp_display_pre_disable(struct msm_dp *dp,
}
static inline void msm_dp_display_mode_set(struct msm_dp *dp,
struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
{
}
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 0daaeb54ff6f..4c39ef9dd75d 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -81,8 +81,6 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
bo = msm_framebuffer_bo(fb, 0);
- mutex_lock(&dev->struct_mutex);
-
/*
* NOTE: if we can be guaranteed to be able to map buffer
* in panic (ie. lock-safe, etc) we could avoid pinning the
@@ -91,14 +89,14 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
ret = msm_gem_get_and_pin_iova(bo, priv->kms->aspace, &paddr);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to get buffer obj iova: %d\n", ret);
- goto fail_unlock;
+ goto fail;
}
fbi = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(fbi)) {
DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n");
ret = PTR_ERR(fbi);
- goto fail_unlock;
+ goto fail;
}
DBG("fbi=%p, dev=%p", fbi, dev);
@@ -115,7 +113,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
fbi->screen_base = msm_gem_get_vaddr(bo);
if (IS_ERR(fbi->screen_base)) {
ret = PTR_ERR(fbi->screen_base);
- goto fail_unlock;
+ goto fail;
}
fbi->screen_size = bo->size;
fbi->fix.smem_start = paddr;
@@ -124,12 +122,9 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
- mutex_unlock(&dev->struct_mutex);
-
return 0;
-fail_unlock:
- mutex_unlock(&dev->struct_mutex);
+fail:
drm_framebuffer_remove(fb);
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h
index 4783db528bcc..17ee3822b423 100644
--- a/drivers/gpu/drm/msm/msm_fence.h
+++ b/drivers/gpu/drm/msm/msm_fence.h
@@ -60,4 +60,16 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);
struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx);
+static inline bool
+fence_before(uint32_t a, uint32_t b)
+{
+ return (int32_t)(a - b) < 0;
+}
+
+static inline bool
+fence_after(uint32_t a, uint32_t b)
+{
+ return (int32_t)(a - b) > 0;
+}
+
#endif
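fence_before()/fence_after() above compare 32-bit sequence numbers through a signed difference so the ordering stays correct across counter wraparound, the same idea as the kernel's time_after(). A worked example in comment form:

/*
 * a = 0x00000002, b = 0xfffffffe   (b was issued first, then the
 * counter wrapped)
 *
 *   a - b            == 0x00000004  (unsigned, mod 2^32)
 *   (int32_t)(a - b) ==  4 > 0      =>  fence_after(a, b) is true
 *
 * A plain "a > b" would get this wrong (2 < 0xfffffffe).  The trick
 * holds as long as the two values are less than 2^31 apart.
 */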
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 2916480d9115..02b9ae65a96a 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -1029,8 +1029,7 @@ static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+ vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
return 0;
@@ -1094,7 +1093,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
break;
fallthrough;
default:
- DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
+ DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
(flags & MSM_BO_CACHE_MASK));
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 4a1420b05e97..086dacf2f26a 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -5,6 +5,7 @@
*/
#include <linux/vmalloc.h>
+#include <linux/sched/mm.h>
#include "msm_drv.h"
#include "msm_gem.h"
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 3cb029f10925..6cfa984dee6a 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -772,6 +772,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
args->nr_cmds);
if (IS_ERR(submit)) {
ret = PTR_ERR(submit);
+ submit = NULL;
goto out_unlock;
}
@@ -880,7 +881,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
* to the underlying fence.
*/
submit->fence_id = idr_alloc_cyclic(&queue->fence_idr,
- submit->user_fence, 0, INT_MAX, GFP_KERNEL);
+ submit->user_fence, 1, INT_MAX, GFP_KERNEL);
if (submit->fence_id < 0) {
ret = submit->fence_id = 0;
submit->fence_id = 0;
@@ -904,6 +905,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
drm_sched_entity_push_job(&submit->base);
args->fence = submit->fence_id;
+ queue->last_fence = submit->fence_id;
msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
msm_process_post_deps(post_deps, args->nr_out_syncobjs,
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 2c46cd968ac4..0f78c2615272 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -150,7 +150,7 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
{
int ret;
- WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&gpu->lock));
if (!gpu->needs_hw_init)
return 0;
@@ -172,7 +172,7 @@ static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
spin_lock_irqsave(&ring->submit_lock, flags);
list_for_each_entry(submit, &ring->submits, node) {
- if (submit->seqno > fence)
+ if (fence_after(submit->seqno, fence))
break;
msm_update_fence(submit->ring->fctx,
@@ -361,7 +361,7 @@ static void recover_worker(struct kthread_work *work)
char *comm = NULL, *cmd = NULL;
int i;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&gpu->lock);
DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);
@@ -442,7 +442,7 @@ static void recover_worker(struct kthread_work *work)
}
}
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
msm_gpu_retire(gpu);
}
@@ -450,12 +450,11 @@ static void recover_worker(struct kthread_work *work)
static void fault_worker(struct kthread_work *work)
{
struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work);
- struct drm_device *dev = gpu->dev;
struct msm_gem_submit *submit;
struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
char *comm = NULL, *cmd = NULL;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&gpu->lock);
submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
if (submit && submit->fault_dumped)
@@ -490,7 +489,7 @@ resume_smmu:
memset(&gpu->fault_info, 0, sizeof(gpu->fault_info));
gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
}
static void hangcheck_timer_reset(struct msm_gpu *gpu)
@@ -510,7 +509,7 @@ static void hangcheck_handler(struct timer_list *t)
if (fence != ring->hangcheck_fence) {
/* some progress has been made.. ya! */
ring->hangcheck_fence = fence;
- } else if (fence < ring->seqno) {
+ } else if (fence_before(fence, ring->seqno)) {
/* no progress and not done.. hung! */
ring->hangcheck_fence = fence;
DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
@@ -524,7 +523,7 @@ static void hangcheck_handler(struct timer_list *t)
}
/* if still more pending work, reset the hangcheck timer: */
- if (ring->seqno > ring->hangcheck_fence)
+ if (fence_after(ring->seqno, ring->hangcheck_fence))
hangcheck_timer_reset(gpu);
/* workaround for missing irq: */
@@ -733,7 +732,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct msm_ringbuffer *ring = submit->ring;
unsigned long flags;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&gpu->lock));
pm_runtime_get_sync(&gpu->pdev->dev);
@@ -763,7 +762,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
mutex_unlock(&gpu->active_lock);
gpu->funcs->submit(gpu, submit);
- priv->lastctx = submit->queue->ctx;
+ gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
hangcheck_timer_reset(gpu);
}
@@ -848,6 +847,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
INIT_LIST_HEAD(&gpu->active_list);
mutex_init(&gpu->active_lock);
+ mutex_init(&gpu->lock);
kthread_init_work(&gpu->retire_work, retire_worker);
kthread_init_work(&gpu->recover_work, recover_worker);
kthread_init_work(&gpu->fault_work, fault_worker);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 59cdd00b69d0..445c6bfd4b6b 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -88,6 +88,21 @@ struct msm_gpu_devfreq {
struct devfreq *devfreq;
/**
+ * idle_constraint:
+ *
+ * A PM QoS constraint to limit max freq while the GPU is idle.
+ */
+ struct dev_pm_qos_request idle_freq;
+
+ /**
+ * boost_constraint:
+ *
+ * A PM QoS constraint to boost min freq for a period of time
+ * until the boost expires.
+ */
+ struct dev_pm_qos_request boost_freq;
+
+ /**
* busy_cycles:
*
* Used by implementation of gpu->gpu_busy() to track the last
@@ -103,22 +118,19 @@ struct msm_gpu_devfreq {
ktime_t idle_time;
/**
- * idle_freq:
+ * idle_work:
*
- * Shadow frequency used while the GPU is idle. From the PoV of
- * the devfreq governor, we are continuing to sample busyness and
- * adjust frequency while the GPU is idle, but we use this shadow
- * value as the GPU is actually clamped to minimum frequency while
- * it is inactive.
+ * Used to delay clamping to idle freq on active->idle transition.
*/
- unsigned long idle_freq;
+ struct msm_hrtimer_work idle_work;
/**
- * idle_work:
+ * boost_work:
*
- * Used to delay clamping to idle freq on active->idle transition.
+ * Used to reset the boost_constraint after the boost period has
+ * elapsed
*/
- struct msm_hrtimer_work idle_work;
+ struct msm_hrtimer_work boost_work;
};
struct msm_gpu {
@@ -144,6 +156,17 @@ struct msm_gpu {
struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
int nr_rings;
+ /**
+ * cur_ctx_seqno:
+ *
+ * The ctx->seqno value of the last context to submit rendering,
+ * and the one with current pgtables installed (for generations
+ * that support per-context pgtables). Tracked by seqno rather
+ * than pointer value to avoid dangling pointers, and cases where
+ * a ctx can be freed and a new one created with the same address.
+ */
+ int cur_ctx_seqno;
+
/*
* List of GEM active objects on this gpu. Protected by
* msm_drm_private::mm_lock
@@ -151,12 +174,22 @@ struct msm_gpu {
struct list_head active_list;
/**
+ * lock:
+ *
+ * General lock for serializing all the gpu things.
+ *
+ * TODO move to per-ring locking where feasible (ie. submit/retire
+ * path, etc)
+ */
+ struct mutex lock;
+
+ /**
* active_submits:
*
* The number of submitted but not yet retired submits, used to
* determine transitions between active and idle.
*
- * Protected by lock
+ * Protected by active_lock
*/
int active_submits;
@@ -241,7 +274,7 @@ static inline bool msm_gpu_active(struct msm_gpu *gpu)
for (i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
- if (ring->seqno > ring->memptrs->fence)
+ if (fence_after(ring->seqno, ring->memptrs->fence))
return true;
}
@@ -359,6 +392,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
* @ring_nr: the ringbuffer used by this submitqueue, which is determined
* by the submitqueue's priority
* @faults: the number of GPU hangs associated with this submitqueue
+ * @last_fence: the sequence number of the last allocated fence (for error
+ * checking)
* @ctx: the per-drm_file context associated with the submitqueue (ie.
* which set of pgtables do submits jobs associated with the
* submitqueue use)
@@ -374,6 +409,7 @@ struct msm_gpu_submitqueue {
u32 flags;
u32 ring_nr;
int faults;
+ uint32_t last_fence;
struct msm_file_private *ctx;
struct list_head node;
struct idr fence_idr;
@@ -498,6 +534,7 @@ void msm_devfreq_init(struct msm_gpu *gpu);
void msm_devfreq_cleanup(struct msm_gpu *gpu);
void msm_devfreq_resume(struct msm_gpu *gpu);
void msm_devfreq_suspend(struct msm_gpu *gpu);
+void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor);
void msm_devfreq_active(struct msm_gpu *gpu);
void msm_devfreq_idle(struct msm_gpu *gpu);
@@ -534,28 +571,28 @@ static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
{
struct msm_gpu_state *state = NULL;
- mutex_lock(&gpu->dev->struct_mutex);
+ mutex_lock(&gpu->lock);
if (gpu->crashstate) {
kref_get(&gpu->crashstate->ref);
state = gpu->crashstate;
}
- mutex_unlock(&gpu->dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
return state;
}
static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
{
- mutex_lock(&gpu->dev->struct_mutex);
+ mutex_lock(&gpu->lock);
if (gpu->crashstate) {
if (gpu->funcs->gpu_state_put(gpu->crashstate))
gpu->crashstate = NULL;
}
- mutex_unlock(&gpu->dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
}
/*
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
index 8b7473f69cb8..62405e980925 100644
--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -9,6 +9,7 @@
#include <linux/devfreq.h>
#include <linux/devfreq_cooling.h>
+#include <linux/units.h>
/*
* Power Management:
@@ -20,17 +21,11 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
struct msm_gpu *gpu = dev_to_gpu(dev);
struct dev_pm_opp *opp;
- opp = devfreq_recommended_opp(dev, freq, flags);
-
/*
- * If the GPU is idle, devfreq is not aware, so just ignore
- * it's requests
+ * Note that devfreq_recommended_opp() can modify the freq
+ * to something that actually is in the opp table:
*/
- if (gpu->devfreq.idle_freq) {
- gpu->devfreq.idle_freq = *freq;
- return 0;
- }
-
+ opp = devfreq_recommended_opp(dev, freq, flags);
if (IS_ERR(opp))
return PTR_ERR(opp);
@@ -48,9 +43,6 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
static unsigned long get_freq(struct msm_gpu *gpu)
{
- if (gpu->devfreq.idle_freq)
- return gpu->devfreq.idle_freq;
-
if (gpu->funcs->gpu_get_freq)
return gpu->funcs->gpu_get_freq(gpu);
@@ -88,6 +80,7 @@ static struct devfreq_dev_profile msm_devfreq_profile = {
.get_cur_freq = msm_devfreq_get_cur_freq,
};
+static void msm_devfreq_boost_work(struct kthread_work *work);
static void msm_devfreq_idle_work(struct kthread_work *work);
void msm_devfreq_init(struct msm_gpu *gpu)
@@ -98,6 +91,12 @@ void msm_devfreq_init(struct msm_gpu *gpu)
if (!gpu->funcs->gpu_busy)
return;
+ dev_pm_qos_add_request(&gpu->pdev->dev, &df->idle_freq,
+ DEV_PM_QOS_MAX_FREQUENCY,
+ PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
+ dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq,
+ DEV_PM_QOS_MIN_FREQUENCY, 0);
+
msm_devfreq_profile.initial_freq = gpu->fast_rate;
/*
@@ -128,13 +127,19 @@ void msm_devfreq_init(struct msm_gpu *gpu)
gpu->cooling = NULL;
}
+ msm_hrtimer_work_init(&df->boost_work, gpu->worker, msm_devfreq_boost_work,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
msm_hrtimer_work_init(&df->idle_work, gpu->worker, msm_devfreq_idle_work,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
void msm_devfreq_cleanup(struct msm_gpu *gpu)
{
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+
devfreq_cooling_unregister(gpu->cooling);
+ dev_pm_qos_remove_request(&df->boost_freq);
+ dev_pm_qos_remove_request(&df->idle_freq);
}
void msm_devfreq_resume(struct msm_gpu *gpu)
@@ -150,12 +155,40 @@ void msm_devfreq_suspend(struct msm_gpu *gpu)
devfreq_suspend_device(gpu->devfreq.devfreq);
}
+static void msm_devfreq_boost_work(struct kthread_work *work)
+{
+ struct msm_gpu_devfreq *df = container_of(work,
+ struct msm_gpu_devfreq, boost_work.work);
+
+ dev_pm_qos_update_request(&df->boost_freq, 0);
+}
+
+void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor)
+{
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+ uint64_t freq;
+
+ freq = get_freq(gpu);
+ freq *= factor;
+
+ /*
+ * A nice little trap is that PM QoS operates in terms of KHz,
+ * while devfreq operates in terms of Hz:
+ */
+ do_div(freq, HZ_PER_KHZ);
+
+ dev_pm_qos_update_request(&df->boost_freq, freq);
+
+ msm_hrtimer_queue_work(&df->boost_work,
+ ms_to_ktime(msm_devfreq_profile.polling_ms),
+ HRTIMER_MODE_REL);
+}
+
void msm_devfreq_active(struct msm_gpu *gpu)
{
struct msm_gpu_devfreq *df = &gpu->devfreq;
struct devfreq_dev_status status;
unsigned int idle_time;
- unsigned long target_freq = df->idle_freq;
if (!df->devfreq)
return;
@@ -165,12 +198,6 @@ void msm_devfreq_active(struct msm_gpu *gpu)
*/
hrtimer_cancel(&df->idle_work.timer);
- /*
- * Hold devfreq lock to synchronize with get_dev_status()/
- * target() callbacks
- */
- mutex_lock(&df->devfreq->lock);
-
idle_time = ktime_to_ms(ktime_sub(ktime_get(), df->idle_time));
/*
@@ -178,21 +205,18 @@ void msm_devfreq_active(struct msm_gpu *gpu)
* interval, then we won't meet the threshold of busyness for
* the governor to ramp up the freq.. so give some boost
*/
- if (idle_time > msm_devfreq_profile.polling_ms/2) {
- target_freq *= 2;
+ if (idle_time > msm_devfreq_profile.polling_ms) {
+ msm_devfreq_boost(gpu, 2);
}
- df->idle_freq = 0;
-
- msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
+ dev_pm_qos_update_request(&df->idle_freq,
+ PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
/*
* Reset the polling interval so we aren't inconsistent
* about freq vs busy/total cycles
*/
msm_devfreq_get_dev_status(&gpu->pdev->dev, &status);
-
- mutex_unlock(&df->devfreq->lock);
}
@@ -201,32 +225,20 @@ static void msm_devfreq_idle_work(struct kthread_work *work)
struct msm_gpu_devfreq *df = container_of(work,
struct msm_gpu_devfreq, idle_work.work);
struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq);
- unsigned long idle_freq, target_freq = 0;
-
- if (!df->devfreq)
- return;
-
- /*
- * Hold devfreq lock to synchronize with get_dev_status()/
- * target() callbacks
- */
- mutex_lock(&df->devfreq->lock);
-
- idle_freq = get_freq(gpu);
-
- if (gpu->clamp_to_idle)
- msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
df->idle_time = ktime_get();
- df->idle_freq = idle_freq;
- mutex_unlock(&df->devfreq->lock);
+ if (gpu->clamp_to_idle)
+ dev_pm_qos_update_request(&df->idle_freq, 0);
}
void msm_devfreq_idle(struct msm_gpu *gpu)
{
struct msm_gpu_devfreq *df = &gpu->devfreq;
+ if (!df->devfreq)
+ return;
+
msm_hrtimer_queue_work(&df->idle_work, ms_to_ktime(1),
- HRTIMER_MODE_ABS);
+ HRTIMER_MODE_REL);
}
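msm_devfreq_boost() above bridges two unit systems: get_freq() and the devfreq OPPs are in Hz, while the frequency QoS requests (DEV_PM_QOS_MIN/MAX_FREQUENCY) are in kHz, hence the do_div(freq, HZ_PER_KHZ). A worked example in comment form (the starting frequency is illustrative):

/*
 * get_freq() == 257,000,000 Hz and the caller asks for a 2x boost:
 *
 *   freq = 257000000 * 2 = 514000000        (Hz)
 *   do_div(freq, HZ_PER_KHZ)  =>  514000    (kHz)
 *
 * 514000 is what dev_pm_qos_update_request(&df->boost_freq, ...)
 * expects, since frequency QoS constraints are expressed in kHz.
 */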
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 6a42b819abc4..2a4f0526cb98 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -198,19 +198,22 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev);
struct msm_kms *mdp5_kms_init(struct drm_device *dev);
struct msm_kms *dpu_kms_init(struct drm_device *dev);
+extern const struct of_device_id dpu_dt_match[];
+extern const struct of_device_id mdp5_dt_match[];
+
struct msm_mdss_funcs {
int (*enable)(struct msm_mdss *mdss);
int (*disable)(struct msm_mdss *mdss);
- void (*destroy)(struct drm_device *dev);
+ void (*destroy)(struct msm_mdss *mdss);
};
struct msm_mdss {
- struct drm_device *dev;
+ struct device *dev;
const struct msm_mdss_funcs *funcs;
};
-int mdp5_mdss_init(struct drm_device *dev);
-int dpu_mdss_init(struct drm_device *dev);
+int mdp5_mdss_init(struct platform_device *dev);
+int dpu_mdss_init(struct platform_device *dev);
#define for_each_crtc_mask(dev, crtc, crtc_mask) \
drm_for_each_crtc(crtc, dev) \
diff --git a/drivers/gpu/drm/msm/msm_perf.c b/drivers/gpu/drm/msm/msm_perf.c
index 3a27153eef08..3d3da79fec2a 100644
--- a/drivers/gpu/drm/msm/msm_perf.c
+++ b/drivers/gpu/drm/msm/msm_perf.c
@@ -155,9 +155,12 @@ static int perf_open(struct inode *inode, struct file *file)
struct msm_gpu *gpu = priv->gpu;
int ret = 0;
- mutex_lock(&dev->struct_mutex);
+ if (!gpu)
+ return -ENODEV;
- if (perf->open || !gpu) {
+ mutex_lock(&gpu->lock);
+
+ if (perf->open) {
ret = -EBUSY;
goto out;
}
@@ -171,7 +174,7 @@ static int perf_open(struct inode *inode, struct file *file)
perf->next_jiffies = jiffies + SAMPLE_TIME;
out:
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index b55398a34fa4..81432ec07012 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -86,7 +86,7 @@ struct msm_rd_state {
struct msm_gem_submit *submit;
/* fifo access is synchronized on the producer side by
- * struct_mutex held by submit code (otherwise we could
+ * gpu->lock held by submit code (otherwise we could
* end up w/ cmds logged in different order than they
* were executed). And read_lock synchronizes the reads
*/
@@ -181,9 +181,12 @@ static int rd_open(struct inode *inode, struct file *file)
uint32_t gpu_id;
int ret = 0;
- mutex_lock(&dev->struct_mutex);
+ if (!gpu)
+ return -ENODEV;
- if (rd->open || !gpu) {
+ mutex_lock(&gpu->lock);
+
+ if (rd->open) {
ret = -EBUSY;
goto out;
}
@@ -200,7 +203,7 @@ static int rd_open(struct inode *inode, struct file *file)
rd_write_section(rd, RD_GPU_ID, &gpu_id, sizeof(gpu_id));
out:
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
return ret;
}
@@ -340,11 +343,10 @@ out_unlock:
msm_gem_unlock(&obj->base);
}
-/* called under struct_mutex */
+/* called under gpu->lock */
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
const char *fmt, ...)
{
- struct drm_device *dev = submit->dev;
struct task_struct *task;
char msg[256];
int i, n;
@@ -355,7 +357,7 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
/* writing into fifo is serialized by caller, and
* rd->read_lock is used to serialize the reads
*/
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&submit->gpu->lock));
if (fmt) {
va_list args;
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 652b1dedd7c1..3bbf574c3bdc 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -21,11 +21,11 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
pm_runtime_get_sync(&gpu->pdev->dev);
/* TODO move submit path over to using a per-ring lock.. */
- mutex_lock(&gpu->dev->struct_mutex);
+ mutex_lock(&gpu->lock);
msm_gpu_submit(gpu, submit);
- mutex_unlock(&gpu->dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
pm_runtime_put(&gpu->pdev->dev);
diff --git a/drivers/gpu/drm/mxsfb/Kconfig b/drivers/gpu/drm/mxsfb/Kconfig
index ee22cd25d3e3..987170e16ebd 100644
--- a/drivers/gpu/drm/mxsfb/Kconfig
+++ b/drivers/gpu/drm/mxsfb/Kconfig
@@ -10,7 +10,7 @@ config DRM_MXSFB
depends on COMMON_CLK
select DRM_MXS
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
select DRM_PANEL
select DRM_PANEL_BRIDGE
help
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 040ed88d362d..3a288d0b848f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -147,6 +147,21 @@ nouveau_dp_detect(struct nouveau_connector *nv_connector,
nv_encoder->dp.link_nr =
dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP && dpcd[DP_DPCD_REV] >= 0x13) {
+ struct drm_dp_aux *aux = &nv_connector->aux;
+ int ret, i;
+ u8 sink_rates[16];
+
+ ret = drm_dp_dpcd_read(aux, DP_SUPPORTED_LINK_RATES, sink_rates, sizeof(sink_rates));
+ if (ret == sizeof(sink_rates)) {
+ for (i = 0; i < ARRAY_SIZE(sink_rates); i += 2) {
+ int val = ((sink_rates[i + 1] << 8) | sink_rates[i]) * 200 / 10;
+ if (val && (i == 0 || val > nv_encoder->dp.link_bw))
+ nv_encoder->dp.link_bw = val;
+ }
+ }
+ }
+
NV_DEBUG(drm, "display: %dx%d dpcd 0x%02x\n",
nv_encoder->dp.link_nr, nv_encoder->dp.link_bw,
dpcd[DP_DPCD_REV]);
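The DP_SUPPORTED_LINK_RATES read added above (eDP 1.4, DPCD rev >= 0x13) returns up to eight little-endian 16-bit entries in units of 200 kHz; the loop converts each to the kHz link-clock convention nouveau uses for dp.link_bw and keeps the largest. A worked example in comment form, using the DP encoding of the standard 2.7 Gbit/s rate:

/*
 * The 2.7 Gbit/s per-lane rate is stored as 2,700,000 kHz / 200 kHz
 * = 13500 = 0x34bc, i.e. sink_rates[i] = 0xbc, sink_rates[i + 1] = 0x34:
 *
 *   val = ((0x34 << 8) | 0xbc) * 200 / 10 = 13500 * 20 = 270000
 *
 * Dividing by the 10 bits per 8b/10b symbol gives 270000 kHz, the
 * 270 MHz link symbol clock, which is the form dp.link_bw is kept in.
 */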
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index b51d690f375f..88d262ba648c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2627,6 +2627,27 @@ nv174_chipset = {
};
static const struct nvkm_device_chip
+nv176_chipset = {
+ .name = "GA106",
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .devinit = { 0x00000001, ga100_devinit_new },
+ .fb = { 0x00000001, ga102_fb_new },
+ .gpio = { 0x00000001, ga102_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, ga100_mc_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, ga100_top_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
nv177_chipset = {
.name = "GA107",
.bar = { 0x00000001, tu102_bar_new },
@@ -3072,6 +3093,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x168: device->chip = &nv168_chipset; break;
case 0x172: device->chip = &nv172_chipset; break;
case 0x174: device->chip = &nv174_chipset; break;
+ case 0x176: device->chip = &nv176_chipset; break;
case 0x177: device->chip = &nv177_chipset; break;
default:
if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index 9669472a2749..8e09315b8fb3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -41,6 +41,10 @@
struct lt_state {
struct nvkm_dp *dp;
+
+ int repeaters;
+ int repeater;
+
u8 stat[6];
u8 conf[4];
bool pc2;
@@ -52,14 +56,26 @@ static int
nvkm_dp_train_sense(struct lt_state *lt, bool pc, u32 delay)
{
struct nvkm_dp *dp = lt->dp;
+ u32 addr;
int ret;
- if (dp->dpcd[DPCD_RC0E_AUX_RD_INTERVAL])
- mdelay(dp->dpcd[DPCD_RC0E_AUX_RD_INTERVAL] * 4);
+ usleep_range(delay, delay * 2);
+
+ if (lt->repeater)
+ addr = DPCD_LTTPR_LANE0_1_STATUS(lt->repeater);
+ else
+ addr = DPCD_LS02;
+
+ ret = nvkm_rdaux(dp->aux, addr, &lt->stat[0], 3);
+ if (ret)
+ return ret;
+
+ if (lt->repeater)
+ addr = DPCD_LTTPR_LANE0_1_ADJUST(lt->repeater);
else
- udelay(delay);
+ addr = DPCD_LS06;
- ret = nvkm_rdaux(dp->aux, DPCD_LS02, lt->stat, 6);
+ ret = nvkm_rdaux(dp->aux, addr, &lt->stat[4], 2);
if (ret)
return ret;
@@ -85,6 +101,7 @@ nvkm_dp_train_drive(struct lt_state *lt, bool pc)
struct nvbios_dpout info;
struct nvbios_dpcfg ocfg;
u8 ver, hdr, cnt, len;
+ u32 addr;
u32 data;
int ret, i;
@@ -113,6 +130,9 @@ nvkm_dp_train_drive(struct lt_state *lt, bool pc)
OUTP_TRACE(&dp->outp, "config lane %d %02x %02x",
i, lt->conf[i], lpc2);
+ if (lt->repeater != lt->repeaters)
+ continue;
+
data = nvbios_dpout_match(bios, dp->outp.info.hasht,
dp->outp.info.hashm,
&ver, &hdr, &cnt, &len, &info);
@@ -129,7 +149,12 @@ nvkm_dp_train_drive(struct lt_state *lt, bool pc)
ocfg.pe, ocfg.tx_pu);
}
- ret = nvkm_wraux(dp->aux, DPCD_LC03(0), lt->conf, 4);
+ if (lt->repeater)
+ addr = DPCD_LTTPR_LANE0_SET(lt->repeater);
+ else
+ addr = DPCD_LC03(0);
+
+ ret = nvkm_wraux(dp->aux, addr, lt->conf, 4);
if (ret)
return ret;
@@ -146,32 +171,59 @@ static void
nvkm_dp_train_pattern(struct lt_state *lt, u8 pattern)
{
struct nvkm_dp *dp = lt->dp;
+ u32 addr;
u8 sink_tp;
OUTP_TRACE(&dp->outp, "training pattern %d", pattern);
dp->outp.ior->func->dp.pattern(dp->outp.ior, pattern);
- nvkm_rdaux(dp->aux, DPCD_LC02, &sink_tp, 1);
+ if (lt->repeater)
+ addr = DPCD_LTTPR_PATTERN_SET(lt->repeater);
+ else
+ addr = DPCD_LC02;
+
+ nvkm_rdaux(dp->aux, addr, &sink_tp, 1);
sink_tp &= ~DPCD_LC02_TRAINING_PATTERN_SET;
- sink_tp |= pattern;
- nvkm_wraux(dp->aux, DPCD_LC02, &sink_tp, 1);
+ sink_tp |= (pattern != 4) ? pattern : 7;
+
+ if (pattern != 0)
+ sink_tp |= DPCD_LC02_SCRAMBLING_DISABLE;
+ else
+ sink_tp &= ~DPCD_LC02_SCRAMBLING_DISABLE;
+ nvkm_wraux(dp->aux, addr, &sink_tp, 1);
}
static int
nvkm_dp_train_eq(struct lt_state *lt)
{
+ struct nvkm_i2c_aux *aux = lt->dp->aux;
bool eq_done = false, cr_done = true;
- int tries = 0, i;
+ int tries = 0, usec = 0, i;
+ u8 data;
- if (lt->dp->dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED)
- nvkm_dp_train_pattern(lt, 3);
- else
- nvkm_dp_train_pattern(lt, 2);
+ if (lt->repeater) {
+ if (!nvkm_rdaux(aux, DPCD_LTTPR_AUX_RD_INTERVAL(lt->repeater), &data, sizeof(data)))
+ usec = (data & DPCD_RC0E_AUX_RD_INTERVAL) * 4000;
+
+ nvkm_dp_train_pattern(lt, 4);
+ } else {
+ if (lt->dp->dpcd[DPCD_RC00_DPCD_REV] >= 0x14 &&
+ lt->dp->dpcd[DPCD_RC03] & DPCD_RC03_TPS4_SUPPORTED)
+ nvkm_dp_train_pattern(lt, 4);
+ else
+ if (lt->dp->dpcd[DPCD_RC00_DPCD_REV] >= 0x12 &&
+ lt->dp->dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED)
+ nvkm_dp_train_pattern(lt, 3);
+ else
+ nvkm_dp_train_pattern(lt, 2);
+
+ usec = (lt->dp->dpcd[DPCD_RC0E] & DPCD_RC0E_AUX_RD_INTERVAL) * 4000;
+ }
do {
if ((tries &&
nvkm_dp_train_drive(lt, lt->pc2)) ||
- nvkm_dp_train_sense(lt, lt->pc2, 400))
+ nvkm_dp_train_sense(lt, lt->pc2, usec ? usec : 400))
break;
eq_done = !!(lt->stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE);
@@ -193,13 +245,16 @@ nvkm_dp_train_cr(struct lt_state *lt)
{
bool cr_done = false, abort = false;
int voltage = lt->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
- int tries = 0, i;
+ int tries = 0, usec = 0, i;
nvkm_dp_train_pattern(lt, 1);
+ if (lt->dp->dpcd[DPCD_RC00_DPCD_REV] < 0x14 && !lt->repeater)
+ usec = (lt->dp->dpcd[DPCD_RC0E] & DPCD_RC0E_AUX_RD_INTERVAL) * 4000;
+
do {
if (nvkm_dp_train_drive(lt, false) ||
- nvkm_dp_train_sense(lt, false, 100))
+ nvkm_dp_train_sense(lt, false, usec ? usec : 100))
break;
cr_done = true;
@@ -223,7 +278,7 @@ nvkm_dp_train_cr(struct lt_state *lt)
}
static int
-nvkm_dp_train_links(struct nvkm_dp *dp)
+nvkm_dp_train_links(struct nvkm_dp *dp, int rate)
{
struct nvkm_ior *ior = dp->outp.ior;
struct nvkm_disp *disp = dp->outp.disp;
@@ -233,13 +288,15 @@ nvkm_dp_train_links(struct nvkm_dp *dp)
.dp = dp,
};
u32 lnkcmp;
- u8 sink[2];
+ u8 sink[2], data;
int ret;
OUTP_DBG(&dp->outp, "training %d x %d MB/s",
ior->dp.nr, ior->dp.bw * 27);
/* Intersect misc. capabilities of the OR and sink. */
+ if (disp->engine.subdev.device->chipset < 0x110)
+ dp->dpcd[DPCD_RC03] &= ~DPCD_RC03_TPS4_SUPPORTED;
if (disp->engine.subdev.device->chipset < 0xd0)
dp->dpcd[DPCD_RC02] &= ~DPCD_RC02_TPS3_SUPPORTED;
lt.pc2 = dp->dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED;
@@ -287,8 +344,22 @@ nvkm_dp_train_links(struct nvkm_dp *dp)
ior->func->dp.power(ior, ior->dp.nr);
+ /* Select LTTPR non-transparent mode if we have a valid configuration,
+ * use transparent mode otherwise.
+ */
+ if (dp->lttpr[0] >= 0x14) {
+ data = DPCD_LTTPR_MODE_TRANSPARENT;
+ nvkm_wraux(dp->aux, DPCD_LTTPR_MODE, &data, sizeof(data));
+
+ if (dp->lttprs) {
+ data = DPCD_LTTPR_MODE_NON_TRANSPARENT;
+ nvkm_wraux(dp->aux, DPCD_LTTPR_MODE, &data, sizeof(data));
+ lt.repeaters = dp->lttprs;
+ }
+ }
+
/* Set desired link configuration on the sink. */
- sink[0] = ior->dp.bw;
+ sink[0] = (dp->rate[rate].dpcd < 0) ? ior->dp.bw : 0;
sink[1] = ior->dp.nr;
if (ior->dp.ef)
sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
@@ -297,12 +368,33 @@ nvkm_dp_train_links(struct nvkm_dp *dp)
if (ret)
return ret;
+ if (dp->rate[rate].dpcd >= 0) {
+ ret = nvkm_rdaux(dp->aux, DPCD_LC15_LINK_RATE_SET, &sink[0], sizeof(sink[0]));
+ if (ret)
+ return ret;
+
+ sink[0] &= ~DPCD_LC15_LINK_RATE_SET_MASK;
+ sink[0] |= dp->rate[rate].dpcd;
+
+ ret = nvkm_wraux(dp->aux, DPCD_LC15_LINK_RATE_SET, &sink[0], sizeof(sink[0]));
+ if (ret)
+ return ret;
+ }
+
/* Attempt to train the link in this configuration. */
- memset(lt.stat, 0x00, sizeof(lt.stat));
- ret = nvkm_dp_train_cr(&lt);
- if (ret == 0)
- ret = nvkm_dp_train_eq(&lt);
- nvkm_dp_train_pattern(&lt, 0);
+ for (lt.repeater = lt.repeaters; lt.repeater >= 0; lt.repeater--) {
+ if (lt.repeater)
+ OUTP_DBG(&dp->outp, "training LTTPR%d", lt.repeater);
+ else
+ OUTP_DBG(&dp->outp, "training sink");
+
+ memset(lt.stat, 0x00, sizeof(lt.stat));
+ ret = nvkm_dp_train_cr(&lt);
+ if (ret == 0)
+ ret = nvkm_dp_train_eq(&lt);
+ nvkm_dp_train_pattern(&lt, 0);
+ }
+
return ret;
}
@@ -345,63 +437,13 @@ nvkm_dp_train_init(struct nvkm_dp *dp)
}
}
-static const struct dp_rates {
- u32 rate;
- u8 bw;
- u8 nr;
-} nvkm_dp_rates[] = {
- { 2160000, 0x14, 4 },
- { 1080000, 0x0a, 4 },
- { 1080000, 0x14, 2 },
- { 648000, 0x06, 4 },
- { 540000, 0x0a, 2 },
- { 540000, 0x14, 1 },
- { 324000, 0x06, 2 },
- { 270000, 0x0a, 1 },
- { 162000, 0x06, 1 },
- {}
-};
-
static int
nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
{
struct nvkm_ior *ior = dp->outp.ior;
- const u8 sink_nr = dp->dpcd[DPCD_RC02] & DPCD_RC02_MAX_LANE_COUNT;
- const u8 sink_bw = dp->dpcd[DPCD_RC01_MAX_LINK_RATE];
- const u8 outp_nr = dp->outp.info.dpconf.link_nr;
- const u8 outp_bw = dp->outp.info.dpconf.link_bw;
- const struct dp_rates *failsafe = NULL, *cfg;
- int ret = -EINVAL;
+ int ret = -EINVAL, nr, rate;
u8 pwr;
- /* Find the lowest configuration of the OR that can support
- * the required link rate.
- *
- * We will refuse to program the OR to lower rates, even if
- * link training fails at higher rates (or even if the sink
- * can't support the rate at all, though the DD is supposed
- * to prevent such situations from happening).
- *
- * Attempting to do so can cause the entire display to hang,
- * and it's better to have a failed modeset than that.
- */
- for (cfg = nvkm_dp_rates; cfg->rate; cfg++) {
- if (cfg->nr <= outp_nr && cfg->bw <= outp_bw) {
- /* Try to respect sink limits too when selecting
- * lowest link configuration.
- */
- if (!failsafe ||
- (cfg->nr <= sink_nr && cfg->bw <= sink_bw))
- failsafe = cfg;
- }
-
- if (failsafe && cfg[1].rate < dataKBps)
- break;
- }
-
- if (WARN_ON(!failsafe))
- return ret;
-
/* Ensure sink is not in a low-power state. */
if (!nvkm_rdaux(dp->aux, DPCD_SC00, &pwr, 1)) {
if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
@@ -411,25 +453,22 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
}
}
+ ior->dp.mst = dp->lt.mst;
+ ior->dp.ef = dp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP;
+ ior->dp.nr = 0;
+
/* Link training. */
- OUTP_DBG(&dp->outp, "training (min: %d x %d MB/s)",
- failsafe->nr, failsafe->bw * 27);
+ OUTP_DBG(&dp->outp, "training");
nvkm_dp_train_init(dp);
- for (cfg = nvkm_dp_rates; ret < 0 && cfg <= failsafe; cfg++) {
- /* Skip configurations not supported by both OR and sink. */
- if ((cfg->nr > outp_nr || cfg->bw > outp_bw ||
- cfg->nr > sink_nr || cfg->bw > sink_bw)) {
- if (cfg != failsafe)
- continue;
- OUTP_ERR(&dp->outp, "link rate unsupported by sink");
+ for (nr = dp->links; ret < 0 && nr; nr >>= 1) {
+ for (rate = 0; ret < 0 && rate < dp->rates; rate++) {
+ if (dp->rate[rate].rate * nr >= dataKBps || WARN_ON(!ior->dp.nr)) {
+ /* Program selected link configuration. */
+ ior->dp.bw = dp->rate[rate].rate / 27000;
+ ior->dp.nr = nr;
+ ret = nvkm_dp_train_links(dp, rate);
+ }
}
- ior->dp.mst = dp->lt.mst;
- ior->dp.ef = dp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP;
- ior->dp.bw = cfg->bw;
- ior->dp.nr = cfg->nr;
-
- /* Program selected link configuration. */
- ret = nvkm_dp_train_links(dp);
}
nvkm_dp_train_fini(dp);
if (ret < 0)
@@ -527,6 +566,47 @@ done:
}
static bool
+nvkm_dp_enable_supported_link_rates(struct nvkm_dp *dp)
+{
+ u8 sink_rates[DPCD_RC10_SUPPORTED_LINK_RATES__SIZE];
+ int i, j, k;
+
+ if (dp->outp.conn->info.type != DCB_CONNECTOR_eDP ||
+ dp->dpcd[DPCD_RC00_DPCD_REV] < 0x13 ||
+ nvkm_rdaux(dp->aux, DPCD_RC10_SUPPORTED_LINK_RATES(0), sink_rates, sizeof(sink_rates)))
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(sink_rates); i += 2) {
+ const u32 rate = ((sink_rates[i + 1] << 8) | sink_rates[i]) * 200 / 10;
+
+ if (!rate || WARN_ON(dp->rates == ARRAY_SIZE(dp->rate)))
+ break;
+
+ if (rate > dp->outp.info.dpconf.link_bw * 27000) {
+ OUTP_DBG(&dp->outp, "rate %d !outp", rate);
+ continue;
+ }
+
+ for (j = 0; j < dp->rates; j++) {
+ if (rate > dp->rate[j].rate) {
+ for (k = dp->rates; k > j; k--)
+ dp->rate[k] = dp->rate[k - 1];
+ break;
+ }
+ }
+
+ dp->rate[j].dpcd = i / 2;
+ dp->rate[j].rate = rate;
+ dp->rates++;
+ }
+
+ for (i = 0; i < dp->rates; i++)
+ OUTP_DBG(&dp->outp, "link_rate[%d] = %d", dp->rate[i].dpcd, dp->rate[i].rate);
+
+ return dp->rates != 0;
+}
+
+static bool
nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
{
struct nvkm_i2c_aux *aux = dp->aux;
@@ -538,9 +618,60 @@ nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
dp->present = true;
}
- if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, dp->dpcd,
- sizeof(dp->dpcd)))
+ /* Detect any LTTPRs before reading DPCD receiver caps. */
+ if (!nvkm_rdaux(aux, DPCD_LTTPR_REV, dp->lttpr, sizeof(dp->lttpr)) &&
+ dp->lttpr[0] >= 0x14 && dp->lttpr[2]) {
+ switch (dp->lttpr[2]) {
+ case 0x80: dp->lttprs = 1; break;
+ case 0x40: dp->lttprs = 2; break;
+ case 0x20: dp->lttprs = 3; break;
+ case 0x10: dp->lttprs = 4; break;
+ case 0x08: dp->lttprs = 5; break;
+ case 0x04: dp->lttprs = 6; break;
+ case 0x02: dp->lttprs = 7; break;
+ case 0x01: dp->lttprs = 8; break;
+ default:
+ /* Unknown LTTPR count, we'll switch to transparent mode. */
+ WARN_ON(1);
+ dp->lttprs = 0;
+ break;
+ }
+ } else {
+ /* No LTTPR support, or zero LTTPR count - don't touch it at all. */
+ memset(dp->lttpr, 0x00, sizeof(dp->lttpr));
+ }
+
+ if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, dp->dpcd, sizeof(dp->dpcd))) {
+ const u8 rates[] = { 0x1e, 0x14, 0x0a, 0x06, 0 };
+ const u8 *rate;
+ int rate_max;
+
+ dp->rates = 0;
+ dp->links = dp->dpcd[DPCD_RC02] & DPCD_RC02_MAX_LANE_COUNT;
+ dp->links = min(dp->links, dp->outp.info.dpconf.link_nr);
+ if (dp->lttprs && dp->lttpr[4])
+ dp->links = min_t(int, dp->links, dp->lttpr[4]);
+
+ rate_max = dp->dpcd[DPCD_RC01_MAX_LINK_RATE];
+ rate_max = min(rate_max, dp->outp.info.dpconf.link_bw);
+ if (dp->lttprs && dp->lttpr[1])
+ rate_max = min_t(int, rate_max, dp->lttpr[1]);
+
+ if (!nvkm_dp_enable_supported_link_rates(dp)) {
+ for (rate = rates; *rate; rate++) {
+ if (*rate <= rate_max) {
+ if (WARN_ON(dp->rates == ARRAY_SIZE(dp->rate)))
+ break;
+
+ dp->rate[dp->rates].dpcd = -1;
+ dp->rate[dp->rates].rate = *rate * 27000;
+ dp->rates++;
+ }
+ }
+ }
+
return true;
+ }
}
if (dp->present) {
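The PHY_REPEATER_CNT byte parsed into dp->lttpr[2] above is one-hot encoded (0x80 means one repeater, 0x01 means eight), so the switch statement is equivalent to the closed form sketched below; the helper name is made up for illustration and assumes a valid one-hot input:

/* Sketch only: closed-form decode of the one-hot PHY_REPEATER_CNT value. */
#include <linux/log2.h>
#include <linux/types.h>

static int lttpr_count(u8 phy_repeater_cnt)
{
	if (!is_power_of_2(phy_repeater_cnt))
		return 0;	/* unknown encoding: caller falls back to transparent mode */

	return 8 - ilog2(phy_repeater_cnt);	/* 0x80 -> 1, ..., 0x01 -> 8 */
}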
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
index e484d0c3b0d4..8e59dd469da6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
@@ -9,10 +9,7 @@
#include <subdev/bios/dp.h>
struct nvkm_dp {
- union {
- struct nvkm_outp base;
- struct nvkm_outp outp;
- };
+ struct nvkm_outp outp;
struct nvbios_dpout info;
u8 version;
@@ -21,8 +18,17 @@ struct nvkm_dp {
struct nvkm_notify hpd;
bool present;
+ u8 lttpr[6];
+ u8 lttprs;
u8 dpcd[16];
+ struct {
+ int dpcd; /* -1, or index into SUPPORTED_LINK_RATES table */
+ u32 rate;
+ } rate[8];
+ int rates;
+ int links;
+
struct mutex mutex;
struct {
atomic_t done;
@@ -42,8 +48,12 @@ void nvkm_dp_disable(struct nvkm_outp *, struct nvkm_ior *);
#define DPCD_RC02_TPS3_SUPPORTED 0x40
#define DPCD_RC02_MAX_LANE_COUNT 0x1f
#define DPCD_RC03 0x00003
+#define DPCD_RC03_TPS4_SUPPORTED 0x80
#define DPCD_RC03_MAX_DOWNSPREAD 0x01
-#define DPCD_RC0E_AUX_RD_INTERVAL 0x0000e
+#define DPCD_RC0E 0x0000e
+#define DPCD_RC0E_AUX_RD_INTERVAL 0x7f
+#define DPCD_RC10_SUPPORTED_LINK_RATES(i) 0x00010
+#define DPCD_RC10_SUPPORTED_LINK_RATES__SIZE 16
/* DPCD Link Configuration */
#define DPCD_LC00_LINK_BW_SET 0x00100
@@ -51,7 +61,8 @@ void nvkm_dp_disable(struct nvkm_outp *, struct nvkm_ior *);
#define DPCD_LC01_ENHANCED_FRAME_EN 0x80
#define DPCD_LC01_LANE_COUNT_SET 0x1f
#define DPCD_LC02 0x00102
-#define DPCD_LC02_TRAINING_PATTERN_SET 0x03
+#define DPCD_LC02_TRAINING_PATTERN_SET 0x0f
+#define DPCD_LC02_SCRAMBLING_DISABLE 0x20
#define DPCD_LC03(l) ((l) + 0x00103)
#define DPCD_LC03_MAX_PRE_EMPHASIS_REACHED 0x20
#define DPCD_LC03_PRE_EMPHASIS_SET 0x18
@@ -67,6 +78,8 @@ void nvkm_dp_disable(struct nvkm_outp *, struct nvkm_ior *);
#define DPCD_LC10_LANE3_POST_CURSOR2_SET 0x30
#define DPCD_LC10_LANE2_MAX_POST_CURSOR2_REACHED 0x04
#define DPCD_LC10_LANE2_POST_CURSOR2_SET 0x03
+#define DPCD_LC15_LINK_RATE_SET 0x00115
+#define DPCD_LC15_LINK_RATE_SET_MASK 0x07
/* DPCD Link/Sink Status */
#define DPCD_LS02 0x00202
@@ -108,4 +121,14 @@ void nvkm_dp_disable(struct nvkm_outp *, struct nvkm_ior *);
#define DPCD_SC00_SET_POWER 0x03
#define DPCD_SC00_SET_POWER_D0 0x01
#define DPCD_SC00_SET_POWER_D3 0x03
+
+#define DPCD_LTTPR_REV 0xf0000
+#define DPCD_LTTPR_MODE 0xf0003
+#define DPCD_LTTPR_MODE_TRANSPARENT 0x55
+#define DPCD_LTTPR_MODE_NON_TRANSPARENT 0xaa
+#define DPCD_LTTPR_PATTERN_SET(i) ((i - 1) * 0x50 + 0xf0010)
+#define DPCD_LTTPR_LANE0_SET(i) ((i - 1) * 0x50 + 0xf0011)
+#define DPCD_LTTPR_AUX_RD_INTERVAL(i) ((i - 1) * 0x50 + 0xf0020)
+#define DPCD_LTTPR_LANE0_1_STATUS(i) ((i - 1) * 0x50 + 0xf0030)
+#define DPCD_LTTPR_LANE0_1_ADJUST(i) ((i - 1) * 0x50 + 0xf0033)
#endif
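For reference, each LTTPR gets its own 0x50-byte block of DPCD registers starting at 0xF0010, which is all the per-repeater macros above encode; a couple of worked values:

/* Worked examples of the per-repeater address macros:
 *   DPCD_LTTPR_LANE0_1_STATUS(1)  = (1 - 1) * 0x50 + 0xf0030 = 0xf0030
 *   DPCD_LTTPR_LANE0_1_STATUS(2)  = (2 - 1) * 0x50 + 0xf0030 = 0xf0080
 *   DPCD_LTTPR_AUX_RD_INTERVAL(3) = (3 - 1) * 0x50 + 0xf0020 = 0xf00c0
 */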
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
index 4d59d02525d9..56b8f4411988 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
@@ -77,7 +77,18 @@ g94_sor_dp_pattern(struct nvkm_ior *sor, int pattern)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 loff = nv50_sor_link(sor);
- nvkm_mask(device, 0x61c10c + loff, 0x0f000000, pattern << 24);
+ u32 data;
+
+ switch (pattern) {
+ case 0: data = 0x00001000; break;
+ case 1: data = 0x01000000; break;
+ case 2: data = 0x02000000; break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ nvkm_mask(device, 0x61c10c + loff, 0x0f001000, data);
}
void
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c
index 033827de9116..d2c05f5c4aa0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c
@@ -37,6 +37,10 @@ ga102_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
case 0x0a: clksor |= 0x00040000; break;
case 0x14: clksor |= 0x00080000; break;
case 0x1e: clksor |= 0x000c0000; break;
+ case 0x08: clksor |= 0x00100000; break;
+ case 0x09: clksor |= 0x00140000; break;
+ case 0x0c: clksor |= 0x00180000; break;
+ case 0x10: clksor |= 0x001c0000; break;
default:
WARN_ON(1);
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index 3b3643fb1019..c431e0b9fc11 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -92,7 +92,19 @@ gf119_sor_dp_pattern(struct nvkm_ior *sor, int pattern)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 soff = nv50_ior_base(sor);
- nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, 0x01010101 * pattern);
+ u32 data;
+
+ switch (pattern) {
+ case 0: data = 0x10101010; break;
+ case 1: data = 0x01010101; break;
+ case 2: data = 0x02020202; break;
+ case 3: data = 0x03030303; break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ nvkm_mask(device, 0x61c110 + soff, 0x1f1f1f1f, data);
}
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
index 38045c92197f..3696bfd3bfd7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
@@ -28,11 +28,23 @@ gm107_sor_dp_pattern(struct nvkm_ior *sor, int pattern)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 soff = nv50_ior_base(sor);
- const u32 data = 0x01010101 * pattern;
+ u32 mask = 0x1f1f1f1f, data;
+
+ switch (pattern) {
+ case 0: data = 0x10101010; break;
+ case 1: data = 0x01010101; break;
+ case 2: data = 0x02020202; break;
+ case 3: data = 0x03030303; break;
+ case 4: data = 0x1b1b1b1b; break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
if (sor->asy.link & 1)
- nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
+ nvkm_mask(device, 0x61c110 + soff, mask, data);
else
- nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
+ nvkm_mask(device, 0x61c12c + soff, mask, data);
}
static const struct nvkm_ior_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
index cdb1ead26d84..82b4c8e1457c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
@@ -207,11 +207,13 @@ int
gm200_acr_wpr_parse(struct nvkm_acr *acr)
{
const struct wpr_header *hdr = (void *)acr->wpr_fw->data;
+ struct nvkm_acr_lsfw *lsfw;
while (hdr->falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID) {
wpr_header_dump(&acr->subdev, hdr);
- if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id))
- return -ENOMEM;
+ lsfw = nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id);
+ if (IS_ERR(lsfw))
+ return PTR_ERR(lsfw);
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
index fb9132a39bb1..fd97a935a380 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
@@ -161,11 +161,13 @@ int
gp102_acr_wpr_parse(struct nvkm_acr *acr)
{
const struct wpr_header_v1 *hdr = (void *)acr->wpr_fw->data;
+ struct nvkm_acr_lsfw *lsfw;
while (hdr->falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID) {
wpr_header_v1_dump(&acr->subdev, hdr);
- if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id))
- return -ENOMEM;
+ lsfw = nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id);
+ if (IS_ERR(lsfw))
+ return PTR_ERR(lsfw);
}
return 0;
diff --git a/drivers/gpu/drm/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile
index 21e8277ff88f..710b4e0abcf0 100644
--- a/drivers/gpu/drm/omapdrm/Makefile
+++ b/drivers/gpu/drm/omapdrm/Makefile
@@ -9,6 +9,7 @@ omapdrm-y := omap_drv.o \
omap_debugfs.o \
omap_crtc.o \
omap_plane.o \
+ omap_overlay.o \
omap_encoder.o \
omap_fb.o \
omap_gem.o \
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index b440147ae28b..c4de142cc85b 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -92,6 +92,8 @@ struct dispc_features {
u8 mgr_height_start;
u16 mgr_width_max;
u16 mgr_height_max;
+ u16 ovl_width_max;
+ u16 ovl_height_max;
unsigned long max_lcd_pclk;
unsigned long max_tv_pclk;
unsigned int max_downscale;
@@ -1279,8 +1281,8 @@ static u32 dispc_ovl_get_burst_size(struct dispc_device *dispc,
return dispc->feat->burst_size_unit * 8;
}
-static bool dispc_ovl_color_mode_supported(struct dispc_device *dispc,
- enum omap_plane_id plane, u32 fourcc)
+bool dispc_ovl_color_mode_supported(struct dispc_device *dispc,
+ enum omap_plane_id plane, u32 fourcc)
{
const u32 *modes;
unsigned int i;
@@ -2487,6 +2489,11 @@ static int dispc_ovl_calc_scaling_44xx(struct dispc_device *dispc,
return 0;
}
+enum omap_overlay_caps dispc_ovl_get_caps(struct dispc_device *dispc, enum omap_plane_id plane)
+{
+ return dispc->feat->overlay_caps[plane];
+}
+
#define DIV_FRAC(dividend, divisor) \
((dividend) * 100 / (divisor) - ((dividend) / (divisor) * 100))
@@ -2599,6 +2606,12 @@ static int dispc_ovl_calc_scaling(struct dispc_device *dispc,
return 0;
}
+void dispc_ovl_get_max_size(struct dispc_device *dispc, u16 *width, u16 *height)
+{
+ *width = dispc->feat->ovl_width_max;
+ *height = dispc->feat->ovl_height_max;
+}
+
static int dispc_ovl_setup_common(struct dispc_device *dispc,
enum omap_plane_id plane,
enum omap_overlay_caps caps,
@@ -4240,6 +4253,8 @@ static const struct dispc_features omap24xx_dispc_feats = {
.mgr_height_start = 26,
.mgr_width_max = 2048,
.mgr_height_max = 2048,
+ .ovl_width_max = 2048,
+ .ovl_height_max = 2048,
.max_lcd_pclk = 66500000,
.max_downscale = 2,
/*
@@ -4278,6 +4293,8 @@ static const struct dispc_features omap34xx_rev1_0_dispc_feats = {
.mgr_height_start = 26,
.mgr_width_max = 2048,
.mgr_height_max = 2048,
+ .ovl_width_max = 2048,
+ .ovl_height_max = 2048,
.max_lcd_pclk = 173000000,
.max_tv_pclk = 59000000,
.max_downscale = 4,
@@ -4313,6 +4330,8 @@ static const struct dispc_features omap34xx_rev3_0_dispc_feats = {
.mgr_height_start = 26,
.mgr_width_max = 2048,
.mgr_height_max = 2048,
+ .ovl_width_max = 2048,
+ .ovl_height_max = 2048,
.max_lcd_pclk = 173000000,
.max_tv_pclk = 59000000,
.max_downscale = 4,
@@ -4348,6 +4367,8 @@ static const struct dispc_features omap36xx_dispc_feats = {
.mgr_height_start = 26,
.mgr_width_max = 2048,
.mgr_height_max = 2048,
+ .ovl_width_max = 2048,
+ .ovl_height_max = 2048,
.max_lcd_pclk = 173000000,
.max_tv_pclk = 59000000,
.max_downscale = 4,
@@ -4383,6 +4404,8 @@ static const struct dispc_features am43xx_dispc_feats = {
.mgr_height_start = 26,
.mgr_width_max = 2048,
.mgr_height_max = 2048,
+ .ovl_width_max = 2048,
+ .ovl_height_max = 2048,
.max_lcd_pclk = 173000000,
.max_tv_pclk = 59000000,
.max_downscale = 4,
@@ -4418,6 +4441,8 @@ static const struct dispc_features omap44xx_dispc_feats = {
.mgr_height_start = 26,
.mgr_width_max = 2048,
.mgr_height_max = 2048,
+ .ovl_width_max = 2048,
+ .ovl_height_max = 2048,
.max_lcd_pclk = 170000000,
.max_tv_pclk = 185625000,
.max_downscale = 4,
@@ -4457,6 +4482,8 @@ static const struct dispc_features omap54xx_dispc_feats = {
.mgr_height_start = 27,
.mgr_width_max = 4096,
.mgr_height_max = 4096,
+ .ovl_width_max = 2048,
+ .ovl_height_max = 4096,
.max_lcd_pclk = 170000000,
.max_tv_pclk = 192000000,
.max_downscale = 4,
@@ -4842,7 +4869,7 @@ static int dispc_remove(struct platform_device *pdev)
return 0;
}
-static int dispc_runtime_suspend(struct device *dev)
+static __maybe_unused int dispc_runtime_suspend(struct device *dev)
{
struct dispc_device *dispc = dev_get_drvdata(dev);
@@ -4857,7 +4884,7 @@ static int dispc_runtime_suspend(struct device *dev)
return 0;
}
-static int dispc_runtime_resume(struct device *dev)
+static __maybe_unused int dispc_runtime_resume(struct device *dev)
{
struct dispc_device *dispc = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index d730bf67fed9..a6845856cbce 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -5058,7 +5058,7 @@ static int dsi_remove(struct platform_device *pdev)
return 0;
}
-static int dsi_runtime_suspend(struct device *dev)
+static __maybe_unused int dsi_runtime_suspend(struct device *dev)
{
struct dsi_data *dsi = dev_get_drvdata(dev);
@@ -5071,7 +5071,7 @@ static int dsi_runtime_suspend(struct device *dev)
return 0;
}
-static int dsi_runtime_resume(struct device *dev)
+static __maybe_unused int dsi_runtime_resume(struct device *dev)
{
struct dsi_data *dsi = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 66db28bfe824..69b3e15b9356 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1569,7 +1569,7 @@ static void dss_shutdown(struct platform_device *pdev)
DSSDBG("shutdown\n");
}
-static int dss_runtime_suspend(struct device *dev)
+static __maybe_unused int dss_runtime_suspend(struct device *dev)
{
struct dss_device *dss = dev_get_drvdata(dev);
@@ -1581,7 +1581,7 @@ static int dss_runtime_suspend(struct device *dev)
return 0;
}
-static int dss_runtime_resume(struct device *dev)
+static __maybe_unused int dss_runtime_resume(struct device *dev)
{
struct dss_device *dss = dev_get_drvdata(dev);
int r;
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index a547527bb2f3..4ff02fbc0e71 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -397,6 +397,11 @@ int dispc_get_num_mgrs(struct dispc_device *dispc);
const u32 *dispc_ovl_get_color_modes(struct dispc_device *dispc,
enum omap_plane_id plane);
+void dispc_ovl_get_max_size(struct dispc_device *dispc, u16 *width, u16 *height);
+bool dispc_ovl_color_mode_supported(struct dispc_device *dispc,
+ enum omap_plane_id plane, u32 fourcc);
+enum omap_overlay_caps dispc_ovl_get_caps(struct dispc_device *dispc, enum omap_plane_id plane);
+
u32 dispc_read_irqstatus(struct dispc_device *dispc);
void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask);
void dispc_write_irqenable(struct dispc_device *dispc, u32 mask);
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 508fddd376cf..4480b69ab5a7 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -879,7 +879,7 @@ static int venc_remove(struct platform_device *pdev)
return 0;
}
-static int venc_runtime_suspend(struct device *dev)
+static __maybe_unused int venc_runtime_suspend(struct device *dev)
{
struct venc_device *venc = dev_get_drvdata(dev);
@@ -889,7 +889,7 @@ static int venc_runtime_suspend(struct device *dev)
return 0;
}
-static int venc_runtime_resume(struct device *dev)
+static __maybe_unused int venc_runtime_resume(struct device *dev)
{
struct venc_device *venc = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index c05d3975cb31..2720a58ccd90 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -117,6 +117,102 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
dispc_runtime_put(priv->dispc);
}
+static int drm_atomic_state_normalized_zpos_cmp(const void *a, const void *b)
+{
+ const struct drm_plane_state *sa = *(struct drm_plane_state **)a;
+ const struct drm_plane_state *sb = *(struct drm_plane_state **)b;
+
+ if (sa->normalized_zpos != sb->normalized_zpos)
+ return sa->normalized_zpos - sb->normalized_zpos;
+ else
+ return sa->plane->base.id - sb->plane->base.id;
+}
+
+/*
+ * This replaces drm_atomic_normalize_zpos() to handle the dual overlay case.
+ *
+ * Since both halves need to appear side by side, the zpos is
+ * recalculated when dealing with dual overlay cases so that the other
+ * planes' zpos stays consistent.
+ */
+static int omap_atomic_update_normalize_zpos(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_state, *new_state;
+ struct drm_plane *plane;
+ int c, i, n, inc;
+ int total_planes = dev->mode_config.num_total_plane;
+ struct drm_plane_state **states;
+ int ret = 0;
+
+ states = kmalloc_array(total_planes, sizeof(*states), GFP_KERNEL);
+ if (!states)
+ return -ENOMEM;
+
+ for_each_oldnew_crtc_in_state(state, crtc, old_state, new_state, c) {
+ if (old_state->plane_mask == new_state->plane_mask &&
+ !new_state->zpos_changed)
+ continue;
+
+ /* Reset plane increment and index value for every crtc */
+ n = 0;
+
+ /*
+ * The normalization process might create new states for planes
+ * whose normalized_zpos has to be recalculated.
+ */
+ drm_for_each_plane_mask(plane, dev, new_state->plane_mask) {
+ struct drm_plane_state *plane_state =
+ drm_atomic_get_plane_state(new_state->state,
+ plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto done;
+ }
+ states[n++] = plane_state;
+ }
+
+ sort(states, n, sizeof(*states),
+ drm_atomic_state_normalized_zpos_cmp, NULL);
+
+ for (i = 0, inc = 0; i < n; i++) {
+ plane = states[i]->plane;
+
+ states[i]->normalized_zpos = i + inc;
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] updated normalized zpos value %d\n",
+ plane->base.id, plane->name,
+ states[i]->normalized_zpos);
+
+ if (is_omap_plane_dual_overlay(states[i]))
+ inc++;
+ }
+ new_state->zpos_changed = true;
+ }
+
+done:
+ kfree(states);
+ return ret;
+}
+
+static int omap_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int ret;
+
+ ret = drm_atomic_helper_check(dev, state);
+ if (ret)
+ return ret;
+
+ if (dev->mode_config.normalize_zpos) {
+ ret = omap_atomic_update_normalize_zpos(dev, state);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct drm_mode_config_helper_funcs omap_mode_config_helper_funcs = {
.atomic_commit_tail = omap_atomic_commit_tail,
};
@@ -124,10 +220,86 @@ static const struct drm_mode_config_helper_funcs omap_mode_config_helper_funcs =
static const struct drm_mode_config_funcs omap_mode_config_funcs = {
.fb_create = omap_framebuffer_create,
.output_poll_changed = drm_fb_helper_output_poll_changed,
- .atomic_check = drm_atomic_helper_check,
+ .atomic_check = omap_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};
+/* Global/shared object state funcs */
+
+/*
+ * This is a helper that returns the private state currently in operation.
+ * Note that this would return the "old_state" if called in the atomic check
+ * path, and the "new_state" after the atomic swap has been done.
+ */
+struct omap_global_state *
+omap_get_existing_global_state(struct omap_drm_private *priv)
+{
+ return to_omap_global_state(priv->glob_obj.state);
+}
+
+/*
+ * This acquires the modeset lock set aside for global state and creates
+ * a new duplicated private object state.
+ */
+struct omap_global_state *__must_check
+omap_get_global_state(struct drm_atomic_state *s)
+{
+ struct omap_drm_private *priv = s->dev->dev_private;
+ struct drm_private_state *priv_state;
+
+ priv_state = drm_atomic_get_private_obj_state(s, &priv->glob_obj);
+ if (IS_ERR(priv_state))
+ return ERR_CAST(priv_state);
+
+ return to_omap_global_state(priv_state);
+}
+
+static struct drm_private_state *
+omap_global_duplicate_state(struct drm_private_obj *obj)
+{
+ struct omap_global_state *state;
+
+ state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+ return &state->base;
+}
+
+static void omap_global_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct omap_global_state *omap_state = to_omap_global_state(state);
+
+ kfree(omap_state);
+}
+
+static const struct drm_private_state_funcs omap_global_state_funcs = {
+ .atomic_duplicate_state = omap_global_duplicate_state,
+ .atomic_destroy_state = omap_global_destroy_state,
+};
+
+static int omap_global_obj_init(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_global_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ drm_atomic_private_obj_init(dev, &priv->glob_obj, &state->base,
+ &omap_global_state_funcs);
+ return 0;
+}
+
+static void omap_global_obj_fini(struct omap_drm_private *priv)
+{
+ drm_atomic_private_obj_fini(&priv->glob_obj);
+}
+
static void omap_disconnect_pipelines(struct drm_device *ddev)
{
struct omap_drm_private *priv = ddev->dev_private;
@@ -231,8 +403,6 @@ static int omap_modeset_init(struct drm_device *dev)
if (!omapdss_stack_is_ready())
return -EPROBE_DEFER;
- drm_mode_config_init(dev);
-
ret = omap_modeset_init_properties(dev);
if (ret < 0)
return ret;
@@ -583,10 +753,20 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
omap_gem_init(ddev);
+ drm_mode_config_init(ddev);
+
+ ret = omap_global_obj_init(ddev);
+ if (ret)
+ goto err_gem_deinit;
+
+ ret = omap_hwoverlays_init(priv);
+ if (ret)
+ goto err_free_priv_obj;
+
ret = omap_modeset_init(ddev);
if (ret) {
dev_err(priv->dev, "omap_modeset_init failed: ret=%d\n", ret);
- goto err_gem_deinit;
+ goto err_free_overlays;
}
/* Initialize vblank handling, start with all CRTCs disabled. */
@@ -618,7 +798,12 @@ err_cleanup_helpers:
omap_fbdev_fini(ddev);
err_cleanup_modeset:
omap_modeset_fini(ddev);
+err_free_overlays:
+ omap_hwoverlays_destroy(priv);
+err_free_priv_obj:
+ omap_global_obj_fini(priv);
err_gem_deinit:
+ drm_mode_config_cleanup(ddev);
omap_gem_deinit(ddev);
destroy_workqueue(priv->wq);
omap_disconnect_pipelines(ddev);
@@ -642,6 +827,9 @@ static void omapdrm_cleanup(struct omap_drm_private *priv)
drm_atomic_helper_shutdown(ddev);
omap_modeset_fini(ddev);
+ omap_hwoverlays_destroy(priv);
+ omap_global_obj_fini(priv);
+ drm_mode_config_cleanup(ddev);
omap_gem_deinit(ddev);
destroy_workqueue(priv->wq);
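To make omap_atomic_update_normalize_zpos() above concrete, here is a hypothetical three-plane state (plane names invented for the example):

/* Hypothetical example of the renormalization:
 *
 *   sorted by zpos      dual overlay?   normalized_zpos
 *   plane A (zpos 0)    yes             0   (its r_overlay occupies 1)
 *   plane B (zpos 1)    no              2
 *   plane C (zpos 2)    no              3
 *
 * "inc" is bumped after each dual-overlay plane, so every later plane
 * stacks above both halves of it.
 */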
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 591d4c273f02..825960fd3ea9 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -14,6 +14,7 @@
#include "dss/omapdss.h"
#include "dss/dss.h"
+#include <drm/drm_atomic.h>
#include <drm/drm_gem.h>
#include <drm/omap_drm.h>
@@ -24,6 +25,7 @@
#include "omap_gem.h"
#include "omap_irq.h"
#include "omap_plane.h"
+#include "omap_overlay.h"
#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__) /* verbose debug */
@@ -40,6 +42,19 @@ struct omap_drm_pipeline {
unsigned int alias_id;
};
+/*
+ * Global private object state for tracking resources that are shared across
+ * multiple kms objects (planes/crtcs/etc).
+ */
+#define to_omap_global_state(x) container_of(x, struct omap_global_state, base)
+
+struct omap_global_state {
+ struct drm_private_state base;
+
+ /* global atomic state of assignment between overlays and planes */
+ struct drm_plane *hwoverlay_to_plane[8];
+};
+
struct omap_drm_private {
struct drm_device *ddev;
struct device *dev;
@@ -57,6 +72,11 @@ struct omap_drm_private {
unsigned int num_planes;
struct drm_plane *planes[8];
+ unsigned int num_ovls;
+ struct omap_hw_overlay *overlays[8];
+
+ struct drm_private_obj glob_obj;
+
struct drm_fb_helper *fbdev;
struct workqueue_struct *wq;
@@ -85,4 +105,8 @@ struct omap_drm_private {
void omap_debugfs_init(struct drm_minor *minor);
+struct omap_global_state * __must_check omap_get_global_state(struct drm_atomic_state *s);
+
+struct omap_global_state *omap_get_existing_global_state(struct omap_drm_private *priv);
+
#endif /* __OMAPDRM_DRV_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 190afc564914..895e66b08a81 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -131,7 +131,9 @@ static u32 drm_rotation_to_tiler(unsigned int drm_rot)
/* update ovl info for scanout, handles cases of multi-planar fb's, etc.
*/
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
- struct drm_plane_state *state, struct omap_overlay_info *info)
+ struct drm_plane_state *state,
+ struct omap_overlay_info *info,
+ struct omap_overlay_info *r_info)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
const struct drm_format_info *format = omap_fb->format;
@@ -218,6 +220,35 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
} else {
info->p_uv_addr = 0;
}
+
+ if (r_info) {
+ info->width /= 2;
+ info->out_width /= 2;
+
+ *r_info = *info;
+
+ if (fb->format->is_yuv) {
+ if (info->width & 1) {
+ info->width++;
+ r_info->width--;
+ }
+
+ if (info->out_width & 1) {
+ info->out_width++;
+ r_info->out_width--;
+ }
+ }
+
+ r_info->pos_x = info->pos_x + info->out_width;
+
+ r_info->paddr = get_linear_addr(fb, format, 0,
+ x + info->width, y);
+ if (fb->format->format == DRM_FORMAT_NV12) {
+ r_info->p_uv_addr =
+ get_linear_addr(fb, format, 1,
+ x + info->width, y);
+ }
+ }
}
/* pin, prepare for scanout: */
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.h b/drivers/gpu/drm/omapdrm/omap_fb.h
index c0e19aed8220..b75f0b5ef1d8 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.h
+++ b/drivers/gpu/drm/omapdrm/omap_fb.h
@@ -26,7 +26,9 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
int omap_framebuffer_pin(struct drm_framebuffer *fb);
void omap_framebuffer_unpin(struct drm_framebuffer *fb);
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
- struct drm_plane_state *state, struct omap_overlay_info *info);
+ struct drm_plane_state *state,
+ struct omap_overlay_info *info,
+ struct omap_overlay_info *r_info);
bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb);
void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
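A quick numeric illustration of the dual-overlay split performed in omap_framebuffer_update_scanout() above (all numbers are hypothetical):

/* Hypothetical dual-overlay split, YUV framebuffer, width 1286, pos_x 0:
 *
 *   after the /= 2:      info.width = info.out_width = 643 (both halves)
 *   odd YUV width fixup: info.width/out_width = 644, r_info side = 642
 *   right half:          r_info.pos_x  = 0 + 644
 *                        r_info.paddr  = address of pixel column 644
 *                        r_info.p_uv_addr likewise for the NV12 UV plane
 */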
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 38af6195d959..b0fa17409b66 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -789,7 +789,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
if (omap_obj->flags & OMAP_BO_TILED_MASK) {
block = tiler_reserve_2d(fmt,
omap_obj->width,
- omap_obj->height, 0);
+ omap_obj->height, PAGE_SIZE);
} else {
block = tiler_reserve_1d(obj->size);
}
@@ -851,6 +851,11 @@ static void omap_gem_unpin_locked(struct drm_gem_object *obj)
return;
if (refcount_dec_and_test(&omap_obj->dma_addr_cnt)) {
+ if (omap_obj->sgt) {
+ sg_free_table(omap_obj->sgt);
+ kfree(omap_obj->sgt);
+ omap_obj->sgt = NULL;
+ }
ret = tiler_unpin(omap_obj->block);
if (ret) {
dev_err(obj->dev->dev,
@@ -963,6 +968,78 @@ int omap_gem_put_pages(struct drm_gem_object *obj)
return 0;
}
+struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ dma_addr_t addr;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+ unsigned int count, len, stride, i;
+ int ret;
+
+ ret = omap_gem_pin(obj, &addr);
+ if (ret)
+ return ERR_PTR(ret);
+
+ mutex_lock(&omap_obj->lock);
+
+ sgt = omap_obj->sgt;
+ if (sgt)
+ goto out;
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ ret = -ENOMEM;
+ goto err_unpin;
+ }
+
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
+ enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
+
+ len = omap_obj->width << (int)fmt;
+ count = omap_obj->height;
+ stride = tiler_stride(fmt, 0);
+ } else {
+ len = obj->size;
+ count = 1;
+ stride = 0;
+ }
+
+ ret = sg_alloc_table(sgt, count, GFP_KERNEL);
+ if (ret)
+ goto err_free;
+
+ for_each_sg(sgt->sgl, sg, count, i) {
+ sg_set_page(sg, phys_to_page(addr), len, offset_in_page(addr));
+ sg_dma_address(sg) = addr;
+ sg_dma_len(sg) = len;
+
+ addr += stride;
+ }
+
+ omap_obj->sgt = sgt;
+out:
+ mutex_unlock(&omap_obj->lock);
+ return sgt;
+
+err_free:
+ kfree(sgt);
+err_unpin:
+ mutex_unlock(&omap_obj->lock);
+ omap_gem_unpin(obj);
+ return ERR_PTR(ret);
+}
+
+void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+ if (WARN_ON(omap_obj->sgt != sgt))
+ return;
+
+ omap_gem_unpin(obj);
+}
+
#ifdef CONFIG_DRM_FBDEV_EMULATION
/*
* Get kernel virtual address for CPU access.. this more or less only
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.h b/drivers/gpu/drm/omapdrm/omap_gem.h
index eda9b4839c30..19209e319663 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.h
+++ b/drivers/gpu/drm/omapdrm/omap_gem.h
@@ -82,5 +82,7 @@ u32 omap_gem_flags(struct drm_gem_object *obj);
int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
int x, int y, dma_addr_t *dma_addr);
int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient);
+struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj);
+void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt);
#endif /* __OMAPDRM_GEM_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index 809f86cfc540..57af3d97be77 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -23,45 +23,21 @@ static struct sg_table *omap_gem_map_dma_buf(
{
struct drm_gem_object *obj = attachment->dmabuf->priv;
struct sg_table *sg;
- dma_addr_t dma_addr;
- int ret;
-
- sg = kzalloc(sizeof(*sg), GFP_KERNEL);
- if (!sg)
- return ERR_PTR(-ENOMEM);
-
- /* camera, etc, need physically contiguous.. but we need a
- * better way to know this..
- */
- ret = omap_gem_pin(obj, &dma_addr);
- if (ret)
- goto out;
-
- ret = sg_alloc_table(sg, 1, GFP_KERNEL);
- if (ret)
- goto out;
-
- sg_init_table(sg->sgl, 1);
- sg_dma_len(sg->sgl) = obj->size;
- sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(dma_addr)), obj->size, 0);
- sg_dma_address(sg->sgl) = dma_addr;
+ sg = omap_gem_get_sg(obj);
+ if (IS_ERR(sg))
+ return sg;
/* this must be after omap_gem_pin() to ensure we have pages attached */
omap_gem_dma_sync_buffer(obj, dir);
return sg;
-out:
- kfree(sg);
- return ERR_PTR(ret);
}
static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
struct sg_table *sg, enum dma_data_direction dir)
{
struct drm_gem_object *obj = attachment->dmabuf->priv;
- omap_gem_unpin(obj);
- sg_free_table(sg);
- kfree(sg);
+ omap_gem_put_sg(obj, sg);
}
static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
@@ -114,7 +90,7 @@ struct dma_buf *omap_gem_prime_export(struct drm_gem_object *obj, int flags)
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
exp_info.ops = &omap_dmabuf_ops;
- exp_info.size = obj->size;
+ exp_info.size = omap_gem_mmap_size(obj);
exp_info.flags = flags;
exp_info.priv = obj;
diff --git a/drivers/gpu/drm/omapdrm/omap_overlay.c b/drivers/gpu/drm/omapdrm/omap_overlay.c
new file mode 100644
index 000000000000..10730c9b2752
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_overlay.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Benoit Parrot <bparrot@ti.com>
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "omap_dmm_tiler.h"
+#include "omap_drv.h"
+
+/*
+ * overlay funcs
+ */
+static const char * const overlay_id_to_name[] = {
+ [OMAP_DSS_GFX] = "gfx",
+ [OMAP_DSS_VIDEO1] = "vid1",
+ [OMAP_DSS_VIDEO2] = "vid2",
+ [OMAP_DSS_VIDEO3] = "vid3",
+};
+
+/*
+ * Find a free overlay with the required caps and supported fourcc
+ */
+static struct omap_hw_overlay *
+omap_plane_find_free_overlay(struct drm_device *dev, struct drm_plane *hwoverlay_to_plane[],
+ u32 caps, u32 fourcc)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ int i;
+
+ DBG("caps: %x fourcc: %x", caps, fourcc);
+
+ for (i = 0; i < priv->num_ovls; i++) {
+ struct omap_hw_overlay *cur = priv->overlays[i];
+
+ DBG("%d: id: %d cur->caps: %x",
+ cur->idx, cur->id, cur->caps);
+
+ /* skip if already in-use */
+ if (hwoverlay_to_plane[cur->idx])
+ continue;
+
+ /* skip if it doesn't support some required caps: */
+ if (caps & ~cur->caps)
+ continue;
+
+ /* check supported format */
+ if (!dispc_ovl_color_mode_supported(priv->dispc,
+ cur->id, fourcc))
+ continue;
+
+ return cur;
+ }
+
+ DBG("no match");
+ return NULL;
+}
+
+/*
+ * Assign a new overlay to a plane with the required caps and supported fourcc
+ * If a plane need a new overlay, the previous one should have been released
+ * with omap_overlay_release()
+ * This should be called from the plane atomic_check() in order to prepare the
+ * next global overlay_map to be enabled when atomic transaction is valid.
+ */
+int omap_overlay_assign(struct drm_atomic_state *s, struct drm_plane *plane,
+ u32 caps, u32 fourcc, struct omap_hw_overlay **overlay,
+ struct omap_hw_overlay **r_overlay)
+{
+ /* Get the global state of the current atomic transaction */
+ struct omap_global_state *state = omap_get_global_state(s);
+ struct drm_plane **overlay_map = state->hwoverlay_to_plane;
+ struct omap_hw_overlay *ovl, *r_ovl;
+
+ ovl = omap_plane_find_free_overlay(s->dev, overlay_map, caps, fourcc);
+ if (!ovl)
+ return -ENOMEM;
+
+ overlay_map[ovl->idx] = plane;
+ *overlay = ovl;
+
+ if (r_overlay) {
+ r_ovl = omap_plane_find_free_overlay(s->dev, overlay_map,
+ caps, fourcc);
+ if (!r_ovl) {
+ overlay_map[ovl->idx] = NULL;
+ *overlay = NULL;
+ return -ENOMEM;
+ }
+
+ overlay_map[r_ovl->idx] = plane;
+ *r_overlay = r_ovl;
+ }
+
+ DBG("%s: assign to plane %s caps %x", ovl->name, plane->name, caps);
+
+ if (r_overlay) {
+ DBG("%s: assign to right of plane %s caps %x",
+ r_ovl->name, plane->name, caps);
+ }
+
+ return 0;
+}
+
+/*
+ * Release an overlay from a plane if the plane gets not visible or the plane
+ * need a new overlay if overlay caps changes.
+ * This should be called from the plane atomic_check() in order to prepare the
+ * next global overlay_map to be enabled when atomic transaction is valid.
+ */
+void omap_overlay_release(struct drm_atomic_state *s, struct omap_hw_overlay *overlay)
+{
+ /* Get the global state of the current atomic transaction */
+ struct omap_global_state *state = omap_get_global_state(s);
+ struct drm_plane **overlay_map = state->hwoverlay_to_plane;
+
+ if (!overlay)
+ return;
+
+ if (WARN_ON(!overlay_map[overlay->idx]))
+ return;
+
+ DBG("%s: release from plane %s", overlay->name, overlay_map[overlay->idx]->name);
+
+ overlay_map[overlay->idx] = NULL;
+}
+
+/*
+ * Update an overlay state that was attached to a plane before the current atomic state.
+ * This should be called from the plane atomic_update() or atomic_disable(),
+ * where an overlay association to a plane could have changed between the old and current
+ * atomic state.
+ */
+void omap_overlay_update_state(struct omap_drm_private *priv,
+ struct omap_hw_overlay *overlay)
+{
+ struct omap_global_state *state = omap_get_existing_global_state(priv);
+ struct drm_plane **overlay_map = state->hwoverlay_to_plane;
+
+ /* If this overlay is no longer used, disable it */
+ if (!overlay_map[overlay->idx]) {
+ DBG("%s: disabled", overlay->name);
+
+ /* disable the overlay */
+ dispc_ovl_enable(priv->dispc, overlay->id, false);
+ }
+}
+
+static void omap_overlay_destroy(struct omap_hw_overlay *overlay)
+{
+ kfree(overlay);
+}
+
+static struct omap_hw_overlay *omap_overlay_init(enum omap_plane_id overlay_id,
+ enum omap_overlay_caps caps)
+{
+ struct omap_hw_overlay *overlay;
+
+ overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
+ if (!overlay)
+ return ERR_PTR(-ENOMEM);
+
+ overlay->name = overlay_id_to_name[overlay_id];
+ overlay->id = overlay_id;
+ overlay->caps = caps;
+
+ return overlay;
+}
+
+int omap_hwoverlays_init(struct omap_drm_private *priv)
+{
+ static const enum omap_plane_id hw_plane_ids[] = {
+ OMAP_DSS_GFX, OMAP_DSS_VIDEO1,
+ OMAP_DSS_VIDEO2, OMAP_DSS_VIDEO3,
+ };
+ u32 num_overlays = dispc_get_num_ovls(priv->dispc);
+ enum omap_overlay_caps caps;
+ int i, ret;
+
+ for (i = 0; i < num_overlays; i++) {
+ struct omap_hw_overlay *overlay;
+
+ caps = dispc_ovl_get_caps(priv->dispc, hw_plane_ids[i]);
+ overlay = omap_overlay_init(hw_plane_ids[i], caps);
+ if (IS_ERR(overlay)) {
+ ret = PTR_ERR(overlay);
+ dev_err(priv->dev, "failed to construct overlay for %s (%d)\n",
+ overlay_id_to_name[i], ret);
+ omap_hwoverlays_destroy(priv);
+ return ret;
+ }
+ overlay->idx = priv->num_ovls;
+ priv->overlays[priv->num_ovls++] = overlay;
+ }
+
+ return 0;
+}
+
+void omap_hwoverlays_destroy(struct omap_drm_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_ovls; i++) {
+ omap_overlay_destroy(priv->overlays[i]);
+ priv->overlays[i] = NULL;
+ }
+
+ priv->num_ovls = 0;
+}
diff --git a/drivers/gpu/drm/omapdrm/omap_overlay.h b/drivers/gpu/drm/omapdrm/omap_overlay.h
new file mode 100644
index 000000000000..e36a43f35563
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_overlay.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Benoit Parrot <bparrot@ti.com>
+ */
+
+#ifndef __OMAPDRM_OVERLAY_H__
+#define __OMAPDRM_OVERLAY_H__
+
+#include <linux/types.h>
+
+enum drm_plane_type;
+
+struct drm_device;
+struct drm_mode_object;
+struct drm_plane;
+
+/* Used to associate a HW overlay/plane to a plane */
+struct omap_hw_overlay {
+ unsigned int idx;
+
+ const char *name;
+ enum omap_plane_id id;
+
+ enum omap_overlay_caps caps;
+};
+
+int omap_hwoverlays_init(struct omap_drm_private *priv);
+void omap_hwoverlays_destroy(struct omap_drm_private *priv);
+int omap_overlay_assign(struct drm_atomic_state *s, struct drm_plane *plane,
+ u32 caps, u32 fourcc, struct omap_hw_overlay **overlay,
+ struct omap_hw_overlay **r_overlay);
+void omap_overlay_release(struct drm_atomic_state *s, struct omap_hw_overlay *overlay);
+void omap_overlay_update_state(struct omap_drm_private *priv, struct omap_hw_overlay *overlay);
+#endif /* __OMAPDRM_OVERLAY_H__ */
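A condensed sketch of the calling sequence this header expects from a plane's atomic hooks (simplified; the assign condition is a placeholder and error handling is omitted):

/* Sketch only, not the actual omap_plane.c code:
 *
 * atomic_check():
 *	if (!new_state->visible) {
 *		omap_overlay_release(state, omap_state->overlay);
 *		omap_state->overlay = NULL;
 *	} else if (needs_new_overlay) {		// placeholder condition
 *		omap_overlay_release(state, omap_state->overlay);
 *		omap_overlay_assign(state, plane, caps, fourcc,
 *				    &omap_state->overlay, r_overlay_ptr);
 *	}
 *
 * atomic_update() / atomic_disable():
 *	omap_overlay_update_state(priv, old_omap_state->overlay);
 *	// disables the hw overlay when no plane claims it any more
 */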
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 512af976b7e9..b35205c4e979 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -8,6 +8,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_fourcc.h>
#include "omap_dmm_tiler.h"
#include "omap_drv.h"
@@ -16,14 +17,30 @@
* plane funcs
*/
+#define to_omap_plane_state(x) container_of(x, struct omap_plane_state, base)
+
+struct omap_plane_state {
+ /* Must be first. */
+ struct drm_plane_state base;
+
+ struct omap_hw_overlay *overlay;
+ struct omap_hw_overlay *r_overlay; /* right overlay */
+};
+
#define to_omap_plane(x) container_of(x, struct omap_plane, base)
struct omap_plane {
struct drm_plane base;
enum omap_plane_id id;
- const char *name;
};
+bool is_omap_plane_dual_overlay(struct drm_plane_state *state)
+{
+ struct omap_plane_state *omap_state = to_omap_plane_state(state);
+
+ return !!omap_state->r_overlay;
+}
+
static int omap_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
@@ -46,13 +63,35 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct omap_drm_private *priv = plane->dev->dev_private;
- struct omap_plane *omap_plane = to_omap_plane(plane);
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
- struct omap_overlay_info info;
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct omap_plane_state *new_omap_state;
+ struct omap_plane_state *old_omap_state;
+ struct omap_overlay_info info, r_info;
+ enum omap_plane_id ovl_id, r_ovl_id;
int ret;
+ bool dual_ovl;
+
+ new_omap_state = to_omap_plane_state(new_state);
+ old_omap_state = to_omap_plane_state(old_state);
- DBG("%s, crtc=%p fb=%p", omap_plane->name, new_state->crtc,
+ dual_ovl = is_omap_plane_dual_overlay(new_state);
+
+ /* Cleanup previously held overlay if needed */
+ if (old_omap_state->overlay)
+ omap_overlay_update_state(priv, old_omap_state->overlay);
+ if (old_omap_state->r_overlay)
+ omap_overlay_update_state(priv, old_omap_state->r_overlay);
+
+ if (!new_omap_state->overlay) {
+ DBG("[PLANE:%d:%s] no overlay attached", plane->base.id, plane->name);
+ return;
+ }
+
+ ovl_id = new_omap_state->overlay->id;
+ DBG("%s, crtc=%p fb=%p", plane->name, new_state->crtc,
new_state->fb);
memset(&info, 0, sizeof(info));
@@ -67,65 +106,155 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
info.color_encoding = new_state->color_encoding;
info.color_range = new_state->color_range;
+ r_info = info;
+
/* update scanout: */
- omap_framebuffer_update_scanout(new_state->fb, new_state, &info);
+ omap_framebuffer_update_scanout(new_state->fb, new_state, &info,
+ dual_ovl ? &r_info : NULL);
- DBG("%dx%d -> %dx%d (%d)", info.width, info.height,
- info.out_width, info.out_height,
- info.screen_width);
+ DBG("%s: %dx%d -> %dx%d (%d)",
+ new_omap_state->overlay->name, info.width, info.height,
+ info.out_width, info.out_height, info.screen_width);
DBG("%d,%d %pad %pad", info.pos_x, info.pos_y,
&info.paddr, &info.p_uv_addr);
+ if (dual_ovl) {
+ r_ovl_id = new_omap_state->r_overlay->id;
+ /*
+ * If the current plane uses 2 hw planes, the very next
+ * zorder is used by the r_overlay, so we just use the
+ * main overlay zorder + 1.
+ */
+ r_info.zorder = info.zorder + 1;
+
+ DBG("%s: %dx%d -> %dx%d (%d)",
+ new_omap_state->r_overlay->name,
+ r_info.width, r_info.height,
+ r_info.out_width, r_info.out_height, r_info.screen_width);
+ DBG("%d,%d %pad %pad", r_info.pos_x, r_info.pos_y,
+ &r_info.paddr, &r_info.p_uv_addr);
+ }
+
/* and finally, update omapdss: */
- ret = dispc_ovl_setup(priv->dispc, omap_plane->id, &info,
+ ret = dispc_ovl_setup(priv->dispc, ovl_id, &info,
omap_crtc_timings(new_state->crtc), false,
omap_crtc_channel(new_state->crtc));
if (ret) {
dev_err(plane->dev->dev, "Failed to setup plane %s\n",
- omap_plane->name);
- dispc_ovl_enable(priv->dispc, omap_plane->id, false);
+ plane->name);
+ dispc_ovl_enable(priv->dispc, ovl_id, false);
return;
}
- dispc_ovl_enable(priv->dispc, omap_plane->id, true);
+ dispc_ovl_enable(priv->dispc, ovl_id, true);
+
+ if (dual_ovl) {
+ ret = dispc_ovl_setup(priv->dispc, r_ovl_id, &r_info,
+ omap_crtc_timings(new_state->crtc), false,
+ omap_crtc_channel(new_state->crtc));
+ if (ret) {
+ dev_err(plane->dev->dev, "Failed to setup plane right-overlay %s\n",
+ plane->name);
+ dispc_ovl_enable(priv->dispc, r_ovl_id, false);
+ dispc_ovl_enable(priv->dispc, ovl_id, false);
+ return;
+ }
+
+ dispc_ovl_enable(priv->dispc, r_ovl_id, true);
+ }
}
static void omap_plane_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
- struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
- plane);
struct omap_drm_private *priv = plane->dev->dev_private;
struct omap_plane *omap_plane = to_omap_plane(plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct omap_plane_state *new_omap_state;
+ struct omap_plane_state *old_omap_state;
+
+ new_omap_state = to_omap_plane_state(new_state);
+ old_omap_state = to_omap_plane_state(old_state);
+
+ if (!old_omap_state->overlay)
+ return;
new_state->rotation = DRM_MODE_ROTATE_0;
new_state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : omap_plane->id;
- dispc_ovl_enable(priv->dispc, omap_plane->id, false);
+ omap_overlay_update_state(priv, old_omap_state->overlay);
+ new_omap_state->overlay = NULL;
+
+ if (is_omap_plane_dual_overlay(old_state)) {
+ omap_overlay_update_state(priv, old_omap_state->r_overlay);
+ new_omap_state->r_overlay = NULL;
+ }
}
+#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
+
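
The FRAC_16_16() macro above packs an integer ratio into the 16.16 fixed-point format that the DRM atomic helpers use for scaling limits. A minimal standalone sketch of the encoding (plain userspace C, not driver code), assuming nothing beyond the macro itself:

#include <stdio.h>

/* Same encoding as the driver macro: integer ratio -> 16.16 fixed point. */
#define FRAC_16_16(mult, div) (((mult) << 16) / (div))

int main(void)
{
	unsigned int min = FRAC_16_16(1, 8);	/* 1/8 downscale limit */
	unsigned int max = FRAC_16_16(8, 1);	/* 8x upscale limit */

	/* 0x2000 is 0.125 and 0x80000 is 8.0 in 16.16 fixed point. */
	printf("min=0x%x (%f)\n", min, min / 65536.0);
	printf("max=0x%x (%f)\n", max, max / 65536.0);
	return 0;
}

These are the values passed as the minimum and maximum scale to drm_atomic_helper_check_plane_state() in the hunk below.
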
static int omap_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct omap_drm_private *priv = plane->dev->dev_private;
+ struct omap_plane_state *omap_state = to_omap_plane_state(new_plane_state);
+ struct omap_global_state *omap_overlay_global_state;
struct drm_crtc_state *crtc_state;
+ bool new_r_hw_overlay = false;
+ bool new_hw_overlay = false;
+ u32 max_width, max_height;
+ struct drm_crtc *crtc;
+ u16 width, height;
+ u32 caps = 0;
+ u32 fourcc;
+ int ret;
- if (!new_plane_state->fb)
- return 0;
+ omap_overlay_global_state = omap_get_global_state(state);
+ if (IS_ERR(omap_overlay_global_state))
+ return PTR_ERR(omap_overlay_global_state);
+
+ dispc_ovl_get_max_size(priv->dispc, &width, &height);
+ max_width = width << 16;
+ max_height = height << 16;
- /* crtc should only be NULL when disabling (i.e., !new_plane_state->fb) */
- if (WARN_ON(!new_plane_state->crtc))
+ crtc = new_plane_state->crtc ? new_plane_state->crtc : plane->state->crtc;
+ if (!crtc)
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- new_plane_state->crtc);
+ crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
/* we should have a crtc state if the plane is attached to a crtc */
if (WARN_ON(!crtc_state))
return 0;
- if (!crtc_state->enable)
+ /*
+ * Note: these are just sanity checks to filter out totally bad scaling
+ * factors. The real limits must be calculated case by case, and
+ * unfortunately we currently do those checks only at the commit
+ * phase in dispc.
+ */
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
+ FRAC_16_16(1, 8), FRAC_16_16(8, 1),
+ true, true);
+ if (ret)
+ return ret;
+
+ DBG("%s: visible %d -> %d", plane->name,
+ old_plane_state->visible, new_plane_state->visible);
+
+ if (!new_plane_state->visible) {
+ omap_overlay_release(state, omap_state->overlay);
+ omap_overlay_release(state, omap_state->r_overlay);
+ omap_state->overlay = NULL;
+ omap_state->r_overlay = NULL;
return 0;
+ }
if (new_plane_state->crtc_x < 0 || new_plane_state->crtc_y < 0)
return -EINVAL;
@@ -136,10 +265,96 @@ static int omap_plane_atomic_check(struct drm_plane *plane,
if (new_plane_state->crtc_y + new_plane_state->crtc_h > crtc_state->adjusted_mode.vdisplay)
return -EINVAL;
+ /* Make sure dimensions are within bounds. */
+ if (new_plane_state->src_h > max_height || new_plane_state->crtc_h > height)
+ return -EINVAL;
+
+ if (new_plane_state->src_w > max_width || new_plane_state->crtc_w > width) {
+ bool is_fourcc_yuv = new_plane_state->fb->format->is_yuv;
+
+ if (is_fourcc_yuv && (((new_plane_state->src_w >> 16) / 2 & 1) ||
+ new_plane_state->crtc_w / 2 & 1)) {
+ /*
+ * If calculating the split overlay width yields an odd
+ * value, the individual widths need to be adjusted by
+ * +/- 1, so make sure the result still fits.
+ */
+ if (new_plane_state->src_w <= ((2 * width - 1) << 16) &&
+ new_plane_state->crtc_w <= (2 * width - 1))
+ new_r_hw_overlay = true;
+ else
+ return -EINVAL;
+ } else {
+ if (new_plane_state->src_w <= (2 * max_width) &&
+ new_plane_state->crtc_w <= (2 * width))
+ new_r_hw_overlay = true;
+ else
+ return -EINVAL;
+ }
+ }
+
if (new_plane_state->rotation != DRM_MODE_ROTATE_0 &&
!omap_framebuffer_supports_rotation(new_plane_state->fb))
return -EINVAL;
+ if ((new_plane_state->src_w >> 16) != new_plane_state->crtc_w ||
+ (new_plane_state->src_h >> 16) != new_plane_state->crtc_h)
+ caps |= OMAP_DSS_OVL_CAP_SCALE;
+
+ fourcc = new_plane_state->fb->format->format;
+
+ /*
+ * (re)allocate hw overlay if we don't have one or
+ * there is a caps mismatch
+ */
+ if (!omap_state->overlay || (caps & ~omap_state->overlay->caps)) {
+ new_hw_overlay = true;
+ } else {
+ /* check supported format */
+ if (!dispc_ovl_color_mode_supported(priv->dispc, omap_state->overlay->id,
+ fourcc))
+ new_hw_overlay = true;
+ }
+
+ /*
+ * Check if we need two overlays but only have one, or if we
+ * had two overlays but now only need one.
+ */
+ if ((new_r_hw_overlay && !omap_state->r_overlay) ||
+ (!new_r_hw_overlay && omap_state->r_overlay))
+ new_hw_overlay = true;
+
+ if (new_hw_overlay) {
+ struct omap_hw_overlay *old_ovl = omap_state->overlay;
+ struct omap_hw_overlay *old_r_ovl = omap_state->r_overlay;
+ struct omap_hw_overlay *new_ovl = NULL;
+ struct omap_hw_overlay *new_r_ovl = NULL;
+
+ omap_overlay_release(state, old_ovl);
+ omap_overlay_release(state, old_r_ovl);
+
+ ret = omap_overlay_assign(state, plane, caps, fourcc, &new_ovl,
+ new_r_hw_overlay ? &new_r_ovl : NULL);
+ if (ret) {
+ DBG("%s: failed to assign hw_overlay", plane->name);
+ omap_state->overlay = NULL;
+ omap_state->r_overlay = NULL;
+ return ret;
+ }
+
+ omap_state->overlay = new_ovl;
+ if (new_r_hw_overlay)
+ omap_state->r_overlay = new_r_ovl;
+ else
+ omap_state->r_overlay = NULL;
+ }
+
+ DBG("plane: %s overlay_id: %d", plane->name, omap_state->overlay->id);
+
+ if (omap_state->r_overlay)
+ DBG("plane: %s r_overlay_id: %d", plane->name, omap_state->r_overlay->id);
+
return 0;
}
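
For the dual-overlay case checked above, a plane wider than one hw overlay is split across two, and for YUV formats an odd half-width has to be nudged by +/- 1 so both halves stay even (chroma is subsampled horizontally). A rough, self-contained sketch of that width math follows; the real split is assumed to happen in omap_framebuffer_update_scanout(), so treat this purely as an illustration of the sanity check, not the driver's scanout code:

#include <stdio.h>
#include <stdbool.h>

/* Illustrative split of an over-wide plane across two overlays. */
static void split_width(unsigned int width, bool is_yuv,
			unsigned int *left, unsigned int *right)
{
	*left = width / 2;
	*right = width - *left;

	/* Keep both halves even for YUV by shifting one pixel. */
	if (is_yuv && (*left & 1)) {
		*left += 1;
		*right -= 1;
	}
}

int main(void)
{
	unsigned int l, r;

	split_width(4096, true, &l, &r);	/* even split: 2048 + 2048 */
	printf("%u + %u\n", l, r);

	split_width(3838, true, &l, &r);	/* 1919 is odd -> 1920 + 1918 */
	printf("%u + %u\n", l, r);
	return 0;
}

This is why the check accepts a YUV source only up to 2 * width - 1: one pixel of slack is reserved for the even-width adjustment.
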
@@ -155,7 +370,7 @@ static void omap_plane_destroy(struct drm_plane *plane)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
- DBG("%s", omap_plane->name);
+ DBG("%s", plane->name);
drm_plane_cleanup(plane);
@@ -189,11 +404,17 @@ void omap_plane_install_properties(struct drm_plane *plane,
static void omap_plane_reset(struct drm_plane *plane)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
+ struct omap_plane_state *omap_state;
+
+ if (plane->state)
+ drm_atomic_helper_plane_destroy_state(plane, plane->state);
- drm_atomic_helper_plane_reset(plane);
- if (!plane->state)
+ omap_state = kzalloc(sizeof(*omap_state), GFP_KERNEL);
+ if (!omap_state)
return;
+ __drm_atomic_helper_plane_reset(plane, &omap_state->base);
+
/*
* Set the zpos default depending on whether we are a primary or overlay
* plane.
@@ -204,6 +425,47 @@ static void omap_plane_reset(struct drm_plane *plane)
plane->state->color_range = DRM_COLOR_YCBCR_FULL_RANGE;
}
+static struct drm_plane_state *
+omap_plane_atomic_duplicate_state(struct drm_plane *plane)
+{
+ struct omap_plane_state *state, *current_state;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ current_state = to_omap_plane_state(plane->state);
+
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &state->base);
+
+ state->overlay = current_state->overlay;
+ state->r_overlay = current_state->r_overlay;
+
+ return &state->base;
+}
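
The reset and duplicate hooks above follow the standard DRM pattern of embedding struct drm_plane_state in a driver-private state and recovering the wrapper with container_of(). A stripped-down, compilable illustration of that wrapping (generic C with stand-in types, not the omapdrm structs themselves):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-ins for the DRM and omapdrm types. */
struct drm_plane_state { int zpos; };

struct omap_plane_state {
	struct drm_plane_state base;	/* embedded base state */
	void *overlay;
	void *r_overlay;
};

static struct omap_plane_state *
to_omap_plane_state(struct drm_plane_state *state)
{
	return container_of(state, struct omap_plane_state, base);
}

int main(void)
{
	struct omap_plane_state st = { .base.zpos = 1 };
	struct drm_plane_state *base = &st.base;

	/* The core only sees "base"; the driver recovers its wrapper. */
	printf("overlay=%p zpos=%d\n",
	       (void *)to_omap_plane_state(base)->overlay, base->zpos);
	return 0;
}

Duplicating the state then just copies the extra overlay pointers after __drm_atomic_helper_plane_duplicate_state() has copied the base.
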
+
+static void omap_plane_atomic_print_state(struct drm_printer *p,
+ const struct drm_plane_state *state)
+{
+ struct omap_plane_state *omap_state = to_omap_plane_state(state);
+
+ if (omap_state->overlay)
+ drm_printf(p, "\toverlay=%s (caps=0x%x)\n",
+ omap_state->overlay->name,
+ omap_state->overlay->caps);
+ else
+ drm_printf(p, "\toverlay=None\n");
+ if (omap_state->r_overlay)
+ drm_printf(p, "\tr_overlay=%s (caps=0x%x)\n",
+ omap_state->r_overlay->name,
+ omap_state->r_overlay->caps);
+ else
+ drm_printf(p, "\tr_overlay=None\n");
+}
+
static int omap_plane_atomic_set_property(struct drm_plane *plane,
struct drm_plane_state *state,
struct drm_property *property,
@@ -239,10 +501,11 @@ static const struct drm_plane_funcs omap_plane_funcs = {
.disable_plane = drm_atomic_helper_disable_plane,
.reset = omap_plane_reset,
.destroy = omap_plane_destroy,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_duplicate_state = omap_plane_atomic_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
.atomic_set_property = omap_plane_atomic_set_property,
.atomic_get_property = omap_plane_atomic_get_property,
+ .atomic_print_state = omap_plane_atomic_print_state,
};
static bool omap_plane_supports_yuv(struct drm_plane *plane)
@@ -261,20 +524,6 @@ static bool omap_plane_supports_yuv(struct drm_plane *plane)
return false;
}
-static const char *plane_id_to_name[] = {
- [OMAP_DSS_GFX] = "gfx",
- [OMAP_DSS_VIDEO1] = "vid1",
- [OMAP_DSS_VIDEO2] = "vid2",
- [OMAP_DSS_VIDEO3] = "vid3",
-};
-
-static const enum omap_plane_id plane_idx_to_id[] = {
- OMAP_DSS_GFX,
- OMAP_DSS_VIDEO1,
- OMAP_DSS_VIDEO2,
- OMAP_DSS_VIDEO3,
-};
-
/* initialize plane */
struct drm_plane *omap_plane_init(struct drm_device *dev,
int idx, enum drm_plane_type type,
@@ -284,27 +533,25 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
unsigned int num_planes = dispc_get_num_ovls(priv->dispc);
struct drm_plane *plane;
struct omap_plane *omap_plane;
- enum omap_plane_id id;
int ret;
u32 nformats;
const u32 *formats;
- if (WARN_ON(idx >= ARRAY_SIZE(plane_idx_to_id)))
+ if (WARN_ON(idx >= num_planes))
return ERR_PTR(-EINVAL);
- id = plane_idx_to_id[idx];
-
- DBG("%s: type=%d", plane_id_to_name[id], type);
-
omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL);
if (!omap_plane)
return ERR_PTR(-ENOMEM);
- formats = dispc_ovl_get_color_modes(priv->dispc, id);
+ omap_plane->id = idx;
+
+ DBG("%d: type=%d", omap_plane->id, type);
+ DBG(" crtc_mask: 0x%04x", possible_crtcs);
+
+ formats = dispc_ovl_get_color_modes(priv->dispc, omap_plane->id);
for (nformats = 0; formats[nformats]; ++nformats)
;
- omap_plane->id = id;
- omap_plane->name = plane_id_to_name[id];
plane = &omap_plane->base;
@@ -334,8 +581,8 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
return plane;
error:
- dev_err(dev->dev, "%s(): could not create plane: %s\n",
- __func__, plane_id_to_name[id]);
+ dev_err(dev->dev, "%s(): could not create plane: %d\n",
+ __func__, omap_plane->id);
kfree(omap_plane);
return NULL;
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.h b/drivers/gpu/drm/omapdrm/omap_plane.h
index 0c28fe8ffa20..a9a33e12722a 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.h
+++ b/drivers/gpu/drm/omapdrm/omap_plane.h
@@ -22,5 +22,6 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
u32 possible_crtcs);
void omap_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj);
+bool is_omap_plane_dual_overlay(struct drm_plane_state *state);
#endif /* __OMAPDRM_PLANE_H__ */
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 7770b1802291..434c2861bb40 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -152,7 +152,7 @@ config DRM_PANEL_ILITEK_ILI9341
tristate "Ilitek ILI9341 240x320 QVGA panels"
depends on OF && SPI
depends on DRM_KMS_HELPER
- depends on DRM_KMS_CMA_HELPER
+ depends on DRM_GEM_CMA_HELPER
depends on BACKLIGHT_CLASS_DEVICE
select DRM_MIPI_DBI
help
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index 529561b4fbbc..1be150ac758f 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -84,9 +84,9 @@ static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = {
_INIT_DCS_CMD(0x0D, 0x63),
_INIT_DCS_CMD(0x0E, 0x91),
_INIT_DCS_CMD(0x0F, 0x73),
- _INIT_DCS_CMD(0x95, 0xEB),
- _INIT_DCS_CMD(0x96, 0xEB),
- _INIT_DCS_CMD(0x30, 0x11),
+ _INIT_DCS_CMD(0x95, 0xE6),
+ _INIT_DCS_CMD(0x96, 0xF0),
+ _INIT_DCS_CMD(0x30, 0x00),
_INIT_DCS_CMD(0x6D, 0x66),
_INIT_DCS_CMD(0x75, 0xA2),
_INIT_DCS_CMD(0x77, 0x3B),
@@ -111,18 +111,18 @@ static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = {
_INIT_DCS_CMD(0xB0, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1),
_INIT_DCS_CMD(0xB1, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29),
_INIT_DCS_CMD(0xB2, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73),
- _INIT_DCS_CMD(0xB3, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xF7),
+ _INIT_DCS_CMD(0xB3, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xE0),
_INIT_DCS_CMD(0xB4, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1),
_INIT_DCS_CMD(0xB5, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29),
_INIT_DCS_CMD(0xB6, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73),
- _INIT_DCS_CMD(0xB7, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xF7),
+ _INIT_DCS_CMD(0xB7, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xE0),
_INIT_DCS_CMD(0xB8, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1),
_INIT_DCS_CMD(0xB9, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29),
_INIT_DCS_CMD(0xBA, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73),
- _INIT_DCS_CMD(0xBB, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xF7),
+ _INIT_DCS_CMD(0xBB, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xE0),
_INIT_DCS_CMD(0xFF, 0x24),
_INIT_DCS_CMD(0xFB, 0x01),
@@ -225,6 +225,7 @@ static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = {
_INIT_DCS_CMD(0x7F, 0x3C),
_INIT_DCS_CMD(0x82, 0x04),
_INIT_DCS_CMD(0x97, 0xC0),
+
_INIT_DCS_CMD(0xB6, 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x05, 0x00, 0x00),
_INIT_DCS_CMD(0x91, 0x44),
_INIT_DCS_CMD(0x92, 0xA9),
@@ -332,12 +333,39 @@ static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = {
_INIT_DCS_CMD(0x34, 0x78),
_INIT_DCS_CMD(0x35, 0x16),
_INIT_DCS_CMD(0xC8, 0x04),
- _INIT_DCS_CMD(0xC9, 0x80),
+ _INIT_DCS_CMD(0xC9, 0x9E),
_INIT_DCS_CMD(0xCA, 0x4E),
_INIT_DCS_CMD(0xCB, 0x00),
- _INIT_DCS_CMD(0xA9, 0x4C),
- _INIT_DCS_CMD(0xAA, 0x47),
+ _INIT_DCS_CMD(0xA9, 0x49),
+ _INIT_DCS_CMD(0xAA, 0x4B),
+ _INIT_DCS_CMD(0xAB, 0x48),
+ _INIT_DCS_CMD(0xAC, 0x43),
+ _INIT_DCS_CMD(0xAD, 0x40),
+ _INIT_DCS_CMD(0xAE, 0x50),
+ _INIT_DCS_CMD(0xAF, 0x44),
+ _INIT_DCS_CMD(0xB0, 0x54),
+ _INIT_DCS_CMD(0xB1, 0x4E),
+ _INIT_DCS_CMD(0xB2, 0x4D),
+ _INIT_DCS_CMD(0xB3, 0x4C),
+ _INIT_DCS_CMD(0xB4, 0x41),
+ _INIT_DCS_CMD(0xB5, 0x47),
+ _INIT_DCS_CMD(0xB6, 0x53),
+ _INIT_DCS_CMD(0xB7, 0x3E),
+ _INIT_DCS_CMD(0xB8, 0x51),
+ _INIT_DCS_CMD(0xB9, 0x3C),
+ _INIT_DCS_CMD(0xBA, 0x3B),
+ _INIT_DCS_CMD(0xBB, 0x46),
+ _INIT_DCS_CMD(0xBC, 0x45),
+ _INIT_DCS_CMD(0xBD, 0x55),
+ _INIT_DCS_CMD(0xBE, 0x3D),
+ _INIT_DCS_CMD(0xBF, 0x3F),
+ _INIT_DCS_CMD(0xC0, 0x52),
+ _INIT_DCS_CMD(0xC1, 0x4A),
+ _INIT_DCS_CMD(0xC2, 0x39),
+ _INIT_DCS_CMD(0xC3, 0x4F),
+ _INIT_DCS_CMD(0xC4, 0x3A),
+ _INIT_DCS_CMD(0xC5, 0x42),
_INIT_DCS_CMD(0xFF, 0x27),
_INIT_DCS_CMD(0xFB, 0x01),
@@ -419,7 +447,7 @@ static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = {
{},
};
-static const struct panel_init_cmd inx_init_cmd[] = {
+static const struct panel_init_cmd inx_hj110iz_init_cmd[] = {
_INIT_DCS_CMD(0xFF, 0x20),
_INIT_DCS_CMD(0xFB, 0x01),
_INIT_DCS_CMD(0x05, 0xD1),
@@ -428,10 +456,10 @@ static const struct panel_init_cmd inx_init_cmd[] = {
_INIT_DCS_CMD(0x08, 0x4B),
_INIT_DCS_CMD(0x0E, 0x91),
_INIT_DCS_CMD(0x0F, 0x69),
- _INIT_DCS_CMD(0x95, 0xFF),
- _INIT_DCS_CMD(0x96, 0xFF),
- _INIT_DCS_CMD(0x9D, 0x0A),
- _INIT_DCS_CMD(0x9E, 0x0A),
+ _INIT_DCS_CMD(0x95, 0xF5),
+ _INIT_DCS_CMD(0x96, 0xF5),
+ _INIT_DCS_CMD(0x9D, 0x00),
+ _INIT_DCS_CMD(0x9E, 0x00),
_INIT_DCS_CMD(0x69, 0x98),
_INIT_DCS_CMD(0x75, 0xA2),
_INIT_DCS_CMD(0x77, 0xB3),
@@ -493,17 +521,17 @@ static const struct panel_init_cmd inx_init_cmd[] = {
_INIT_DCS_CMD(0x2A, 0x03),
_INIT_DCS_CMD(0x2B, 0x03),
- _INIT_DCS_CMD(0x2F, 0x06),
+ _INIT_DCS_CMD(0x2F, 0x05),
_INIT_DCS_CMD(0x30, 0x32),
_INIT_DCS_CMD(0x31, 0x43),
- _INIT_DCS_CMD(0x33, 0x06),
+ _INIT_DCS_CMD(0x33, 0x05),
_INIT_DCS_CMD(0x34, 0x32),
_INIT_DCS_CMD(0x35, 0x43),
_INIT_DCS_CMD(0x37, 0x44),
_INIT_DCS_CMD(0x38, 0x40),
_INIT_DCS_CMD(0x39, 0x00),
- _INIT_DCS_CMD(0x3A, 0x01),
- _INIT_DCS_CMD(0x3B, 0x48),
+ _INIT_DCS_CMD(0x3A, 0x18),
+ _INIT_DCS_CMD(0x3B, 0x00),
_INIT_DCS_CMD(0x3D, 0x93),
_INIT_DCS_CMD(0xAB, 0x44),
_INIT_DCS_CMD(0xAC, 0x40),
@@ -520,8 +548,8 @@ static const struct panel_init_cmd inx_init_cmd[] = {
_INIT_DCS_CMD(0x56, 0x08),
_INIT_DCS_CMD(0x58, 0x21),
_INIT_DCS_CMD(0x59, 0x40),
- _INIT_DCS_CMD(0x5A, 0x09),
- _INIT_DCS_CMD(0x5B, 0x48),
+ _INIT_DCS_CMD(0x5A, 0x00),
+ _INIT_DCS_CMD(0x5B, 0x2C),
_INIT_DCS_CMD(0x5E, 0x00, 0x10),
_INIT_DCS_CMD(0x5F, 0x00),
@@ -558,33 +586,36 @@ static const struct panel_init_cmd inx_init_cmd[] = {
_INIT_DCS_CMD(0xEF, 0x01),
_INIT_DCS_CMD(0xF0, 0x7A),
+ _INIT_DCS_CMD(0xB6, 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x05, 0x00, 0x00),
_INIT_DCS_CMD(0xFF, 0x25),
_INIT_DCS_CMD(0xFB, 0x01),
_INIT_DCS_CMD(0x05, 0x00),
+ _INIT_DCS_CMD(0x13, 0x02),
+ _INIT_DCS_CMD(0x14, 0xDF),
_INIT_DCS_CMD(0xF1, 0x10),
_INIT_DCS_CMD(0x1E, 0x00),
- _INIT_DCS_CMD(0x1F, 0x09),
- _INIT_DCS_CMD(0x20, 0x46),
+ _INIT_DCS_CMD(0x1F, 0x00),
+ _INIT_DCS_CMD(0x20, 0x2C),
_INIT_DCS_CMD(0x25, 0x00),
- _INIT_DCS_CMD(0x26, 0x09),
- _INIT_DCS_CMD(0x27, 0x46),
+ _INIT_DCS_CMD(0x26, 0x00),
+ _INIT_DCS_CMD(0x27, 0x2C),
_INIT_DCS_CMD(0x3F, 0x80),
_INIT_DCS_CMD(0x40, 0x00),
_INIT_DCS_CMD(0x43, 0x00),
- _INIT_DCS_CMD(0x44, 0x09),
- _INIT_DCS_CMD(0x45, 0x46),
+ _INIT_DCS_CMD(0x44, 0x18),
+ _INIT_DCS_CMD(0x45, 0x00),
- _INIT_DCS_CMD(0x48, 0x09),
- _INIT_DCS_CMD(0x49, 0x46),
+ _INIT_DCS_CMD(0x48, 0x00),
+ _INIT_DCS_CMD(0x49, 0x2C),
_INIT_DCS_CMD(0x5B, 0x80),
_INIT_DCS_CMD(0x5C, 0x00),
- _INIT_DCS_CMD(0x5D, 0x01),
- _INIT_DCS_CMD(0x5E, 0x46),
- _INIT_DCS_CMD(0x61, 0x01),
- _INIT_DCS_CMD(0x62, 0x46),
+ _INIT_DCS_CMD(0x5D, 0x00),
+ _INIT_DCS_CMD(0x5E, 0x00),
+ _INIT_DCS_CMD(0x61, 0x00),
+ _INIT_DCS_CMD(0x62, 0x2C),
_INIT_DCS_CMD(0x68, 0x10),
_INIT_DCS_CMD(0xFF, 0x26),
_INIT_DCS_CMD(0xFB, 0x01),
@@ -700,16 +731,22 @@ static const struct panel_init_cmd inx_init_cmd[] = {
_INIT_DCS_CMD(0xA3, 0x30),
_INIT_DCS_CMD(0xA4, 0xC0),
_INIT_DCS_CMD(0xE8, 0x00),
+ _INIT_DCS_CMD(0x97, 0x3C),
+ _INIT_DCS_CMD(0x98, 0x02),
+ _INIT_DCS_CMD(0x99, 0x95),
+ _INIT_DCS_CMD(0x9A, 0x06),
+ _INIT_DCS_CMD(0x9B, 0x00),
+ _INIT_DCS_CMD(0x9C, 0x0B),
+ _INIT_DCS_CMD(0x9D, 0x0A),
+ _INIT_DCS_CMD(0x9E, 0x90),
_INIT_DCS_CMD(0xFF, 0xF0),
_INIT_DCS_CMD(0xFB, 0x01),
_INIT_DCS_CMD(0x3A, 0x08),
_INIT_DCS_CMD(0xFF, 0xD0),
_INIT_DCS_CMD(0xFB, 0x01),
_INIT_DCS_CMD(0x00, 0x33),
- _INIT_DCS_CMD(0x02, 0x77),
_INIT_DCS_CMD(0x08, 0x01),
_INIT_DCS_CMD(0x09, 0xBF),
- _INIT_DCS_CMD(0x28, 0x30),
_INIT_DCS_CMD(0x2F, 0x33),
_INIT_DCS_CMD(0xFF, 0x23),
_INIT_DCS_CMD(0xFB, 0x01),
@@ -718,6 +755,9 @@ static const struct panel_init_cmd inx_init_cmd[] = {
_INIT_DCS_CMD(0xFF, 0x20),
_INIT_DCS_CMD(0xFB, 0x01),
_INIT_DCS_CMD(0x30, 0x00),
+ _INIT_DCS_CMD(0xFF, 0x24),
+ _INIT_DCS_CMD(0x5C, 0x88),
+ _INIT_DCS_CMD(0x5D, 0x08),
_INIT_DCS_CMD(0xFF, 0x10),
_INIT_DCS_CMD(0xB9, 0x01),
_INIT_DCS_CMD(0xFF, 0x20),
@@ -1312,7 +1352,7 @@ static const struct panel_desc inx_hj110iz_desc = {
| MIPI_DSI_MODE_VIDEO_HSE
| MIPI_DSI_CLOCK_NON_CONTINUOUS
| MIPI_DSI_MODE_VIDEO_BURST,
- .init_cmds = inx_init_cmd,
+ .init_cmds = inx_hj110iz_init_cmd,
};
static const struct drm_display_mode boe_tv101wum_nl6_default_mode = {
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index dde033066f3d..9e46db5e359c 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -3252,6 +3252,33 @@ static const struct panel_desc starry_kr070pe2t = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
+static const struct display_timing tsd_tst043015cmhx_timing = {
+ .pixelclock = { 5000000, 9000000, 12000000 },
+ .hactive = { 480, 480, 480 },
+ .hfront_porch = { 4, 5, 65 },
+ .hback_porch = { 36, 40, 255 },
+ .hsync_len = { 1, 1, 1 },
+ .vactive = { 272, 272, 272 },
+ .vfront_porch = { 2, 8, 97 },
+ .vback_porch = { 3, 8, 31 },
+ .vsync_len = { 1, 1, 1 },
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE,
+};
+
+static const struct panel_desc tsd_tst043015cmhx = {
+ .timings = &tsd_tst043015cmhx_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 105,
+ .height = 67,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
+};
+
static const struct drm_display_mode tfc_s9700rtwv43tr_01b_mode = {
.clock = 30000,
.hdisplay = 800,
@@ -3929,6 +3956,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "starry,kr070pe2t",
.data = &starry_kr070pe2t,
}, {
+ .compatible = "team-source-display,tst043015cmhx",
+ .data = &tsd_tst043015cmhx,
+ }, {
.compatible = "tfc,s9700rtwv43tr-01b",
.data = &tfc_s9700rtwv43tr_01b,
}, {
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 6d9bdb9180cb..ead65f5fa2bc 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -223,7 +223,7 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
- return NULL;
+ return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&obj->mappings.list);
mutex_init(&obj->mappings.lock);
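
Returning ERR_PTR(-ENOMEM) instead of NULL matters because callers of the create_object hook decode failures with IS_ERR()/PTR_ERR() rather than a NULL check. A tiny userspace re-implementation of that convention, only to illustrate the calling pattern (these macros are stand-ins, not the kernel's <linux/err.h>):

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/* Userspace stand-ins for ERR_PTR()/IS_ERR()/PTR_ERR(). */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))

static void *create_object(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);	/* error encoded in the pointer */
	return "object";
}

int main(void)
{
	void *obj = create_object(1);

	if (IS_ERR(obj))			/* caller checks IS_ERR(), not NULL */
		printf("failed: %ld\n", PTR_ERR(obj));
	return 0;
}
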
diff --git a/drivers/gpu/drm/pl111/Kconfig b/drivers/gpu/drm/pl111/Kconfig
index 3aae387a96af..91ee05b01303 100644
--- a/drivers/gpu/drm/pl111/Kconfig
+++ b/drivers/gpu/drm/pl111/Kconfig
@@ -6,7 +6,6 @@ config DRM_PL111
depends on VEXPRESS_CONFIG || VEXPRESS_CONFIG=n
depends on COMMON_CLK
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index 1f9a59601bb1..6a36b0fd845c 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -57,13 +57,16 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
struct qxl_bo *bo;
list_for_each_entry(bo, &qdev->gem.objects, list) {
- struct dma_resv_list *fobj;
- int rel;
-
- rcu_read_lock();
- fobj = dma_resv_shared_list(bo->tbo.base.resv);
- rel = fobj ? fobj->shared_count : 0;
- rcu_read_unlock();
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+ int rel = 0;
+
+ dma_resv_iter_begin(&cursor, bo->tbo.base.resv, true);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
+ if (dma_resv_iter_is_restarted(&cursor))
+ rel = 0;
+ ++rel;
+ }
seq_printf(m, "size %ld, pc %d, num releases %d\n",
(unsigned long)bo->tbo.base.size,
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 482fb0ae6cb5..e2488559cc9f 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -168,7 +168,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
if (!r) {
acpi_status = radeon_acpi_init(rdev);
if (acpi_status)
- dev_dbg(dev->dev, "Error during ACPI methods call\n");
+ dev_dbg(dev->dev, "Error during ACPI methods call\n");
}
if (radeon_is_px(dev)) {
@@ -648,6 +648,8 @@ void radeon_driver_lastclose_kms(struct drm_device *dev)
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
struct radeon_device *rdev = dev->dev_private;
+ struct radeon_fpriv *fpriv;
+ struct radeon_vm *vm;
int r;
file_priv->driver_priv = NULL;
@@ -660,8 +662,6 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN) {
- struct radeon_fpriv *fpriv;
- struct radeon_vm *vm;
fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
if (unlikely(!fpriv)) {
@@ -672,35 +672,39 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
if (rdev->accel_working) {
vm = &fpriv->vm;
r = radeon_vm_init(rdev, vm);
- if (r) {
- kfree(fpriv);
- goto out_suspend;
- }
+ if (r)
+ goto out_fpriv;
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
- if (r) {
- radeon_vm_fini(rdev, vm);
- kfree(fpriv);
- goto out_suspend;
- }
+ if (r)
+ goto out_vm_fini;
/* map the ib pool buffer read only into
* virtual address space */
vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
rdev->ring_tmp_bo.bo);
+ if (!vm->ib_bo_va) {
+ r = -ENOMEM;
+ goto out_vm_fini;
+ }
+
r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
RADEON_VA_IB_OFFSET,
RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_SNOOPED);
- if (r) {
- radeon_vm_fini(rdev, vm);
- kfree(fpriv);
- goto out_suspend;
- }
+ if (r)
+ goto out_vm_fini;
}
file_priv->driver_priv = fpriv;
}
+ if (!r)
+ goto out_suspend;
+
+out_vm_fini:
+ radeon_vm_fini(rdev, vm);
+out_fpriv:
+ kfree(fpriv);
out_suspend:
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
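
The reworked error path above uses the usual kernel goto-unwind pattern: each failure jumps to the label that undoes everything acquired so far, in reverse order, instead of repeating the teardown at every failure site. A generic, self-contained sketch of that pattern (not the radeon code itself; the resources here are just malloc() stand-ins):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int open_ctx(int fail_step)
{
	void *fpriv, *vm;
	int ret;

	fpriv = malloc(64);
	if (!fpriv)
		return -ENOMEM;

	vm = malloc(128);
	if (!vm) {
		ret = -ENOMEM;
		goto out_fpriv;
	}

	if (fail_step == 2) {		/* stand-in for a later init step failing */
		ret = -EINVAL;
		goto out_vm;
	}

	printf("open ok\n");
	free(vm);			/* kept alive in the driver; freed only for the demo */
	free(fpriv);
	return 0;

out_vm:
	free(vm);
out_fpriv:
	free(fpriv);
	return ret;
}

int main(void)
{
	return open_ctx(2) == -EINVAL ? 0 : 1;
}
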
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 511a942e851d..ca4a36464340 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -513,7 +513,7 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
* @allocated: allocated a new handle?
*
* Validates the handle and return the found session index or -EINVAL
- * we we don't have another free session index.
+ * if we don't have another free session index.
*/
static int radeon_vce_validate_handle(struct radeon_cs_parser *p,
uint32_t handle, bool *allocated)
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index b47e74421e34..f6e6a6d5d987 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -4,23 +4,24 @@ config DRM_RCAR_DU
depends on DRM && OF
depends on ARM || ARM64
depends on ARCH_RENESAS || COMPILE_TEST
- imply DRM_RCAR_CMM
- imply DRM_RCAR_LVDS
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select VIDEOMODE_HELPERS
help
Choose this option if you have an R-Car chipset.
If M is selected the module will be called rcar-du-drm.
-config DRM_RCAR_CMM
- tristate "R-Car DU Color Management Module (CMM) Support"
- depends on DRM && OF
+config DRM_RCAR_USE_CMM
+ bool "R-Car DU Color Management Module (CMM) Support"
depends on DRM_RCAR_DU
+ default DRM_RCAR_DU
help
Enable support for R-Car Color Management Module (CMM).
+config DRM_RCAR_CMM
+ def_tristate DRM_RCAR_DU
+ depends on DRM_RCAR_USE_CMM
+
config DRM_RCAR_DW_HDMI
tristate "R-Car Gen3 and RZ/G2 DU HDMI Encoder Support"
depends on DRM && OF
@@ -28,15 +29,27 @@ config DRM_RCAR_DW_HDMI
help
Enable support for R-Car Gen3 or RZ/G2 internal HDMI encoder.
+config DRM_RCAR_USE_LVDS
+ bool "R-Car DU LVDS Encoder Support"
+ depends on DRM_BRIDGE && OF
+ default DRM_RCAR_DU
+ help
+ Enable support for the R-Car Display Unit embedded LVDS encoders.
+
config DRM_RCAR_LVDS
- tristate "R-Car DU LVDS Encoder Support"
- depends on DRM && DRM_BRIDGE && OF
+ def_tristate DRM_RCAR_DU
+ depends on DRM_RCAR_USE_LVDS
select DRM_KMS_HELPER
select DRM_PANEL
select OF_FLATTREE
select OF_OVERLAY
+
+config DRM_RCAR_MIPI_DSI
+ tristate "R-Car DU MIPI DSI Encoder Support"
+ depends on DRM && DRM_BRIDGE && OF
+ select DRM_MIPI_DSI
help
- Enable support for the R-Car Display Unit embedded LVDS encoders.
+ Enable support for the R-Car Display Unit embedded MIPI DSI encoders.
config DRM_RCAR_VSP
bool "R-Car DU VSP Compositor Support" if ARM
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index 4d1187ccc3e5..286bc81b3e7c 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_DRM_RCAR_CMM) += rcar_cmm.o
obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
obj-$(CONFIG_DRM_RCAR_DW_HDMI) += rcar_dw_hdmi.o
obj-$(CONFIG_DRM_RCAR_LVDS) += rcar_lvds.o
+obj-$(CONFIG_DRM_RCAR_MIPI_DSI) += rcar_mipi_dsi.o
# 'remote-endpoint' is fixed up at run-time
DTC_FLAGS_rcar_du_of_lvds_r8a7790 += -Wno-graph_endpoint
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 5672830ca184..f361a604337f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -215,6 +215,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode;
struct rcar_du_device *rcdu = rcrtc->dev;
unsigned long mode_clock = mode->clock * 1000;
+ unsigned int hdse_offset;
u32 dsmr;
u32 escr;
@@ -261,12 +262,13 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
rcar_du_group_write(rcrtc->group, DPLLCR, dpllcr);
escr = ESCR_DCLKSEL_DCLKIN | div;
- } else if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index)) {
+ } else if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) ||
+ rcdu->info->dsi_clk_mask & BIT(rcrtc->index)) {
/*
- * Use the LVDS PLL output as the dot clock when outputting to
- * the LVDS encoder on an SoC that supports this clock routing
- * option. We use the clock directly in that case, without any
- * additional divider.
+ * Use the external LVDS or DSI PLL output as the dot clock when
+ * outputting to the LVDS or DSI encoder on an SoC that supports
+ * this clock routing option. We use the clock directly in that
+ * case, without any additional divider.
*/
escr = ESCR_DCLKSEL_DCLKIN;
} else {
@@ -298,10 +300,15 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
| DSMR_DIPM_DISP | DSMR_CSPM;
rcar_du_crtc_write(rcrtc, DSMR, dsmr);
+ hdse_offset = 19;
+ if (rcrtc->group->cmms_mask & BIT(rcrtc->index % 2))
+ hdse_offset += 25;
+
/* Display timings */
- rcar_du_crtc_write(rcrtc, HDSR, mode->htotal - mode->hsync_start - 19);
+ rcar_du_crtc_write(rcrtc, HDSR, mode->htotal - mode->hsync_start -
+ hdse_offset);
rcar_du_crtc_write(rcrtc, HDER, mode->htotal - mode->hsync_start +
- mode->hdisplay - 19);
+ mode->hdisplay - hdse_offset);
rcar_du_crtc_write(rcrtc, HSWR, mode->hsync_end -
mode->hsync_start - 1);
rcar_du_crtc_write(rcrtc, HCR, mode->htotal - 1);
@@ -836,6 +843,7 @@ rcar_du_crtc_mode_valid(struct drm_crtc *crtc,
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct rcar_du_device *rcdu = rcrtc->dev;
bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
+ unsigned int min_sync_porch;
unsigned int vbp;
if (interlaced && !rcar_du_has(rcdu, RCAR_DU_FEATURE_INTERLACED))
@@ -843,9 +851,14 @@ rcar_du_crtc_mode_valid(struct drm_crtc *crtc,
/*
* The hardware requires a minimum combined horizontal sync and back
- * porch of 20 pixels and a minimum vertical back porch of 3 lines.
+ * porch of 20 pixels (when CMM isn't used) or 45 pixels (when CMM is
+ * used), and a minimum vertical back porch of 3 lines.
*/
- if (mode->htotal - mode->hsync_start < 20)
+ min_sync_porch = 20;
+ if (rcrtc->group->cmms_mask & BIT(rcrtc->index % 2))
+ min_sync_porch += 25;
+
+ if (mode->htotal - mode->hsync_start < min_sync_porch)
return MODE_HBLANK_NARROW;
vbp = (mode->vtotal - mode->vsync_end) / (interlaced ? 2 : 1);
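
As a quick numerical check of the new limit: with the CMM in use, the combined horizontal sync plus back porch (htotal - hsync_start) must now be at least 20 + 25 = 45 pixels. A throwaway sketch of the same test on a sample mode (the timings below are made up, not taken from the driver):

#include <stdio.h>
#include <stdbool.h>

struct mode { int htotal, hsync_start; };

/* Mirror of the check in rcar_du_crtc_mode_valid(): the hardware needs
 * at least 20 px of hsync + back porch, plus 25 more when the CMM is used. */
static bool hblank_ok(const struct mode *m, bool cmm_in_use)
{
	int min_sync_porch = 20 + (cmm_in_use ? 25 : 0);

	return m->htotal - m->hsync_start >= min_sync_porch;
}

int main(void)
{
	struct mode m = { .htotal = 1650, .hsync_start = 1610 };	/* 40 px */

	printf("no CMM: %s\n", hblank_ok(&m, false) ? "ok" : "too narrow");
	printf("CMM:    %s\n", hblank_ok(&m, true) ? "ok" : "too narrow");
	return 0;
}

A mode that was fine without the CMM (40 px of blanking) is thus rejected once the CMM adds its 25-pixel delay.
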
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 5612a9e7a905..5a8131ef81d5 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -544,10 +544,12 @@ const char *rcar_du_output_name(enum rcar_du_output output)
static const char * const names[] = {
[RCAR_DU_OUTPUT_DPAD0] = "DPAD0",
[RCAR_DU_OUTPUT_DPAD1] = "DPAD1",
- [RCAR_DU_OUTPUT_LVDS0] = "LVDS0",
- [RCAR_DU_OUTPUT_LVDS1] = "LVDS1",
+ [RCAR_DU_OUTPUT_DSI0] = "DSI0",
+ [RCAR_DU_OUTPUT_DSI1] = "DSI1",
[RCAR_DU_OUTPUT_HDMI0] = "HDMI0",
[RCAR_DU_OUTPUT_HDMI1] = "HDMI1",
+ [RCAR_DU_OUTPUT_LVDS0] = "LVDS0",
+ [RCAR_DU_OUTPUT_LVDS1] = "LVDS1",
[RCAR_DU_OUTPUT_TCON] = "TCON",
};
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index eacb1f17f747..190dbb7f15dd 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -327,11 +327,11 @@ const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc)
*/
static const struct drm_gem_object_funcs rcar_du_gem_funcs = {
- .free = drm_gem_cma_free_object,
- .print_info = drm_gem_cma_print_info,
- .get_sg_table = drm_gem_cma_get_sg_table,
- .vmap = drm_gem_cma_vmap,
- .mmap = drm_gem_cma_mmap,
+ .free = drm_gem_cma_object_free,
+ .print_info = drm_gem_cma_object_print_info,
+ .get_sg_table = drm_gem_cma_object_get_sg_table,
+ .vmap = drm_gem_cma_object_vmap,
+ .mmap = drm_gem_cma_object_mmap,
.vm_ops = &drm_gem_cma_vm_ops,
};
diff --git a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c
new file mode 100644
index 000000000000..891bb956fd61
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c
@@ -0,0 +1,819 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * rcar_mipi_dsi.c -- R-Car MIPI DSI Encoder
+ *
+ * Copyright (C) 2020 Renesas Electronics Corporation
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+#include "rcar_mipi_dsi_regs.h"
+
+struct rcar_mipi_dsi {
+ struct device *dev;
+ const struct rcar_mipi_dsi_device_info *info;
+ struct reset_control *rstc;
+
+ struct mipi_dsi_host host;
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+ struct drm_connector connector;
+
+ void __iomem *mmio;
+ struct {
+ struct clk *mod;
+ struct clk *pll;
+ struct clk *dsi;
+ } clocks;
+
+ enum mipi_dsi_pixel_format format;
+ unsigned int num_data_lanes;
+ unsigned int lanes;
+};
+
+static inline struct rcar_mipi_dsi *
+bridge_to_rcar_mipi_dsi(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct rcar_mipi_dsi, bridge);
+}
+
+static inline struct rcar_mipi_dsi *
+host_to_rcar_mipi_dsi(struct mipi_dsi_host *host)
+{
+ return container_of(host, struct rcar_mipi_dsi, host);
+}
+
+static const u32 phtw[] = {
+ 0x01020114, 0x01600115, /* General testing */
+ 0x01030116, 0x0102011d, /* General testing */
+ 0x011101a4, 0x018601a4, /* 1Gbps testing */
+ 0x014201a0, 0x010001a3, /* 1Gbps testing */
+ 0x0101011f, /* 1Gbps testing */
+};
+
+static const u32 phtw2[] = {
+ 0x010c0130, 0x010c0140, /* General testing */
+ 0x010c0150, 0x010c0180, /* General testing */
+ 0x010c0190,
+ 0x010a0160, 0x010a0170,
+ 0x01800164, 0x01800174, /* 1Gbps testing */
+};
+
+static const u32 hsfreqrange_table[][2] = {
+ { 80000000U, 0x00 }, { 90000000U, 0x10 }, { 100000000U, 0x20 },
+ { 110000000U, 0x30 }, { 120000000U, 0x01 }, { 130000000U, 0x11 },
+ { 140000000U, 0x21 }, { 150000000U, 0x31 }, { 160000000U, 0x02 },
+ { 170000000U, 0x12 }, { 180000000U, 0x22 }, { 190000000U, 0x32 },
+ { 205000000U, 0x03 }, { 220000000U, 0x13 }, { 235000000U, 0x23 },
+ { 250000000U, 0x33 }, { 275000000U, 0x04 }, { 300000000U, 0x14 },
+ { 325000000U, 0x25 }, { 350000000U, 0x35 }, { 400000000U, 0x05 },
+ { 450000000U, 0x16 }, { 500000000U, 0x26 }, { 550000000U, 0x37 },
+ { 600000000U, 0x07 }, { 650000000U, 0x18 }, { 700000000U, 0x28 },
+ { 750000000U, 0x39 }, { 800000000U, 0x09 }, { 850000000U, 0x19 },
+ { 900000000U, 0x29 }, { 950000000U, 0x3a }, { 1000000000U, 0x0a },
+ { 1050000000U, 0x1a }, { 1100000000U, 0x2a }, { 1150000000U, 0x3b },
+ { 1200000000U, 0x0b }, { 1250000000U, 0x1b }, { 1300000000U, 0x2b },
+ { 1350000000U, 0x3c }, { 1400000000U, 0x0c }, { 1450000000U, 0x1c },
+ { 1500000000U, 0x2c }, { 1550000000U, 0x3d }, { 1600000000U, 0x0d },
+ { 1650000000U, 0x1d }, { 1700000000U, 0x2e }, { 1750000000U, 0x3e },
+ { 1800000000U, 0x0e }, { 1850000000U, 0x1e }, { 1900000000U, 0x2f },
+ { 1950000000U, 0x3f }, { 2000000000U, 0x0f }, { 2050000000U, 0x40 },
+ { 2100000000U, 0x41 }, { 2150000000U, 0x42 }, { 2200000000U, 0x43 },
+ { 2250000000U, 0x44 }, { 2300000000U, 0x45 }, { 2350000000U, 0x46 },
+ { 2400000000U, 0x47 }, { 2450000000U, 0x48 }, { 2500000000U, 0x49 },
+ { /* sentinel */ },
+};
+
+struct vco_cntrl_value {
+ u32 min_freq;
+ u32 max_freq;
+ u16 value;
+};
+
+static const struct vco_cntrl_value vco_cntrl_table[] = {
+ { .min_freq = 40000000U, .max_freq = 55000000U, .value = 0x3f },
+ { .min_freq = 52500000U, .max_freq = 80000000U, .value = 0x39 },
+ { .min_freq = 80000000U, .max_freq = 110000000U, .value = 0x2f },
+ { .min_freq = 105000000U, .max_freq = 160000000U, .value = 0x29 },
+ { .min_freq = 160000000U, .max_freq = 220000000U, .value = 0x1f },
+ { .min_freq = 210000000U, .max_freq = 320000000U, .value = 0x19 },
+ { .min_freq = 320000000U, .max_freq = 440000000U, .value = 0x0f },
+ { .min_freq = 420000000U, .max_freq = 660000000U, .value = 0x09 },
+ { .min_freq = 630000000U, .max_freq = 1149000000U, .value = 0x03 },
+ { .min_freq = 1100000000U, .max_freq = 1152000000U, .value = 0x01 },
+ { .min_freq = 1150000000U, .max_freq = 1250000000U, .value = 0x01 },
+ { /* sentinel */ },
+};
+
+static void rcar_mipi_dsi_write(struct rcar_mipi_dsi *dsi, u32 reg, u32 data)
+{
+ iowrite32(data, dsi->mmio + reg);
+}
+
+static u32 rcar_mipi_dsi_read(struct rcar_mipi_dsi *dsi, u32 reg)
+{
+ return ioread32(dsi->mmio + reg);
+}
+
+static void rcar_mipi_dsi_clr(struct rcar_mipi_dsi *dsi, u32 reg, u32 clr)
+{
+ rcar_mipi_dsi_write(dsi, reg, rcar_mipi_dsi_read(dsi, reg) & ~clr);
+}
+
+static void rcar_mipi_dsi_set(struct rcar_mipi_dsi *dsi, u32 reg, u32 set)
+{
+ rcar_mipi_dsi_write(dsi, reg, rcar_mipi_dsi_read(dsi, reg) | set);
+}
+
+static int rcar_mipi_dsi_phtw_test(struct rcar_mipi_dsi *dsi, u32 phtw)
+{
+ u32 status;
+ int ret;
+
+ rcar_mipi_dsi_write(dsi, PHTW, phtw);
+
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ !(status & (PHTW_DWEN | PHTW_CWEN)),
+ 2000, 10000, false, dsi, PHTW);
+ if (ret < 0) {
+ dev_err(dsi->dev, "PHY test interface write timeout (0x%08x)\n",
+ phtw);
+ return ret;
+ }
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Hardware Setup
+ */
+
+struct dsi_setup_info {
+ unsigned long fout;
+ u16 vco_cntrl;
+ u16 prop_cntrl;
+ u16 hsfreqrange;
+ u16 div;
+ unsigned int m;
+ unsigned int n;
+};
+
+static void rcar_mipi_dsi_parameters_calc(struct rcar_mipi_dsi *dsi,
+ struct clk *clk, unsigned long target,
+ struct dsi_setup_info *setup_info)
+{
+
+ const struct vco_cntrl_value *vco_cntrl;
+ unsigned long fout_target;
+ unsigned long fin, fout;
+ unsigned long hsfreq;
+ unsigned int best_err = -1;
+ unsigned int divider;
+ unsigned int n;
+ unsigned int i;
+ unsigned int err;
+
+ /*
+ * Calculate Fout = dot clock * ColorDepth / (2 * Lane Count)
+ * The valid range of Fout is [40 - 1250] MHz.
+ */
+ fout_target = target * mipi_dsi_pixel_format_to_bpp(dsi->format)
+ / (2 * dsi->lanes);
+ if (fout_target < 40000000 || fout_target > 1250000000)
+ return;
+
+ /* Find vco_cntrl */
+ for (vco_cntrl = vco_cntrl_table; vco_cntrl->min_freq != 0; vco_cntrl++) {
+ if (fout_target > vco_cntrl->min_freq &&
+ fout_target <= vco_cntrl->max_freq) {
+ setup_info->vco_cntrl = vco_cntrl->value;
+ if (fout_target >= 1150000000)
+ setup_info->prop_cntrl = 0x0c;
+ else
+ setup_info->prop_cntrl = 0x0b;
+ break;
+ }
+ }
+
+ /* Add divider */
+ setup_info->div = (setup_info->vco_cntrl & 0x30) >> 4;
+
+ /* Find hsfreqrange */
+ hsfreq = fout_target * 2;
+ for (i = 0; i < ARRAY_SIZE(hsfreqrange_table); i++) {
+ if (hsfreqrange_table[i][0] >= hsfreq) {
+ setup_info->hsfreqrange = hsfreqrange_table[i][1];
+ break;
+ }
+ }
+
+ /*
+ * Calculate n and m for the PLL clock.
+ * Following the HW manual, the valid ranges are
+ * n = [3-8] and m = [64-625].
+ */
+ fin = clk_get_rate(clk);
+ divider = 1 << setup_info->div;
+ for (n = 3; n < 9; n++) {
+ unsigned long fpfd;
+ unsigned int m;
+
+ fpfd = fin / n;
+
+ for (m = 64; m < 626; m++) {
+ fout = fpfd * m / divider;
+ err = abs((long)(fout - fout_target) * 10000 /
+ (long)fout_target);
+ if (err < best_err) {
+ setup_info->m = m - 2;
+ setup_info->n = n - 1;
+ setup_info->fout = fout;
+ best_err = err;
+ if (err == 0)
+ goto done;
+ }
+ }
+ }
+
+done:
+ dev_dbg(dsi->dev,
+ "%pC %lu Hz -> Fout %lu Hz (target %lu Hz, error %d.%02u%%), PLL M/N/DIV %u/%u/%u\n",
+ clk, fin, setup_info->fout, fout_target, best_err / 100,
+ best_err % 100, setup_info->m, setup_info->n, setup_info->div);
+ dev_dbg(dsi->dev,
+ "vco_cntrl = 0x%x\tprop_cntrl = 0x%x\thsfreqrange = 0x%x\n",
+ setup_info->vco_cntrl, setup_info->prop_cntrl,
+ setup_info->hsfreqrange);
+}
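
To make the numbers above concrete, here is a standalone worked example of the same Fout target and PLL M/N search for one sample configuration. The dot clock, format and lane count are illustrative, and the 16.66 MHz reference clock is an assumed value, not taken from the driver or the hardware manual:

#include <stdio.h>

/*
 * RGB24 over four lanes at a 148.5 MHz dot clock gives
 *   Fout = 148.5 MHz * 24 / (2 * 4) = 445.5 MHz,
 * which falls in the 420-660 MHz row of vco_cntrl_table (0x09, DIV
 * bits 0, so the post divider is 1).
 */
int main(void)
{
	const unsigned long long fin = 16666666;	/* assumed PLL reference */
	const unsigned long long fout_target = 148500000ULL * 24 / (2 * 4);
	unsigned long long best_fout = 0, best_err = ~0ULL;
	unsigned int best_n = 0, best_m = 0;

	for (unsigned int n = 3; n <= 8; n++) {
		unsigned long long fpfd = fin / n;

		for (unsigned int m = 64; m <= 625; m++) {
			unsigned long long fout = fpfd * m;	/* divider == 1 */
			unsigned long long err = fout > fout_target ?
				fout - fout_target : fout_target - fout;

			if (err < best_err) {
				best_err = err;
				best_fout = fout;
				best_n = n;
				best_m = m;
			}
		}
	}

	printf("target %llu Hz -> %llu Hz with N=%u, M=%u\n",
	       fout_target, best_fout, best_n, best_m);
	return 0;
}

The driver then stores n - 1 and m - 2, since that is how the CLOCKSET2 register encodes the dividers.
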
+
+static void rcar_mipi_dsi_set_display_timing(struct rcar_mipi_dsi *dsi,
+ const struct drm_display_mode *mode)
+{
+ u32 setr;
+ u32 vprmset0r;
+ u32 vprmset1r;
+ u32 vprmset2r;
+ u32 vprmset3r;
+ u32 vprmset4r;
+
+ /* Configuration for Pixel Stream and Packet Header */
+ if (mipi_dsi_pixel_format_to_bpp(dsi->format) == 24)
+ rcar_mipi_dsi_write(dsi, TXVMPSPHSETR, TXVMPSPHSETR_DT_RGB24);
+ else if (mipi_dsi_pixel_format_to_bpp(dsi->format) == 18)
+ rcar_mipi_dsi_write(dsi, TXVMPSPHSETR, TXVMPSPHSETR_DT_RGB18);
+ else if (mipi_dsi_pixel_format_to_bpp(dsi->format) == 16)
+ rcar_mipi_dsi_write(dsi, TXVMPSPHSETR, TXVMPSPHSETR_DT_RGB16);
+ else {
+ dev_warn(dsi->dev, "unsupported format");
+ return;
+ }
+
+ /* Configuration for Blanking sequence and Input Pixel */
+ setr = TXVMSETR_HSABPEN_EN | TXVMSETR_HBPBPEN_EN
+ | TXVMSETR_HFPBPEN_EN | TXVMSETR_SYNSEQ_PULSES
+ | TXVMSETR_PIXWDTH | TXVMSETR_VSTPM;
+ rcar_mipi_dsi_write(dsi, TXVMSETR, setr);
+
+ /* Configuration for Video Parameters */
+ vprmset0r = (mode->flags & DRM_MODE_FLAG_PVSYNC ?
+ TXVMVPRMSET0R_VSPOL_HIG : TXVMVPRMSET0R_VSPOL_LOW)
+ | (mode->flags & DRM_MODE_FLAG_PHSYNC ?
+ TXVMVPRMSET0R_HSPOL_HIG : TXVMVPRMSET0R_HSPOL_LOW)
+ | TXVMVPRMSET0R_CSPC_RGB | TXVMVPRMSET0R_BPP_24;
+
+ vprmset1r = TXVMVPRMSET1R_VACTIVE(mode->vdisplay)
+ | TXVMVPRMSET1R_VSA(mode->vsync_end - mode->vsync_start);
+
+ vprmset2r = TXVMVPRMSET2R_VFP(mode->vsync_start - mode->vdisplay)
+ | TXVMVPRMSET2R_VBP(mode->vtotal - mode->vsync_end);
+
+ vprmset3r = TXVMVPRMSET3R_HACTIVE(mode->hdisplay)
+ | TXVMVPRMSET3R_HSA(mode->hsync_end - mode->hsync_start);
+
+ vprmset4r = TXVMVPRMSET4R_HFP(mode->hsync_start - mode->hdisplay)
+ | TXVMVPRMSET4R_HBP(mode->htotal - mode->hsync_end);
+
+ rcar_mipi_dsi_write(dsi, TXVMVPRMSET0R, vprmset0r);
+ rcar_mipi_dsi_write(dsi, TXVMVPRMSET1R, vprmset1r);
+ rcar_mipi_dsi_write(dsi, TXVMVPRMSET2R, vprmset2r);
+ rcar_mipi_dsi_write(dsi, TXVMVPRMSET3R, vprmset3r);
+ rcar_mipi_dsi_write(dsi, TXVMVPRMSET4R, vprmset4r);
+}
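
The TXVMVPRMSET registers above are filled from the usual DRM mode fields, where the porches are differences between hdisplay, hsync_start, hsync_end and htotal. A small sketch of that mapping with standard 1920x1080 CEA timings (sample numbers, independent of the driver):

#include <stdio.h>

struct mode { int hdisplay, hsync_start, hsync_end, htotal; };

/* How rcar_mipi_dsi_set_display_timing() derives the horizontal
 * blanking intervals from a DRM display mode. */
int main(void)
{
	struct mode m = {
		.hdisplay = 1920, .hsync_start = 2008,
		.hsync_end = 2052, .htotal = 2200,
	};

	printf("HFP=%d HSA=%d HBP=%d\n",
	       m.hsync_start - m.hdisplay,	/* front porch: 88 */
	       m.hsync_end - m.hsync_start,	/* sync width:  44 */
	       m.htotal - m.hsync_end);		/* back porch: 148 */
	return 0;
}

The vertical parameters follow the same pattern with vdisplay, vsync_start, vsync_end and vtotal.
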
+
+static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
+ const struct drm_display_mode *mode)
+{
+ struct dsi_setup_info setup_info = {};
+ unsigned int timeout;
+ int ret, i;
+ int dsi_format;
+ u32 phy_setup;
+ u32 clockset2, clockset3;
+ u32 ppisetr;
+ u32 vclkset;
+
+ /* Checking valid format */
+ dsi_format = mipi_dsi_pixel_format_to_bpp(dsi->format);
+ if (dsi_format < 0) {
+ dev_warn(dsi->dev, "invalid format");
+ return -EINVAL;
+ }
+
+ /* Parameters Calculation */
+ rcar_mipi_dsi_parameters_calc(dsi, dsi->clocks.pll,
+ mode->clock * 1000, &setup_info);
+
+ /* LPCLK enable */
+ rcar_mipi_dsi_set(dsi, LPCLKSET, LPCLKSET_CKEN);
+
+ /* CFGCLK enabled */
+ rcar_mipi_dsi_set(dsi, CFGCLKSET, CFGCLKSET_CKEN);
+
+ rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_RSTZ);
+ rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ);
+
+ rcar_mipi_dsi_set(dsi, PHTC, PHTC_TESTCLR);
+ rcar_mipi_dsi_clr(dsi, PHTC, PHTC_TESTCLR);
+
+ /* PHY setting */
+ phy_setup = rcar_mipi_dsi_read(dsi, PHYSETUP);
+ phy_setup &= ~PHYSETUP_HSFREQRANGE_MASK;
+ phy_setup |= PHYSETUP_HSFREQRANGE(setup_info.hsfreqrange);
+ rcar_mipi_dsi_write(dsi, PHYSETUP, phy_setup);
+
+ for (i = 0; i < ARRAY_SIZE(phtw); i++) {
+ ret = rcar_mipi_dsi_phtw_test(dsi, phtw[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* PLL Clock Setting */
+ rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_SHADOW_CLEAR);
+ rcar_mipi_dsi_set(dsi, CLOCKSET1, CLOCKSET1_SHADOW_CLEAR);
+ rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_SHADOW_CLEAR);
+
+ clockset2 = CLOCKSET2_M(setup_info.m) | CLOCKSET2_N(setup_info.n)
+ | CLOCKSET2_VCO_CNTRL(setup_info.vco_cntrl);
+ clockset3 = CLOCKSET3_PROP_CNTRL(setup_info.prop_cntrl)
+ | CLOCKSET3_INT_CNTRL(0)
+ | CLOCKSET3_CPBIAS_CNTRL(0x10)
+ | CLOCKSET3_GMP_CNTRL(1);
+ rcar_mipi_dsi_write(dsi, CLOCKSET2, clockset2);
+ rcar_mipi_dsi_write(dsi, CLOCKSET3, clockset3);
+
+ rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_UPDATEPLL);
+ rcar_mipi_dsi_set(dsi, CLOCKSET1, CLOCKSET1_UPDATEPLL);
+ udelay(10);
+ rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_UPDATEPLL);
+
+ ppisetr = PPISETR_DLEN_3 | PPISETR_CLEN;
+ rcar_mipi_dsi_write(dsi, PPISETR, ppisetr);
+
+ rcar_mipi_dsi_set(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ);
+ rcar_mipi_dsi_set(dsi, PHYSETUP, PHYSETUP_RSTZ);
+ usleep_range(400, 500);
+
+ /* Checking PPI clock status register */
+ for (timeout = 10; timeout > 0; --timeout) {
+ if ((rcar_mipi_dsi_read(dsi, PPICLSR) & PPICLSR_STPST) &&
+ (rcar_mipi_dsi_read(dsi, PPIDLSR) & PPIDLSR_STPST) &&
+ (rcar_mipi_dsi_read(dsi, CLOCKSET1) & CLOCKSET1_LOCK))
+ break;
+
+ usleep_range(1000, 2000);
+ }
+
+ if (!timeout) {
+ dev_err(dsi->dev, "failed to enable PPI clock\n");
+ return -ETIMEDOUT;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(phtw2); i++) {
+ ret = rcar_mipi_dsi_phtw_test(dsi, phtw2[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Enable DOT clock */
+ vclkset = VCLKSET_CKEN;
+ rcar_mipi_dsi_set(dsi, VCLKSET, vclkset);
+
+ if (dsi_format == 24)
+ vclkset |= VCLKSET_BPP_24;
+ else if (dsi_format == 18)
+ vclkset |= VCLKSET_BPP_18;
+ else if (dsi_format == 16)
+ vclkset |= VCLKSET_BPP_16;
+ else {
+ dev_warn(dsi->dev, "unsupported format");
+ return -EINVAL;
+ }
+ vclkset |= VCLKSET_COLOR_RGB | VCLKSET_DIV(setup_info.div)
+ | VCLKSET_LANE(dsi->lanes - 1);
+
+ rcar_mipi_dsi_set(dsi, VCLKSET, vclkset);
+
+ /* After setting VCLKSET register, enable VCLKEN */
+ rcar_mipi_dsi_set(dsi, VCLKEN, VCLKEN_CKEN);
+
+ dev_dbg(dsi->dev, "DSI device is started\n");
+
+ return 0;
+}
+
+static void rcar_mipi_dsi_shutdown(struct rcar_mipi_dsi *dsi)
+{
+ rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_RSTZ);
+ rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ);
+
+ dev_dbg(dsi->dev, "DSI device is shutdown\n");
+}
+
+static int rcar_mipi_dsi_clk_enable(struct rcar_mipi_dsi *dsi)
+{
+ int ret;
+
+ reset_control_deassert(dsi->rstc);
+
+ ret = clk_prepare_enable(dsi->clocks.mod);
+ if (ret < 0)
+ goto err_reset;
+
+ ret = clk_prepare_enable(dsi->clocks.dsi);
+ if (ret < 0)
+ goto err_clock;
+
+ return 0;
+
+err_clock:
+ clk_disable_unprepare(dsi->clocks.mod);
+err_reset:
+ reset_control_assert(dsi->rstc);
+ return ret;
+}
+
+static void rcar_mipi_dsi_clk_disable(struct rcar_mipi_dsi *dsi)
+{
+ clk_disable_unprepare(dsi->clocks.dsi);
+ clk_disable_unprepare(dsi->clocks.mod);
+
+ reset_control_assert(dsi->rstc);
+}
+
+static int rcar_mipi_dsi_start_hs_clock(struct rcar_mipi_dsi *dsi)
+{
+ /*
+ * The HW manual says to check that TxDDRClkHS-Q is stable, but does
+ * not describe how. Skip this check for now.
+ */
+ u32 status;
+ int ret;
+
+ /* Start HS clock. */
+ rcar_mipi_dsi_set(dsi, PPICLCR, PPICLCR_TXREQHS);
+
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ status & PPICLSR_TOHS,
+ 2000, 10000, false, dsi, PPICLSR);
+ if (ret < 0) {
+ dev_err(dsi->dev, "failed to enable HS clock\n");
+ return ret;
+ }
+
+ rcar_mipi_dsi_set(dsi, PPICLSCR, PPICLSCR_TOHS);
+
+ return 0;
+}
+
+static int rcar_mipi_dsi_start_video(struct rcar_mipi_dsi *dsi)
+{
+ u32 status;
+ int ret;
+
+ /* Wait for the link to be ready. */
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ !(status & (LINKSR_LPBUSY | LINKSR_HSBUSY)),
+ 2000, 10000, false, dsi, LINKSR);
+ if (ret < 0) {
+ dev_err(dsi->dev, "Link failed to become ready\n");
+ return ret;
+ }
+
+ /* De-assert video FIFO clear. */
+ rcar_mipi_dsi_clr(dsi, TXVMCR, TXVMCR_VFCLR);
+
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ status & TXVMSR_VFRDY,
+ 2000, 10000, false, dsi, TXVMSR);
+ if (ret < 0) {
+ dev_err(dsi->dev, "Failed to de-assert video FIFO clear\n");
+ return ret;
+ }
+
+ /* Enable transmission in video mode. */
+ rcar_mipi_dsi_set(dsi, TXVMCR, TXVMCR_EN_VIDEO);
+
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ status & TXVMSR_RDY,
+ 2000, 10000, false, dsi, TXVMSR);
+ if (ret < 0) {
+ dev_err(dsi->dev, "Failed to enable video transmission\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Bridge
+ */
+
+static int rcar_mipi_dsi_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
+
+ return drm_bridge_attach(bridge->encoder, dsi->next_bridge, bridge,
+ flags);
+}
+
+static void rcar_mipi_dsi_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct drm_atomic_state *state = old_bridge_state->base.state;
+ struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
+ const struct drm_display_mode *mode;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+ int ret;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state,
+ bridge->encoder);
+ crtc = drm_atomic_get_new_connector_state(state, connector)->crtc;
+ mode = &drm_atomic_get_new_crtc_state(state, crtc)->adjusted_mode;
+
+ ret = rcar_mipi_dsi_clk_enable(dsi);
+ if (ret < 0) {
+ dev_err(dsi->dev, "failed to enable DSI clocks\n");
+ return;
+ }
+
+ ret = rcar_mipi_dsi_startup(dsi, mode);
+ if (ret < 0)
+ goto err_dsi_startup;
+
+ rcar_mipi_dsi_set_display_timing(dsi, mode);
+
+ ret = rcar_mipi_dsi_start_hs_clock(dsi);
+ if (ret < 0)
+ goto err_dsi_start_hs;
+
+ rcar_mipi_dsi_start_video(dsi);
+
+ return;
+
+err_dsi_start_hs:
+ rcar_mipi_dsi_shutdown(dsi);
+err_dsi_startup:
+ rcar_mipi_dsi_clk_disable(dsi);
+}
+
+static void rcar_mipi_dsi_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
+
+ rcar_mipi_dsi_shutdown(dsi);
+ rcar_mipi_dsi_clk_disable(dsi);
+}
+
+static enum drm_mode_status
+rcar_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ if (mode->clock > 297000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static const struct drm_bridge_funcs rcar_mipi_dsi_bridge_ops = {
+ .attach = rcar_mipi_dsi_attach,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_enable = rcar_mipi_dsi_atomic_enable,
+ .atomic_disable = rcar_mipi_dsi_atomic_disable,
+ .mode_valid = rcar_mipi_dsi_bridge_mode_valid,
+};
+
+/* -----------------------------------------------------------------------------
+ * Host setting
+ */
+
+static int rcar_mipi_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct rcar_mipi_dsi *dsi = host_to_rcar_mipi_dsi(host);
+ int ret;
+
+ if (device->lanes > dsi->num_data_lanes)
+ return -EINVAL;
+
+ dsi->lanes = device->lanes;
+ dsi->format = device->format;
+
+ dsi->next_bridge = devm_drm_of_get_bridge(dsi->dev, dsi->dev->of_node,
+ 1, 0);
+ if (IS_ERR(dsi->next_bridge)) {
+ ret = PTR_ERR(dsi->next_bridge);
+ dev_err(dsi->dev, "failed to get next bridge: %d\n", ret);
+ return ret;
+ }
+
+ /* Initialize the DRM bridge. */
+ dsi->bridge.funcs = &rcar_mipi_dsi_bridge_ops;
+ dsi->bridge.of_node = dsi->dev->of_node;
+ drm_bridge_add(&dsi->bridge);
+
+ return 0;
+}
+
+static int rcar_mipi_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct rcar_mipi_dsi *dsi = host_to_rcar_mipi_dsi(host);
+
+ drm_bridge_remove(&dsi->bridge);
+
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops rcar_mipi_dsi_host_ops = {
+ .attach = rcar_mipi_dsi_host_attach,
+ .detach = rcar_mipi_dsi_host_detach,
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe & Remove
+ */
+
+static int rcar_mipi_dsi_parse_dt(struct rcar_mipi_dsi *dsi)
+{
+ struct device_node *ep;
+ u32 data_lanes[4];
+ int ret;
+
+ ep = of_graph_get_endpoint_by_regs(dsi->dev->of_node, 1, 0);
+ if (!ep) {
+ dev_dbg(dsi->dev, "unconnected port@1\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_variable_u32_array(ep, "data-lanes", data_lanes,
+ 1, 4);
+ of_node_put(ep);
+
+ if (ret < 0) {
+ dev_err(dsi->dev, "missing or invalid data-lanes property\n");
+ return -ENODEV;
+ }
+
+ dsi->num_data_lanes = ret;
+ return 0;
+}
+
+static struct clk *rcar_mipi_dsi_get_clock(struct rcar_mipi_dsi *dsi,
+ const char *name,
+ bool optional)
+{
+ struct clk *clk;
+
+ clk = devm_clk_get(dsi->dev, name);
+ if (!IS_ERR(clk))
+ return clk;
+
+ if (PTR_ERR(clk) == -ENOENT && optional)
+ return NULL;
+
+ dev_err_probe(dsi->dev, PTR_ERR(clk), "failed to get %s clock\n",
+ name ? name : "module");
+
+ return clk;
+}
+
+static int rcar_mipi_dsi_get_clocks(struct rcar_mipi_dsi *dsi)
+{
+ dsi->clocks.mod = rcar_mipi_dsi_get_clock(dsi, NULL, false);
+ if (IS_ERR(dsi->clocks.mod))
+ return PTR_ERR(dsi->clocks.mod);
+
+ dsi->clocks.pll = rcar_mipi_dsi_get_clock(dsi, "pll", true);
+ if (IS_ERR(dsi->clocks.pll))
+ return PTR_ERR(dsi->clocks.pll);
+
+ dsi->clocks.dsi = rcar_mipi_dsi_get_clock(dsi, "dsi", true);
+ if (IS_ERR(dsi->clocks.dsi))
+ return PTR_ERR(dsi->clocks.dsi);
+
+ if (!dsi->clocks.pll && !dsi->clocks.dsi) {
+ dev_err(dsi->dev, "no input clock (pll, dsi)\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rcar_mipi_dsi_probe(struct platform_device *pdev)
+{
+ struct rcar_mipi_dsi *dsi;
+ struct resource *mem;
+ int ret;
+
+ dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
+ if (dsi == NULL)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, dsi);
+
+ dsi->dev = &pdev->dev;
+ dsi->info = of_device_get_match_data(&pdev->dev);
+
+ ret = rcar_mipi_dsi_parse_dt(dsi);
+ if (ret < 0)
+ return ret;
+
+ /* Acquire resources. */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dsi->mmio = devm_ioremap_resource(dsi->dev, mem);
+ if (IS_ERR(dsi->mmio))
+ return PTR_ERR(dsi->mmio);
+
+ ret = rcar_mipi_dsi_get_clocks(dsi);
+ if (ret < 0)
+ return ret;
+
+ dsi->rstc = devm_reset_control_get(dsi->dev, NULL);
+ if (IS_ERR(dsi->rstc)) {
+ dev_err(dsi->dev, "failed to get cpg reset\n");
+ return PTR_ERR(dsi->rstc);
+ }
+
+ /* Initialize the DSI host. */
+ dsi->host.dev = dsi->dev;
+ dsi->host.ops = &rcar_mipi_dsi_host_ops;
+ ret = mipi_dsi_host_register(&dsi->host);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int rcar_mipi_dsi_remove(struct platform_device *pdev)
+{
+ struct rcar_mipi_dsi *dsi = platform_get_drvdata(pdev);
+
+ mipi_dsi_host_unregister(&dsi->host);
+
+ return 0;
+}
+
+static const struct of_device_id rcar_mipi_dsi_of_table[] = {
+ { .compatible = "renesas,r8a779a0-dsi-csi2-tx" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, rcar_mipi_dsi_of_table);
+
+static struct platform_driver rcar_mipi_dsi_platform_driver = {
+ .probe = rcar_mipi_dsi_probe,
+ .remove = rcar_mipi_dsi_remove,
+ .driver = {
+ .name = "rcar-mipi-dsi",
+ .of_match_table = rcar_mipi_dsi_of_table,
+ },
+};
+
+module_platform_driver(rcar_mipi_dsi_platform_driver);
+
+MODULE_DESCRIPTION("Renesas R-Car MIPI DSI Encoder Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h
new file mode 100644
index 000000000000..0e7a9274749f
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * rcar_mipi_dsi_regs.h -- R-Car MIPI DSI Interface Registers Definitions
+ *
+ * Copyright (C) 2020 Renesas Electronics Corporation
+ */
+
+#ifndef __RCAR_MIPI_DSI_REGS_H__
+#define __RCAR_MIPI_DSI_REGS_H__
+
+#define LINKSR 0x010
+#define LINKSR_LPBUSY (1 << 1)
+#define LINKSR_HSBUSY (1 << 0)
+
+/*
+ * Video Mode Register
+ */
+#define TXVMSETR 0x180
+#define TXVMSETR_SYNSEQ_PULSES (0 << 16)
+#define TXVMSETR_SYNSEQ_EVENTS (1 << 16)
+#define TXVMSETR_VSTPM (1 << 15)
+#define TXVMSETR_PIXWDTH (1 << 8)
+#define TXVMSETR_VSEN_EN (1 << 4)
+#define TXVMSETR_VSEN_DIS (0 << 4)
+#define TXVMSETR_HFPBPEN_EN (1 << 2)
+#define TXVMSETR_HFPBPEN_DIS (0 << 2)
+#define TXVMSETR_HBPBPEN_EN (1 << 1)
+#define TXVMSETR_HBPBPEN_DIS (0 << 1)
+#define TXVMSETR_HSABPEN_EN (1 << 0)
+#define TXVMSETR_HSABPEN_DIS (0 << 0)
+
+#define TXVMCR 0x190
+#define TXVMCR_VFCLR (1 << 12)
+#define TXVMCR_EN_VIDEO (1 << 0)
+
+#define TXVMSR 0x1a0
+#define TXVMSR_STR (1 << 16)
+#define TXVMSR_VFRDY (1 << 12)
+#define TXVMSR_ACT (1 << 8)
+#define TXVMSR_RDY (1 << 0)
+
+#define TXVMSCR 0x1a4
+#define TXVMSCR_STR (1 << 16)
+
+#define TXVMPSPHSETR 0x1c0
+#define TXVMPSPHSETR_DT_RGB16 (0x0e << 16)
+#define TXVMPSPHSETR_DT_RGB18 (0x1e << 16)
+#define TXVMPSPHSETR_DT_RGB18_LS (0x2e << 16)
+#define TXVMPSPHSETR_DT_RGB24 (0x3e << 16)
+#define TXVMPSPHSETR_DT_YCBCR16 (0x2c << 16)
+
+#define TXVMVPRMSET0R 0x1d0
+#define TXVMVPRMSET0R_HSPOL_HIG (0 << 17)
+#define TXVMVPRMSET0R_HSPOL_LOW (1 << 17)
+#define TXVMVPRMSET0R_VSPOL_HIG (0 << 16)
+#define TXVMVPRMSET0R_VSPOL_LOW (1 << 16)
+#define TXVMVPRMSET0R_CSPC_RGB (0 << 4)
+#define TXVMVPRMSET0R_CSPC_YCbCr (1 << 4)
+#define TXVMVPRMSET0R_BPP_16 (0 << 0)
+#define TXVMVPRMSET0R_BPP_18 (1 << 0)
+#define TXVMVPRMSET0R_BPP_24 (2 << 0)
+
+#define TXVMVPRMSET1R 0x1d4
+#define TXVMVPRMSET1R_VACTIVE(x) (((x) & 0x7fff) << 16)
+#define TXVMVPRMSET1R_VSA(x) (((x) & 0xfff) << 0)
+
+#define TXVMVPRMSET2R 0x1d8
+#define TXVMVPRMSET2R_VFP(x) (((x) & 0x1fff) << 16)
+#define TXVMVPRMSET2R_VBP(x) (((x) & 0x1fff) << 0)
+
+#define TXVMVPRMSET3R 0x1dc
+#define TXVMVPRMSET3R_HACTIVE(x) (((x) & 0x7fff) << 16)
+#define TXVMVPRMSET3R_HSA(x) (((x) & 0xfff) << 0)
+
+#define TXVMVPRMSET4R 0x1e0
+#define TXVMVPRMSET4R_HFP(x) (((x) & 0x1fff) << 16)
+#define TXVMVPRMSET4R_HBP(x) (((x) & 0x1fff) << 0)
+
+/*
+ * PHY-Protocol Interface (PPI) Registers
+ */
+#define PPISETR 0x700
+#define PPISETR_DLEN_0 (0x1 << 0)
+#define PPISETR_DLEN_1 (0x3 << 0)
+#define PPISETR_DLEN_2 (0x7 << 0)
+#define PPISETR_DLEN_3 (0xf << 0)
+#define PPISETR_CLEN (1 << 8)
+
+#define PPICLCR 0x710
+#define PPICLCR_TXREQHS (1 << 8)
+#define PPICLCR_TXULPSEXT (1 << 1)
+#define PPICLCR_TXULPSCLK (1 << 0)
+
+#define PPICLSR 0x720
+#define PPICLSR_HSTOLP (1 << 27)
+#define PPICLSR_TOHS (1 << 26)
+#define PPICLSR_STPST (1 << 0)
+
+#define PPICLSCR 0x724
+#define PPICLSCR_HSTOLP (1 << 27)
+#define PPICLSCR_TOHS (1 << 26)
+
+#define PPIDLSR 0x760
+#define PPIDLSR_STPST (0xf << 0)
+
+/*
+ * Clocks registers
+ */
+#define LPCLKSET 0x1000
+#define LPCLKSET_CKEN (1 << 8)
+#define LPCLKSET_LPCLKDIV(x) (((x) & 0x3f) << 0)
+
+#define CFGCLKSET 0x1004
+#define CFGCLKSET_CKEN (1 << 8)
+#define CFGCLKSET_CFGCLKDIV(x) (((x) & 0x3f) << 0)
+
+#define DOTCLKDIV 0x1008
+#define DOTCLKDIV_CKEN (1 << 8)
+#define DOTCLKDIV_DOTCLKDIV(x) (((x) & 0x3f) << 0)
+
+#define VCLKSET 0x100c
+#define VCLKSET_CKEN (1 << 16)
+#define VCLKSET_COLOR_RGB (0 << 8)
+#define VCLKSET_COLOR_YCC (1 << 8)
+#define VCLKSET_DIV(x) (((x) & 0x3) << 4)
+#define VCLKSET_BPP_16 (0 << 2)
+#define VCLKSET_BPP_18 (1 << 2)
+#define VCLKSET_BPP_18L (2 << 2)
+#define VCLKSET_BPP_24 (3 << 2)
+#define VCLKSET_LANE(x) (((x) & 0x3) << 0)
+
+#define VCLKEN 0x1010
+#define VCLKEN_CKEN (1 << 0)
+
+#define PHYSETUP 0x1014
+#define PHYSETUP_HSFREQRANGE(x) (((x) & 0x7f) << 16)
+#define PHYSETUP_HSFREQRANGE_MASK (0x7f << 16)
+#define PHYSETUP_CFGCLKFREQRANGE(x) (((x) & 0x3f) << 8)
+#define PHYSETUP_SHUTDOWNZ (1 << 1)
+#define PHYSETUP_RSTZ (1 << 0)
+
+#define CLOCKSET1 0x101c
+#define CLOCKSET1_LOCK_PHY (1 << 17)
+#define CLOCKSET1_LOCK (1 << 16)
+#define CLOCKSET1_CLKSEL (1 << 8)
+#define CLOCKSET1_CLKINSEL_EXTAL (0 << 2)
+#define CLOCKSET1_CLKINSEL_DIG (1 << 2)
+#define CLOCKSET1_CLKINSEL_DU (1 << 3)
+#define CLOCKSET1_SHADOW_CLEAR (1 << 1)
+#define CLOCKSET1_UPDATEPLL (1 << 0)
+
+#define CLOCKSET2 0x1020
+#define CLOCKSET2_M(x) (((x) & 0xfff) << 16)
+#define CLOCKSET2_VCO_CNTRL(x) (((x) & 0x3f) << 8)
+#define CLOCKSET2_N(x) (((x) & 0xf) << 0)
+
+#define CLOCKSET3 0x1024
+#define CLOCKSET3_PROP_CNTRL(x) (((x) & 0x3f) << 24)
+#define CLOCKSET3_INT_CNTRL(x) (((x) & 0x3f) << 16)
+#define CLOCKSET3_CPBIAS_CNTRL(x) (((x) & 0x7f) << 8)
+#define CLOCKSET3_GMP_CNTRL(x) (((x) & 0x3) << 0)
+
+#define PHTW 0x1034
+#define PHTW_DWEN (1 << 24)
+#define PHTW_TESTDIN_DATA(x) (((x) & 0xff) << 16)
+#define PHTW_CWEN (1 << 8)
+#define PHTW_TESTDIN_CODE(x) (((x) & 0xff) << 0)
+
+#define PHTC 0x103c
+#define PHTC_TESTCLR (1 << 0)
+
+#endif /* __RCAR_MIPI_DSI_REGS_H__ */
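The field macros in this header all follow the (((x) & mask) << shift) pattern, so a complete register value is composed by OR-ing fields together before a single write. A minimal sketch, assuming iowrite32() access to the mapped register block and a hypothetical function name (this is not the driver's own code):

/* Illustrative only: compose TXVMVPRMSET1R from a drm_display_mode. */
static void example_write_vertical_timing(void __iomem *mmio,
					  const struct drm_display_mode *mode)
{
	u32 vsa = mode->vsync_end - mode->vsync_start;

	iowrite32(TXVMVPRMSET1R_VACTIVE(mode->vdisplay) |
		  TXVMVPRMSET1R_VSA(vsa),
		  mmio + TXVMVPRMSET1R);
}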
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index 17a9e7eb2130..1a56f696558c 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -5,7 +5,6 @@
rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \
rockchip_drm_gem.o rockchip_drm_vop.o rockchip_vop_reg.o
-rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o
rockchipdrm-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_CDN_DP) += cdn-dp-core.o cdn-dp-reg.o
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 69c699459dce..bec207de4544 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -26,7 +26,6 @@
#include "rockchip_drm_drv.h"
#include "rockchip_drm_fb.h"
-#include "rockchip_drm_fbdev.h"
#include "rockchip_drm_gem.h"
#define DRIVER_NAME "rockchip"
@@ -159,10 +158,6 @@ static int rockchip_drm_bind(struct device *dev)
drm_mode_config_reset(drm_dev);
- ret = rockchip_drm_fbdev_init(drm_dev);
- if (ret)
- goto err_unbind_all;
-
/* init kms poll for handling hpd */
drm_kms_helper_poll_init(drm_dev);
@@ -170,10 +165,11 @@ static int rockchip_drm_bind(struct device *dev)
if (ret)
goto err_kms_helper_poll_fini;
+ drm_fbdev_generic_setup(drm_dev, 0);
+
return 0;
err_kms_helper_poll_fini:
drm_kms_helper_poll_fini(drm_dev);
- rockchip_drm_fbdev_fini(drm_dev);
err_unbind_all:
component_unbind_all(dev, drm_dev);
err_iommu_cleanup:
@@ -189,7 +185,6 @@ static void rockchip_drm_unbind(struct device *dev)
drm_dev_unregister(drm_dev);
- rockchip_drm_fbdev_fini(drm_dev);
drm_kms_helper_poll_fini(drm_dev);
drm_atomic_helper_shutdown(drm_dev);
@@ -203,7 +198,6 @@ DEFINE_DRM_GEM_FOPS(rockchip_drm_driver_fops);
static const struct drm_driver rockchip_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
- .lastclose = drm_fb_helper_lastclose,
.dumb_create = rockchip_gem_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index aa0909e8edf9..143a48330f84 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -43,8 +43,6 @@ struct rockchip_crtc_state {
* @mm_lock: protect drm_mm on multi-threads.
*/
struct rockchip_drm_private {
- struct drm_fb_helper fbdev_helper;
- struct drm_gem_object *fbdev_bo;
struct iommu_domain *domain;
struct mutex mm_lock;
struct drm_mm mm;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
deleted file mode 100644
index d8418dd39d0e..000000000000
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ /dev/null
@@ -1,164 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
- * Author:Mark Yao <mark.yao@rock-chips.com>
- */
-
-#include <drm/drm.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_prime.h>
-#include <drm/drm_probe_helper.h>
-
-#include "rockchip_drm_drv.h"
-#include "rockchip_drm_gem.h"
-#include "rockchip_drm_fb.h"
-#include "rockchip_drm_fbdev.h"
-
-#define PREFERRED_BPP 32
-#define to_drm_private(x) \
- container_of(x, struct rockchip_drm_private, fbdev_helper)
-
-static int rockchip_fbdev_mmap(struct fb_info *info,
- struct vm_area_struct *vma)
-{
- struct drm_fb_helper *helper = info->par;
- struct rockchip_drm_private *private = to_drm_private(helper);
-
- return drm_gem_prime_mmap(private->fbdev_bo, vma);
-}
-
-static const struct fb_ops rockchip_drm_fbdev_ops = {
- .owner = THIS_MODULE,
- DRM_FB_HELPER_DEFAULT_OPS,
- .fb_mmap = rockchip_fbdev_mmap,
- .fb_fillrect = drm_fb_helper_cfb_fillrect,
- .fb_copyarea = drm_fb_helper_cfb_copyarea,
- .fb_imageblit = drm_fb_helper_cfb_imageblit,
-};
-
-static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct rockchip_drm_private *private = to_drm_private(helper);
- struct drm_mode_fb_cmd2 mode_cmd = { 0 };
- struct drm_device *dev = helper->dev;
- struct rockchip_gem_object *rk_obj;
- struct drm_framebuffer *fb;
- unsigned int bytes_per_pixel;
- unsigned long offset;
- struct fb_info *fbi;
- size_t size;
- int ret;
-
- bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
- mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
-
- size = mode_cmd.pitches[0] * mode_cmd.height;
-
- rk_obj = rockchip_gem_create_object(dev, size, true);
- if (IS_ERR(rk_obj))
- return -ENOMEM;
-
- private->fbdev_bo = &rk_obj->base;
-
- fbi = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(fbi)) {
- DRM_DEV_ERROR(dev->dev, "Failed to create framebuffer info.\n");
- ret = PTR_ERR(fbi);
- goto out;
- }
-
- helper->fb = rockchip_drm_framebuffer_init(dev, &mode_cmd,
- private->fbdev_bo);
- if (IS_ERR(helper->fb)) {
- DRM_DEV_ERROR(dev->dev,
- "Failed to allocate DRM framebuffer.\n");
- ret = PTR_ERR(helper->fb);
- goto out;
- }
-
- fbi->fbops = &rockchip_drm_fbdev_ops;
-
- fb = helper->fb;
- drm_fb_helper_fill_info(fbi, helper, sizes);
-
- offset = fbi->var.xoffset * bytes_per_pixel;
- offset += fbi->var.yoffset * fb->pitches[0];
-
- dev->mode_config.fb_base = 0;
- fbi->screen_base = rk_obj->kvaddr + offset;
- fbi->screen_size = rk_obj->base.size;
- fbi->fix.smem_len = rk_obj->base.size;
-
- DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%zu\n",
- fb->width, fb->height, fb->format->depth,
- rk_obj->kvaddr,
- offset, size);
-
- return 0;
-
-out:
- rockchip_gem_free_object(&rk_obj->base);
- return ret;
-}
-
-static const struct drm_fb_helper_funcs rockchip_drm_fb_helper_funcs = {
- .fb_probe = rockchip_drm_fbdev_create,
-};
-
-int rockchip_drm_fbdev_init(struct drm_device *dev)
-{
- struct rockchip_drm_private *private = dev->dev_private;
- struct drm_fb_helper *helper;
- int ret;
-
- if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
- return -EINVAL;
-
- helper = &private->fbdev_helper;
-
- drm_fb_helper_prepare(dev, helper, &rockchip_drm_fb_helper_funcs);
-
- ret = drm_fb_helper_init(dev, helper);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev,
- "Failed to initialize drm fb helper - %d.\n",
- ret);
- return ret;
- }
-
- ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev,
- "Failed to set initial hw config - %d.\n",
- ret);
- goto err_drm_fb_helper_fini;
- }
-
- return 0;
-
-err_drm_fb_helper_fini:
- drm_fb_helper_fini(helper);
- return ret;
-}
-
-void rockchip_drm_fbdev_fini(struct drm_device *dev)
-{
- struct rockchip_drm_private *private = dev->dev_private;
- struct drm_fb_helper *helper;
-
- helper = &private->fbdev_helper;
-
- drm_fb_helper_unregister_fbi(helper);
-
- if (helper->fb)
- drm_framebuffer_put(helper->fb);
-
- drm_fb_helper_fini(helper);
-}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h
deleted file mode 100644
index 5fb7ac2371a8..000000000000
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
- * Author:Mark Yao <mark.yao@rock-chips.com>
- */
-
-#ifndef _ROCKCHIP_DRM_FBDEV_H
-#define _ROCKCHIP_DRM_FBDEV_H
-
-#ifdef CONFIG_DRM_FBDEV_EMULATION
-int rockchip_drm_fbdev_init(struct drm_device *dev);
-void rockchip_drm_fbdev_fini(struct drm_device *dev);
-#else
-static inline int rockchip_drm_fbdev_init(struct drm_device *dev)
-{
- return 0;
-}
-
-static inline void rockchip_drm_fbdev_fini(struct drm_device *dev)
-{
-}
-#endif
-
-#endif /* _ROCKCHIP_DRM_FBDEV_H */
diff --git a/drivers/gpu/drm/selftests/test-drm_plane_helper.c b/drivers/gpu/drm/selftests/test-drm_plane_helper.c
index 0a9553f51796..ceebeede55ea 100644
--- a/drivers/gpu/drm/selftests/test-drm_plane_helper.c
+++ b/drivers/gpu/drm/selftests/test-drm_plane_helper.c
@@ -87,11 +87,15 @@ int igt_check_plane_state(void *ignored)
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
},
};
+ struct drm_plane plane = {
+ .dev = NULL
+ };
struct drm_framebuffer fb = {
.width = 2048,
.height = 2048
};
struct drm_plane_state plane_state = {
+ .plane = &plane,
.crtc = ZERO_SIZE_PTR,
.fb = &fb,
.rotation = DRM_MODE_ROTATE_0
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index e2a6c82c8252..288b838a904a 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -5,7 +5,6 @@ config DRM_SHMOBILE
depends on ARCH_SHMOBILE || COMPILE_TEST
select BACKLIGHT_CLASS_DEVICE
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
help
Choose this option if you have an SH Mobile chipset.
diff --git a/drivers/gpu/drm/sprd/Kconfig b/drivers/gpu/drm/sprd/Kconfig
new file mode 100644
index 000000000000..3edeaeca0e65
--- /dev/null
+++ b/drivers/gpu/drm/sprd/Kconfig
@@ -0,0 +1,13 @@
+config DRM_SPRD
+ tristate "DRM Support for Unisoc SoCs Platform"
+ depends on ARCH_SPRD || COMPILE_TEST
+ depends on DRM && OF
+ select DRM_GEM_CMA_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select VIDEOMODE_HELPERS
+ help
+ Choose this option if you have a Unisoc chipset.
+ If M is selected the module will be called sprd_drm.
+
diff --git a/drivers/gpu/drm/sprd/Makefile b/drivers/gpu/drm/sprd/Makefile
new file mode 100644
index 000000000000..e82e6a6f89de
--- /dev/null
+++ b/drivers/gpu/drm/sprd/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+
+sprd-drm-y := sprd_drm.o \
+ sprd_dpu.o \
+ sprd_dsi.o \
+ megacores_pll.o
+
+obj-$(CONFIG_DRM_SPRD) += sprd-drm.o
\ No newline at end of file
diff --git a/drivers/gpu/drm/sprd/megacores_pll.c b/drivers/gpu/drm/sprd/megacores_pll.c
new file mode 100644
index 000000000000..3091dfdc11e3
--- /dev/null
+++ b/drivers/gpu/drm/sprd/megacores_pll.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Unisoc Inc.
+ */
+
+#include <asm/div64.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+
+#include "sprd_dsi.h"
+
+#define L 0
+#define H 1
+#define CLK 0
+#define DATA 1
+#define INFINITY 0xffffffff
+#define MIN_OUTPUT_FREQ (100)
+
+#define AVERAGE(a, b) (min(a, b) + abs((b) - (a)) / 2)
+
+/* sharkle */
+#define VCO_BAND_LOW 750
+#define VCO_BAND_MID 1100
+#define VCO_BAND_HIGH 1500
+#define PHY_REF_CLK 26000
+
+static int dphy_calc_pll_param(struct dphy_pll *pll)
+{
+ const u32 khz = 1000;
+ const u32 mhz = 1000000;
+ const unsigned long long factor = 100;
+ unsigned long long tmp;
+ int i;
+
+ pll->potential_fvco = pll->freq / khz;
+ pll->ref_clk = PHY_REF_CLK / khz;
+
+ for (i = 0; i < 4; ++i) {
+ if (pll->potential_fvco >= VCO_BAND_LOW &&
+ pll->potential_fvco <= VCO_BAND_HIGH) {
+ pll->fvco = pll->potential_fvco;
+ pll->out_sel = BIT(i);
+ break;
+ }
+ pll->potential_fvco <<= 1;
+ }
+ if (pll->fvco == 0)
+ return -EINVAL;
+
+ if (pll->fvco >= VCO_BAND_LOW && pll->fvco <= VCO_BAND_MID) {
+ /* vco band control */
+ pll->vco_band = 0x0;
+ /* low pass filter control */
+ pll->lpf_sel = 1;
+ } else if (pll->fvco > VCO_BAND_MID && pll->fvco <= VCO_BAND_HIGH) {
+ pll->vco_band = 0x1;
+ pll->lpf_sel = 0;
+ } else {
+ return -EINVAL;
+ }
+
+ pll->nint = pll->fvco / pll->ref_clk;
+ tmp = pll->fvco * factor * mhz;
+ do_div(tmp, pll->ref_clk);
+ tmp = tmp - pll->nint * factor * mhz;
+ tmp *= BIT(20);
+ do_div(tmp, 100000000);
+ pll->kint = (u32)tmp;
+ pll->refin = 3; /* pre-divider bypass */
+ pll->sdm_en = true; /* use fraction N PLL */
+ pll->fdk_s = 0x1; /* fraction */
+ pll->cp_s = 0x0;
+ pll->det_delay = 0x1;
+
+ return 0;
+}
+
+static void dphy_set_pll_reg(struct dphy_pll *pll, struct regmap *regmap)
+{
+ u8 reg_val[9] = {0};
+ int i;
+
+ u8 reg_addr[] = {
+ 0x03, 0x04, 0x06, 0x08, 0x09,
+ 0x0a, 0x0b, 0x0e, 0x0f
+ };
+
+ reg_val[0] = 1 | (1 << 1) | (pll->lpf_sel << 2);
+ reg_val[1] = pll->div | (1 << 3) | (pll->cp_s << 5) | (pll->fdk_s << 7);
+ reg_val[2] = pll->nint;
+ reg_val[3] = pll->vco_band | (pll->sdm_en << 1) | (pll->refin << 2);
+ reg_val[4] = pll->kint >> 12;
+ reg_val[5] = pll->kint >> 4;
+ reg_val[6] = pll->out_sel | ((pll->kint << 4) & 0xf0);
+ reg_val[7] = 1 << 4;
+ reg_val[8] = pll->det_delay;
+
+ for (i = 0; i < sizeof(reg_addr); ++i) {
+ regmap_write(regmap, reg_addr[i], reg_val[i]);
+ DRM_DEBUG("%02x: %02x\n", reg_addr[i], reg_val[i]);
+ }
+}
+
+int dphy_pll_config(struct dsi_context *ctx)
+{
+ struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx);
+ struct regmap *regmap = ctx->regmap;
+ struct dphy_pll *pll = &ctx->pll;
+ int ret;
+
+ pll->freq = dsi->slave->hs_rate;
+
+ /* FREQ = 26M * (NINT + KINT / 2^20) / out_sel */
+ ret = dphy_calc_pll_param(pll);
+ if (ret) {
+ drm_err(dsi->drm, "failed to calculate dphy pll parameters\n");
+ return ret;
+ }
+ dphy_set_pll_reg(pll, regmap);
+
+ return 0;
+}
+
+static void dphy_set_timing_reg(struct regmap *regmap, int type, u8 val[])
+{
+ switch (type) {
+ case REQUEST_TIME:
+ regmap_write(regmap, 0x31, val[CLK]);
+ regmap_write(regmap, 0x41, val[DATA]);
+ regmap_write(regmap, 0x51, val[DATA]);
+ regmap_write(regmap, 0x61, val[DATA]);
+ regmap_write(regmap, 0x71, val[DATA]);
+
+ regmap_write(regmap, 0x90, val[CLK]);
+ regmap_write(regmap, 0xa0, val[DATA]);
+ regmap_write(regmap, 0xb0, val[DATA]);
+ regmap_write(regmap, 0xc0, val[DATA]);
+ regmap_write(regmap, 0xd0, val[DATA]);
+ break;
+ case PREPARE_TIME:
+ regmap_write(regmap, 0x32, val[CLK]);
+ regmap_write(regmap, 0x42, val[DATA]);
+ regmap_write(regmap, 0x52, val[DATA]);
+ regmap_write(regmap, 0x62, val[DATA]);
+ regmap_write(regmap, 0x72, val[DATA]);
+
+ regmap_write(regmap, 0x91, val[CLK]);
+ regmap_write(regmap, 0xa1, val[DATA]);
+ regmap_write(regmap, 0xb1, val[DATA]);
+ regmap_write(regmap, 0xc1, val[DATA]);
+ regmap_write(regmap, 0xd1, val[DATA]);
+ break;
+ case ZERO_TIME:
+ regmap_write(regmap, 0x33, val[CLK]);
+ regmap_write(regmap, 0x43, val[DATA]);
+ regmap_write(regmap, 0x53, val[DATA]);
+ regmap_write(regmap, 0x63, val[DATA]);
+ regmap_write(regmap, 0x73, val[DATA]);
+
+ regmap_write(regmap, 0x92, val[CLK]);
+ regmap_write(regmap, 0xa2, val[DATA]);
+ regmap_write(regmap, 0xb2, val[DATA]);
+ regmap_write(regmap, 0xc2, val[DATA]);
+ regmap_write(regmap, 0xd2, val[DATA]);
+ break;
+ case TRAIL_TIME:
+ regmap_write(regmap, 0x34, val[CLK]);
+ regmap_write(regmap, 0x44, val[DATA]);
+ regmap_write(regmap, 0x54, val[DATA]);
+ regmap_write(regmap, 0x64, val[DATA]);
+ regmap_write(regmap, 0x74, val[DATA]);
+
+ regmap_write(regmap, 0x93, val[CLK]);
+ regmap_write(regmap, 0xa3, val[DATA]);
+ regmap_write(regmap, 0xb3, val[DATA]);
+ regmap_write(regmap, 0xc3, val[DATA]);
+ regmap_write(regmap, 0xd3, val[DATA]);
+ break;
+ case EXIT_TIME:
+ regmap_write(regmap, 0x36, val[CLK]);
+ regmap_write(regmap, 0x46, val[DATA]);
+ regmap_write(regmap, 0x56, val[DATA]);
+ regmap_write(regmap, 0x66, val[DATA]);
+ regmap_write(regmap, 0x76, val[DATA]);
+
+ regmap_write(regmap, 0x95, val[CLK]);
+ regmap_write(regmap, 0xa5, val[DATA]);
+ regmap_write(regmap, 0xb5, val[DATA]);
+ regmap_write(regmap, 0xc5, val[DATA]);
+ regmap_write(regmap, 0xd5, val[DATA]);
+ break;
+ case CLKPOST_TIME:
+ regmap_write(regmap, 0x35, val[CLK]);
+ regmap_write(regmap, 0x94, val[CLK]);
+ break;
+
+ /* the following just use the default values */
+ case SETTLE_TIME:
+ fallthrough;
+ case TA_GET:
+ fallthrough;
+ case TA_GO:
+ fallthrough;
+ case TA_SURE:
+ fallthrough;
+ default:
+ break;
+ }
+}
+
+void dphy_timing_config(struct dsi_context *ctx)
+{
+ struct regmap *regmap = ctx->regmap;
+ struct dphy_pll *pll = &ctx->pll;
+ const u32 factor = 2;
+ const u32 scale = 100;
+ u32 t_ui, t_byteck, t_half_byteck;
+ u32 range[2], constant;
+ u8 val[2];
+ u32 tmp = 0;
+
+ /* t_ui: 1 ui, byteck: 8 ui, half byteck: 4 ui */
+ t_ui = 1000 * scale / (pll->freq / 1000);
+ t_byteck = t_ui << 3;
+ t_half_byteck = t_ui << 2;
+ constant = t_ui << 1;
+
+ /* REQUEST_TIME: HS T-LPX: LP-01
+ * The MIPI spec defines a minimum T-LPX of 50 ns, but it should not be
+ * too small either, because BTA, LP-10, LP-00 and LP-01 are all related
+ * to T-LPX.
+ */
+ range[L] = 50 * scale;
+ range[H] = INFINITY;
+ val[CLK] = DIV_ROUND_UP(range[L] * (factor << 1), t_byteck) - 2;
+ val[DATA] = val[CLK];
+ dphy_set_timing_reg(regmap, REQUEST_TIME, val);
+
+ /* PREPARE_TIME: HS sequence: LP-00 */
+ range[L] = 38 * scale;
+ range[H] = 95 * scale;
+ tmp = AVERAGE(range[L], range[H]);
+ val[CLK] = DIV_ROUND_UP(AVERAGE(range[L], range[H]), t_half_byteck) - 1;
+ range[L] = 40 * scale + 4 * t_ui;
+ range[H] = 85 * scale + 6 * t_ui;
+ tmp |= AVERAGE(range[L], range[H]) << 16;
+ val[DATA] = DIV_ROUND_UP(AVERAGE(range[L], range[H]), t_half_byteck) - 1;
+ dphy_set_timing_reg(regmap, PREPARE_TIME, val);
+
+ /* ZERO_TIME: HS-ZERO */
+ range[L] = 300 * scale;
+ range[H] = INFINITY;
+ val[CLK] = DIV_ROUND_UP(range[L] * factor + (tmp & 0xffff)
+ - 525 * t_byteck / 100, t_byteck) - 2;
+ range[L] = 145 * scale + 10 * t_ui;
+ val[DATA] = DIV_ROUND_UP(range[L] * factor
+ + ((tmp >> 16) & 0xffff) - 525 * t_byteck / 100,
+ t_byteck) - 2;
+ dphy_set_timing_reg(regmap, ZERO_TIME, val);
+
+ /* TRAIL_TIME: HS-TRAIL */
+ range[L] = 60 * scale;
+ range[H] = INFINITY;
+ val[CLK] = DIV_ROUND_UP(range[L] * factor - constant, t_half_byteck);
+ range[L] = max(8 * t_ui, 60 * scale + 4 * t_ui);
+ val[DATA] = DIV_ROUND_UP(range[L] * 3 / 2 - constant, t_half_byteck) - 2;
+ dphy_set_timing_reg(regmap, TRAIL_TIME, val);
+
+ /* EXIT_TIME: */
+ range[L] = 100 * scale;
+ range[H] = INFINITY;
+ val[CLK] = DIV_ROUND_UP(range[L] * factor, t_byteck) - 2;
+ val[DATA] = val[CLK];
+ dphy_set_timing_reg(regmap, EXIT_TIME, val);
+
+ /* CLKPOST_TIME: */
+ range[L] = 60 * scale + 52 * t_ui;
+ range[H] = INFINITY;
+ val[CLK] = DIV_ROUND_UP(range[L] * factor, t_byteck) - 2;
+ val[DATA] = val[CLK];
+ dphy_set_timing_reg(regmap, CLKPOST_TIME, val);
+
+ /* SETTLE_TIME:
+ * This time is used by the receiver, so it can be ignored for the
+ * transmitter.
+ */
+
+ /* TA_GO:
+ * the transmitter drives the bridge state (LP-00) before releasing control;
+ * reg 0x1f default value: 0x04, which is good.
+ */
+
+ /* TA_SURE:
+ * after the LP-10 state and before the bridge state (LP-00);
+ * reg 0x20 default value: 0x01, which is good.
+ */
+
+ /* TA_GET:
+ * the receiver drives the bridge state (LP-00) before releasing control;
+ * reg 0x21 default value: 0x03, which is good.
+ */
+}
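A worked example of the PLL parameter derivation above, using hypothetical numbers not taken from the patch: for a 480 MHz lane rate (pll->freq = 480000 kHz), potential_fvco starts at 480 MHz and is doubled once to reach the 750-1500 MHz VCO band, giving fvco = 960 MHz and out_sel = BIT(1) = 2; then nint = 960 / 26 = 36 and kint = (960/26 - 36) * 2^20 ≈ 967916, so 26 MHz * (36 + 967916 / 2^20) / 2 ≈ 480 MHz, matching the FREQ formula quoted in dphy_pll_config(). With the same rate, dphy_timing_config() computes t_ui = 1000 * 100 / 480 = 208, i.e. one UI of roughly 2.08 ns scaled by 100.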
diff --git a/drivers/gpu/drm/sprd/sprd_dpu.c b/drivers/gpu/drm/sprd/sprd_dpu.c
new file mode 100644
index 000000000000..06a3414ee43a
--- /dev/null
+++ b/drivers/gpu/drm/sprd/sprd_dpu.c
@@ -0,0 +1,880 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Unisoc Inc.
+ */
+
+#include <linux/component.h>
+#include <linux/delay.h>
+#include <linux/dma-buf.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/of_irq.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "sprd_drm.h"
+#include "sprd_dpu.h"
+#include "sprd_dsi.h"
+
+/* Global control registers */
+#define REG_DPU_CTRL 0x04
+#define REG_DPU_CFG0 0x08
+#define REG_PANEL_SIZE 0x20
+#define REG_BLEND_SIZE 0x24
+#define REG_BG_COLOR 0x2C
+
+/* Layer0 control registers */
+#define REG_LAY_BASE_ADDR0 0x30
+#define REG_LAY_BASE_ADDR1 0x34
+#define REG_LAY_BASE_ADDR2 0x38
+#define REG_LAY_CTRL 0x40
+#define REG_LAY_SIZE 0x44
+#define REG_LAY_PITCH 0x48
+#define REG_LAY_POS 0x4C
+#define REG_LAY_ALPHA 0x50
+#define REG_LAY_CROP_START 0x5C
+
+/* Interrupt control registers */
+#define REG_DPU_INT_EN 0x1E0
+#define REG_DPU_INT_CLR 0x1E4
+#define REG_DPU_INT_STS 0x1E8
+
+/* DPI control registers */
+#define REG_DPI_CTRL 0x1F0
+#define REG_DPI_H_TIMING 0x1F4
+#define REG_DPI_V_TIMING 0x1F8
+
+/* MMU control registers */
+#define REG_MMU_EN 0x800
+#define REG_MMU_VPN_RANGE 0x80C
+#define REG_MMU_PPN1 0x83C
+#define REG_MMU_RANGE1 0x840
+#define REG_MMU_PPN2 0x844
+#define REG_MMU_RANGE2 0x848
+
+/* Global control bits */
+#define BIT_DPU_RUN BIT(0)
+#define BIT_DPU_STOP BIT(1)
+#define BIT_DPU_REG_UPDATE BIT(2)
+#define BIT_DPU_IF_EDPI BIT(0)
+
+/* Layer control bits */
+#define BIT_DPU_LAY_EN BIT(0)
+#define BIT_DPU_LAY_LAYER_ALPHA (0x01 << 2)
+#define BIT_DPU_LAY_COMBO_ALPHA (0x02 << 2)
+#define BIT_DPU_LAY_FORMAT_YUV422_2PLANE (0x00 << 4)
+#define BIT_DPU_LAY_FORMAT_YUV420_2PLANE (0x01 << 4)
+#define BIT_DPU_LAY_FORMAT_YUV420_3PLANE (0x02 << 4)
+#define BIT_DPU_LAY_FORMAT_ARGB8888 (0x03 << 4)
+#define BIT_DPU_LAY_FORMAT_RGB565 (0x04 << 4)
+#define BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3 (0x00 << 8)
+#define BIT_DPU_LAY_DATA_ENDIAN_B3B2B1B0 (0x01 << 8)
+#define BIT_DPU_LAY_NO_SWITCH (0x00 << 10)
+#define BIT_DPU_LAY_RB_OR_UV_SWITCH (0x01 << 10)
+#define BIT_DPU_LAY_MODE_BLEND_NORMAL (0x00 << 16)
+#define BIT_DPU_LAY_MODE_BLEND_PREMULT (0x01 << 16)
+#define BIT_DPU_LAY_ROTATION_0 (0x00 << 20)
+#define BIT_DPU_LAY_ROTATION_90 (0x01 << 20)
+#define BIT_DPU_LAY_ROTATION_180 (0x02 << 20)
+#define BIT_DPU_LAY_ROTATION_270 (0x03 << 20)
+#define BIT_DPU_LAY_ROTATION_0_M (0x04 << 20)
+#define BIT_DPU_LAY_ROTATION_90_M (0x05 << 20)
+#define BIT_DPU_LAY_ROTATION_180_M (0x06 << 20)
+#define BIT_DPU_LAY_ROTATION_270_M (0x07 << 20)
+
+/* Interrupt control & status bits */
+#define BIT_DPU_INT_DONE BIT(0)
+#define BIT_DPU_INT_TE BIT(1)
+#define BIT_DPU_INT_ERR BIT(2)
+#define BIT_DPU_INT_UPDATE_DONE BIT(4)
+#define BIT_DPU_INT_VSYNC BIT(5)
+
+/* DPI control bits */
+#define BIT_DPU_EDPI_TE_EN BIT(8)
+#define BIT_DPU_EDPI_FROM_EXTERNAL_PAD BIT(10)
+#define BIT_DPU_DPI_HALT_EN BIT(16)
+
+static const u32 layer_fmts[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_NV61,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YVU420,
+};
+
+struct sprd_plane {
+ struct drm_plane base;
+};
+
+static int dpu_wait_stop_done(struct sprd_dpu *dpu)
+{
+ struct dpu_context *ctx = &dpu->ctx;
+ int rc;
+
+ if (ctx->stopped)
+ return 0;
+
+ rc = wait_event_interruptible_timeout(ctx->wait_queue, ctx->evt_stop,
+ msecs_to_jiffies(500));
+ ctx->evt_stop = false;
+
+ ctx->stopped = true;
+
+ if (!rc) {
+ drm_err(dpu->drm, "dpu wait for stop done time out!\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int dpu_wait_update_done(struct sprd_dpu *dpu)
+{
+ struct dpu_context *ctx = &dpu->ctx;
+ int rc;
+
+ ctx->evt_update = false;
+
+ rc = wait_event_interruptible_timeout(ctx->wait_queue, ctx->evt_update,
+ msecs_to_jiffies(500));
+
+ if (!rc) {
+ drm_err(dpu->drm, "dpu wait for reg update done time out!\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static u32 drm_format_to_dpu(struct drm_framebuffer *fb)
+{
+ u32 format = 0;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_BGRA8888:
+ /* BGRA8888 -> ARGB8888 */
+ format |= BIT_DPU_LAY_DATA_ENDIAN_B3B2B1B0;
+ format |= BIT_DPU_LAY_FORMAT_ARGB8888;
+ break;
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_RGBA8888:
+ /* RGBA8888 -> ABGR8888 */
+ format |= BIT_DPU_LAY_DATA_ENDIAN_B3B2B1B0;
+ fallthrough;
+ case DRM_FORMAT_ABGR8888:
+ /* RB switch */
+ format |= BIT_DPU_LAY_RB_OR_UV_SWITCH;
+ fallthrough;
+ case DRM_FORMAT_ARGB8888:
+ format |= BIT_DPU_LAY_FORMAT_ARGB8888;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ /* RB switch */
+ format |= BIT_DPU_LAY_RB_OR_UV_SWITCH;
+ fallthrough;
+ case DRM_FORMAT_XRGB8888:
+ format |= BIT_DPU_LAY_FORMAT_ARGB8888;
+ break;
+ case DRM_FORMAT_BGR565:
+ /* RB switch */
+ format |= BIT_DPU_LAY_RB_OR_UV_SWITCH;
+ fallthrough;
+ case DRM_FORMAT_RGB565:
+ format |= BIT_DPU_LAY_FORMAT_RGB565;
+ break;
+ case DRM_FORMAT_NV12:
+ /* 2-Lane: Yuv420 */
+ format |= BIT_DPU_LAY_FORMAT_YUV420_2PLANE;
+ /* Y endian */
+ format |= BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3;
+ /* UV endian */
+ format |= BIT_DPU_LAY_NO_SWITCH;
+ break;
+ case DRM_FORMAT_NV21:
+ /* 2-Lane: Yuv420 */
+ format |= BIT_DPU_LAY_FORMAT_YUV420_2PLANE;
+ /* Y endian */
+ format |= BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3;
+ /* UV endian */
+ format |= BIT_DPU_LAY_RB_OR_UV_SWITCH;
+ break;
+ case DRM_FORMAT_NV16:
+ /* 2-Lane: Yuv422 */
+ format |= BIT_DPU_LAY_FORMAT_YUV422_2PLANE;
+ /* Y endian */
+ format |= BIT_DPU_LAY_DATA_ENDIAN_B3B2B1B0;
+ /* UV endian */
+ format |= BIT_DPU_LAY_RB_OR_UV_SWITCH;
+ break;
+ case DRM_FORMAT_NV61:
+ /* 2-Lane: Yuv422 */
+ format |= BIT_DPU_LAY_FORMAT_YUV422_2PLANE;
+ /* Y endian */
+ format |= BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3;
+ /* UV endian */
+ format |= BIT_DPU_LAY_NO_SWITCH;
+ break;
+ case DRM_FORMAT_YUV420:
+ format |= BIT_DPU_LAY_FORMAT_YUV420_3PLANE;
+ /* Y endian */
+ format |= BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3;
+ /* UV endian */
+ format |= BIT_DPU_LAY_NO_SWITCH;
+ break;
+ case DRM_FORMAT_YVU420:
+ format |= BIT_DPU_LAY_FORMAT_YUV420_3PLANE;
+ /* Y endian */
+ format |= BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3;
+ /* UV endian */
+ format |= BIT_DPU_LAY_RB_OR_UV_SWITCH;
+ break;
+ default:
+ break;
+ }
+
+ return format;
+}
+
+static u32 drm_rotation_to_dpu(struct drm_plane_state *state)
+{
+ u32 rotation = 0;
+
+ switch (state->rotation) {
+ default:
+ case DRM_MODE_ROTATE_0:
+ rotation = BIT_DPU_LAY_ROTATION_0;
+ break;
+ case DRM_MODE_ROTATE_90:
+ rotation = BIT_DPU_LAY_ROTATION_90;
+ break;
+ case DRM_MODE_ROTATE_180:
+ rotation = BIT_DPU_LAY_ROTATION_180;
+ break;
+ case DRM_MODE_ROTATE_270:
+ rotation = BIT_DPU_LAY_ROTATION_270;
+ break;
+ case DRM_MODE_REFLECT_Y:
+ rotation = BIT_DPU_LAY_ROTATION_180_M;
+ break;
+ case (DRM_MODE_REFLECT_Y | DRM_MODE_ROTATE_90):
+ rotation = BIT_DPU_LAY_ROTATION_90_M;
+ break;
+ case DRM_MODE_REFLECT_X:
+ rotation = BIT_DPU_LAY_ROTATION_0_M;
+ break;
+ case (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90):
+ rotation = BIT_DPU_LAY_ROTATION_270_M;
+ break;
+ }
+
+ return rotation;
+}
+
+static u32 drm_blend_to_dpu(struct drm_plane_state *state)
+{
+ u32 blend = 0;
+
+ switch (state->pixel_blend_mode) {
+ case DRM_MODE_BLEND_COVERAGE:
+ /* alpha mode select - combo alpha */
+ blend |= BIT_DPU_LAY_COMBO_ALPHA;
+ /* Normal mode */
+ blend |= BIT_DPU_LAY_MODE_BLEND_NORMAL;
+ break;
+ case DRM_MODE_BLEND_PREMULTI:
+ /* alpha mode select - combo alpha */
+ blend |= BIT_DPU_LAY_COMBO_ALPHA;
+ /* Pre-mult mode */
+ blend |= BIT_DPU_LAY_MODE_BLEND_PREMULT;
+ break;
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ default:
+ /* don't do blending, maybe RGBX */
+ /* alpha mode select - layer alpha */
+ blend |= BIT_DPU_LAY_LAYER_ALPHA;
+ break;
+ }
+
+ return blend;
+}
+
+static void sprd_dpu_layer(struct sprd_dpu *dpu, struct drm_plane_state *state)
+{
+ struct dpu_context *ctx = &dpu->ctx;
+ struct drm_gem_cma_object *cma_obj;
+ struct drm_framebuffer *fb = state->fb;
+ u32 addr, size, offset, pitch, blend, format, rotation;
+ u32 src_x = state->src_x >> 16;
+ u32 src_y = state->src_y >> 16;
+ u32 src_w = state->src_w >> 16;
+ u32 src_h = state->src_h >> 16;
+ u32 dst_x = state->crtc_x;
+ u32 dst_y = state->crtc_y;
+ u32 alpha = state->alpha;
+ u32 index = state->zpos;
+ int i;
+
+ offset = (dst_x & 0xffff) | (dst_y << 16);
+ size = (src_w & 0xffff) | (src_h << 16);
+
+ for (i = 0; i < fb->format->num_planes; i++) {
+ cma_obj = drm_fb_cma_get_gem_obj(fb, i);
+ addr = cma_obj->paddr + fb->offsets[i];
+
+ if (i == 0)
+ layer_reg_wr(ctx, REG_LAY_BASE_ADDR0, addr, index);
+ else if (i == 1)
+ layer_reg_wr(ctx, REG_LAY_BASE_ADDR1, addr, index);
+ else
+ layer_reg_wr(ctx, REG_LAY_BASE_ADDR2, addr, index);
+ }
+
+ if (fb->format->num_planes == 3) {
+ /* UV pitch is 1/2 of Y pitch */
+ pitch = (fb->pitches[0] / fb->format->cpp[0]) |
+ (fb->pitches[0] / fb->format->cpp[0] << 15);
+ } else {
+ pitch = fb->pitches[0] / fb->format->cpp[0];
+ }
+
+ layer_reg_wr(ctx, REG_LAY_POS, offset, index);
+ layer_reg_wr(ctx, REG_LAY_SIZE, size, index);
+ layer_reg_wr(ctx, REG_LAY_CROP_START,
+ src_y << 16 | src_x, index);
+ layer_reg_wr(ctx, REG_LAY_ALPHA, alpha, index);
+ layer_reg_wr(ctx, REG_LAY_PITCH, pitch, index);
+
+ format = drm_format_to_dpu(fb);
+ blend = drm_blend_to_dpu(state);
+ rotation = drm_rotation_to_dpu(state);
+
+ layer_reg_wr(ctx, REG_LAY_CTRL, BIT_DPU_LAY_EN |
+ format |
+ blend |
+ rotation,
+ index);
+}
+
+static void sprd_dpu_flip(struct sprd_dpu *dpu)
+{
+ struct dpu_context *ctx = &dpu->ctx;
+
+ /*
+ * Make sure the dpu is in stop status. DPU has no shadow
+ * registers in EDPI mode. So the config registers can only be
+ * updated in the rising edge of DPU_RUN bit.
+ */
+ if (ctx->if_type == SPRD_DPU_IF_EDPI)
+ dpu_wait_stop_done(dpu);
+
+ /* update trigger and wait */
+ if (ctx->if_type == SPRD_DPU_IF_DPI) {
+ if (!ctx->stopped) {
+ dpu_reg_set(ctx, REG_DPU_CTRL, BIT_DPU_REG_UPDATE);
+ dpu_wait_update_done(dpu);
+ }
+
+ dpu_reg_set(ctx, REG_DPU_INT_EN, BIT_DPU_INT_ERR);
+ } else if (ctx->if_type == SPRD_DPU_IF_EDPI) {
+ dpu_reg_set(ctx, REG_DPU_CTRL, BIT_DPU_RUN);
+
+ ctx->stopped = false;
+ }
+}
+
+static void sprd_dpu_init(struct sprd_dpu *dpu)
+{
+ struct dpu_context *ctx = &dpu->ctx;
+ u32 int_mask = 0;
+
+ writel(0x00, ctx->base + REG_BG_COLOR);
+ writel(0x00, ctx->base + REG_MMU_EN);
+ writel(0x00, ctx->base + REG_MMU_PPN1);
+ writel(0xffff, ctx->base + REG_MMU_RANGE1);
+ writel(0x00, ctx->base + REG_MMU_PPN2);
+ writel(0xffff, ctx->base + REG_MMU_RANGE2);
+ writel(0x1ffff, ctx->base + REG_MMU_VPN_RANGE);
+
+ if (ctx->if_type == SPRD_DPU_IF_DPI) {
+ /* use dpi as interface */
+ dpu_reg_clr(ctx, REG_DPU_CFG0, BIT_DPU_IF_EDPI);
+ /* disable Halt function for SPRD DSI */
+ dpu_reg_clr(ctx, REG_DPI_CTRL, BIT_DPU_DPI_HALT_EN);
+ /* select te from external pad */
+ dpu_reg_set(ctx, REG_DPI_CTRL, BIT_DPU_EDPI_FROM_EXTERNAL_PAD);
+
+ /* enable dpu update done INT */
+ int_mask |= BIT_DPU_INT_UPDATE_DONE;
+ /* enable dpu done INT */
+ int_mask |= BIT_DPU_INT_DONE;
+ /* enable dpu dpi vsync */
+ int_mask |= BIT_DPU_INT_VSYNC;
+ /* enable dpu TE INT */
+ int_mask |= BIT_DPU_INT_TE;
+ /* enable underflow err INT */
+ int_mask |= BIT_DPU_INT_ERR;
+ } else if (ctx->if_type == SPRD_DPU_IF_EDPI) {
+ /* use edpi as interface */
+ dpu_reg_set(ctx, REG_DPU_CFG0, BIT_DPU_IF_EDPI);
+ /* use external te */
+ dpu_reg_set(ctx, REG_DPI_CTRL, BIT_DPU_EDPI_FROM_EXTERNAL_PAD);
+ /* enable te */
+ dpu_reg_set(ctx, REG_DPI_CTRL, BIT_DPU_EDPI_TE_EN);
+
+ /* enable stop done INT */
+ int_mask |= BIT_DPU_INT_DONE;
+ /* enable TE INT */
+ int_mask |= BIT_DPU_INT_TE;
+ }
+
+ writel(int_mask, ctx->base + REG_DPU_INT_EN);
+}
+
+static void sprd_dpu_fini(struct sprd_dpu *dpu)
+{
+ struct dpu_context *ctx = &dpu->ctx;
+
+ writel(0x00, ctx->base + REG_DPU_INT_EN);
+ writel(0xff, ctx->base + REG_DPU_INT_CLR);
+}
+
+static void sprd_dpi_init(struct sprd_dpu *dpu)
+{
+ struct dpu_context *ctx = &dpu->ctx;
+ u32 reg_val;
+ u32 size;
+
+ size = (ctx->vm.vactive << 16) | ctx->vm.hactive;
+ writel(size, ctx->base + REG_PANEL_SIZE);
+ writel(size, ctx->base + REG_BLEND_SIZE);
+
+ if (ctx->if_type == SPRD_DPU_IF_DPI) {
+ /* set dpi timing */
+ reg_val = ctx->vm.hsync_len << 0 |
+ ctx->vm.hback_porch << 8 |
+ ctx->vm.hfront_porch << 20;
+ writel(reg_val, ctx->base + REG_DPI_H_TIMING);
+
+ reg_val = ctx->vm.vsync_len << 0 |
+ ctx->vm.vback_porch << 8 |
+ ctx->vm.vfront_porch << 20;
+ writel(reg_val, ctx->base + REG_DPI_V_TIMING);
+ }
+}
+
+void sprd_dpu_run(struct sprd_dpu *dpu)
+{
+ struct dpu_context *ctx = &dpu->ctx;
+
+ dpu_reg_set(ctx, REG_DPU_CTRL, BIT_DPU_RUN);
+
+ ctx->stopped = false;
+}
+
+void sprd_dpu_stop(struct sprd_dpu *dpu)
+{
+ struct dpu_context *ctx = &dpu->ctx;
+
+ if (ctx->if_type == SPRD_DPU_IF_DPI)
+ dpu_reg_set(ctx, REG_DPU_CTRL, BIT_DPU_STOP);
+
+ dpu_wait_stop_done(dpu);
+}
+
+static int sprd_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_crtc_state *crtc_state;
+ u32 fmt;
+
+ if (!plane_state->fb || !plane_state->crtc)
+ return 0;
+
+ fmt = drm_format_to_dpu(plane_state->fb);
+ if (!fmt)
+ return -EINVAL;
+
+ crtc_state = drm_atomic_get_crtc_state(plane_state->state, plane_state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ return drm_atomic_helper_check_plane_state(plane_state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
+}
+
+static void sprd_plane_atomic_update(struct drm_plane *drm_plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ drm_plane);
+ struct sprd_dpu *dpu = to_sprd_crtc(new_state->crtc);
+
+ /* start configure dpu layers */
+ sprd_dpu_layer(dpu, new_state);
+}
+
+static void sprd_plane_atomic_disable(struct drm_plane *drm_plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ drm_plane);
+ struct sprd_dpu *dpu = to_sprd_crtc(old_state->crtc);
+
+ layer_reg_wr(&dpu->ctx, REG_LAY_CTRL, 0x00, old_state->zpos);
+}
+
+static void sprd_plane_create_properties(struct sprd_plane *plane, int index)
+{
+ unsigned int supported_modes = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE);
+
+ /* create rotation property */
+ drm_plane_create_rotation_property(&plane->base,
+ DRM_MODE_ROTATE_0,
+ DRM_MODE_ROTATE_MASK |
+ DRM_MODE_REFLECT_MASK);
+
+ /* create alpha property */
+ drm_plane_create_alpha_property(&plane->base);
+
+ /* create blend mode property */
+ drm_plane_create_blend_mode_property(&plane->base, supported_modes);
+
+ /* create zpos property */
+ drm_plane_create_zpos_immutable_property(&plane->base, index);
+}
+
+static const struct drm_plane_helper_funcs sprd_plane_helper_funcs = {
+ .atomic_check = sprd_plane_atomic_check,
+ .atomic_update = sprd_plane_atomic_update,
+ .atomic_disable = sprd_plane_atomic_disable,
+};
+
+static const struct drm_plane_funcs sprd_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static struct sprd_plane *sprd_planes_init(struct drm_device *drm)
+{
+ struct sprd_plane *plane, *primary;
+ enum drm_plane_type plane_type;
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ plane_type = (i == 0) ? DRM_PLANE_TYPE_PRIMARY :
+ DRM_PLANE_TYPE_OVERLAY;
+
+ plane = drmm_universal_plane_alloc(drm, struct sprd_plane, base,
+ 1, &sprd_plane_funcs,
+ layer_fmts, ARRAY_SIZE(layer_fmts),
+ NULL, plane_type, NULL);
+ if (IS_ERR(plane)) {
+ drm_err(drm, "failed to init drm plane: %d\n", i);
+ return plane;
+ }
+
+ drm_plane_helper_add(&plane->base, &sprd_plane_helper_funcs);
+
+ sprd_plane_create_properties(plane, i);
+
+ if (i == 0)
+ primary = plane;
+ }
+
+ return primary;
+}
+
+static void sprd_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ struct sprd_dpu *dpu = to_sprd_crtc(crtc);
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ struct drm_encoder *encoder;
+ struct sprd_dsi *dsi;
+
+ drm_display_mode_to_videomode(mode, &dpu->ctx.vm);
+
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc->state->encoder_mask) {
+ dsi = encoder_to_dsi(encoder);
+
+ if (dsi->slave->mode_flags & MIPI_DSI_MODE_VIDEO)
+ dpu->ctx.if_type = SPRD_DPU_IF_DPI;
+ else
+ dpu->ctx.if_type = SPRD_DPU_IF_EDPI;
+ }
+
+ sprd_dpi_init(dpu);
+}
+
+static void sprd_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct sprd_dpu *dpu = to_sprd_crtc(crtc);
+
+ sprd_dpu_init(dpu);
+
+ drm_crtc_vblank_on(&dpu->base);
+}
+
+static void sprd_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct sprd_dpu *dpu = to_sprd_crtc(crtc);
+ struct drm_device *drm = dpu->base.dev;
+
+ drm_crtc_vblank_off(&dpu->base);
+
+ sprd_dpu_fini(dpu);
+
+ spin_lock_irq(&drm->event_lock);
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&drm->event_lock);
+}
+
+static void sprd_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+
+{
+ struct sprd_dpu *dpu = to_sprd_crtc(crtc);
+ struct drm_device *drm = dpu->base.dev;
+
+ sprd_dpu_flip(dpu);
+
+ spin_lock_irq(&drm->event_lock);
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&drm->event_lock);
+}
+
+static int sprd_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct sprd_dpu *dpu = to_sprd_crtc(crtc);
+
+ dpu_reg_set(&dpu->ctx, REG_DPU_INT_EN, BIT_DPU_INT_VSYNC);
+
+ return 0;
+}
+
+static void sprd_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct sprd_dpu *dpu = to_sprd_crtc(crtc);
+
+ dpu_reg_clr(&dpu->ctx, REG_DPU_INT_EN, BIT_DPU_INT_VSYNC);
+}
+
+static const struct drm_crtc_helper_funcs sprd_crtc_helper_funcs = {
+ .mode_set_nofb = sprd_crtc_mode_set_nofb,
+ .atomic_flush = sprd_crtc_atomic_flush,
+ .atomic_enable = sprd_crtc_atomic_enable,
+ .atomic_disable = sprd_crtc_atomic_disable,
+};
+
+static const struct drm_crtc_funcs sprd_crtc_funcs = {
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = sprd_crtc_enable_vblank,
+ .disable_vblank = sprd_crtc_disable_vblank,
+};
+
+static struct sprd_dpu *sprd_crtc_init(struct drm_device *drm,
+ struct drm_plane *primary, struct device *dev)
+{
+ struct device_node *port;
+ struct sprd_dpu *dpu;
+
+ dpu = drmm_crtc_alloc_with_planes(drm, struct sprd_dpu, base,
+ primary, NULL,
+ &sprd_crtc_funcs, NULL);
+ if (IS_ERR(dpu)) {
+ drm_err(drm, "failed to init crtc\n");
+ return dpu;
+ }
+ drm_crtc_helper_add(&dpu->base, &sprd_crtc_helper_funcs);
+
+ /*
+ * set crtc port so that drm_of_find_possible_crtcs call works
+ */
+ port = of_graph_get_port_by_id(dev->of_node, 0);
+ if (!port) {
+ drm_err(drm, "failed to found crtc output port for %s\n",
+ dev->of_node->full_name);
+ return ERR_PTR(-EINVAL);
+ }
+ dpu->base.port = port;
+ of_node_put(port);
+
+ return dpu;
+}
+
+static irqreturn_t sprd_dpu_isr(int irq, void *data)
+{
+ struct sprd_dpu *dpu = data;
+ struct dpu_context *ctx = &dpu->ctx;
+ u32 reg_val, int_mask = 0;
+
+ reg_val = readl(ctx->base + REG_DPU_INT_STS);
+
+ /* disable err interrupt */
+ if (reg_val & BIT_DPU_INT_ERR) {
+ int_mask |= BIT_DPU_INT_ERR;
+ drm_warn(dpu->drm, "Warning: dpu underflow!\n");
+ }
+
+ /* dpu update done isr */
+ if (reg_val & BIT_DPU_INT_UPDATE_DONE) {
+ ctx->evt_update = true;
+ wake_up_interruptible_all(&ctx->wait_queue);
+ }
+
+ /* dpu stop done isr */
+ if (reg_val & BIT_DPU_INT_DONE) {
+ ctx->evt_stop = true;
+ wake_up_interruptible_all(&ctx->wait_queue);
+ }
+
+ if (reg_val & BIT_DPU_INT_VSYNC)
+ drm_crtc_handle_vblank(&dpu->base);
+
+ writel(reg_val, ctx->base + REG_DPU_INT_CLR);
+ dpu_reg_clr(ctx, REG_DPU_INT_EN, int_mask);
+
+ return IRQ_HANDLED;
+}
+
+static int sprd_dpu_context_init(struct sprd_dpu *dpu,
+ struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dpu_context *ctx = &dpu->ctx;
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctx->base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!ctx->base) {
+ dev_err(dev, "failed to map dpu registers\n");
+ return -EFAULT;
+ }
+
+ ctx->irq = platform_get_irq(pdev, 0);
+ if (ctx->irq < 0) {
+ dev_err(dev, "failed to get dpu irq\n");
+ return ctx->irq;
+ }
+
+ /* disable and clear interrupts before registering the dpu IRQ. */
+ writel(0x00, ctx->base + REG_DPU_INT_EN);
+ writel(0xff, ctx->base + REG_DPU_INT_CLR);
+
+ ret = devm_request_irq(dev, ctx->irq, sprd_dpu_isr,
+ IRQF_TRIGGER_NONE, "DPU", dpu);
+ if (ret) {
+ dev_err(dev, "failed to register dpu irq handler\n");
+ return ret;
+ }
+
+ init_waitqueue_head(&ctx->wait_queue);
+
+ return 0;
+}
+
+static int sprd_dpu_bind(struct device *dev, struct device *master, void *data)
+{
+ struct drm_device *drm = data;
+ struct sprd_dpu *dpu;
+ struct sprd_plane *plane;
+ int ret;
+
+ plane = sprd_planes_init(drm);
+ if (IS_ERR(plane))
+ return PTR_ERR(plane);
+
+ dpu = sprd_crtc_init(drm, &plane->base, dev);
+ if (IS_ERR(dpu))
+ return PTR_ERR(dpu);
+
+ dpu->drm = drm;
+ dev_set_drvdata(dev, dpu);
+
+ ret = sprd_dpu_context_init(dpu, dev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct component_ops dpu_component_ops = {
+ .bind = sprd_dpu_bind,
+};
+
+static const struct of_device_id dpu_match_table[] = {
+ { .compatible = "sprd,sharkl3-dpu" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, dpu_match_table);
+
+static int sprd_dpu_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &dpu_component_ops);
+}
+
+static int sprd_dpu_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dpu_component_ops);
+
+ return 0;
+}
+
+struct platform_driver sprd_dpu_driver = {
+ .probe = sprd_dpu_probe,
+ .remove = sprd_dpu_remove,
+ .driver = {
+ .name = "sprd-dpu-drv",
+ .of_match_table = dpu_match_table,
+ },
+};
+
+MODULE_AUTHOR("Leon He <leon.he@unisoc.com>");
+MODULE_AUTHOR("Kevin Tang <kevin.tang@unisoc.com>");
+MODULE_DESCRIPTION("Unisoc Display Controller Driver");
+MODULE_LICENSE("GPL v2");
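A worked example of the register packing done in sprd_dpu_layer(), with a hypothetical plane: a 1280x720 source placed at (0, 40) yields REG_LAY_POS = 0 | (40 << 16) = 0x00280000 and REG_LAY_SIZE = 1280 | (720 << 16) = 0x02d00500; for an XRGB8888 framebuffer with a 5120-byte stride (cpp = 4), REG_LAY_PITCH = 5120 / 4 = 1280.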
diff --git a/drivers/gpu/drm/sprd/sprd_dpu.h b/drivers/gpu/drm/sprd/sprd_dpu.h
new file mode 100644
index 000000000000..157a78f24dc1
--- /dev/null
+++ b/drivers/gpu/drm/sprd/sprd_dpu.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Unisoc Inc.
+ */
+
+#ifndef __SPRD_DPU_H__
+#define __SPRD_DPU_H__
+
+#include <linux/bug.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <video/videomode.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
+#include <uapi/drm/drm_mode.h>
+
+/* DPU Layer registers offset */
+#define DPU_LAY_REG_OFFSET 0x30
+
+enum {
+ SPRD_DPU_IF_DPI,
+ SPRD_DPU_IF_EDPI,
+ SPRD_DPU_IF_LIMIT
+};
+
+/**
+ * Sprd DPU context structure
+ *
+ * @base: DPU controller base address
+ * @irq: IRQ number to install the handler for
+ * @if_type: The type of display interface, DPI or EDPI; the default is DPI mode.
+ * @vm: videomode structure to use for DPU and DPI initialization
+ * @stopped: indicates whether the DPU is stopped
+ * @wait_queue: wait queue used to wait for the DPU shadow register update
+ * done and DPU stop done interrupt signals.
+ * @evt_update: wait queue condition for DPU shadow register
+ * @evt_stop: wait queue condition for DPU stop register
+ */
+struct dpu_context {
+ void __iomem *base;
+ int irq;
+ u8 if_type;
+ struct videomode vm;
+ bool stopped;
+ wait_queue_head_t wait_queue;
+ bool evt_update;
+ bool evt_stop;
+};
+
+/**
+ * Sprd DPU device structure
+ *
+ * @base: crtc object
+ * @drm: A pointer to the drm device
+ * @ctx: DPU's implementation specific context object
+ */
+struct sprd_dpu {
+ struct drm_crtc base;
+ struct drm_device *drm;
+ struct dpu_context ctx;
+};
+
+static inline struct sprd_dpu *to_sprd_crtc(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct sprd_dpu, base);
+}
+
+static inline void
+dpu_reg_set(struct dpu_context *ctx, u32 offset, u32 set_bits)
+{
+ u32 bits = readl_relaxed(ctx->base + offset);
+
+ writel(bits | set_bits, ctx->base + offset);
+}
+
+static inline void
+dpu_reg_clr(struct dpu_context *ctx, u32 offset, u32 clr_bits)
+{
+ u32 bits = readl_relaxed(ctx->base + offset);
+
+ writel(bits & ~clr_bits, ctx->base + offset);
+}
+
+static inline u32
+layer_reg_rd(struct dpu_context *ctx, u32 offset, int index)
+{
+ u32 layer_offset = offset + index * DPU_LAY_REG_OFFSET;
+
+ return readl(ctx->base + layer_offset);
+}
+
+static inline void
+layer_reg_wr(struct dpu_context *ctx, u32 offset, u32 cfg_bits, int index)
+{
+ u32 layer_offset = offset + index * DPU_LAY_REG_OFFSET;
+
+ writel(cfg_bits, ctx->base + layer_offset);
+}
+
+void sprd_dpu_run(struct sprd_dpu *dpu);
+void sprd_dpu_stop(struct sprd_dpu *dpu);
+
+#endif
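The per-layer helpers above add index * DPU_LAY_REG_OFFSET (0x30) to the layer-0 register offset, so for example layer 2's control register (REG_LAY_CTRL, 0x40 in sprd_dpu.c) is accessed at 0x40 + 2 * 0x30 = 0xa0. A minimal sketch of a read-modify-write built on these helpers, with a hypothetical function name:

/* Illustrative only: set bits in a per-layer register using the helpers above. */
static inline void example_layer_reg_set(struct dpu_context *ctx, u32 offset,
					 u32 set_bits, int index)
{
	u32 bits = layer_reg_rd(ctx, offset, index);

	layer_reg_wr(ctx, offset, bits | set_bits, index);
}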
diff --git a/drivers/gpu/drm/sprd/sprd_drm.c b/drivers/gpu/drm/sprd/sprd_drm.c
new file mode 100644
index 000000000000..a077e2d4d721
--- /dev/null
+++ b/drivers/gpu/drm/sprd/sprd_drm.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Unisoc Inc.
+ */
+
+#include <linux/component.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "sprd_drm.h"
+
+#define DRIVER_NAME "sprd"
+#define DRIVER_DESC "Spreadtrum SoCs' DRM Driver"
+#define DRIVER_DATE "20200201"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+static const struct drm_mode_config_helper_funcs sprd_drm_mode_config_helper = {
+ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+};
+
+static const struct drm_mode_config_funcs sprd_drm_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void sprd_drm_mode_config_init(struct drm_device *drm)
+{
+ drm->mode_config.min_width = 0;
+ drm->mode_config.min_height = 0;
+ drm->mode_config.max_width = 8192;
+ drm->mode_config.max_height = 8192;
+ drm->mode_config.allow_fb_modifiers = true;
+
+ drm->mode_config.funcs = &sprd_drm_mode_config_funcs;
+ drm->mode_config.helper_private = &sprd_drm_mode_config_helper;
+}
+
+DEFINE_DRM_GEM_CMA_FOPS(sprd_drm_fops);
+
+static struct drm_driver sprd_drm_drv = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .fops = &sprd_drm_fops,
+
+ /* GEM Operations */
+ DRM_GEM_CMA_DRIVER_OPS,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+};
+
+static int sprd_drm_bind(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm;
+ struct sprd_drm *sprd;
+ int ret;
+
+ sprd = devm_drm_dev_alloc(dev, &sprd_drm_drv, struct sprd_drm, drm);
+ if (IS_ERR(sprd))
+ return PTR_ERR(sprd);
+
+ drm = &sprd->drm;
+ platform_set_drvdata(pdev, drm);
+
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
+
+ sprd_drm_mode_config_init(drm);
+
+ /* bind and init sub drivers */
+ ret = component_bind_all(drm->dev, drm);
+ if (ret) {
+ drm_err(drm, "failed to bind all component.\n");
+ return ret;
+ }
+
+ /* vblank init */
+ ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (ret) {
+ drm_err(drm, "failed to initialize vblank.\n");
+ goto err_unbind_all;
+ }
+
+ /* reset all the states of crtc/plane/encoder/connector */
+ drm_mode_config_reset(drm);
+
+ /* init kms poll for handling hpd */
+ drm_kms_helper_poll_init(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret < 0)
+ goto err_kms_helper_poll_fini;
+
+ return 0;
+
+err_kms_helper_poll_fini:
+ drm_kms_helper_poll_fini(drm);
+err_unbind_all:
+ component_unbind_all(drm->dev, drm);
+ return ret;
+}
+
+static void sprd_drm_unbind(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+
+ drm_dev_unregister(drm);
+
+ drm_kms_helper_poll_fini(drm);
+
+ component_unbind_all(drm->dev, drm);
+}
+
+static const struct component_master_ops drm_component_ops = {
+ .bind = sprd_drm_bind,
+ .unbind = sprd_drm_unbind,
+};
+
+static int compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int sprd_drm_probe(struct platform_device *pdev)
+{
+ return drm_of_component_probe(&pdev->dev, compare_of, &drm_component_ops);
+}
+
+static int sprd_drm_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &drm_component_ops);
+ return 0;
+}
+
+static void sprd_drm_shutdown(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+
+ if (!drm) {
+ drm_warn(drm, "drm device is not available, no shutdown\n");
+ return;
+ }
+
+ drm_atomic_helper_shutdown(drm);
+}
+
+static const struct of_device_id drm_match_table[] = {
+ { .compatible = "sprd,display-subsystem", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, drm_match_table);
+
+static struct platform_driver sprd_drm_driver = {
+ .probe = sprd_drm_probe,
+ .remove = sprd_drm_remove,
+ .shutdown = sprd_drm_shutdown,
+ .driver = {
+ .name = "sprd-drm-drv",
+ .of_match_table = drm_match_table,
+ },
+};
+
+static struct platform_driver *sprd_drm_drivers[] = {
+ &sprd_drm_driver,
+ &sprd_dpu_driver,
+ &sprd_dsi_driver,
+};
+
+static int __init sprd_drm_init(void)
+{
+ return platform_register_drivers(sprd_drm_drivers,
+ ARRAY_SIZE(sprd_drm_drivers));
+}
+
+static void __exit sprd_drm_exit(void)
+{
+ platform_unregister_drivers(sprd_drm_drivers,
+ ARRAY_SIZE(sprd_drm_drivers));
+}
+
+module_init(sprd_drm_init);
+module_exit(sprd_drm_exit);
+
+MODULE_AUTHOR("Leon He <leon.he@unisoc.com>");
+MODULE_AUTHOR("Kevin Tang <kevin.tang@unisoc.com>");
+MODULE_DESCRIPTION("Unisoc DRM KMS Master Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/sprd/sprd_drm.h b/drivers/gpu/drm/sprd/sprd_drm.h
new file mode 100644
index 000000000000..95d1b972f333
--- /dev/null
+++ b/drivers/gpu/drm/sprd/sprd_drm.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Unisoc Inc.
+ */
+
+#ifndef _SPRD_DRM_H_
+#define _SPRD_DRM_H_
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_print.h>
+
+struct sprd_drm {
+ struct drm_device drm;
+};
+
+extern struct platform_driver sprd_dpu_driver;
+extern struct platform_driver sprd_dsi_driver;
+
+#endif /* _SPRD_DRM_H_ */
diff --git a/drivers/gpu/drm/sprd/sprd_dsi.c b/drivers/gpu/drm/sprd/sprd_dsi.c
new file mode 100644
index 000000000000..911b3cddc264
--- /dev/null
+++ b/drivers/gpu/drm/sprd/sprd_dsi.c
@@ -0,0 +1,1073 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Unisoc Inc.
+ */
+
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_graph.h>
+#include <video/mipi_display.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+
+#include "sprd_drm.h"
+#include "sprd_dpu.h"
+#include "sprd_dsi.h"
+
+#define SOFT_RESET 0x04
+#define MASK_PROTOCOL_INT 0x0C
+#define MASK_INTERNAL_INT 0x14
+#define DSI_MODE_CFG 0x18
+
+#define VIRTUAL_CHANNEL_ID 0x1C
+#define GEN_RX_VCID GENMASK(1, 0)
+#define VIDEO_PKT_VCID GENMASK(3, 2)
+
+#define DPI_VIDEO_FORMAT 0x20
+#define DPI_VIDEO_MODE_FORMAT GENMASK(5, 0)
+#define LOOSELY18_EN BIT(6)
+
+#define VIDEO_PKT_CONFIG 0x24
+#define VIDEO_PKT_SIZE GENMASK(15, 0)
+#define VIDEO_LINE_CHUNK_NUM GENMASK(31, 16)
+
+#define VIDEO_LINE_HBLK_TIME 0x28
+#define VIDEO_LINE_HBP_TIME GENMASK(15, 0)
+#define VIDEO_LINE_HSA_TIME GENMASK(31, 16)
+
+#define VIDEO_LINE_TIME 0x2C
+
+#define VIDEO_VBLK_LINES 0x30
+#define VFP_LINES GENMASK(9, 0)
+#define VBP_LINES GENMASK(19, 10)
+#define VSA_LINES GENMASK(29, 20)
+
+#define VIDEO_VACTIVE_LINES 0x34
+
+#define VID_MODE_CFG 0x38
+#define VID_MODE_TYPE GENMASK(1, 0)
+#define LP_VSA_EN BIT(8)
+#define LP_VBP_EN BIT(9)
+#define LP_VFP_EN BIT(10)
+#define LP_VACT_EN BIT(11)
+#define LP_HBP_EN BIT(12)
+#define LP_HFP_EN BIT(13)
+#define FRAME_BTA_ACK_EN BIT(14)
+
+#define TIMEOUT_CNT_CLK_CONFIG 0x40
+#define HTX_TO_CONFIG 0x44
+#define LRX_H_TO_CONFIG 0x48
+
+#define TX_ESC_CLK_CONFIG 0x5C
+
+#define CMD_MODE_CFG 0x68
+#define TEAR_FX_EN BIT(0)
+
+#define GEN_HDR 0x6C
+#define GEN_DT GENMASK(5, 0)
+#define GEN_VC GENMASK(7, 6)
+
+#define GEN_PLD_DATA 0x70
+
+#define PHY_CLK_LANE_LP_CTRL 0x74
+#define PHY_CLKLANE_TX_REQ_HS BIT(0)
+#define AUTO_CLKLANE_CTRL_EN BIT(1)
+
+#define PHY_INTERFACE_CTRL 0x78
+#define RF_PHY_SHUTDOWN BIT(0)
+#define RF_PHY_RESET_N BIT(1)
+#define RF_PHY_CLK_EN BIT(2)
+
+#define CMD_MODE_STATUS 0x98
+#define GEN_CMD_RDATA_FIFO_EMPTY BIT(1)
+#define GEN_CMD_WDATA_FIFO_EMPTY BIT(3)
+#define GEN_CMD_CMD_FIFO_EMPTY BIT(5)
+#define GEN_CMD_RDCMD_DONE BIT(7)
+
+#define PHY_STATUS 0x9C
+#define PHY_LOCK BIT(1)
+
+#define PHY_MIN_STOP_TIME 0xA0
+#define PHY_LANE_NUM_CONFIG 0xA4
+
+#define PHY_CLKLANE_TIME_CONFIG 0xA8
+#define PHY_CLKLANE_LP_TO_HS_TIME GENMASK(15, 0)
+#define PHY_CLKLANE_HS_TO_LP_TIME GENMASK(31, 16)
+
+#define PHY_DATALANE_TIME_CONFIG 0xAC
+#define PHY_DATALANE_LP_TO_HS_TIME GENMASK(15, 0)
+#define PHY_DATALANE_HS_TO_LP_TIME GENMASK(31, 16)
+
+#define MAX_READ_TIME 0xB0
+
+#define RX_PKT_CHECK_CONFIG 0xB4
+#define RX_PKT_ECC_EN BIT(0)
+#define RX_PKT_CRC_EN BIT(1)
+
+#define TA_EN 0xB8
+
+#define EOTP_EN 0xBC
+#define TX_EOTP_EN BIT(0)
+#define RX_EOTP_EN BIT(1)
+
+#define VIDEO_NULLPKT_SIZE 0xC0
+#define DCS_WM_PKT_SIZE 0xC4
+
+#define VIDEO_SIG_DELAY_CONFIG 0xD0
+#define VIDEO_SIG_DELAY GENMASK(23, 0)
+
+#define PHY_TST_CTRL0 0xF0
+#define PHY_TESTCLR BIT(0)
+#define PHY_TESTCLK BIT(1)
+
+#define PHY_TST_CTRL1 0xF4
+#define PHY_TESTDIN GENMASK(7, 0)
+#define PHY_TESTDOUT GENMASK(15, 8)
+#define PHY_TESTEN BIT(16)
+
+#define host_to_dsi(host) \
+ container_of(host, struct sprd_dsi, host)
+
+static inline u32
+dsi_reg_rd(struct dsi_context *ctx, u32 offset, u32 mask,
+ u32 shift)
+{
+ return (readl(ctx->base + offset) & mask) >> shift;
+}
+
+static inline void
+dsi_reg_wr(struct dsi_context *ctx, u32 offset, u32 mask,
+ u32 shift, u32 val)
+{
+ u32 ret;
+
+ ret = readl(ctx->base + offset);
+ ret &= ~mask;
+ ret |= (val << shift) & mask;
+ writel(ret, ctx->base + offset);
+}
+
+static inline void
+dsi_reg_up(struct dsi_context *ctx, u32 offset, u32 mask,
+ u32 val)
+{
+ u32 ret = readl(ctx->base + offset);
+
+ writel((ret & ~mask) | (val & mask), ctx->base + offset);
+}
+
+static int regmap_tst_io_write(void *context, u32 reg, u32 val)
+{
+ struct sprd_dsi *dsi = context;
+ struct dsi_context *ctx = &dsi->ctx;
+
+ if (val > 0xff || reg > 0xff)
+ return -EINVAL;
+
+ drm_dbg(dsi->drm, "reg = 0x%02x, val = 0x%02x\n", reg, val);
+
+ dsi_reg_up(ctx, PHY_TST_CTRL1, PHY_TESTEN, PHY_TESTEN);
+ dsi_reg_wr(ctx, PHY_TST_CTRL1, PHY_TESTDIN, 0, reg);
+ dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, PHY_TESTCLK);
+ dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, 0);
+ dsi_reg_up(ctx, PHY_TST_CTRL1, PHY_TESTEN, 0);
+ dsi_reg_wr(ctx, PHY_TST_CTRL1, PHY_TESTDIN, 0, val);
+ dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, PHY_TESTCLK);
+ dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, 0);
+
+ return 0;
+}
+
+static int regmap_tst_io_read(void *context, u32 reg, u32 *val)
+{
+ struct sprd_dsi *dsi = context;
+ struct dsi_context *ctx = &dsi->ctx;
+ int ret;
+
+ if (reg > 0xff)
+ return -EINVAL;
+
+ dsi_reg_up(ctx, PHY_TST_CTRL1, PHY_TESTEN, PHY_TESTEN);
+ dsi_reg_wr(ctx, PHY_TST_CTRL1, PHY_TESTDIN, 0, reg);
+ dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, PHY_TESTCLK);
+ dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, 0);
+ dsi_reg_up(ctx, PHY_TST_CTRL1, PHY_TESTEN, 0);
+
+ udelay(1);
+
+ ret = dsi_reg_rd(ctx, PHY_TST_CTRL1, PHY_TESTDOUT, 8);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+
+ drm_dbg(dsi->drm, "reg = 0x%02x, val = 0x%02x\n", reg, *val);
+ return 0;
+}
+
+static struct regmap_bus regmap_tst_io = {
+ .reg_write = regmap_tst_io_write,
+ .reg_read = regmap_tst_io_read,
+};
+
+static const struct regmap_config byte_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int dphy_wait_pll_locked(struct dsi_context *ctx)
+{
+ struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx);
+ int i;
+
+ for (i = 0; i < 50000; i++) {
+ if (dsi_reg_rd(ctx, PHY_STATUS, PHY_LOCK, 1))
+ return 0;
+ udelay(3);
+ }
+
+ drm_err(dsi->drm, "dphy pll can not be locked\n");
+ return -ETIMEDOUT;
+}
+
+static int dsi_wait_tx_payload_fifo_empty(struct dsi_context *ctx)
+{
+ int i;
+
+ for (i = 0; i < 5000; i++) {
+ if (dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_WDATA_FIFO_EMPTY, 3))
+ return 0;
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int dsi_wait_tx_cmd_fifo_empty(struct dsi_context *ctx)
+{
+ int i;
+
+ for (i = 0; i < 5000; i++) {
+ if (dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_CMD_FIFO_EMPTY, 5))
+ return 0;
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int dsi_wait_rd_resp_completed(struct dsi_context *ctx)
+{
+ int i;
+
+ for (i = 0; i < 10000; i++) {
+ if (dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_RDCMD_DONE, 7))
+ return 0;
+ udelay(10);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static u16 calc_bytes_per_pixel_x100(int coding)
+{
+ u16 bpp_x100;
+
+ switch (coding) {
+ case COLOR_CODE_16BIT_CONFIG1:
+ case COLOR_CODE_16BIT_CONFIG2:
+ case COLOR_CODE_16BIT_CONFIG3:
+ bpp_x100 = 200;
+ break;
+ case COLOR_CODE_18BIT_CONFIG1:
+ case COLOR_CODE_18BIT_CONFIG2:
+ bpp_x100 = 225;
+ break;
+ case COLOR_CODE_24BIT:
+ bpp_x100 = 300;
+ break;
+ case COLOR_CODE_COMPRESSTION:
+ bpp_x100 = 100;
+ break;
+ case COLOR_CODE_20BIT_YCC422_LOOSELY:
+ bpp_x100 = 250;
+ break;
+ case COLOR_CODE_24BIT_YCC422:
+ bpp_x100 = 300;
+ break;
+ case COLOR_CODE_16BIT_YCC422:
+ bpp_x100 = 200;
+ break;
+ case COLOR_CODE_30BIT:
+ bpp_x100 = 375;
+ break;
+ case COLOR_CODE_36BIT:
+ bpp_x100 = 450;
+ break;
+ case COLOR_CODE_12BIT_YCC420:
+ bpp_x100 = 150;
+ break;
+ default:
+ DRM_ERROR("invalid color coding");
+ bpp_x100 = 0;
+ break;
+ }
+
+ return bpp_x100;
+}
+
+static u8 calc_video_size_step(int coding)
+{
+ u8 video_size_step;
+
+ switch (coding) {
+ case COLOR_CODE_16BIT_CONFIG1:
+ case COLOR_CODE_16BIT_CONFIG2:
+ case COLOR_CODE_16BIT_CONFIG3:
+ case COLOR_CODE_18BIT_CONFIG1:
+ case COLOR_CODE_18BIT_CONFIG2:
+ case COLOR_CODE_24BIT:
+ case COLOR_CODE_COMPRESSTION:
+ return video_size_step = 1;
+ case COLOR_CODE_20BIT_YCC422_LOOSELY:
+ case COLOR_CODE_24BIT_YCC422:
+ case COLOR_CODE_16BIT_YCC422:
+ case COLOR_CODE_30BIT:
+ case COLOR_CODE_36BIT:
+ case COLOR_CODE_12BIT_YCC420:
+ return video_size_step = 2;
+ default:
+ DRM_ERROR("invalid color coding");
+ return 0;
+ }
+}
+
+static u16 round_video_size(int coding, u16 video_size)
+{
+ switch (coding) {
+ case COLOR_CODE_16BIT_YCC422:
+ case COLOR_CODE_24BIT_YCC422:
+ case COLOR_CODE_20BIT_YCC422_LOOSELY:
+ case COLOR_CODE_12BIT_YCC420:
+ /* round up active H pixels to a multiple of 2 */
+ if ((video_size % 2) != 0)
+ video_size += 1;
+ break;
+ default:
+ break;
+ }
+
+ return video_size;
+}
+
+#define SPRD_MIPI_DSI_FMT_DSC 0xff
+static u32 fmt_to_coding(u32 fmt)
+{
+ switch (fmt) {
+ case MIPI_DSI_FMT_RGB565:
+ return COLOR_CODE_16BIT_CONFIG1;
+ case MIPI_DSI_FMT_RGB666:
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ return COLOR_CODE_18BIT_CONFIG1;
+ case MIPI_DSI_FMT_RGB888:
+ return COLOR_CODE_24BIT;
+ case SPRD_MIPI_DSI_FMT_DSC:
+ return COLOR_CODE_COMPRESSTION;
+ default:
+ DRM_ERROR("Unsupported format (%d)\n", fmt);
+ return COLOR_CODE_24BIT;
+ }
+}
+
+#define ns_to_cycle(ns, byte_clk) \
+ DIV_ROUND_UP((ns) * (byte_clk), 1000000)
+
+static void sprd_dsi_init(struct dsi_context *ctx)
+{
+ struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx);
+ u32 byte_clk = dsi->slave->hs_rate / 8;
+ u16 data_hs2lp, data_lp2hs, clk_hs2lp, clk_lp2hs;
+ u16 max_rd_time;
+ int div;
+
+ writel(0, ctx->base + SOFT_RESET);
+ writel(0xffffffff, ctx->base + MASK_PROTOCOL_INT);
+ writel(0xffffffff, ctx->base + MASK_INTERNAL_INT);
+ writel(1, ctx->base + DSI_MODE_CFG);
+ dsi_reg_up(ctx, EOTP_EN, RX_EOTP_EN, 0);
+ dsi_reg_up(ctx, EOTP_EN, TX_EOTP_EN, 0);
+ dsi_reg_up(ctx, RX_PKT_CHECK_CONFIG, RX_PKT_ECC_EN, RX_PKT_ECC_EN);
+ dsi_reg_up(ctx, RX_PKT_CHECK_CONFIG, RX_PKT_CRC_EN, RX_PKT_CRC_EN);
+ writel(1, ctx->base + TA_EN);
+ dsi_reg_up(ctx, VIRTUAL_CHANNEL_ID, VIDEO_PKT_VCID, 0);
+ dsi_reg_up(ctx, VIRTUAL_CHANNEL_ID, GEN_RX_VCID, 0);
+
+ div = DIV_ROUND_UP(byte_clk, dsi->slave->lp_rate);
+ writel(div, ctx->base + TX_ESC_CLK_CONFIG);
+
+ max_rd_time = ns_to_cycle(ctx->max_rd_time, byte_clk);
+ writel(max_rd_time, ctx->base + MAX_READ_TIME);
+
+ data_hs2lp = ns_to_cycle(ctx->data_hs2lp, byte_clk);
+ data_lp2hs = ns_to_cycle(ctx->data_lp2hs, byte_clk);
+ clk_hs2lp = ns_to_cycle(ctx->clk_hs2lp, byte_clk);
+ clk_lp2hs = ns_to_cycle(ctx->clk_lp2hs, byte_clk);
+ dsi_reg_wr(ctx, PHY_DATALANE_TIME_CONFIG,
+ PHY_DATALANE_HS_TO_LP_TIME, 16, data_hs2lp);
+ dsi_reg_wr(ctx, PHY_DATALANE_TIME_CONFIG,
+ PHY_DATALANE_LP_TO_HS_TIME, 0, data_lp2hs);
+ dsi_reg_wr(ctx, PHY_CLKLANE_TIME_CONFIG,
+ PHY_CLKLANE_HS_TO_LP_TIME, 16, clk_hs2lp);
+ dsi_reg_wr(ctx, PHY_CLKLANE_TIME_CONFIG,
+ PHY_CLKLANE_LP_TO_HS_TIME, 0, clk_lp2hs);
+
+ writel(1, ctx->base + SOFT_RESET);
+}
+
+/*
+ * Free up resources and shut down the host controller and PHY
+ */
+static void sprd_dsi_fini(struct dsi_context *ctx)
+{
+ writel(0xffffffff, ctx->base + MASK_PROTOCOL_INT);
+ writel(0xffffffff, ctx->base + MASK_INTERNAL_INT);
+ writel(0, ctx->base + SOFT_RESET);
+}
+
+/*
+ * If not in burst mode, compute the video and null packet sizes as
+ * needed.
+ * Configure timers for the data lanes and/or clock lane to return to LP
+ * when the bandwidth is not filled by data.
+ */
+static int sprd_dsi_dpi_video(struct dsi_context *ctx)
+{
+ struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx);
+ struct videomode *vm = &ctx->vm;
+ u32 byte_clk = dsi->slave->hs_rate / 8;
+ u16 bpp_x100;
+ u16 video_size;
+ u32 ratio_x1000;
+ u16 null_pkt_size = 0;
+ u8 video_size_step;
+ u32 hs_to;
+ u32 total_bytes;
+ u32 bytes_per_chunk;
+ u32 chunks = 0;
+ u32 bytes_left = 0;
+ u32 chunk_overhead;
+ const u8 pkt_header = 6;
+ u8 coding;
+ int div;
+ u16 hline;
+ u16 byte_cycle;
+
+ coding = fmt_to_coding(dsi->slave->format);
+ video_size = round_video_size(coding, vm->hactive);
+ bpp_x100 = calc_bytes_per_pixel_x100(coding);
+ video_size_step = calc_video_size_step(coding);
+ ratio_x1000 = byte_clk * 1000 / (vm->pixelclock / 1000);
+ hline = vm->hactive + vm->hsync_len + vm->hfront_porch +
+ vm->hback_porch;
+
+ writel(0, ctx->base + SOFT_RESET);
+ dsi_reg_wr(ctx, VID_MODE_CFG, FRAME_BTA_ACK_EN, 15, ctx->frame_ack_en);
+ dsi_reg_wr(ctx, DPI_VIDEO_FORMAT, DPI_VIDEO_MODE_FORMAT, 0, coding);
+ dsi_reg_wr(ctx, VID_MODE_CFG, VID_MODE_TYPE, 0, ctx->burst_mode);
+ byte_cycle = 95 * hline * ratio_x1000 / 100000;
+ dsi_reg_wr(ctx, VIDEO_SIG_DELAY_CONFIG, VIDEO_SIG_DELAY, 0, byte_cycle);
+ byte_cycle = hline * ratio_x1000 / 1000;
+ writel(byte_cycle, ctx->base + VIDEO_LINE_TIME);
+ byte_cycle = vm->hsync_len * ratio_x1000 / 1000;
+ dsi_reg_wr(ctx, VIDEO_LINE_HBLK_TIME, VIDEO_LINE_HSA_TIME, 16, byte_cycle);
+ byte_cycle = vm->hback_porch * ratio_x1000 / 1000;
+ dsi_reg_wr(ctx, VIDEO_LINE_HBLK_TIME, VIDEO_LINE_HBP_TIME, 0, byte_cycle);
+ writel(vm->vactive, ctx->base + VIDEO_VACTIVE_LINES);
+ dsi_reg_wr(ctx, VIDEO_VBLK_LINES, VFP_LINES, 0, vm->vfront_porch);
+ dsi_reg_wr(ctx, VIDEO_VBLK_LINES, VBP_LINES, 10, vm->vback_porch);
+ dsi_reg_wr(ctx, VIDEO_VBLK_LINES, VSA_LINES, 20, vm->vsync_len);
+ dsi_reg_up(ctx, VID_MODE_CFG, LP_HBP_EN | LP_HFP_EN | LP_VACT_EN |
+ LP_VFP_EN | LP_VBP_EN | LP_VSA_EN, LP_HBP_EN | LP_HFP_EN |
+ LP_VACT_EN | LP_VFP_EN | LP_VBP_EN | LP_VSA_EN);
+
+ hs_to = (hline * vm->vactive) + (2 * bpp_x100) / 100;
+ for (div = 0x80; (div < hs_to) && (div > 2); div--) {
+ if ((hs_to % div) == 0) {
+ writel(div, ctx->base + TIMEOUT_CNT_CLK_CONFIG);
+ writel(hs_to / div, ctx->base + LRX_H_TO_CONFIG);
+ writel(hs_to / div, ctx->base + HTX_TO_CONFIG);
+ break;
+ }
+ }
+
+ if (ctx->burst_mode == VIDEO_BURST_WITH_SYNC_PULSES) {
+ dsi_reg_wr(ctx, VIDEO_PKT_CONFIG, VIDEO_PKT_SIZE, 0, video_size);
+ writel(0, ctx->base + VIDEO_NULLPKT_SIZE);
+ dsi_reg_up(ctx, VIDEO_PKT_CONFIG, VIDEO_LINE_CHUNK_NUM, 0);
+ } else {
+ /* non burst transmission */
+ null_pkt_size = 0;
+
+ /* bytes to be sent - first as one chunk */
+ bytes_per_chunk = vm->hactive * bpp_x100 / 100 + pkt_header;
+
+ /* hline total bytes from the DPI interface */
+ total_bytes = (vm->hactive + vm->hfront_porch) *
+ ratio_x1000 / dsi->slave->lanes / 1000;
+
+ /* check if the pixels actually fit on the DSI link */
+ if (total_bytes < bytes_per_chunk) {
+ drm_err(dsi->drm, "current resolution can not be set\n");
+ return -EINVAL;
+ }
+
+ chunk_overhead = total_bytes - bytes_per_chunk;
+
+ /* overhead higher than 1 -> enable multi packets */
+ if (chunk_overhead > 1) {
+ /* multi packets */
+ for (video_size = video_size_step;
+ video_size < vm->hactive;
+ video_size += video_size_step) {
+ if (vm->hactive * 1000 / video_size % 1000)
+ continue;
+
+ chunks = vm->hactive / video_size;
+ bytes_per_chunk = bpp_x100 * video_size / 100
+ + pkt_header;
+ if (total_bytes >= (bytes_per_chunk * chunks)) {
+ bytes_left = total_bytes -
+ bytes_per_chunk * chunks;
+ break;
+ }
+ }
+
+ /* prevent overflow (unsigned - unsigned) */
+ if (bytes_left > (pkt_header * chunks)) {
+ null_pkt_size = (bytes_left -
+ pkt_header * chunks) / chunks;
+ /* avoid register overflow */
+ if (null_pkt_size > 1023)
+ null_pkt_size = 1023;
+ }
+
+ } else {
+ /* single packet */
+ chunks = 1;
+
+ /* must be a multiple of 4 except 18 loosely */
+ for (video_size = vm->hactive;
+ (video_size % video_size_step) != 0;
+ video_size++)
+ ;
+ }
+
+ dsi_reg_wr(ctx, VIDEO_PKT_CONFIG, VIDEO_PKT_SIZE, 0, video_size);
+ writel(null_pkt_size, ctx->base + VIDEO_NULLPKT_SIZE);
+ dsi_reg_wr(ctx, VIDEO_PKT_CONFIG, VIDEO_LINE_CHUNK_NUM, 16, chunks);
+ }
+
+ writel(ctx->int0_mask, ctx->base + MASK_PROTOCOL_INT);
+ writel(ctx->int1_mask, ctx->base + MASK_INTERNAL_INT);
+ writel(1, ctx->base + SOFT_RESET);
+
+ return 0;
+}
+
+static void sprd_dsi_edpi_video(struct dsi_context *ctx)
+{
+ struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx);
+ const u32 fifo_depth = 1096;
+ const u32 word_length = 4;
+ u32 hactive = ctx->vm.hactive;
+ u32 bpp_x100;
+ u32 max_fifo_len;
+ u8 coding;
+
+ coding = fmt_to_coding(dsi->slave->format);
+ bpp_x100 = calc_bytes_per_pixel_x100(coding);
+ max_fifo_len = word_length * fifo_depth * 100 / bpp_x100;
+
+ writel(0, ctx->base + SOFT_RESET);
+ dsi_reg_wr(ctx, DPI_VIDEO_FORMAT, DPI_VIDEO_MODE_FORMAT, 0, coding);
+ dsi_reg_wr(ctx, CMD_MODE_CFG, TEAR_FX_EN, 0, ctx->te_ack_en);
+
+ if (max_fifo_len > hactive)
+ writel(hactive, ctx->base + DCS_WM_PKT_SIZE);
+ else
+ writel(max_fifo_len, ctx->base + DCS_WM_PKT_SIZE);
+
+ writel(ctx->int0_mask, ctx->base + MASK_PROTOCOL_INT);
+ writel(ctx->int1_mask, ctx->base + MASK_INTERNAL_INT);
+ writel(1, ctx->base + SOFT_RESET);
+}
+
+/*
+ * Send a packet on the generic interface.
+ * This function has an active delay to wait for the buffer to clear;
+ * the delay is limited to
+ * (param_length / 4) x DSIH_FIFO_ACTIVE_WAIT x register access time,
+ * as restricted by the controller.
+ *
+ * Due to a controller restriction, this function cannot send Null and
+ * Blanking packets.
+ */
+static int sprd_dsi_wr_pkt(struct dsi_context *ctx, u8 vc, u8 type,
+ const u8 *param, u16 len)
+{
+ struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx);
+ u8 wc_lsbyte, wc_msbyte;
+ u32 payload;
+ int i, j, ret;
+
+ if (vc > 3)
+ return -EINVAL;
+
+ /* 1st: for long packet, must config payload first */
+ ret = dsi_wait_tx_payload_fifo_empty(ctx);
+ if (ret) {
+ drm_err(dsi->drm, "tx payload fifo is not empty\n");
+ return ret;
+ }
+
+ if (len > 2) {
+ for (i = 0, j = 0; i < len; i += j) {
+ payload = 0;
+ for (j = 0; (j < 4) && ((j + i) < (len)); j++)
+ payload |= param[i + j] << (j * 8);
+
+ writel(payload, ctx->base + GEN_PLD_DATA);
+ }
+ wc_lsbyte = len & 0xff;
+ wc_msbyte = len >> 8;
+ } else {
+ wc_lsbyte = (len > 0) ? param[0] : 0;
+ wc_msbyte = (len > 1) ? param[1] : 0;
+ }
+
+ /* 2nd: then set packet header */
+ ret = dsi_wait_tx_cmd_fifo_empty(ctx);
+ if (ret) {
+ drm_err(dsi->drm, "tx cmd fifo is not empty\n");
+ return ret;
+ }
+
+ writel(type | (vc << 6) | (wc_lsbyte << 8) | (wc_msbyte << 16),
+ ctx->base + GEN_HDR);
+
+ return 0;
+}
+
+/*
+ * Send a READ packet to the peripheral using the generic interface.
+ * This forces command mode and stops video mode (because of BTA).
+ *
+ * This function has an active delay to wait for the buffer to clear;
+ * the delay is limited to 2 x DSIH_FIFO_ACTIVE_WAIT
+ * (waiting for the command buffer, then waiting for the response).
+ * @note this function will enable BTA
+ */
+static int sprd_dsi_rd_pkt(struct dsi_context *ctx, u8 vc, u8 type,
+ u8 msb_byte, u8 lsb_byte,
+ u8 *buffer, u8 bytes_to_read)
+{
+ struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx);
+ int i, ret;
+ int count = 0;
+ u32 temp;
+
+ if (vc > 3)
+ return -EINVAL;
+
+ /* 1st: send read command to peripheral */
+ ret = dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_CMD_FIFO_EMPTY, 5);
+ if (!ret)
+ return -EIO;
+
+ writel(type | (vc << 6) | (lsb_byte << 8) | (msb_byte << 16),
+ ctx->base + GEN_HDR);
+
+ /* 2nd: wait peripheral response completed */
+ ret = dsi_wait_rd_resp_completed(ctx);
+ if (ret) {
+ drm_err(dsi->drm, "wait read response time out\n");
+ return ret;
+ }
+
+ /* 3rd: get data from rx payload fifo */
+ ret = dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_RDATA_FIFO_EMPTY, 1);
+ if (ret) {
+ drm_err(dsi->drm, "rx payload fifo empty\n");
+ return -EIO;
+ }
+
+ for (i = 0; i < 100; i++) {
+ temp = readl(ctx->base + GEN_PLD_DATA);
+
+ if (count < bytes_to_read)
+ buffer[count++] = temp & 0xff;
+ if (count < bytes_to_read)
+ buffer[count++] = (temp >> 8) & 0xff;
+ if (count < bytes_to_read)
+ buffer[count++] = (temp >> 16) & 0xff;
+ if (count < bytes_to_read)
+ buffer[count++] = (temp >> 24) & 0xff;
+
+ ret = dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_RDATA_FIFO_EMPTY, 1);
+ if (ret)
+ return count;
+ }
+
+ return 0;
+}
+
+static void sprd_dsi_set_work_mode(struct dsi_context *ctx, u8 mode)
+{
+ if (mode == DSI_MODE_CMD)
+ writel(1, ctx->base + DSI_MODE_CFG);
+ else
+ writel(0, ctx->base + DSI_MODE_CFG);
+}
+
+static void sprd_dsi_state_reset(struct dsi_context *ctx)
+{
+ writel(0, ctx->base + SOFT_RESET);
+ udelay(100);
+ writel(1, ctx->base + SOFT_RESET);
+}
+
+static int sprd_dphy_init(struct dsi_context *ctx)
+{
+ struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx);
+ int ret;
+
+ dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_RESET_N, 0);
+ dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_SHUTDOWN, 0);
+ dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_CLK_EN, 0);
+
+ dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLR, 0);
+ dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLR, PHY_TESTCLR);
+ dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLR, 0);
+
+ dphy_pll_config(ctx);
+ dphy_timing_config(ctx);
+
+ dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_SHUTDOWN, RF_PHY_SHUTDOWN);
+ dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_RESET_N, RF_PHY_RESET_N);
+ writel(0x1C, ctx->base + PHY_MIN_STOP_TIME);
+ dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_CLK_EN, RF_PHY_CLK_EN);
+ writel(dsi->slave->lanes - 1, ctx->base + PHY_LANE_NUM_CONFIG);
+
+ ret = dphy_wait_pll_locked(ctx);
+ if (ret) {
+ drm_err(dsi->drm, "dphy initial failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sprd_dphy_fini(struct dsi_context *ctx)
+{
+ dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_RESET_N, 0);
+ dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_SHUTDOWN, 0);
+ dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_RESET_N, RF_PHY_RESET_N);
+}
+
+static void sprd_dsi_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct sprd_dsi *dsi = encoder_to_dsi(encoder);
+
+ drm_display_mode_to_videomode(adj_mode, &dsi->ctx.vm);
+}
+
+static void sprd_dsi_encoder_enable(struct drm_encoder *encoder)
+{
+ struct sprd_dsi *dsi = encoder_to_dsi(encoder);
+ struct sprd_dpu *dpu = to_sprd_crtc(encoder->crtc);
+ struct dsi_context *ctx = &dsi->ctx;
+
+ if (ctx->enabled) {
+ drm_warn(dsi->drm, "dsi is initialized\n");
+ return;
+ }
+
+ sprd_dsi_init(ctx);
+ if (ctx->work_mode == DSI_MODE_VIDEO)
+ sprd_dsi_dpi_video(ctx);
+ else
+ sprd_dsi_edpi_video(ctx);
+
+ sprd_dphy_init(ctx);
+
+ sprd_dsi_set_work_mode(ctx, ctx->work_mode);
+ sprd_dsi_state_reset(ctx);
+
+ if (dsi->slave->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
+ dsi_reg_up(ctx, PHY_CLK_LANE_LP_CTRL, AUTO_CLKLANE_CTRL_EN,
+ AUTO_CLKLANE_CTRL_EN);
+ } else {
+ dsi_reg_up(ctx, PHY_CLK_LANE_LP_CTRL, RF_PHY_CLK_EN, RF_PHY_CLK_EN);
+ dsi_reg_up(ctx, PHY_CLK_LANE_LP_CTRL, PHY_CLKLANE_TX_REQ_HS,
+ PHY_CLKLANE_TX_REQ_HS);
+ dphy_wait_pll_locked(ctx);
+ }
+
+ sprd_dpu_run(dpu);
+
+ ctx->enabled = true;
+}
+
+static void sprd_dsi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct sprd_dsi *dsi = encoder_to_dsi(encoder);
+ struct sprd_dpu *dpu = to_sprd_crtc(encoder->crtc);
+ struct dsi_context *ctx = &dsi->ctx;
+
+ if (!ctx->enabled) {
+ drm_warn(dsi->drm, "dsi isn't initialized\n");
+ return;
+ }
+
+ sprd_dpu_stop(dpu);
+ sprd_dphy_fini(ctx);
+ sprd_dsi_fini(ctx);
+
+ ctx->enabled = false;
+}
+
+static const struct drm_encoder_helper_funcs sprd_encoder_helper_funcs = {
+ .mode_set = sprd_dsi_encoder_mode_set,
+ .enable = sprd_dsi_encoder_enable,
+ .disable = sprd_dsi_encoder_disable
+};
+
+static const struct drm_encoder_funcs sprd_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int sprd_dsi_encoder_init(struct sprd_dsi *dsi,
+ struct device *dev)
+{
+ struct drm_encoder *encoder = &dsi->encoder;
+ u32 crtc_mask;
+ int ret;
+
+ crtc_mask = drm_of_find_possible_crtcs(dsi->drm, dev->of_node);
+ if (!crtc_mask) {
+ drm_err(dsi->drm, "failed to find crtc mask\n");
+ return -EINVAL;
+ }
+
+ drm_dbg(dsi->drm, "find possible crtcs: 0x%08x\n", crtc_mask);
+
+ encoder->possible_crtcs = crtc_mask;
+ ret = drm_encoder_init(dsi->drm, encoder, &sprd_encoder_funcs,
+ DRM_MODE_ENCODER_DSI, NULL);
+ if (ret) {
+ drm_err(dsi->drm, "failed to init dsi encoder\n");
+ return ret;
+ }
+
+ drm_encoder_helper_add(encoder, &sprd_encoder_helper_funcs);
+
+ return 0;
+}
+
+static int sprd_dsi_bridge_init(struct sprd_dsi *dsi,
+ struct device *dev)
+{
+ int ret;
+
+ dsi->panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
+ if (IS_ERR(dsi->panel_bridge))
+ return PTR_ERR(dsi->panel_bridge);
+
+ ret = drm_bridge_attach(&dsi->encoder, dsi->panel_bridge, NULL, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int sprd_dsi_context_init(struct sprd_dsi *dsi,
+ struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dsi_context *ctx = &dsi->ctx;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctx->base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!ctx->base) {
+ drm_err(dsi->drm, "failed to map dsi host registers\n");
+ return -ENXIO;
+ }
+
+ ctx->regmap = devm_regmap_init(dev, &regmap_tst_io, dsi, &byte_config);
+ if (IS_ERR(ctx->regmap)) {
+ drm_err(dsi->drm, "dphy regmap init failed\n");
+ return PTR_ERR(ctx->regmap);
+ }
+
+ ctx->data_hs2lp = 120;
+ ctx->data_lp2hs = 500;
+ ctx->clk_hs2lp = 4;
+ ctx->clk_lp2hs = 15;
+ ctx->max_rd_time = 6000;
+ ctx->int0_mask = 0xffffffff;
+ ctx->int1_mask = 0xffffffff;
+ ctx->enabled = true;
+
+ return 0;
+}
+
+static int sprd_dsi_bind(struct device *dev, struct device *master, void *data)
+{
+ struct drm_device *drm = data;
+ struct sprd_dsi *dsi = dev_get_drvdata(dev);
+ int ret;
+
+ dsi->drm = drm;
+
+ ret = sprd_dsi_encoder_init(dsi, dev);
+ if (ret)
+ return ret;
+
+ ret = sprd_dsi_bridge_init(dsi, dev);
+ if (ret)
+ return ret;
+
+ ret = sprd_dsi_context_init(dsi, dev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void sprd_dsi_unbind(struct device *dev,
+ struct device *master, void *data)
+{
+ struct sprd_dsi *dsi = dev_get_drvdata(dev);
+
+ drm_of_panel_bridge_remove(dev->of_node, 1, 0);
+
+ drm_encoder_cleanup(&dsi->encoder);
+}
+
+static const struct component_ops dsi_component_ops = {
+ .bind = sprd_dsi_bind,
+ .unbind = sprd_dsi_unbind,
+};
+
+static int sprd_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *slave)
+{
+ struct sprd_dsi *dsi = host_to_dsi(host);
+ struct dsi_context *ctx = &dsi->ctx;
+
+ dsi->slave = slave;
+
+ if (slave->mode_flags & MIPI_DSI_MODE_VIDEO)
+ ctx->work_mode = DSI_MODE_VIDEO;
+ else
+ ctx->work_mode = DSI_MODE_CMD;
+
+ if (slave->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+ ctx->burst_mode = VIDEO_BURST_WITH_SYNC_PULSES;
+ else if (slave->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+ ctx->burst_mode = VIDEO_NON_BURST_WITH_SYNC_PULSES;
+ else
+ ctx->burst_mode = VIDEO_NON_BURST_WITH_SYNC_EVENTS;
+
+ return component_add(host->dev, &dsi_component_ops);
+}
+
+static int sprd_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *slave)
+{
+ component_del(host->dev, &dsi_component_ops);
+
+ return 0;
+}
+
+static ssize_t sprd_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct sprd_dsi *dsi = host_to_dsi(host);
+ const u8 *tx_buf = msg->tx_buf;
+
+ if (msg->rx_buf && msg->rx_len) {
+ u8 lsb = (msg->tx_len > 0) ? tx_buf[0] : 0;
+ u8 msb = (msg->tx_len > 1) ? tx_buf[1] : 0;
+
+ return sprd_dsi_rd_pkt(&dsi->ctx, msg->channel, msg->type,
+ msb, lsb, msg->rx_buf, msg->rx_len);
+ }
+
+ if (msg->tx_buf && msg->tx_len)
+ return sprd_dsi_wr_pkt(&dsi->ctx, msg->channel, msg->type,
+ tx_buf, msg->tx_len);
+
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops sprd_dsi_host_ops = {
+ .attach = sprd_dsi_host_attach,
+ .detach = sprd_dsi_host_detach,
+ .transfer = sprd_dsi_host_transfer,
+};
+
+static const struct of_device_id dsi_match_table[] = {
+ { .compatible = "sprd,sharkl3-dsi-host" },
+ { /* sentinel */ },
+};
+
+static int sprd_dsi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sprd_dsi *dsi;
+
+ dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, dsi);
+
+ dsi->host.ops = &sprd_dsi_host_ops;
+ dsi->host.dev = dev;
+
+ return mipi_dsi_host_register(&dsi->host);
+}
+
+static int sprd_dsi_remove(struct platform_device *pdev)
+{
+ struct sprd_dsi *dsi = dev_get_drvdata(&pdev->dev);
+
+ mipi_dsi_host_unregister(&dsi->host);
+
+ return 0;
+}
+
+struct platform_driver sprd_dsi_driver = {
+ .probe = sprd_dsi_probe,
+ .remove = sprd_dsi_remove,
+ .driver = {
+ .name = "sprd-dsi-drv",
+ .of_match_table = dsi_match_table,
+ },
+};
+
+MODULE_AUTHOR("Leon He <leon.he@unisoc.com>");
+MODULE_AUTHOR("Kevin Tang <kevin.tang@unisoc.com>");
+MODULE_DESCRIPTION("Unisoc MIPI DSI HOST Controller Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/sprd/sprd_dsi.h b/drivers/gpu/drm/sprd/sprd_dsi.h
new file mode 100644
index 000000000000..d858ebb11115
--- /dev/null
+++ b/drivers/gpu/drm/sprd/sprd_dsi.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Unisoc Inc.
+ */
+
+#ifndef __SPRD_DSI_H__
+#define __SPRD_DSI_H__
+
+#include <linux/of.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <video/videomode.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_print.h>
+#include <drm/drm_panel.h>
+
+#define encoder_to_dsi(encoder) \
+ container_of(encoder, struct sprd_dsi, encoder)
+
+enum dsi_work_mode {
+ DSI_MODE_CMD = 0,
+ DSI_MODE_VIDEO
+};
+
+enum video_burst_mode {
+ VIDEO_NON_BURST_WITH_SYNC_PULSES = 0,
+ VIDEO_NON_BURST_WITH_SYNC_EVENTS,
+ VIDEO_BURST_WITH_SYNC_PULSES
+};
+
+enum dsi_color_coding {
+ COLOR_CODE_16BIT_CONFIG1 = 0,
+ COLOR_CODE_16BIT_CONFIG2,
+ COLOR_CODE_16BIT_CONFIG3,
+ COLOR_CODE_18BIT_CONFIG1,
+ COLOR_CODE_18BIT_CONFIG2,
+ COLOR_CODE_24BIT,
+ COLOR_CODE_20BIT_YCC422_LOOSELY,
+ COLOR_CODE_24BIT_YCC422,
+ COLOR_CODE_16BIT_YCC422,
+ COLOR_CODE_30BIT,
+ COLOR_CODE_36BIT,
+ COLOR_CODE_12BIT_YCC420,
+ COLOR_CODE_COMPRESSTION,
+ COLOR_CODE_MAX
+};
+
+enum pll_timing {
+ NONE,
+ REQUEST_TIME,
+ PREPARE_TIME,
+ SETTLE_TIME,
+ ZERO_TIME,
+ TRAIL_TIME,
+ EXIT_TIME,
+ CLKPOST_TIME,
+ TA_GET,
+ TA_GO,
+ TA_SURE,
+ TA_WAIT,
+};
+
+struct dphy_pll {
+ u8 refin; /* Pre-divider control signal */
+ u8 cp_s; /* 00: SDM_EN=1, 10: SDM_EN=0 */
+ u8 fdk_s; /* PLL mode control: integer or fraction */
+ u8 sdm_en;
+ u8 div;
+ u8 int_n; /* integer N PLL */
+ u32 ref_clk; /* dphy reference clock, unit: MHz */
+ u32 freq; /* panel config, unit: KHz */
+ u32 fvco;
+ u32 potential_fvco;
+ u32 nint; /* sigma delta modulator NINT control */
+ u32 kint; /* sigma delta modulator KINT control */
+ u8 lpf_sel; /* low pass filter control */
+ u8 out_sel; /* post divider control */
+ u8 vco_band; /* vco range */
+ u8 det_delay;
+};
+
+struct dsi_context {
+ void __iomem *base;
+ struct regmap *regmap;
+ struct dphy_pll pll;
+ struct videomode vm;
+ bool enabled;
+
+ u8 work_mode;
+ u8 burst_mode;
+ u32 int0_mask;
+ u32 int1_mask;
+
+ /* maximum time (ns) for data lanes from HS to LP */
+ u16 data_hs2lp;
+ /* maximum time (ns) for data lanes from LP to HS */
+ u16 data_lp2hs;
+ /* maximum time (ns) for clk lanes from HS to LP */
+ u16 clk_hs2lp;
+ /* maximum time (ns) for clk lanes from LP to HS */
+ u16 clk_lp2hs;
+ /* maximum time (ns) for BTA operation - REQUIRED */
+ u16 max_rd_time;
+ /* enable receiving frame ack packets - for video mode */
+ bool frame_ack_en;
+ /* enable receiving tear effect ack packets - for cmd mode */
+ bool te_ack_en;
+};
+
+struct sprd_dsi {
+ struct drm_device *drm;
+ struct mipi_dsi_host host;
+ struct mipi_dsi_device *slave;
+ struct drm_encoder encoder;
+ struct drm_bridge *panel_bridge;
+ struct dsi_context ctx;
+};
+
+int dphy_pll_config(struct dsi_context *ctx);
+void dphy_timing_config(struct dsi_context *ctx);
+
+#endif /* __SPRD_DSI_H__ */
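
The dsi_reg_wr()/dsi_reg_up() helpers in sprd_dsi.c perform a plain read-modify-write of a field described by a GENMASK() plus an explicit shift. For clarity, the same update can be expressed with the <linux/bitfield.h> helpers; a sketch using the VBP_LINES field defined in sprd_dsi.c above (example_set_vbp() is hypothetical, not part of this patch):

	#include <linux/bitfield.h>
	#include <linux/io.h>

	/* Equivalent of dsi_reg_wr(ctx, VIDEO_VBLK_LINES, VBP_LINES, 10, vbp):
	 * VBP_LINES is GENMASK(19, 10), so FIELD_PREP() applies the same shift. */
	static void example_set_vbp(void __iomem *base, u32 vbp)
	{
		u32 val = readl(base + VIDEO_VBLK_LINES);

		val &= ~VBP_LINES;
		val |= FIELD_PREP(VBP_LINES, vbp);
		writel(val, base + VIDEO_VBLK_LINES);
	}
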
diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
index d0cfdd36b38f..246a94afbe74 100644
--- a/drivers/gpu/drm/sti/Kconfig
+++ b/drivers/gpu/drm/sti/Kconfig
@@ -5,7 +5,6 @@ config DRM_STI
select RESET_CONTROLLER
select DRM_KMS_HELPER
select DRM_GEM_CMA_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_PANEL
select FW_LOADER
select SND_SOC_HDMI_CODEC if SND_SOC
diff --git a/drivers/gpu/drm/stm/Kconfig b/drivers/gpu/drm/stm/Kconfig
index b7d66915a2be..e0379488cd0d 100644
--- a/drivers/gpu/drm/stm/Kconfig
+++ b/drivers/gpu/drm/stm/Kconfig
@@ -4,7 +4,6 @@ config DRM_STM
depends on DRM && (ARCH_STM32 || ARCH_MULTIPLATFORM)
select DRM_KMS_HELPER
select DRM_GEM_CMA_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_PANEL_BRIDGE
select VIDEOMODE_HELPERS
select FB_PROVIDE_GET_FB_UNMAPPED_AREA if FB
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index 222869b232ae..9f441aadf2d5 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -14,6 +14,7 @@
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
@@ -183,6 +184,10 @@ static int stm_drm_platform_probe(struct platform_device *pdev)
DRM_DEBUG("%s\n", __func__);
+ ret = drm_aperture_remove_framebuffers(false, &drv_driver);
+ if (ret)
+ return ret;
+
dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
ddev = drm_dev_alloc(&drv_driver, dev);
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
index 32cb41b2202f..89897d5f5c72 100644
--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -247,14 +247,6 @@ dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
int ret, bpp;
u32 val;
- /* Update lane capabilities according to hw version */
- dsi->lane_min_kbps = LANE_MIN_KBPS;
- dsi->lane_max_kbps = LANE_MAX_KBPS;
- if (dsi->hw_version == HWVER_131) {
- dsi->lane_min_kbps *= 2;
- dsi->lane_max_kbps *= 2;
- }
-
pll_in_khz = (unsigned int)(clk_get_rate(dsi->pllref_clk) / 1000);
/* Compute requested pll out */
@@ -330,6 +322,103 @@ dw_mipi_dsi_phy_get_timing(void *priv_data, unsigned int lane_mbps,
return 0;
}
+#define CLK_TOLERANCE_HZ 50
+
+static enum drm_mode_status
+dw_mipi_dsi_stm_mode_valid(void *priv_data,
+ const struct drm_display_mode *mode,
+ unsigned long mode_flags, u32 lanes, u32 format)
+{
+ struct dw_mipi_dsi_stm *dsi = priv_data;
+ unsigned int idf, ndiv, odf, pll_in_khz, pll_out_khz;
+ int ret, bpp;
+
+ bpp = mipi_dsi_pixel_format_to_bpp(format);
+ if (bpp < 0)
+ return MODE_BAD;
+
+ /* Compute requested pll out */
+ pll_out_khz = mode->clock * bpp / lanes;
+
+ if (pll_out_khz > dsi->lane_max_kbps)
+ return MODE_CLOCK_HIGH;
+
+ if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
+ /* Add 20% to pll out to be higher than pixel bw */
+ pll_out_khz = (pll_out_khz * 12) / 10;
+ } else {
+ if (pll_out_khz < dsi->lane_min_kbps)
+ return MODE_CLOCK_LOW;
+ }
+
+ /* Compute best pll parameters */
+ idf = 0;
+ ndiv = 0;
+ odf = 0;
+ pll_in_khz = clk_get_rate(dsi->pllref_clk) / 1000;
+ ret = dsi_pll_get_params(dsi, pll_in_khz, pll_out_khz, &idf, &ndiv, &odf);
+ if (ret) {
+ DRM_WARN("Warning dsi_pll_get_params(): bad params\n");
+ return MODE_ERROR;
+ }
+
+ if (!(mode_flags & MIPI_DSI_MODE_VIDEO_BURST)) {
+ unsigned int px_clock_hz, target_px_clock_hz, lane_mbps;
+ int dsi_short_packet_size_px, hfp, hsync, hbp, delay_to_lp;
+ struct dw_mipi_dsi_dphy_timing dphy_timing;
+
+ /* Get the adjusted pll out value */
+ pll_out_khz = dsi_pll_get_clkout_khz(pll_in_khz, idf, ndiv, odf);
+
+ px_clock_hz = DIV_ROUND_CLOSEST_ULL(1000ULL * pll_out_khz * lanes, bpp);
+ target_px_clock_hz = mode->clock * 1000;
+ /*
+ * Filter modes according to the clock value, particularly useful for
+ * hdmi modes that require precise pixel clocks.
+ */
+ if (px_clock_hz < target_px_clock_hz - CLK_TOLERANCE_HZ ||
+ px_clock_hz > target_px_clock_hz + CLK_TOLERANCE_HZ)
+ return MODE_CLOCK_RANGE;
+
+ /* sync packets are coded as DSI short packets (4 bytes) */
+ dsi_short_packet_size_px = DIV_ROUND_UP(4 * BITS_PER_BYTE, bpp);
+
+ hfp = mode->hsync_start - mode->hdisplay;
+ hsync = mode->hsync_end - mode->hsync_start;
+ hbp = mode->htotal - mode->hsync_end;
+
+ /* hsync must be longer than a 4-byte HSS packet */
+ if (hsync < dsi_short_packet_size_px)
+ return MODE_HSYNC_NARROW;
+
+ if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
+ /* HBP must be longer than a 4-byte HSE packet */
+ if (hbp < dsi_short_packet_size_px)
+ return MODE_HSYNC_NARROW;
+ hbp -= dsi_short_packet_size_px;
+ } else {
+ /* With sync events, HBP extends into the hsync */
+ hbp += hsync - dsi_short_packet_size_px;
+ }
+
+ lane_mbps = pll_out_khz / 1000;
+ ret = dw_mipi_dsi_phy_get_timing(priv_data, lane_mbps, &dphy_timing);
+ if (ret)
+ return MODE_ERROR;
+ /*
+ * In non-burst mode the DSI has to enter LP during HFP
+ * (horizontal front porch) or HBP (horizontal back porch) to
+ * resync with the LTDC pixel clock.
+ */
+ delay_to_lp = DIV_ROUND_UP((dphy_timing.data_hs2lp + dphy_timing.data_lp2hs) *
+ lanes * BITS_PER_BYTE, bpp);
+ if (hfp < delay_to_lp && hbp < delay_to_lp)
+ return MODE_HSYNC;
+ }
+
+ return MODE_OK;
+}
+
static const struct dw_mipi_dsi_phy_ops dw_mipi_dsi_stm_phy_ops = {
.init = dw_mipi_dsi_phy_init,
.power_on = dw_mipi_dsi_phy_power_on,
@@ -340,6 +429,7 @@ static const struct dw_mipi_dsi_phy_ops dw_mipi_dsi_stm_phy_ops = {
static struct dw_mipi_dsi_plat_data dw_mipi_dsi_stm_plat_data = {
.max_data_lanes = 2,
+ .mode_valid = dw_mipi_dsi_stm_mode_valid,
.phy_ops = &dw_mipi_dsi_stm_phy_ops,
};
@@ -417,6 +507,14 @@ static int dw_mipi_dsi_stm_probe(struct platform_device *pdev)
goto err_dsi_probe;
}
+ /* set lane capabilities according to hw version */
+ dsi->lane_min_kbps = LANE_MIN_KBPS;
+ dsi->lane_max_kbps = LANE_MAX_KBPS;
+ if (dsi->hw_version == HWVER_131) {
+ dsi->lane_min_kbps *= 2;
+ dsi->lane_max_kbps *= 2;
+ }
+
dw_mipi_dsi_stm_plat_data.base = dsi->base;
dw_mipi_dsi_stm_plat_data.priv_data = dsi;
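
As a worked example of the bandwidth check added in dw_mipi_dsi_stm_mode_valid() (the numbers are hypothetical): a 74250 kHz pixel clock with RGB888 (bpp = 24) over 2 lanes requests

	pll_out_khz = mode->clock * bpp / lanes = 74250 * 24 / 2 = 891000

kbps per lane; the mode is rejected with MODE_CLOCK_HIGH if this exceeds dsi->lane_max_kbps, and in burst mode the requested PLL output is then raised by 20% so it stays above the pixel bandwidth.
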
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index dbdee954692a..c0619f372630 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -46,15 +46,15 @@
#define HWVER_10200 0x010200
#define HWVER_10300 0x010300
#define HWVER_20101 0x020101
+#define HWVER_40100 0x040100
/*
* The address of some registers depends on the HW version: such registers have
- * an extra offset specified with reg_ofs.
+ * an extra offset specified with layer_ofs.
*/
-#define REG_OFS_NONE 0
-#define REG_OFS_4 4 /* Insertion of "Layer Conf. 2" reg */
-#define REG_OFS (ldev->caps.reg_ofs)
-#define LAY_OFS 0x80 /* Register Offset between 2 layers */
+#define LAY_OFS_0 0x80
+#define LAY_OFS_1 0x100
+#define LAY_OFS (ldev->caps.layer_ofs)
/* Global register offsets */
#define LTDC_IDR 0x0000 /* IDentification */
@@ -75,29 +75,34 @@
#define LTDC_LIPCR 0x0040 /* Line Interrupt Position Conf. */
#define LTDC_CPSR 0x0044 /* Current Position Status */
#define LTDC_CDSR 0x0048 /* Current Display Status */
+#define LTDC_FUT 0x0090 /* Fifo underrun Threshold */
/* Layer register offsets */
-#define LTDC_L1LC1R (0x80) /* L1 Layer Configuration 1 */
-#define LTDC_L1LC2R (0x84) /* L1 Layer Configuration 2 */
-#define LTDC_L1CR (0x84 + REG_OFS)/* L1 Control */
-#define LTDC_L1WHPCR (0x88 + REG_OFS)/* L1 Window Hor Position Config */
-#define LTDC_L1WVPCR (0x8C + REG_OFS)/* L1 Window Vert Position Config */
-#define LTDC_L1CKCR (0x90 + REG_OFS)/* L1 Color Keying Configuration */
-#define LTDC_L1PFCR (0x94 + REG_OFS)/* L1 Pixel Format Configuration */
-#define LTDC_L1CACR (0x98 + REG_OFS)/* L1 Constant Alpha Config */
-#define LTDC_L1DCCR (0x9C + REG_OFS)/* L1 Default Color Configuration */
-#define LTDC_L1BFCR (0xA0 + REG_OFS)/* L1 Blend Factors Configuration */
-#define LTDC_L1FBBCR (0xA4 + REG_OFS)/* L1 FrameBuffer Bus Control */
-#define LTDC_L1AFBCR (0xA8 + REG_OFS)/* L1 AuxFB Control */
-#define LTDC_L1CFBAR (0xAC + REG_OFS)/* L1 Color FrameBuffer Address */
-#define LTDC_L1CFBLR (0xB0 + REG_OFS)/* L1 Color FrameBuffer Length */
-#define LTDC_L1CFBLNR (0xB4 + REG_OFS)/* L1 Color FrameBuffer Line Nb */
-#define LTDC_L1AFBAR (0xB8 + REG_OFS)/* L1 AuxFB Address */
-#define LTDC_L1AFBLR (0xBC + REG_OFS)/* L1 AuxFB Length */
-#define LTDC_L1AFBLNR (0xC0 + REG_OFS)/* L1 AuxFB Line Number */
-#define LTDC_L1CLUTWR (0xC4 + REG_OFS)/* L1 CLUT Write */
-#define LTDC_L1YS1R (0xE0 + REG_OFS)/* L1 YCbCr Scale 1 */
-#define LTDC_L1YS2R (0xE4 + REG_OFS)/* L1 YCbCr Scale 2 */
+#define LTDC_L1C0R (ldev->caps.layer_regs[0]) /* L1 configuration 0 */
+#define LTDC_L1C1R (ldev->caps.layer_regs[1]) /* L1 configuration 1 */
+#define LTDC_L1RCR (ldev->caps.layer_regs[2]) /* L1 reload control */
+#define LTDC_L1CR (ldev->caps.layer_regs[3]) /* L1 control register */
+#define LTDC_L1WHPCR (ldev->caps.layer_regs[4]) /* L1 window horizontal position configuration */
+#define LTDC_L1WVPCR (ldev->caps.layer_regs[5]) /* L1 window vertical position configuration */
+#define LTDC_L1CKCR (ldev->caps.layer_regs[6]) /* L1 color keying configuration */
+#define LTDC_L1PFCR (ldev->caps.layer_regs[7]) /* L1 pixel format configuration */
+#define LTDC_L1CACR (ldev->caps.layer_regs[8]) /* L1 constant alpha configuration */
+#define LTDC_L1DCCR (ldev->caps.layer_regs[9]) /* L1 default color configuration */
+#define LTDC_L1BFCR (ldev->caps.layer_regs[10]) /* L1 blending factors configuration */
+#define LTDC_L1BLCR (ldev->caps.layer_regs[11]) /* L1 burst length configuration */
+#define LTDC_L1PCR (ldev->caps.layer_regs[12]) /* L1 planar configuration */
+#define LTDC_L1CFBAR (ldev->caps.layer_regs[13]) /* L1 color frame buffer address */
+#define LTDC_L1CFBLR (ldev->caps.layer_regs[14]) /* L1 color frame buffer length */
+#define LTDC_L1CFBLNR (ldev->caps.layer_regs[15]) /* L1 color frame buffer line number */
+#define LTDC_L1AFBA0R (ldev->caps.layer_regs[16]) /* L1 auxiliary frame buffer address 0 */
+#define LTDC_L1AFBA1R (ldev->caps.layer_regs[17]) /* L1 auxiliary frame buffer address 1 */
+#define LTDC_L1AFBLR (ldev->caps.layer_regs[18]) /* L1 auxiliary frame buffer length */
+#define LTDC_L1AFBLNR (ldev->caps.layer_regs[19]) /* L1 auxiliary frame buffer line number */
+#define LTDC_L1CLUTWR (ldev->caps.layer_regs[20]) /* L1 CLUT write */
+#define LTDC_L1CYR0R (ldev->caps.layer_regs[21]) /* L1 Conversion YCbCr RGB 0 */
+#define LTDC_L1CYR1R (ldev->caps.layer_regs[22]) /* L1 Conversion YCbCr RGB 1 */
+#define LTDC_L1FPF0R (ldev->caps.layer_regs[23]) /* L1 Flexible Pixel Format 0 */
+#define LTDC_L1FPF1R (ldev->caps.layer_regs[24]) /* L1 Flexible Pixel Format 1 */
/* Bit definitions */
#define SSCR_VSH GENMASK(10, 0) /* Vertical Synchronization Height */
@@ -208,7 +213,10 @@ enum ltdc_pix_fmt {
/* Indexed formats */
PF_L8, /* Indexed 8 bits [8 bits] */
PF_AL44, /* Alpha:4 bits + indexed 4 bits [8 bits] */
- PF_AL88 /* Alpha:8 bits + indexed 8 bits [16 bits] */
+ PF_AL88, /* Alpha:8 bits + indexed 8 bits [16 bits] */
+ PF_ABGR8888, /* ABGR [32 bits] */
+ PF_BGRA8888, /* BGRA [32 bits] */
+ PF_BGR565 /* RGB [16 bits] */
};
/* The index gives the encoding of the pixel format for an HW version */
@@ -234,6 +242,102 @@ static const enum ltdc_pix_fmt ltdc_pix_fmt_a1[NB_PF] = {
PF_ARGB4444 /* 0x07 */
};
+static const enum ltdc_pix_fmt ltdc_pix_fmt_a2[NB_PF] = {
+ PF_ARGB8888, /* 0x00 */
+ PF_ABGR8888, /* 0x01 */
+ PF_RGBA8888, /* 0x02 */
+ PF_BGRA8888, /* 0x03 */
+ PF_RGB565, /* 0x04 */
+ PF_BGR565, /* 0x05 */
+ PF_RGB888, /* 0x06 */
+ PF_ARGB1555 /* 0x07 */
+};
+
+/* Layer register offsets */
+static const u32 ltdc_layer_regs_a0[] = {
+ 0x80, /* L1 configuration 0 */
+ 0x00, /* not available */
+ 0x00, /* not available */
+ 0x84, /* L1 control register */
+ 0x88, /* L1 window horizontal position configuration */
+ 0x8c, /* L1 window vertical position configuration */
+ 0x90, /* L1 color keying configuration */
+ 0x94, /* L1 pixel format configuration */
+ 0x98, /* L1 constant alpha configuration */
+ 0x9c, /* L1 default color configuration */
+ 0xa0, /* L1 blending factors configuration */
+ 0x00, /* not available */
+ 0x00, /* not available */
+ 0xac, /* L1 color frame buffer address */
+ 0xb0, /* L1 color frame buffer length */
+ 0xb4, /* L1 color frame buffer line number */
+ 0x00, /* not available */
+ 0x00, /* not available */
+ 0x00, /* not available */
+ 0x00, /* not available */
+ 0xc4, /* L1 CLUT write */
+ 0x00, /* not available */
+ 0x00, /* not available */
+ 0x00, /* not available */
+ 0x00 /* not available */
+};
+
+static const u32 ltdc_layer_regs_a1[] = {
+ 0x80, /* L1 configuration 0 */
+ 0x84, /* L1 configuration 1 */
+ 0x00, /* L1 reload control */
+ 0x88, /* L1 control register */
+ 0x8c, /* L1 window horizontal position configuration */
+ 0x90, /* L1 window vertical position configuration */
+ 0x94, /* L1 color keying configuration */
+ 0x98, /* L1 pixel format configuration */
+ 0x9c, /* L1 constant alpha configuration */
+ 0xa0, /* L1 default color configuration */
+ 0xa4, /* L1 blending factors configuration */
+ 0xa8, /* L1 burst length configuration */
+ 0x00, /* not available */
+ 0xac, /* L1 color frame buffer address */
+ 0xb0, /* L1 color frame buffer length */
+ 0xb4, /* L1 color frame buffer line number */
+ 0xb8, /* L1 auxiliary frame buffer address 0 */
+ 0xbc, /* L1 auxiliary frame buffer address 1 */
+ 0xc0, /* L1 auxiliary frame buffer length */
+ 0xc4, /* L1 auxiliary frame buffer line number */
+ 0xc8, /* L1 CLUT write */
+ 0x00, /* not available */
+ 0x00, /* not available */
+ 0x00, /* not available */
+ 0x00 /* not available */
+};
+
+static const u32 ltdc_layer_regs_a2[] = {
+ 0x100, /* L1 configuration 0 */
+ 0x104, /* L1 configuration 1 */
+ 0x108, /* L1 reload control */
+ 0x10c, /* L1 control register */
+ 0x110, /* L1 window horizontal position configuration */
+ 0x114, /* L1 window vertical position configuration */
+ 0x118, /* L1 color keying configuration */
+ 0x11c, /* L1 pixel format configuration */
+ 0x120, /* L1 constant alpha configuration */
+ 0x124, /* L1 default color configuration */
+ 0x128, /* L1 blending factors configuration */
+ 0x12c, /* L1 burst length configuration */
+ 0x130, /* L1 planar configuration */
+ 0x134, /* L1 color frame buffer address */
+ 0x138, /* L1 color frame buffer length */
+ 0x13c, /* L1 color frame buffer line number */
+ 0x140, /* L1 auxiliary frame buffer address 0 */
+ 0x144, /* L1 auxiliary frame buffer address 1 */
+ 0x148, /* L1 auxiliary frame buffer length */
+ 0x14c, /* L1 auxiliary frame buffer line number */
+ 0x150, /* L1 CLUT write */
+ 0x16c, /* L1 Conversion YCbCr RGB 0 */
+ 0x170, /* L1 Conversion YCbCr RGB 1 */
+ 0x174, /* L1 Flexible Pixel Format 0 */
+ 0x178 /* L1 Flexible Pixel Format 1 */
+};
+
static const u64 ltdc_format_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
@@ -1158,7 +1262,8 @@ static int ltdc_get_caps(struct drm_device *ddev)
switch (ldev->caps.hw_version) {
case HWVER_10200:
case HWVER_10300:
- ldev->caps.reg_ofs = REG_OFS_NONE;
+ ldev->caps.layer_ofs = LAY_OFS_0;
+ ldev->caps.layer_regs = ltdc_layer_regs_a0;
ldev->caps.pix_fmt_hw = ltdc_pix_fmt_a0;
/*
* Hw older versions support non-alpha color formats derived
@@ -1174,12 +1279,21 @@ static int ltdc_get_caps(struct drm_device *ddev)
ldev->caps.nb_irq = 2;
break;
case HWVER_20101:
- ldev->caps.reg_ofs = REG_OFS_4;
+ ldev->caps.layer_ofs = LAY_OFS_0;
+ ldev->caps.layer_regs = ltdc_layer_regs_a1;
ldev->caps.pix_fmt_hw = ltdc_pix_fmt_a1;
ldev->caps.non_alpha_only_l1 = false;
ldev->caps.pad_max_freq_hz = 150000000;
ldev->caps.nb_irq = 4;
break;
+ case HWVER_40100:
+ ldev->caps.layer_ofs = LAY_OFS_1;
+ ldev->caps.layer_regs = ltdc_layer_regs_a2;
+ ldev->caps.pix_fmt_hw = ltdc_pix_fmt_a2;
+ ldev->caps.non_alpha_only_l1 = false;
+ ldev->caps.pad_max_freq_hz = 90000000;
+ ldev->caps.nb_irq = 2;
+ break;
default:
return -ENODEV;
}
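
With the per-version tables above, a layer register address is the layer-1 offset from caps.layer_regs[] plus a per-layer stride from caps.layer_ofs (LAY_OFS_0 or LAY_OFS_1). A sketch of that addressing, assuming a zero-based layer index (example_layer_reg() is a hypothetical helper, not part of this patch):

	static u32 example_layer_reg(const struct ltdc_caps *caps,
				     unsigned int reg_idx, unsigned int layer)
	{
		/* layer_regs[reg_idx] is the layer-1 address; add the stride
		 * between consecutive layers for higher layers */
		return caps->layer_regs[reg_idx] + layer * caps->layer_ofs;
	}
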
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index f153b908c70e..55a125f89af6 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -14,7 +14,8 @@
struct ltdc_caps {
u32 hw_version; /* hardware version */
u32 nb_layers; /* number of supported layers */
- u32 reg_ofs; /* register offset for applicable regs */
+ u32 layer_ofs; /* layer offset for applicable regs */
+ const u32 *layer_regs; /* layer register offset */
u32 bus_width; /* bus width (32 or 64 bits) */
const u32 *pix_fmt_hw; /* supported pixel formats */
bool non_alpha_only_l1; /* non-native no-alpha formats on layer 1 */
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig
index 8c796de53222..befc5a80222d 100644
--- a/drivers/gpu/drm/sun4i/Kconfig
+++ b/drivers/gpu/drm/sun4i/Kconfig
@@ -5,7 +5,6 @@ config DRM_SUN4I
depends on ARCH_SUNXI || COMPILE_TEST
select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_PANEL
select REGMAP_MMIO
select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 1650a448eabd..8cf5aeb9db6c 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -12,6 +12,9 @@ config DRM_TEGRA
select INTERCONNECT
select IOMMU_IOVA
select CEC_CORE if CEC_NOTIFIER
+ select SND_SIMPLE_CARD if SND_SOC_TEGRA20_SPDIF
+ select SND_SOC_HDMI_CODEC if SND_SOC_TEGRA20_SPDIF
+ select SND_AUDIO_GRAPH_CARD if SND_SOC_TEGRA20_SPDIF
help
Choose this option if you have an NVIDIA Tegra SoC.
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
index d801909182cf..df6cc986aeba 100644
--- a/drivers/gpu/drm/tegra/Makefile
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -23,7 +23,8 @@ tegra-drm-y := \
gr2d.o \
gr3d.o \
falcon.o \
- vic.o
+ vic.o \
+ nvdec.o
tegra-drm-y += trace.o
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index a29d64f87563..eb70eee8992a 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -11,9 +11,12 @@
#include <linux/interconnect.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include <soc/tegra/common.h>
#include <soc/tegra/pmc.h>
#include <drm/drm_atomic.h>
@@ -890,11 +893,9 @@ static int tegra_cursor_atomic_check(struct drm_plane *plane,
return 0;
}
-static void tegra_cursor_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
+static void __tegra_cursor_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
{
- struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
- plane);
struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
struct tegra_drm *tegra = plane->dev->dev_private;
@@ -990,6 +991,14 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane,
tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION);
}
+static void tegra_cursor_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
+
+ __tegra_cursor_atomic_update(plane, new_state);
+}
+
static void tegra_cursor_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -1009,12 +1018,78 @@ static void tegra_cursor_atomic_disable(struct drm_plane *plane,
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
}
+static int tegra_cursor_atomic_async_check(struct drm_plane *plane, struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc_state *crtc_state;
+ int min_scale, max_scale;
+ int err;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state, new_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ if (!crtc_state->active)
+ return -EINVAL;
+
+ if (plane->state->crtc != new_state->crtc ||
+ plane->state->src_w != new_state->src_w ||
+ plane->state->src_h != new_state->src_h ||
+ plane->state->crtc_w != new_state->crtc_w ||
+ plane->state->crtc_h != new_state->crtc_h ||
+ plane->state->fb != new_state->fb ||
+ plane->state->fb == NULL)
+ return -EINVAL;
+
+ min_scale = (1 << 16) / 8;
+ max_scale = (8 << 16) / 1;
+
+ err = drm_atomic_helper_check_plane_state(new_state, crtc_state, min_scale, max_scale,
+ true, true);
+ if (err < 0)
+ return err;
+
+ if (new_state->visible != plane->state->visible)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void tegra_cursor_atomic_async_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
+ struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
+
+ plane->state->src_x = new_state->src_x;
+ plane->state->src_y = new_state->src_y;
+ plane->state->crtc_x = new_state->crtc_x;
+ plane->state->crtc_y = new_state->crtc_y;
+
+ if (new_state->visible) {
+ struct tegra_plane *p = to_tegra_plane(plane);
+ u32 value;
+
+ __tegra_cursor_atomic_update(plane, new_state);
+
+ value = (WIN_A_ACT_REQ << p->index) << 8 | GENERAL_UPDATE;
+ tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+ (void)tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
+
+ value = (WIN_A_ACT_REQ << p->index) | GENERAL_ACT_REQ;
+ tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+ (void)tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
+ }
+}
+
static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = {
.prepare_fb = tegra_plane_prepare_fb,
.cleanup_fb = tegra_plane_cleanup_fb,
.atomic_check = tegra_cursor_atomic_check,
.atomic_update = tegra_cursor_atomic_update,
.atomic_disable = tegra_cursor_atomic_disable,
+ .atomic_async_check = tegra_cursor_atomic_async_check,
+ .atomic_async_update = tegra_cursor_atomic_async_update,
};
static const uint64_t linear_modifiers[] = {
@@ -1267,9 +1342,9 @@ static struct drm_plane *tegra_dc_add_planes(struct drm_device *drm,
err = PTR_ERR(planes[i]);
while (i--)
- tegra_plane_funcs.destroy(planes[i]);
+ planes[i]->funcs->destroy(planes[i]);
- tegra_plane_funcs.destroy(primary);
+ primary->funcs->destroy(primary);
return ERR_PTR(err);
}
}
@@ -1762,10 +1837,55 @@ int tegra_dc_state_setup_clock(struct tegra_dc *dc,
return 0;
}
-static void tegra_dc_commit_state(struct tegra_dc *dc,
- struct tegra_dc_state *state)
+static void tegra_dc_update_voltage_state(struct tegra_dc *dc,
+ struct tegra_dc_state *state)
+{
+ unsigned long rate, pstate;
+ struct dev_pm_opp *opp;
+ int err;
+
+ if (!dc->has_opp_table)
+ return;
+
+ /* calculate actual pixel clock rate which depends on internal divider */
+ rate = DIV_ROUND_UP(clk_get_rate(dc->clk) * 2, state->div + 2);
+
+ /* find suitable OPP for the rate */
+ opp = dev_pm_opp_find_freq_ceil(dc->dev, &rate);
+
+ /*
+ * Very high resolution modes may result in a clock rate that is
+ * above the characterized maximum. In this case it's okay to fall
+ * back to the characterized maximum.
+ */
+ if (opp == ERR_PTR(-ERANGE))
+ opp = dev_pm_opp_find_freq_floor(dc->dev, &rate);
+
+ if (IS_ERR(opp)) {
+ dev_err(dc->dev, "failed to find OPP for %luHz: %pe\n",
+ rate, opp);
+ return;
+ }
+
+ pstate = dev_pm_opp_get_required_pstate(opp, 0);
+ dev_pm_opp_put(opp);
+
+ /*
+ * The minimum core voltage depends on the pixel clock rate (which
+ * depends on the internal clock divider of the CRTC) and not on the
+ * rate of the display controller clock. This is why we're not using
+ * the dev_pm_opp_set_rate() API and are instead controlling the power
+ * domain directly.
+ */
+ err = dev_pm_genpd_set_performance_state(dc->dev, pstate);
+ if (err)
+ dev_err(dc->dev, "failed to set power domain state to %lu: %d\n",
+ pstate, err);
+}
+
+static void tegra_dc_set_clock_rate(struct tegra_dc *dc,
+ struct tegra_dc_state *state)
{
- u32 value;
int err;
err = clk_set_parent(dc->clk, state->clk);
@@ -1797,10 +1917,7 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
state->div);
DRM_DEBUG_KMS("pclk: %lu\n", state->pclk);
- if (!dc->soc->has_nvdisplay) {
- value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1;
- tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
- }
+ tegra_dc_update_voltage_state(dc, state);
}
static void tegra_dc_stop(struct tegra_dc *dc)
@@ -1991,6 +2108,13 @@ static void tegra_crtc_atomic_disable(struct drm_crtc *crtc,
err = host1x_client_suspend(&dc->client);
if (err < 0)
dev_err(dc->dev, "failed to suspend: %d\n", err);
+
+ if (dc->has_opp_table) {
+ err = dev_pm_genpd_set_performance_state(dc->dev, 0);
+ if (err)
+ dev_err(dc->dev,
+ "failed to clear power domain state: %d\n", err);
+ }
}
static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
@@ -2002,6 +2126,9 @@ static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
u32 value;
int err;
+ /* apply PLL changes */
+ tegra_dc_set_clock_rate(dc, crtc_state);
+
err = host1x_client_resume(&dc->client);
if (err < 0) {
dev_err(dc->dev, "failed to resume: %d\n", err);
@@ -2076,8 +2203,11 @@ static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
else
tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
- /* apply PLL and pixel clock changes */
- tegra_dc_commit_state(dc, crtc_state);
+ /* apply pixel clock changes */
+ if (!dc->soc->has_nvdisplay) {
+ value = SHIFT_CLK_DIVIDER(crtc_state->div) | PIXEL_CLK_DIVIDER_PCD1;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
+ }
/* program display mode */
tegra_dc_set_timings(dc, mode);
@@ -2107,6 +2237,12 @@ static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
tegra_dc_writel(dc, value, DC_COM_RG_UNDERFLOW);
}
+ if (dc->rgb) {
+ /* XXX: parameterize? */
+ value = SC0_H_QUALIFIER_NONE | SC1_H_QUALIFIER_NONE;
+ tegra_dc_writel(dc, value, DC_DISP_SHIFT_CLOCK_OPTIONS);
+ }
+
tegra_dc_commit(dc);
drm_crtc_vblank_on(crtc);
@@ -2685,6 +2821,7 @@ static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
.has_win_b_vfilter_mem_client = true,
.has_win_c_without_vert_filter = true,
.plane_tiled_memory_bandwidth_x2 = false,
+ .has_pll_d2_out0 = false,
};
static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
@@ -2707,6 +2844,7 @@ static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
.has_win_b_vfilter_mem_client = true,
.has_win_c_without_vert_filter = false,
.plane_tiled_memory_bandwidth_x2 = true,
+ .has_pll_d2_out0 = true,
};
static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
@@ -2729,6 +2867,7 @@ static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
.has_win_b_vfilter_mem_client = false,
.has_win_c_without_vert_filter = false,
.plane_tiled_memory_bandwidth_x2 = true,
+ .has_pll_d2_out0 = true,
};
static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
@@ -2751,6 +2890,7 @@ static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
.has_win_b_vfilter_mem_client = false,
.has_win_c_without_vert_filter = false,
.plane_tiled_memory_bandwidth_x2 = false,
+ .has_pll_d2_out0 = true,
};
static const struct tegra_dc_soc_info tegra210_dc_soc_info = {
@@ -2773,6 +2913,7 @@ static const struct tegra_dc_soc_info tegra210_dc_soc_info = {
.has_win_b_vfilter_mem_client = false,
.has_win_c_without_vert_filter = false,
.plane_tiled_memory_bandwidth_x2 = false,
+ .has_pll_d2_out0 = true,
};
static const struct tegra_windowgroup_soc tegra186_dc_wgrps[] = {
@@ -2823,6 +2964,7 @@ static const struct tegra_dc_soc_info tegra186_dc_soc_info = {
.wgrps = tegra186_dc_wgrps,
.num_wgrps = ARRAY_SIZE(tegra186_dc_wgrps),
.plane_tiled_memory_bandwidth_x2 = false,
+ .has_pll_d2_out0 = false,
};
static const struct tegra_windowgroup_soc tegra194_dc_wgrps[] = {
@@ -2873,6 +3015,7 @@ static const struct tegra_dc_soc_info tegra194_dc_soc_info = {
.wgrps = tegra194_dc_wgrps,
.num_wgrps = ARRAY_SIZE(tegra194_dc_wgrps),
.plane_tiled_memory_bandwidth_x2 = false,
+ .has_pll_d2_out0 = false,
};
static const struct of_device_id tegra_dc_of_match[] = {
@@ -2973,6 +3116,23 @@ static int tegra_dc_couple(struct tegra_dc *dc)
return 0;
}
+static int tegra_dc_init_opp_table(struct tegra_dc *dc)
+{
+ struct tegra_core_opp_params opp_params = {};
+ int err;
+
+ err = devm_tegra_core_dev_init_opp_table(dc->dev, &opp_params);
+ if (err && err != -ENODEV)
+ return err;
+
+ if (err)
+ dc->has_opp_table = false;
+ else
+ dc->has_opp_table = true;
+
+ return 0;
+}
+
static int tegra_dc_probe(struct platform_device *pdev)
{
u64 dma_mask = dma_get_mask(pdev->dev.parent);
@@ -3038,6 +3198,10 @@ static int tegra_dc_probe(struct platform_device *pdev)
tegra_powergate_power_off(dc->powergate);
}
+ err = tegra_dc_init_opp_table(dc);
+ if (err < 0)
+ return err;
+
dc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dc->regs))
return PTR_ERR(dc->regs);
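The display controller now records whether the device tree provides an OPP table (dc->has_opp_table) and drops the power-domain performance state to zero when the CRTC is disabled. The body of tegra_dc_update_voltage_state() is not part of the hunks above; the sketch below is purely illustrative of how such a helper could scale the domain along with the pixel clock, assuming the generic dev_pm_opp_set_rate() helper is used (the patch's actual implementation may differ):

/* Illustrative sketch only; not the helper body used by this patch. */
static void example_dc_update_voltage_state(struct tegra_dc *dc,
					    struct tegra_dc_state *state)
{
	int err;

	/* Nothing to scale on DTBs that don't carry an OPP table. */
	if (!dc->has_opp_table)
		return;

	/*
	 * dev_pm_opp_set_rate() adjusts the clock and, via the OPP table's
	 * required-opps, the performance state of the attached power domain.
	 */
	err = dev_pm_opp_set_rate(dc->dev, state->pclk);
	if (err < 0)
		dev_err(dc->dev, "failed to set OPP rate %lu: %d\n",
			state->pclk, err);
}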
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 40378308d527..3f91a10ea6c7 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -76,6 +76,7 @@ struct tegra_dc_soc_info {
bool has_win_b_vfilter_mem_client;
bool has_win_c_without_vert_filter;
bool plane_tiled_memory_bandwidth_x2;
+ bool has_pll_d2_out0;
};
struct tegra_dc {
@@ -100,6 +101,8 @@ struct tegra_dc {
struct drm_info_list *debugfs_files;
const struct tegra_dc_soc_info *soc;
+
+ bool has_opp_table;
};
static inline struct tegra_dc *
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 8d37d6b00562..e9de91a4e7e8 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -10,6 +10,7 @@
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <drm/drm_aperture.h>
#include <drm/drm_atomic.h>
@@ -21,6 +22,10 @@
#include <drm/drm_prime.h>
#include <drm/drm_vblank.h>
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+#endif
+
#include "dc.h"
#include "drm.h"
#include "gem.h"
@@ -116,6 +121,7 @@ static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
static void tegra_drm_context_free(struct tegra_drm_context *context)
{
context->client->ops->close_channel(context);
+ pm_runtime_put(context->client->base.dev);
kfree(context);
}
@@ -427,13 +433,20 @@ static int tegra_client_open(struct tegra_drm_file *fpriv,
{
int err;
+ err = pm_runtime_resume_and_get(client->base.dev);
+ if (err)
+ return err;
+
err = client->ops->open_channel(client, context);
- if (err < 0)
+ if (err < 0) {
+ pm_runtime_put(client->base.dev);
return err;
+ }
err = idr_alloc(&fpriv->legacy_contexts, context, 1, 0, GFP_KERNEL);
if (err < 0) {
client->ops->close_channel(context);
+ pm_runtime_put(client->base.dev);
return err;
}
@@ -936,6 +949,17 @@ int host1x_client_iommu_attach(struct host1x_client *client)
struct iommu_group *group = NULL;
int err;
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+ if (client->dev->archdata.mapping) {
+ struct dma_iommu_mapping *mapping =
+ to_dma_iommu_mapping(client->dev);
+ arm_iommu_detach_device(client->dev);
+ arm_iommu_release_mapping(mapping);
+
+ domain = iommu_get_domain_for_dev(client->dev);
+ }
+#endif
+
/*
* If the host1x client is already attached to an IOMMU domain that is
* not the shared IOMMU domain, don't try to attach it to a different
@@ -1344,15 +1368,18 @@ static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra210-sor", },
{ .compatible = "nvidia,tegra210-sor1", },
{ .compatible = "nvidia,tegra210-vic", },
+ { .compatible = "nvidia,tegra210-nvdec", },
{ .compatible = "nvidia,tegra186-display", },
{ .compatible = "nvidia,tegra186-dc", },
{ .compatible = "nvidia,tegra186-sor", },
{ .compatible = "nvidia,tegra186-sor1", },
{ .compatible = "nvidia,tegra186-vic", },
+ { .compatible = "nvidia,tegra186-nvdec", },
{ .compatible = "nvidia,tegra194-display", },
{ .compatible = "nvidia,tegra194-dc", },
{ .compatible = "nvidia,tegra194-sor", },
{ .compatible = "nvidia,tegra194-vic", },
+ { .compatible = "nvidia,tegra194-nvdec", },
{ /* sentinel */ }
};
@@ -1376,6 +1403,7 @@ static struct platform_driver * const drivers[] = {
&tegra_gr2d_driver,
&tegra_gr3d_driver,
&tegra_vic_driver,
+ &tegra_nvdec_driver,
};
static int __init host1x_drm_init(void)
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 8b28327c931c..fc0a19554eac 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -202,5 +202,6 @@ extern struct platform_driver tegra_sor_driver;
extern struct platform_driver tegra_gr2d_driver;
extern struct platform_driver tegra_gr3d_driver;
extern struct platform_driver tegra_vic_driver;
+extern struct platform_driver tegra_nvdec_driver;
#endif /* HOST1X_DRM_H */
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index d38fd7e12b57..fce0e52973c2 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -23,86 +23,97 @@
MODULE_IMPORT_NS(DMA_BUF);
-static void tegra_bo_put(struct host1x_bo *bo)
+static unsigned int sg_dma_count_chunks(struct scatterlist *sgl, unsigned int nents)
{
- struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+ dma_addr_t next = ~(dma_addr_t)0;
+ unsigned int count = 0, i;
+ struct scatterlist *s;
- drm_gem_object_put(&obj->gem);
-}
+ for_each_sg(sgl, s, nents, i) {
+ /* sg_dma_address(s) is only valid for entries that have sg_dma_len(s) != 0. */
+ if (!sg_dma_len(s))
+ continue;
-/* XXX move this into lib/scatterlist.c? */
-static int sg_alloc_table_from_sg(struct sg_table *sgt, struct scatterlist *sg,
- unsigned int nents, gfp_t gfp_mask)
-{
- struct scatterlist *dst;
- unsigned int i;
- int err;
+ if (sg_dma_address(s) != next) {
+ next = sg_dma_address(s) + sg_dma_len(s);
+ count++;
+ }
+ }
- err = sg_alloc_table(sgt, nents, gfp_mask);
- if (err < 0)
- return err;
+ return count;
+}
- dst = sgt->sgl;
+static inline unsigned int sgt_dma_count_chunks(struct sg_table *sgt)
+{
+ return sg_dma_count_chunks(sgt->sgl, sgt->nents);
+}
- for (i = 0; i < nents; i++) {
- sg_set_page(dst, sg_page(sg), sg->length, 0);
- dst = sg_next(dst);
- sg = sg_next(sg);
- }
+static void tegra_bo_put(struct host1x_bo *bo)
+{
+ struct tegra_bo *obj = host1x_to_tegra_bo(bo);
- return 0;
+ drm_gem_object_put(&obj->gem);
}
-static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
- dma_addr_t *phys)
+static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
+ enum dma_data_direction direction)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
- struct sg_table *sgt;
+ struct drm_gem_object *gem = &obj->gem;
+ struct host1x_bo_mapping *map;
int err;
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&map->ref);
+ map->bo = host1x_bo_get(bo);
+ map->direction = direction;
+ map->dev = dev;
+
/*
- * If we've manually mapped the buffer object through the IOMMU, make
- * sure to return the IOVA address of our mapping.
- *
- * Similarly, for buffers that have been allocated by the DMA API the
- * physical address can be used for devices that are not attached to
- * an IOMMU. For these devices, callers must pass a valid pointer via
- * the @phys argument.
- *
- * Imported buffers were also already mapped at import time, so the
- * existing mapping can be reused.
+ * Imported buffers need special treatment to satisfy the semantics of DMA-BUF.
*/
- if (phys) {
- *phys = obj->iova;
- return NULL;
+ if (gem->import_attach) {
+ struct dma_buf *buf = gem->import_attach->dmabuf;
+
+ map->attach = dma_buf_attach(buf, dev);
+ if (IS_ERR(map->attach)) {
+ err = PTR_ERR(map->attach);
+ goto free;
+ }
+
+ map->sgt = dma_buf_map_attachment(map->attach, direction);
+ if (IS_ERR(map->sgt)) {
+ dma_buf_detach(buf, map->attach);
+ err = PTR_ERR(map->sgt);
+ goto free;
+ }
+
+ err = sgt_dma_count_chunks(map->sgt);
+ map->size = gem->size;
+
+ goto out;
}
/*
* If we don't have a mapping for this buffer yet, return an SG table
* so that host1x can do the mapping for us via the DMA API.
*/
- sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
- if (!sgt)
- return ERR_PTR(-ENOMEM);
+ map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
+ if (!map->sgt) {
+ err = -ENOMEM;
+ goto free;
+ }
if (obj->pages) {
/*
* If the buffer object was allocated from the explicit IOMMU
* API code paths, construct an SG table from the pages.
*/
- err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
- 0, obj->gem.size, GFP_KERNEL);
- if (err < 0)
- goto free;
- } else if (obj->sgt) {
- /*
- * If the buffer object already has an SG table but no pages
- * were allocated for it, it means the buffer was imported and
- * the SG table needs to be copied to avoid overwriting any
- * other potential users of the original SG table.
- */
- err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl,
- obj->sgt->orig_nents, GFP_KERNEL);
+ err = sg_alloc_table_from_pages(map->sgt, obj->pages, obj->num_pages, 0, gem->size,
+ GFP_KERNEL);
if (err < 0)
goto free;
} else {
@@ -111,25 +122,53 @@ static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
* not imported, it had to be allocated with the DMA API, so
* the DMA API helper can be used.
*/
- err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
- obj->gem.size);
+ err = dma_get_sgtable(dev, map->sgt, obj->vaddr, obj->iova, gem->size);
if (err < 0)
goto free;
}
- return sgt;
+ err = dma_map_sgtable(dev, map->sgt, direction, 0);
+ if (err)
+ goto free_sgt;
+
+out:
+ /*
+ * If we've manually mapped the buffer object through the IOMMU, make sure to return the
+ * existing IOVA address of our mapping.
+ */
+ if (!obj->mm) {
+ map->phys = sg_dma_address(map->sgt->sgl);
+ map->chunks = err;
+ } else {
+ map->phys = obj->iova;
+ map->chunks = 1;
+ }
+
+ map->size = gem->size;
+ return map;
+
+free_sgt:
+ sg_free_table(map->sgt);
free:
- kfree(sgt);
+ kfree(map->sgt);
+ kfree(map);
return ERR_PTR(err);
}
-static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
+static void tegra_bo_unpin(struct host1x_bo_mapping *map)
{
- if (sgt) {
- sg_free_table(sgt);
- kfree(sgt);
+ if (map->attach) {
+ dma_buf_unmap_attachment(map->attach, map->sgt, map->direction);
+ dma_buf_detach(map->attach->dmabuf, map->attach);
+ } else {
+ dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
+ sg_free_table(map->sgt);
+ kfree(map->sgt);
}
+
+ host1x_bo_put(map->bo);
+ kfree(map);
}
static void *tegra_bo_mmap(struct host1x_bo *bo)
@@ -452,8 +491,18 @@ free:
void tegra_bo_free_object(struct drm_gem_object *gem)
{
struct tegra_drm *tegra = gem->dev->dev_private;
+ struct host1x_bo_mapping *mapping, *tmp;
struct tegra_bo *bo = to_tegra_bo(gem);
+ /* remove all mappings of this buffer object from any caches */
+ list_for_each_entry_safe(mapping, tmp, &bo->base.mappings, list) {
+ if (mapping->cache)
+ host1x_bo_unpin(mapping);
+ else
+ dev_err(gem->dev->dev, "mapping %p stale for device %s\n", mapping,
+ dev_name(mapping->dev));
+ }
+
if (tegra->domain)
tegra_bo_iommu_unmap(tegra, bo);
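The reworked pin path records how many contiguous DMA chunks a mapping spans: segments at 0x80000000 + 64 KiB and 0x80010000 + 64 KiB coalesce into a single chunk, while an additional segment at 0x90000000 would raise map->chunks to two. A minimal sketch of a client that needs one contiguous IOVA range, assuming the host1x_bo_cache type added by the accompanying host1x changes:

static int example_pin_contiguous(struct device *dev, struct host1x_bo *bo,
				  struct host1x_bo_cache *cache,
				  dma_addr_t *iova)
{
	struct host1x_bo_mapping *map;

	/* Maps for DMA (or reuses a cached mapping) and takes a reference. */
	map = host1x_bo_pin(dev, bo, DMA_TO_DEVICE, cache);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* Reject discontiguous buffers, cf. tegra_dc_pin() in plane.c. */
	if (map->chunks > 1) {
		host1x_bo_unpin(map);
		return -EINVAL;
	}

	*iova = map->phys;
	return 0;
}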
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index de288cba3905..e3bb4c99ed39 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -4,14 +4,25 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <soc/tegra/common.h>
#include "drm.h"
#include "gem.h"
#include "gr2d.h"
+enum {
+ RST_MC,
+ RST_GR2D,
+ RST_GR2D_MAX,
+};
+
struct gr2d_soc {
unsigned int version;
};
@@ -21,6 +32,9 @@ struct gr2d {
struct host1x_channel *channel;
struct clk *clk;
+ struct reset_control_bulk_data resets[RST_GR2D_MAX];
+ unsigned int nresets;
+
const struct gr2d_soc *soc;
DECLARE_BITMAP(addr_regs, GR2D_NUM_REGS);
@@ -56,15 +70,22 @@ static int gr2d_init(struct host1x_client *client)
goto free;
}
+ pm_runtime_enable(client->dev);
+ pm_runtime_use_autosuspend(client->dev);
+ pm_runtime_set_autosuspend_delay(client->dev, 200);
+
err = tegra_drm_register_client(dev->dev_private, drm);
if (err < 0) {
dev_err(client->dev, "failed to register client: %d\n", err);
- goto detach;
+ goto disable_rpm;
}
return 0;
-detach:
+disable_rpm:
+ pm_runtime_dont_use_autosuspend(client->dev);
+ pm_runtime_force_suspend(client->dev);
+
host1x_client_iommu_detach(client);
free:
host1x_syncpt_put(client->syncpts[0]);
@@ -85,10 +106,15 @@ static int gr2d_exit(struct host1x_client *client)
if (err < 0)
return err;
+ pm_runtime_dont_use_autosuspend(client->dev);
+ pm_runtime_force_suspend(client->dev);
+
host1x_client_iommu_detach(client);
host1x_syncpt_put(client->syncpts[0]);
host1x_channel_put(gr2d->channel);
+ gr2d->channel = NULL;
+
return 0;
}
@@ -190,6 +216,27 @@ static const u32 gr2d_addr_regs[] = {
GR2D_VA_BASE_ADDR_SB,
};
+static int gr2d_get_resets(struct device *dev, struct gr2d *gr2d)
+{
+ int err;
+
+ gr2d->resets[RST_MC].id = "mc";
+ gr2d->resets[RST_GR2D].id = "2d";
+ gr2d->nresets = RST_GR2D_MAX;
+
+ err = devm_reset_control_bulk_get_optional_exclusive_released(
+ dev, gr2d->nresets, gr2d->resets);
+ if (err) {
+ dev_err(dev, "failed to get reset: %d\n", err);
+ return err;
+ }
+
+ if (WARN_ON(!gr2d->resets[RST_GR2D].rstc))
+ return -ENOENT;
+
+ return 0;
+}
+
static int gr2d_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -202,6 +249,8 @@ static int gr2d_probe(struct platform_device *pdev)
if (!gr2d)
return -ENOMEM;
+ platform_set_drvdata(pdev, gr2d);
+
gr2d->soc = of_device_get_match_data(dev);
syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
@@ -214,11 +263,9 @@ static int gr2d_probe(struct platform_device *pdev)
return PTR_ERR(gr2d->clk);
}
- err = clk_prepare_enable(gr2d->clk);
- if (err) {
- dev_err(dev, "cannot turn on clock\n");
+ err = gr2d_get_resets(dev, gr2d);
+ if (err)
return err;
- }
INIT_LIST_HEAD(&gr2d->client.base.list);
gr2d->client.base.ops = &gr2d_client_ops;
@@ -231,10 +278,13 @@ static int gr2d_probe(struct platform_device *pdev)
gr2d->client.version = gr2d->soc->version;
gr2d->client.ops = &gr2d_ops;
+ err = devm_tegra_core_dev_init_opp_table_common(dev);
+ if (err)
+ return err;
+
err = host1x_client_register(&gr2d->client.base);
if (err < 0) {
dev_err(dev, "failed to register host1x client: %d\n", err);
- clk_disable_unprepare(gr2d->clk);
return err;
}
@@ -242,8 +292,6 @@ static int gr2d_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(gr2d_addr_regs); i++)
set_bit(gr2d_addr_regs[i], gr2d->addr_regs);
- platform_set_drvdata(pdev, gr2d);
-
return 0;
}
@@ -259,15 +307,100 @@ static int gr2d_remove(struct platform_device *pdev)
return err;
}
+ return 0;
+}
+
+static int __maybe_unused gr2d_runtime_suspend(struct device *dev)
+{
+ struct gr2d *gr2d = dev_get_drvdata(dev);
+ int err;
+
+ host1x_channel_stop(gr2d->channel);
+ reset_control_bulk_release(gr2d->nresets, gr2d->resets);
+
+ /*
+ * The GR2D module shouldn't be reset while the hardware is idling,
+ * otherwise host1x's cmdproc will get stuck trying to access any GR2D
+ * register after reset. The GR2D module can be either hot-reset or reset
+ * after power-gating of the HEG partition. Hence we put only the memory
+ * client part of the module into reset; the HEG GENPD will take care of
+ * resetting the GR2D module across power-gating.
+ *
+ * On Tegra20 there is no HEG partition, but it's okay to leave the
+ * hardware state undetermined since userspace is expected to reprogram
+ * it on each job submission anyway.
+ */
+ err = reset_control_acquire(gr2d->resets[RST_MC].rstc);
+ if (err) {
+ dev_err(dev, "failed to acquire MC reset: %d\n", err);
+ goto acquire_reset;
+ }
+
+ err = reset_control_assert(gr2d->resets[RST_MC].rstc);
+ reset_control_release(gr2d->resets[RST_MC].rstc);
+ if (err) {
+ dev_err(dev, "failed to assert MC reset: %d\n", err);
+ goto acquire_reset;
+ }
+
clk_disable_unprepare(gr2d->clk);
return 0;
+
+acquire_reset:
+ reset_control_bulk_acquire(gr2d->nresets, gr2d->resets);
+ reset_control_bulk_deassert(gr2d->nresets, gr2d->resets);
+
+ return err;
}
+static int __maybe_unused gr2d_runtime_resume(struct device *dev)
+{
+ struct gr2d *gr2d = dev_get_drvdata(dev);
+ int err;
+
+ err = reset_control_bulk_acquire(gr2d->nresets, gr2d->resets);
+ if (err) {
+ dev_err(dev, "failed to acquire reset: %d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(gr2d->clk);
+ if (err) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ goto release_reset;
+ }
+
+ usleep_range(2000, 4000);
+
+ /* this is a reset array that deasserts both the 2D MC and 2D itself */
+ err = reset_control_bulk_deassert(gr2d->nresets, gr2d->resets);
+ if (err) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ goto disable_clk;
+ }
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(gr2d->clk);
+release_reset:
+ reset_control_bulk_release(gr2d->nresets, gr2d->resets);
+
+ return err;
+}
+
+static const struct dev_pm_ops tegra_gr2d_pm = {
+ SET_RUNTIME_PM_OPS(gr2d_runtime_suspend, gr2d_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
struct platform_driver tegra_gr2d_driver = {
.driver = {
.name = "tegra-gr2d",
.of_match_table = gr2d_match,
+ .pm = &tegra_gr2d_pm,
},
.probe = gr2d_probe,
.remove = gr2d_remove,
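gr2d is now powered through runtime PM with a 200 ms autosuspend delay; a minimal sketch of how that pairs with the submission paths reworked in submit.c further below (the helpers here are illustrative, the real code lives in tegra_drm_ioctl_channel_submit() and release_job()):

static int example_submit(struct device *dev)
{
	int err;

	/* Powers the engine up (runtime resume) before work is queued. */
	err = pm_runtime_resume_and_get(dev);
	if (err < 0)
		return err;

	/* ... push the job to the engine ... */

	return 0;
}

static void example_job_done(struct device *dev)
{
	/* Restart the 200 ms autosuspend timer and drop the reference. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}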
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index 24442ade0da3..a1fd3113ea96 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -5,32 +5,47 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include <soc/tegra/common.h>
#include <soc/tegra/pmc.h>
#include "drm.h"
#include "gem.h"
#include "gr3d.h"
+enum {
+ RST_MC,
+ RST_GR3D,
+ RST_MC2,
+ RST_GR3D2,
+ RST_GR3D_MAX,
+};
+
struct gr3d_soc {
unsigned int version;
+ unsigned int num_clocks;
+ unsigned int num_resets;
};
struct gr3d {
struct tegra_drm_client client;
struct host1x_channel *channel;
- struct clk *clk_secondary;
- struct clk *clk;
- struct reset_control *rst_secondary;
- struct reset_control *rst;
const struct gr3d_soc *soc;
+ struct clk_bulk_data *clocks;
+ unsigned int nclocks;
+ struct reset_control_bulk_data resets[RST_GR3D_MAX];
+ unsigned int nresets;
DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS);
};
@@ -65,15 +80,22 @@ static int gr3d_init(struct host1x_client *client)
goto free;
}
+ pm_runtime_enable(client->dev);
+ pm_runtime_use_autosuspend(client->dev);
+ pm_runtime_set_autosuspend_delay(client->dev, 200);
+
err = tegra_drm_register_client(dev->dev_private, drm);
if (err < 0) {
dev_err(client->dev, "failed to register client: %d\n", err);
- goto detach;
+ goto disable_rpm;
}
return 0;
-detach:
+disable_rpm:
+ pm_runtime_dont_use_autosuspend(client->dev);
+ pm_runtime_force_suspend(client->dev);
+
host1x_client_iommu_detach(client);
free:
host1x_syncpt_put(client->syncpts[0]);
@@ -93,10 +115,15 @@ static int gr3d_exit(struct host1x_client *client)
if (err < 0)
return err;
+ pm_runtime_dont_use_autosuspend(client->dev);
+ pm_runtime_force_suspend(client->dev);
+
host1x_client_iommu_detach(client);
host1x_syncpt_put(client->syncpts[0]);
host1x_channel_put(gr3d->channel);
+ gr3d->channel = NULL;
+
return 0;
}
@@ -155,14 +182,20 @@ static const struct tegra_drm_client_ops gr3d_ops = {
static const struct gr3d_soc tegra20_gr3d_soc = {
.version = 0x20,
+ .num_clocks = 1,
+ .num_resets = 2,
};
static const struct gr3d_soc tegra30_gr3d_soc = {
.version = 0x30,
+ .num_clocks = 2,
+ .num_resets = 4,
};
static const struct gr3d_soc tegra114_gr3d_soc = {
.version = 0x35,
+ .num_clocks = 1,
+ .num_resets = 2,
};
static const struct of_device_id tegra_gr3d_match[] = {
@@ -278,69 +311,216 @@ static const u32 gr3d_addr_regs[] = {
GR3D_GLOBAL_SAMP23SURFADDR(15),
};
-static int gr3d_probe(struct platform_device *pdev)
+static int gr3d_power_up_legacy_domain(struct device *dev, const char *name,
+ unsigned int id)
{
- struct device_node *np = pdev->dev.of_node;
- struct host1x_syncpt **syncpts;
- struct gr3d *gr3d;
+ struct gr3d *gr3d = dev_get_drvdata(dev);
+ struct reset_control *reset;
+ struct clk *clk;
unsigned int i;
int err;
- gr3d = devm_kzalloc(&pdev->dev, sizeof(*gr3d), GFP_KERNEL);
- if (!gr3d)
- return -ENOMEM;
-
- gr3d->soc = of_device_get_match_data(&pdev->dev);
+ /*
+ * The Tegra20 device-tree doesn't specify a name for the 3d clock, and
+ * there is only one clock on Tegra20. Tegra30+ device-trees always
+ * specify names for the clocks.
+ */
+ if (gr3d->nclocks == 1) {
+ if (id == TEGRA_POWERGATE_3D1)
+ return 0;
+
+ clk = gr3d->clocks[0].clk;
+ } else {
+ for (i = 0; i < gr3d->nclocks; i++) {
+ if (WARN_ON(!gr3d->clocks[i].id))
+ continue;
+
+ if (!strcmp(gr3d->clocks[i].id, name)) {
+ clk = gr3d->clocks[i].clk;
+ break;
+ }
+ }
- syncpts = devm_kzalloc(&pdev->dev, sizeof(*syncpts), GFP_KERNEL);
- if (!syncpts)
- return -ENOMEM;
+ if (WARN_ON(i == gr3d->nclocks))
+ return -EINVAL;
+ }
- gr3d->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(gr3d->clk)) {
- dev_err(&pdev->dev, "cannot get clock\n");
- return PTR_ERR(gr3d->clk);
+ /*
+ * We use an array of resets, which includes the MC resets, and the MC
+ * reset shouldn't be asserted while the hardware is gated because MC
+ * flushing will fail for gated hardware. Hence, for the legacy PD, we
+ * request the individual reset separately.
+ */
+ reset = reset_control_get_exclusive_released(dev, name);
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
+
+ err = reset_control_acquire(reset);
+ if (err) {
+ dev_err(dev, "failed to acquire %s reset: %d\n", name, err);
+ } else {
+ err = tegra_powergate_sequence_power_up(id, clk, reset);
+ reset_control_release(reset);
}
- gr3d->rst = devm_reset_control_get(&pdev->dev, "3d");
- if (IS_ERR(gr3d->rst)) {
- dev_err(&pdev->dev, "cannot get reset\n");
- return PTR_ERR(gr3d->rst);
+ reset_control_put(reset);
+ if (err)
+ return err;
+
+ /*
+ * tegra_powergate_sequence_power_up() leaves the clock enabled, while
+ * GENPD does not. Hence keep the clock-enable count balanced.
+ */
+ clk_disable_unprepare(clk);
+
+ return 0;
+}
+
+static void gr3d_del_link(void *link)
+{
+ device_link_del(link);
+}
+
+static int gr3d_init_power(struct device *dev, struct gr3d *gr3d)
+{
+ static const char * const opp_genpd_names[] = { "3d0", "3d1", NULL };
+ const u32 link_flags = DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME;
+ struct device **opp_virt_devs, *pd_dev;
+ struct device_link *link;
+ unsigned int i;
+ int err;
+
+ err = of_count_phandle_with_args(dev->of_node, "power-domains",
+ "#power-domain-cells");
+ if (err < 0) {
+ if (err != -ENOENT)
+ return err;
+
+ /*
+ * Older device-trees don't use GENPD. In this case we need to toggle
+ * the power domains manually.
+ */
+ err = gr3d_power_up_legacy_domain(dev, "3d",
+ TEGRA_POWERGATE_3D);
+ if (err)
+ return err;
+
+ err = gr3d_power_up_legacy_domain(dev, "3d2",
+ TEGRA_POWERGATE_3D1);
+ if (err)
+ return err;
+
+ return 0;
}
- if (of_device_is_compatible(np, "nvidia,tegra30-gr3d")) {
- gr3d->clk_secondary = devm_clk_get(&pdev->dev, "3d2");
- if (IS_ERR(gr3d->clk_secondary)) {
- dev_err(&pdev->dev, "cannot get secondary clock\n");
- return PTR_ERR(gr3d->clk_secondary);
+ /*
+ * The PM domain core automatically attaches the device only when a
+ * single power domain is specified; otherwise it skips attaching
+ * completely. We have a single domain on Tegra20 and two domains on
+ * Tegra30+.
+ */
+ if (dev->pm_domain)
+ return 0;
+
+ err = devm_pm_opp_attach_genpd(dev, opp_genpd_names, &opp_virt_devs);
+ if (err)
+ return err;
+
+ for (i = 0; opp_genpd_names[i]; i++) {
+ pd_dev = opp_virt_devs[i];
+ if (!pd_dev) {
+ dev_err(dev, "failed to get %s power domain\n",
+ opp_genpd_names[i]);
+ return -EINVAL;
}
- gr3d->rst_secondary = devm_reset_control_get(&pdev->dev,
- "3d2");
- if (IS_ERR(gr3d->rst_secondary)) {
- dev_err(&pdev->dev, "cannot get secondary reset\n");
- return PTR_ERR(gr3d->rst_secondary);
+ link = device_link_add(dev, pd_dev, link_flags);
+ if (!link) {
+ dev_err(dev, "failed to link to %s\n", dev_name(pd_dev));
+ return -EINVAL;
}
+
+ err = devm_add_action_or_reset(dev, gr3d_del_link, link);
+ if (err)
+ return err;
}
- err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D, gr3d->clk,
- gr3d->rst);
+ return 0;
+}
+
+static int gr3d_get_clocks(struct device *dev, struct gr3d *gr3d)
+{
+ int err;
+
+ err = devm_clk_bulk_get_all(dev, &gr3d->clocks);
if (err < 0) {
- dev_err(&pdev->dev, "failed to power up 3D unit\n");
+ dev_err(dev, "failed to get clock: %d\n", err);
return err;
}
+ gr3d->nclocks = err;
- if (gr3d->clk_secondary) {
- err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D1,
- gr3d->clk_secondary,
- gr3d->rst_secondary);
- if (err < 0) {
- dev_err(&pdev->dev,
- "failed to power up secondary 3D unit\n");
- return err;
- }
+ if (gr3d->nclocks != gr3d->soc->num_clocks) {
+ dev_err(dev, "invalid number of clocks: %u\n", gr3d->nclocks);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static int gr3d_get_resets(struct device *dev, struct gr3d *gr3d)
+{
+ int err;
+
+ gr3d->resets[RST_MC].id = "mc";
+ gr3d->resets[RST_MC2].id = "mc2";
+ gr3d->resets[RST_GR3D].id = "3d";
+ gr3d->resets[RST_GR3D2].id = "3d2";
+ gr3d->nresets = gr3d->soc->num_resets;
+
+ err = devm_reset_control_bulk_get_optional_exclusive_released(
+ dev, gr3d->nresets, gr3d->resets);
+ if (err) {
+ dev_err(dev, "failed to get reset: %d\n", err);
+ return err;
}
+ if (WARN_ON(!gr3d->resets[RST_GR3D].rstc) ||
+ WARN_ON(!gr3d->resets[RST_GR3D2].rstc && gr3d->nresets == 4))
+ return -ENOENT;
+
+ return 0;
+}
+
+static int gr3d_probe(struct platform_device *pdev)
+{
+ struct host1x_syncpt **syncpts;
+ struct gr3d *gr3d;
+ unsigned int i;
+ int err;
+
+ gr3d = devm_kzalloc(&pdev->dev, sizeof(*gr3d), GFP_KERNEL);
+ if (!gr3d)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, gr3d);
+
+ gr3d->soc = of_device_get_match_data(&pdev->dev);
+
+ syncpts = devm_kzalloc(&pdev->dev, sizeof(*syncpts), GFP_KERNEL);
+ if (!syncpts)
+ return -ENOMEM;
+
+ err = gr3d_get_clocks(&pdev->dev, gr3d);
+ if (err)
+ return err;
+
+ err = gr3d_get_resets(&pdev->dev, gr3d);
+ if (err)
+ return err;
+
+ err = gr3d_init_power(&pdev->dev, gr3d);
+ if (err)
+ return err;
+
INIT_LIST_HEAD(&gr3d->client.base.list);
gr3d->client.base.ops = &gr3d_client_ops;
gr3d->client.base.dev = &pdev->dev;
@@ -352,6 +532,10 @@ static int gr3d_probe(struct platform_device *pdev)
gr3d->client.version = gr3d->soc->version;
gr3d->client.ops = &gr3d_ops;
+ err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+ if (err)
+ return err;
+
err = host1x_client_register(&gr3d->client.base);
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
@@ -363,8 +547,6 @@ static int gr3d_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(gr3d_addr_regs); i++)
set_bit(gr3d_addr_regs[i], gr3d->addr_regs);
- platform_set_drvdata(pdev, gr3d);
-
return 0;
}
@@ -380,23 +562,80 @@ static int gr3d_remove(struct platform_device *pdev)
return err;
}
- if (gr3d->clk_secondary) {
- reset_control_assert(gr3d->rst_secondary);
- tegra_powergate_power_off(TEGRA_POWERGATE_3D1);
- clk_disable_unprepare(gr3d->clk_secondary);
+ return 0;
+}
+
+static int __maybe_unused gr3d_runtime_suspend(struct device *dev)
+{
+ struct gr3d *gr3d = dev_get_drvdata(dev);
+ int err;
+
+ host1x_channel_stop(gr3d->channel);
+
+ err = reset_control_bulk_assert(gr3d->nresets, gr3d->resets);
+ if (err) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+
+ usleep_range(10, 20);
+
+ /*
+ * Older device-trees don't specify MC resets and power-gating can't
+ * be done safely in that case. Hence we will keep the power ungated
+ * for older DTBs. For newer DTBs, GENPD will perform the power-gating.
+ */
+
+ clk_bulk_disable_unprepare(gr3d->nclocks, gr3d->clocks);
+ reset_control_bulk_release(gr3d->nresets, gr3d->resets);
+
+ return 0;
+}
+
+static int __maybe_unused gr3d_runtime_resume(struct device *dev)
+{
+ struct gr3d *gr3d = dev_get_drvdata(dev);
+ int err;
+
+ err = reset_control_bulk_acquire(gr3d->nresets, gr3d->resets);
+ if (err) {
+ dev_err(dev, "failed to acquire reset: %d\n", err);
+ return err;
+ }
+
+ err = clk_bulk_prepare_enable(gr3d->nclocks, gr3d->clocks);
+ if (err) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ goto release_reset;
}
- reset_control_assert(gr3d->rst);
- tegra_powergate_power_off(TEGRA_POWERGATE_3D);
- clk_disable_unprepare(gr3d->clk);
+ err = reset_control_bulk_deassert(gr3d->nresets, gr3d->resets);
+ if (err) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ goto disable_clk;
+ }
return 0;
+
+disable_clk:
+ clk_bulk_disable_unprepare(gr3d->nclocks, gr3d->clocks);
+release_reset:
+ reset_control_bulk_release(gr3d->nresets, gr3d->resets);
+
+ return err;
}
+static const struct dev_pm_ops tegra_gr3d_pm = {
+ SET_RUNTIME_PM_OPS(gr3d_runtime_suspend, gr3d_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
struct platform_driver tegra_gr3d_driver = {
.driver = {
.name = "tegra-gr3d",
.of_match_table = tegra_gr3d_match,
+ .pm = &tegra_gr3d_pm,
},
.probe = gr3d_probe,
.remove = gr3d_remove,
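On older device trees gr3d still powers the 3D partitions up via the PMC; a minimal sketch of the clock-enable balancing that gr3d_power_up_legacy_domain() performs, assuming the reset line has already been acquired by the caller:

static int example_legacy_power_up(struct device *dev, unsigned int id,
				   struct clk *clk, struct reset_control *rst)
{
	int err;

	/* Ungates the partition; returns with the clock prepared and enabled. */
	err = tegra_powergate_sequence_power_up(id, clk, rst);
	if (err)
		return err;

	/*
	 * The runtime PM callbacks enable the clocks themselves, so drop the
	 * reference taken by the power-up sequence to keep the prepare/enable
	 * counts balanced.
	 */
	clk_disable_unprepare(clk);

	return 0;
}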
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index e5d2a4026028..8845af5d325f 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -11,10 +11,14 @@
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
+#include <soc/tegra/common.h>
+#include <sound/hdmi-codec.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_debugfs.h>
@@ -78,6 +82,9 @@ struct tegra_hdmi {
bool dvi;
struct drm_info_list *debugfs_files;
+
+ struct platform_device *audio_pdev;
+ struct mutex audio_lock;
};
static inline struct tegra_hdmi *
@@ -360,6 +367,18 @@ static const struct tmds_config tegra124_tmds_config[] = {
},
};
+static void tegra_hdmi_audio_lock(struct tegra_hdmi *hdmi)
+{
+ mutex_lock(&hdmi->audio_lock);
+ disable_irq(hdmi->irq);
+}
+
+static void tegra_hdmi_audio_unlock(struct tegra_hdmi *hdmi)
+{
+ enable_irq(hdmi->irq);
+ mutex_unlock(&hdmi->audio_lock);
+}
+
static int
tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pix_clock,
struct tegra_hdmi_audio_config *config)
@@ -829,6 +848,23 @@ static void tegra_hdmi_setup_tmds(struct tegra_hdmi *hdmi,
HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT);
}
+static int tegra_hdmi_reconfigure_audio(struct tegra_hdmi *hdmi)
+{
+ int err;
+
+ err = tegra_hdmi_setup_audio(hdmi);
+ if (err < 0) {
+ tegra_hdmi_disable_audio_infoframe(hdmi);
+ tegra_hdmi_disable_audio(hdmi);
+ } else {
+ tegra_hdmi_setup_audio_infoframe(hdmi);
+ tegra_hdmi_enable_audio_infoframe(hdmi);
+ tegra_hdmi_enable_audio(hdmi);
+ }
+
+ return err;
+}
+
static bool tegra_output_is_hdmi(struct tegra_output *output)
{
struct edid *edid;
@@ -1135,6 +1171,8 @@ static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
u32 value;
int err;
+ tegra_hdmi_audio_lock(hdmi);
+
/*
* The following accesses registers of the display controller, so make
* sure it's only executed when the output is attached to one.
@@ -1159,6 +1197,10 @@ static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_ENABLE);
tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_MASK);
+ hdmi->pixel_clock = 0;
+
+ tegra_hdmi_audio_unlock(hdmi);
+
err = host1x_client_suspend(&hdmi->client);
if (err < 0)
dev_err(hdmi->dev, "failed to suspend: %d\n", err);
@@ -1182,6 +1224,8 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
return;
}
+ tegra_hdmi_audio_lock(hdmi);
+
/*
* Enable and unmask the HDA codec SCRATCH0 register interrupt. This
* is used for interoperability between the HDA codec driver and the
@@ -1195,7 +1239,7 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
h_back_porch = mode->htotal - mode->hsync_end;
h_front_porch = mode->hsync_start - mode->hdisplay;
- err = clk_set_rate(hdmi->clk, hdmi->pixel_clock);
+ err = dev_pm_opp_set_rate(hdmi->dev, hdmi->pixel_clock);
if (err < 0) {
dev_err(hdmi->dev, "failed to set HDMI clock frequency: %d\n",
err);
@@ -1387,6 +1431,8 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
}
/* TODO: add HDCP support */
+
+ tegra_hdmi_audio_unlock(hdmi);
}
static int
@@ -1416,6 +1462,91 @@ static const struct drm_encoder_helper_funcs tegra_hdmi_encoder_helper_funcs = {
.atomic_check = tegra_hdmi_encoder_atomic_check,
};
+static int tegra_hdmi_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+ struct tegra_hdmi *hdmi = data;
+ int ret = 0;
+
+ tegra_hdmi_audio_lock(hdmi);
+
+ hdmi->format.sample_rate = hparms->sample_rate;
+ hdmi->format.channels = hparms->channels;
+
+ if (hdmi->pixel_clock && !hdmi->dvi)
+ ret = tegra_hdmi_reconfigure_audio(hdmi);
+
+ tegra_hdmi_audio_unlock(hdmi);
+
+ return ret;
+}
+
+static int tegra_hdmi_audio_startup(struct device *dev, void *data)
+{
+ struct tegra_hdmi *hdmi = data;
+ int ret;
+
+ ret = host1x_client_resume(&hdmi->client);
+ if (ret < 0)
+ dev_err(hdmi->dev, "failed to resume: %d\n", ret);
+
+ return ret;
+}
+
+static void tegra_hdmi_audio_shutdown(struct device *dev, void *data)
+{
+ struct tegra_hdmi *hdmi = data;
+ int ret;
+
+ tegra_hdmi_audio_lock(hdmi);
+
+ hdmi->format.sample_rate = 0;
+ hdmi->format.channels = 0;
+
+ tegra_hdmi_audio_unlock(hdmi);
+
+ ret = host1x_client_suspend(&hdmi->client);
+ if (ret < 0)
+ dev_err(hdmi->dev, "failed to suspend: %d\n", ret);
+}
+
+static const struct hdmi_codec_ops tegra_hdmi_codec_ops = {
+ .hw_params = tegra_hdmi_hw_params,
+ .audio_startup = tegra_hdmi_audio_startup,
+ .audio_shutdown = tegra_hdmi_audio_shutdown,
+};
+
+static int tegra_hdmi_codec_register(struct tegra_hdmi *hdmi)
+{
+ struct hdmi_codec_pdata codec_data = {};
+
+ if (hdmi->config->has_hda)
+ return 0;
+
+ codec_data.ops = &tegra_hdmi_codec_ops;
+ codec_data.data = hdmi;
+ codec_data.spdif = 1;
+
+ hdmi->audio_pdev = platform_device_register_data(hdmi->dev,
+ HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO,
+ &codec_data,
+ sizeof(codec_data));
+ if (IS_ERR(hdmi->audio_pdev))
+ return PTR_ERR(hdmi->audio_pdev);
+
+ hdmi->format.channels = 2;
+
+ return 0;
+}
+
+static void tegra_hdmi_codec_unregister(struct tegra_hdmi *hdmi)
+{
+ if (hdmi->audio_pdev)
+ platform_device_unregister(hdmi->audio_pdev);
+}
+
static int tegra_hdmi_init(struct host1x_client *client)
{
struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
@@ -1453,28 +1584,47 @@ static int tegra_hdmi_init(struct host1x_client *client)
if (err < 0) {
dev_err(client->dev, "failed to enable HDMI regulator: %d\n",
err);
- return err;
+ goto output_exit;
}
err = regulator_enable(hdmi->pll);
if (err < 0) {
dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err);
- return err;
+ goto disable_hdmi;
}
err = regulator_enable(hdmi->vdd);
if (err < 0) {
dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err);
- return err;
+ goto disable_pll;
+ }
+
+ err = tegra_hdmi_codec_register(hdmi);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to register audio codec: %d\n", err);
+ goto disable_vdd;
}
return 0;
+
+disable_vdd:
+ regulator_disable(hdmi->vdd);
+disable_pll:
+ regulator_disable(hdmi->pll);
+disable_hdmi:
+ regulator_disable(hdmi->hdmi);
+output_exit:
+ tegra_output_exit(&hdmi->output);
+
+ return err;
}
static int tegra_hdmi_exit(struct host1x_client *client)
{
struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+ tegra_hdmi_codec_unregister(hdmi);
+
tegra_output_exit(&hdmi->output);
regulator_disable(hdmi->vdd);
@@ -1599,7 +1749,6 @@ static irqreturn_t tegra_hdmi_irq(int irq, void *data)
{
struct tegra_hdmi *hdmi = data;
u32 value;
- int err;
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_INT_STATUS);
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_INT_STATUS);
@@ -1614,16 +1763,7 @@ static irqreturn_t tegra_hdmi_irq(int irq, void *data)
format = value & SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK;
tegra_hda_parse_format(format, &hdmi->format);
-
- err = tegra_hdmi_setup_audio(hdmi);
- if (err < 0) {
- tegra_hdmi_disable_audio_infoframe(hdmi);
- tegra_hdmi_disable_audio(hdmi);
- } else {
- tegra_hdmi_setup_audio_infoframe(hdmi);
- tegra_hdmi_enable_audio_infoframe(hdmi);
- tegra_hdmi_enable_audio(hdmi);
- }
+ tegra_hdmi_reconfigure_audio(hdmi);
} else {
tegra_hdmi_disable_audio_infoframe(hdmi);
tegra_hdmi_disable_audio(hdmi);
@@ -1651,6 +1791,8 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
hdmi->stereo = false;
hdmi->dvi = false;
+ mutex_init(&hdmi->audio_lock);
+
hdmi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(hdmi->clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
@@ -1732,7 +1874,14 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, hdmi);
- pm_runtime_enable(&pdev->dev);
+
+ err = devm_pm_runtime_enable(&pdev->dev);
+ if (err)
+ return err;
+
+ err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+ if (err)
+ return err;
INIT_LIST_HEAD(&hdmi->client.list);
hdmi->client.ops = &hdmi_client_ops;
@@ -1753,8 +1902,6 @@ static int tegra_hdmi_remove(struct platform_device *pdev)
struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
int err;
- pm_runtime_disable(&pdev->dev);
-
err = host1x_client_unregister(&hdmi->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
diff --git a/drivers/gpu/drm/tegra/hub.h b/drivers/gpu/drm/tegra/hub.h
index 3efa1be07ff8..23c4b2115ed1 100644
--- a/drivers/gpu/drm/tegra/hub.h
+++ b/drivers/gpu/drm/tegra/hub.h
@@ -72,7 +72,6 @@ to_tegra_display_hub_state(struct drm_private_state *priv)
return container_of(priv, struct tegra_display_hub_state, base);
}
-struct tegra_dc;
struct tegra_plane;
int tegra_display_hub_prepare(struct tegra_display_hub *hub);
diff --git a/drivers/gpu/drm/tegra/nvdec.c b/drivers/gpu/drm/tegra/nvdec.c
new file mode 100644
index 000000000000..79e1e88203cf
--- /dev/null
+++ b/drivers/gpu/drm/tegra/nvdec.c
@@ -0,0 +1,466 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2021, NVIDIA Corporation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/host1x.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <soc/tegra/pmc.h>
+
+#include "drm.h"
+#include "falcon.h"
+#include "vic.h"
+
+struct nvdec_config {
+ const char *firmware;
+ unsigned int version;
+ bool supports_sid;
+};
+
+struct nvdec {
+ struct falcon falcon;
+
+ void __iomem *regs;
+ struct tegra_drm_client client;
+ struct host1x_channel *channel;
+ struct device *dev;
+ struct clk *clk;
+
+ /* Platform configuration */
+ const struct nvdec_config *config;
+};
+
+static inline struct nvdec *to_nvdec(struct tegra_drm_client *client)
+{
+ return container_of(client, struct nvdec, client);
+}
+
+static inline void nvdec_writel(struct nvdec *nvdec, u32 value,
+ unsigned int offset)
+{
+ writel(value, nvdec->regs + offset);
+}
+
+static int nvdec_boot(struct nvdec *nvdec)
+{
+#ifdef CONFIG_IOMMU_API
+ struct iommu_fwspec *spec = dev_iommu_fwspec_get(nvdec->dev);
+#endif
+ int err;
+
+#ifdef CONFIG_IOMMU_API
+ if (nvdec->config->supports_sid && spec) {
+ u32 value;
+
+ value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) | TRANSCFG_ATT(0, TRANSCFG_SID_HW);
+ nvdec_writel(nvdec, value, VIC_TFBIF_TRANSCFG);
+
+ if (spec->num_ids > 0) {
+ value = spec->ids[0] & 0xffff;
+
+ nvdec_writel(nvdec, value, VIC_THI_STREAMID0);
+ nvdec_writel(nvdec, value, VIC_THI_STREAMID1);
+ }
+ }
+#endif
+
+ err = falcon_boot(&nvdec->falcon);
+ if (err < 0)
+ return err;
+
+ err = falcon_wait_idle(&nvdec->falcon);
+ if (err < 0) {
+ dev_err(nvdec->dev, "falcon boot timed out\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int nvdec_init(struct host1x_client *client)
+{
+ struct tegra_drm_client *drm = host1x_to_drm_client(client);
+ struct drm_device *dev = dev_get_drvdata(client->host);
+ struct tegra_drm *tegra = dev->dev_private;
+ struct nvdec *nvdec = to_nvdec(drm);
+ int err;
+
+ err = host1x_client_iommu_attach(client);
+ if (err < 0 && err != -ENODEV) {
+ dev_err(nvdec->dev, "failed to attach to domain: %d\n", err);
+ return err;
+ }
+
+ nvdec->channel = host1x_channel_request(client);
+ if (!nvdec->channel) {
+ err = -ENOMEM;
+ goto detach;
+ }
+
+ client->syncpts[0] = host1x_syncpt_request(client, 0);
+ if (!client->syncpts[0]) {
+ err = -ENOMEM;
+ goto free_channel;
+ }
+
+ pm_runtime_enable(client->dev);
+ pm_runtime_use_autosuspend(client->dev);
+ pm_runtime_set_autosuspend_delay(client->dev, 500);
+
+ err = tegra_drm_register_client(tegra, drm);
+ if (err < 0)
+ goto disable_rpm;
+
+ /*
+ * Inherit the DMA parameters (such as maximum segment size) from the
+ * parent host1x device.
+ */
+ client->dev->dma_parms = client->host->dma_parms;
+
+ return 0;
+
+disable_rpm:
+ pm_runtime_dont_use_autosuspend(client->dev);
+ pm_runtime_force_suspend(client->dev);
+
+ host1x_syncpt_put(client->syncpts[0]);
+free_channel:
+ host1x_channel_put(nvdec->channel);
+detach:
+ host1x_client_iommu_detach(client);
+
+ return err;
+}
+
+static int nvdec_exit(struct host1x_client *client)
+{
+ struct tegra_drm_client *drm = host1x_to_drm_client(client);
+ struct drm_device *dev = dev_get_drvdata(client->host);
+ struct tegra_drm *tegra = dev->dev_private;
+ struct nvdec *nvdec = to_nvdec(drm);
+ int err;
+
+ /* avoid a dangling pointer just in case this disappears */
+ client->dev->dma_parms = NULL;
+
+ err = tegra_drm_unregister_client(tegra, drm);
+ if (err < 0)
+ return err;
+
+ pm_runtime_dont_use_autosuspend(client->dev);
+ pm_runtime_force_suspend(client->dev);
+
+ host1x_syncpt_put(client->syncpts[0]);
+ host1x_channel_put(nvdec->channel);
+ host1x_client_iommu_detach(client);
+
+ nvdec->channel = NULL;
+
+ if (client->group) {
+ dma_unmap_single(nvdec->dev, nvdec->falcon.firmware.phys,
+ nvdec->falcon.firmware.size, DMA_TO_DEVICE);
+ tegra_drm_free(tegra, nvdec->falcon.firmware.size,
+ nvdec->falcon.firmware.virt,
+ nvdec->falcon.firmware.iova);
+ } else {
+ dma_free_coherent(nvdec->dev, nvdec->falcon.firmware.size,
+ nvdec->falcon.firmware.virt,
+ nvdec->falcon.firmware.iova);
+ }
+
+ return 0;
+}
+
+static const struct host1x_client_ops nvdec_client_ops = {
+ .init = nvdec_init,
+ .exit = nvdec_exit,
+};
+
+static int nvdec_load_firmware(struct nvdec *nvdec)
+{
+ struct host1x_client *client = &nvdec->client.base;
+ struct tegra_drm *tegra = nvdec->client.drm;
+ dma_addr_t iova;
+ size_t size;
+ void *virt;
+ int err;
+
+ if (nvdec->falcon.firmware.virt)
+ return 0;
+
+ err = falcon_read_firmware(&nvdec->falcon, nvdec->config->firmware);
+ if (err < 0)
+ return err;
+
+ size = nvdec->falcon.firmware.size;
+
+ if (!client->group) {
+ virt = dma_alloc_coherent(nvdec->dev, size, &iova, GFP_KERNEL);
+
+ err = dma_mapping_error(nvdec->dev, iova);
+ if (err < 0)
+ return err;
+ } else {
+ virt = tegra_drm_alloc(tegra, size, &iova);
+ }
+
+ nvdec->falcon.firmware.virt = virt;
+ nvdec->falcon.firmware.iova = iova;
+
+ err = falcon_load_firmware(&nvdec->falcon);
+ if (err < 0)
+ goto cleanup;
+
+ /*
+ * In this case we have received an IOVA from the shared domain, so we
+ * need to make sure to get the physical address so that the DMA API
+ * knows what memory pages to flush the cache for.
+ */
+ if (client->group) {
+ dma_addr_t phys;
+
+ phys = dma_map_single(nvdec->dev, virt, size, DMA_TO_DEVICE);
+
+ err = dma_mapping_error(nvdec->dev, phys);
+ if (err < 0)
+ goto cleanup;
+
+ nvdec->falcon.firmware.phys = phys;
+ }
+
+ return 0;
+
+cleanup:
+ if (!client->group)
+ dma_free_coherent(nvdec->dev, size, virt, iova);
+ else
+ tegra_drm_free(tegra, size, virt, iova);
+
+ return err;
+}
+
+
+static __maybe_unused int nvdec_runtime_resume(struct device *dev)
+{
+ struct nvdec *nvdec = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(nvdec->clk);
+ if (err < 0)
+ return err;
+
+ usleep_range(10, 20);
+
+ err = nvdec_load_firmware(nvdec);
+ if (err < 0)
+ goto disable;
+
+ err = nvdec_boot(nvdec);
+ if (err < 0)
+ goto disable;
+
+ return 0;
+
+disable:
+ clk_disable_unprepare(nvdec->clk);
+ return err;
+}
+
+static __maybe_unused int nvdec_runtime_suspend(struct device *dev)
+{
+ struct nvdec *nvdec = dev_get_drvdata(dev);
+
+ host1x_channel_stop(nvdec->channel);
+
+ clk_disable_unprepare(nvdec->clk);
+
+ return 0;
+}
+
+static int nvdec_open_channel(struct tegra_drm_client *client,
+ struct tegra_drm_context *context)
+{
+ struct nvdec *nvdec = to_nvdec(client);
+
+ context->channel = host1x_channel_get(nvdec->channel);
+ if (!context->channel)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void nvdec_close_channel(struct tegra_drm_context *context)
+{
+ host1x_channel_put(context->channel);
+}
+
+static const struct tegra_drm_client_ops nvdec_ops = {
+ .open_channel = nvdec_open_channel,
+ .close_channel = nvdec_close_channel,
+ .submit = tegra_drm_submit,
+};
+
+#define NVIDIA_TEGRA_210_NVDEC_FIRMWARE "nvidia/tegra210/nvdec.bin"
+
+static const struct nvdec_config nvdec_t210_config = {
+ .firmware = NVIDIA_TEGRA_210_NVDEC_FIRMWARE,
+ .version = 0x21,
+ .supports_sid = false,
+};
+
+#define NVIDIA_TEGRA_186_NVDEC_FIRMWARE "nvidia/tegra186/nvdec.bin"
+
+static const struct nvdec_config nvdec_t186_config = {
+ .firmware = NVIDIA_TEGRA_186_NVDEC_FIRMWARE,
+ .version = 0x18,
+ .supports_sid = true,
+};
+
+#define NVIDIA_TEGRA_194_NVDEC_FIRMWARE "nvidia/tegra194/nvdec.bin"
+
+static const struct nvdec_config nvdec_t194_config = {
+ .firmware = NVIDIA_TEGRA_194_NVDEC_FIRMWARE,
+ .version = 0x19,
+ .supports_sid = true,
+};
+
+static const struct of_device_id tegra_nvdec_of_match[] = {
+ { .compatible = "nvidia,tegra210-nvdec", .data = &nvdec_t210_config },
+ { .compatible = "nvidia,tegra186-nvdec", .data = &nvdec_t186_config },
+ { .compatible = "nvidia,tegra194-nvdec", .data = &nvdec_t194_config },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_nvdec_of_match);
+
+static int nvdec_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct host1x_syncpt **syncpts;
+ struct nvdec *nvdec;
+ u32 host_class;
+ int err;
+
+ /* inherit DMA mask from host1x parent */
+ err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
+ return err;
+ }
+
+ nvdec = devm_kzalloc(dev, sizeof(*nvdec), GFP_KERNEL);
+ if (!nvdec)
+ return -ENOMEM;
+
+ nvdec->config = of_device_get_match_data(dev);
+
+ syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
+ if (!syncpts)
+ return -ENOMEM;
+
+ nvdec->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(nvdec->regs))
+ return PTR_ERR(nvdec->regs);
+
+ nvdec->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(nvdec->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(nvdec->clk);
+ }
+
+ err = clk_set_rate(nvdec->clk, ULONG_MAX);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set clock rate\n");
+ return err;
+ }
+
+ err = of_property_read_u32(dev->of_node, "nvidia,host1x-class", &host_class);
+ if (err < 0)
+ host_class = HOST1X_CLASS_NVDEC;
+
+ nvdec->falcon.dev = dev;
+ nvdec->falcon.regs = nvdec->regs;
+
+ err = falcon_init(&nvdec->falcon);
+ if (err < 0)
+ return err;
+
+ platform_set_drvdata(pdev, nvdec);
+
+ INIT_LIST_HEAD(&nvdec->client.base.list);
+ nvdec->client.base.ops = &nvdec_client_ops;
+ nvdec->client.base.dev = dev;
+ nvdec->client.base.class = host_class;
+ nvdec->client.base.syncpts = syncpts;
+ nvdec->client.base.num_syncpts = 1;
+ nvdec->dev = dev;
+
+ INIT_LIST_HEAD(&nvdec->client.list);
+ nvdec->client.version = nvdec->config->version;
+ nvdec->client.ops = &nvdec_ops;
+
+ err = host1x_client_register(&nvdec->client.base);
+ if (err < 0) {
+ dev_err(dev, "failed to register host1x client: %d\n", err);
+ goto exit_falcon;
+ }
+
+ return 0;
+
+exit_falcon:
+ falcon_exit(&nvdec->falcon);
+
+ return err;
+}
+
+static int nvdec_remove(struct platform_device *pdev)
+{
+ struct nvdec *nvdec = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_client_unregister(&nvdec->client.base);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ falcon_exit(&nvdec->falcon);
+
+ return 0;
+}
+
+static const struct dev_pm_ops nvdec_pm_ops = {
+ SET_RUNTIME_PM_OPS(nvdec_runtime_suspend, nvdec_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+struct platform_driver tegra_nvdec_driver = {
+ .driver = {
+ .name = "tegra-nvdec",
+ .of_match_table = tegra_nvdec_of_match,
+ .pm = &nvdec_pm_ops
+ },
+ .probe = nvdec_probe,
+ .remove = nvdec_remove,
+};
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+MODULE_FIRMWARE(NVIDIA_TEGRA_210_NVDEC_FIRMWARE);
+#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
+MODULE_FIRMWARE(NVIDIA_TEGRA_186_NVDEC_FIRMWARE);
+#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
+MODULE_FIRMWARE(NVIDIA_TEGRA_194_NVDEC_FIRMWARE);
+#endif
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index 16a1cdc28657..321cb1f13da6 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -74,7 +74,7 @@ tegra_plane_atomic_duplicate_state(struct drm_plane *plane)
for (i = 0; i < 3; i++) {
copy->iova[i] = DMA_MAPPING_ERROR;
- copy->sgt[i] = NULL;
+ copy->map[i] = NULL;
}
return &copy->base;
@@ -138,55 +138,37 @@ const struct drm_plane_funcs tegra_plane_funcs = {
static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
{
- struct iommu_domain *domain = iommu_get_domain_for_dev(dc->dev);
unsigned int i;
int err;
for (i = 0; i < state->base.fb->format->num_planes; i++) {
struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
- dma_addr_t phys_addr, *phys;
- struct sg_table *sgt;
+ struct host1x_bo_mapping *map;
- /*
- * If we're not attached to a domain, we already stored the
- * physical address when the buffer was allocated. If we're
- * part of a group that's shared between all display
- * controllers, we've also already mapped the framebuffer
- * through the SMMU. In both cases we can short-circuit the
- * code below and retrieve the stored IOV address.
- */
- if (!domain || dc->client.group)
- phys = &phys_addr;
- else
- phys = NULL;
-
- sgt = host1x_bo_pin(dc->dev, &bo->base, phys);
- if (IS_ERR(sgt)) {
- err = PTR_ERR(sgt);
+ map = host1x_bo_pin(dc->dev, &bo->base, DMA_TO_DEVICE, &dc->client.cache);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
goto unpin;
}
- if (sgt) {
- err = dma_map_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0);
- if (err)
- goto unpin;
-
+ if (!dc->client.group) {
/*
* The display controller needs contiguous memory, so
* fail if the buffer is discontiguous and we fail to
* map its SG table to a single contiguous chunk of
* I/O virtual memory.
*/
- if (sgt->nents > 1) {
+ if (map->chunks > 1) {
err = -EINVAL;
goto unpin;
}
- state->iova[i] = sg_dma_address(sgt->sgl);
- state->sgt[i] = sgt;
+ state->iova[i] = map->phys;
} else {
- state->iova[i] = phys_addr;
+ state->iova[i] = bo->iova;
}
+
+ state->map[i] = map;
}
return 0;
@@ -195,15 +177,9 @@ unpin:
dev_err(dc->dev, "failed to map plane %u: %d\n", i, err);
while (i--) {
- struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
- struct sg_table *sgt = state->sgt[i];
-
- if (sgt)
- dma_unmap_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0);
-
- host1x_bo_unpin(dc->dev, &bo->base, sgt);
+ host1x_bo_unpin(state->map[i]);
state->iova[i] = DMA_MAPPING_ERROR;
- state->sgt[i] = NULL;
+ state->map[i] = NULL;
}
return err;
@@ -214,15 +190,9 @@ static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state)
unsigned int i;
for (i = 0; i < state->base.fb->format->num_planes; i++) {
- struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
- struct sg_table *sgt = state->sgt[i];
-
- if (sgt)
- dma_unmap_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0);
-
- host1x_bo_unpin(dc->dev, &bo->base, sgt);
+ host1x_bo_unpin(state->map[i]);
state->iova[i] = DMA_MAPPING_ERROR;
- state->sgt[i] = NULL;
+ state->map[i] = NULL;
}
}
@@ -230,11 +200,14 @@ int tegra_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct tegra_dc *dc = to_tegra_dc(state->crtc);
+ int err;
if (!state->fb)
return 0;
- drm_gem_plane_helper_prepare_fb(plane, state);
+ err = drm_gem_plane_helper_prepare_fb(plane, state);
+ if (err < 0)
+ return err;
return tegra_dc_pin(dc, to_tegra_plane_state(state));
}
diff --git a/drivers/gpu/drm/tegra/plane.h b/drivers/gpu/drm/tegra/plane.h
index d9470780c803..dfb20712fbd7 100644
--- a/drivers/gpu/drm/tegra/plane.h
+++ b/drivers/gpu/drm/tegra/plane.h
@@ -43,7 +43,7 @@ struct tegra_plane_legacy_blending_state {
struct tegra_plane_state {
struct drm_plane_state base;
- struct sg_table *sgt[3];
+ struct host1x_bo_mapping *map[3];
dma_addr_t iova[3];
struct tegra_bo_tiling tiling;
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 606c78a2b988..ff8fce36d2aa 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -17,6 +17,8 @@ struct tegra_rgb {
struct tegra_output output;
struct tegra_dc *dc;
+ struct clk *pll_d_out0;
+ struct clk *pll_d2_out0;
struct clk *clk_parent;
struct clk *clk;
};
@@ -116,13 +118,21 @@ static void tegra_rgb_encoder_enable(struct drm_encoder *encoder)
DISP_ORDER_RED_BLUE;
tegra_dc_writel(rgb->dc, value, DC_DISP_DISP_INTERFACE_CONTROL);
- /* XXX: parameterize? */
- value = SC0_H_QUALIFIER_NONE | SC1_H_QUALIFIER_NONE;
- tegra_dc_writel(rgb->dc, value, DC_DISP_SHIFT_CLOCK_OPTIONS);
-
tegra_dc_commit(rgb->dc);
}
+static bool tegra_rgb_pll_rate_change_allowed(struct tegra_rgb *rgb)
+{
+ if (!rgb->pll_d2_out0)
+ return false;
+
+ if (!clk_is_match(rgb->clk_parent, rgb->pll_d_out0) &&
+ !clk_is_match(rgb->clk_parent, rgb->pll_d2_out0))
+ return false;
+
+ return true;
+}
+
static int
tegra_rgb_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
@@ -151,8 +161,17 @@ tegra_rgb_encoder_atomic_check(struct drm_encoder *encoder,
* and hope that the desired frequency can be matched (or at least
* matched sufficiently close that the panel will still work).
*/
- div = ((clk_get_rate(rgb->clk) * 2) / pclk) - 2;
- pclk = 0;
+ if (tegra_rgb_pll_rate_change_allowed(rgb)) {
+ /*
+ * Set the display controller clock to 2x the pixel clock in
+ * order to produce higher-resolution pulse positions.
+ */
+ div = 2;
+ pclk *= 2;
+ } else {
+ div = ((clk_get_rate(rgb->clk) * 2) / pclk) - 2;
+ pclk = 0;
+ }
err = tegra_dc_state_setup_clock(dc, crtc_state, rgb->clk_parent,
pclk, div);
@@ -210,6 +229,22 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
return err;
}
+ rgb->pll_d_out0 = clk_get_sys(NULL, "pll_d_out0");
+ if (IS_ERR(rgb->pll_d_out0)) {
+ err = PTR_ERR(rgb->pll_d_out0);
+ dev_err(dc->dev, "failed to get pll_d_out0: %d\n", err);
+ return err;
+ }
+
+ if (dc->soc->has_pll_d2_out0) {
+ rgb->pll_d2_out0 = clk_get_sys(NULL, "pll_d2_out0");
+ if (IS_ERR(rgb->pll_d2_out0)) {
+ err = PTR_ERR(rgb->pll_d2_out0);
+ dev_err(dc->dev, "failed to get pll_d2_out0: %d\n", err);
+ return err;
+ }
+ }
+
dc->rgb = &rgb->output;
return 0;
@@ -217,9 +252,15 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
int tegra_dc_rgb_remove(struct tegra_dc *dc)
{
+ struct tegra_rgb *rgb;
+
if (!dc->rgb)
return 0;
+ rgb = to_rgb(dc->rgb);
+ clk_put(rgb->pll_d2_out0);
+ clk_put(rgb->pll_d_out0);
+
tegra_output_remove(dc->rgb);
dc->rgb = NULL;
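The fallback divider formula above implies pclk = 2 * parent / (div + 2). As a worked example, assuming a 216 MHz parent clock: a requested 72 MHz pixel clock yields div = (216 MHz * 2) / 72 MHz - 2 = 4, which reproduces exactly 432 / (4 + 2) = 72 MHz, while a requested 70 MHz also yields div = 432 / 70 - 2 = 4, so the panel actually runs at 72 MHz, roughly 3% fast, which is what "matched sufficiently close" refers to. When the parent is pll_d_out0 or pll_d2_out0, the new path instead fixes div = 2 and requests 2 * pclk from the PLL, so the resulting pixel clock is exact.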
diff --git a/drivers/gpu/drm/tegra/submit.c b/drivers/gpu/drm/tegra/submit.c
index 776f825df52f..6d6dd8c35475 100644
--- a/drivers/gpu/drm/tegra/submit.c
+++ b/drivers/gpu/drm/tegra/submit.c
@@ -64,33 +64,62 @@ static void gather_bo_put(struct host1x_bo *host_bo)
kref_put(&bo->ref, gather_bo_release);
}
-static struct sg_table *
-gather_bo_pin(struct device *dev, struct host1x_bo *host_bo, dma_addr_t *phys)
+static struct host1x_bo_mapping *
+gather_bo_pin(struct device *dev, struct host1x_bo *bo, enum dma_data_direction direction)
{
- struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);
- struct sg_table *sgt;
+ struct gather_bo *gather = container_of(bo, struct gather_bo, base);
+ struct host1x_bo_mapping *map;
int err;
- sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
- if (!sgt)
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
return ERR_PTR(-ENOMEM);
- err = dma_get_sgtable(bo->dev, sgt, bo->gather_data, bo->gather_data_dma,
- bo->gather_data_words * 4);
- if (err) {
- kfree(sgt);
- return ERR_PTR(err);
+ kref_init(&map->ref);
+ map->bo = host1x_bo_get(bo);
+ map->direction = direction;
+ map->dev = dev;
+
+ map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
+ if (!map->sgt) {
+ err = -ENOMEM;
+ goto free;
}
- return sgt;
+ err = dma_get_sgtable(gather->dev, map->sgt, gather->gather_data, gather->gather_data_dma,
+ gather->gather_data_words * 4);
+ if (err)
+ goto free_sgt;
+
+ err = dma_map_sgtable(dev, map->sgt, direction, 0);
+ if (err)
+ goto free_sgt;
+
+ map->phys = sg_dma_address(map->sgt->sgl);
+ map->size = gather->gather_data_words * 4;
+ map->chunks = err;
+
+ return map;
+
+free_sgt:
+ sg_free_table(map->sgt);
+ kfree(map->sgt);
+free:
+ kfree(map);
+ return ERR_PTR(err);
}
-static void gather_bo_unpin(struct device *dev, struct sg_table *sgt)
+static void gather_bo_unpin(struct host1x_bo_mapping *map)
{
- if (sgt) {
- sg_free_table(sgt);
- kfree(sgt);
- }
+ if (!map)
+ return;
+
+ dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
+ sg_free_table(map->sgt);
+ kfree(map->sgt);
+ host1x_bo_put(map->bo);
+
+ kfree(map);
}
static void *gather_bo_mmap(struct host1x_bo *host_bo)
@@ -475,8 +504,8 @@ static void release_job(struct host1x_job *job)
kfree(job_data->used_mappings);
kfree(job_data);
- if (pm_runtime_enabled(client->base.dev))
- pm_runtime_put_autosuspend(client->base.dev);
+ pm_runtime_mark_last_busy(client->base.dev);
+ pm_runtime_put_autosuspend(client->base.dev);
}
int tegra_drm_ioctl_channel_submit(struct drm_device *drm, void *data,
@@ -560,12 +589,10 @@ int tegra_drm_ioctl_channel_submit(struct drm_device *drm, void *data,
}
/* Boot engine. */
- if (pm_runtime_enabled(context->client->base.dev)) {
- err = pm_runtime_resume_and_get(context->client->base.dev);
- if (err < 0) {
- SUBMIT_ERR(context, "could not power up engine: %d", err);
- goto unpin_job;
- }
+ err = pm_runtime_resume_and_get(context->client->base.dev);
+ if (err < 0) {
+ SUBMIT_ERR(context, "could not power up engine: %d", err);
+ goto unpin_job;
}
job->user_data = job_data;
diff --git a/drivers/gpu/drm/tegra/uapi.c b/drivers/gpu/drm/tegra/uapi.c
index 690a339c52ec..9ab9179d2026 100644
--- a/drivers/gpu/drm/tegra/uapi.c
+++ b/drivers/gpu/drm/tegra/uapi.c
@@ -17,11 +17,7 @@ static void tegra_drm_mapping_release(struct kref *ref)
struct tegra_drm_mapping *mapping =
container_of(ref, struct tegra_drm_mapping, ref);
- if (mapping->sgt)
- dma_unmap_sgtable(mapping->dev, mapping->sgt, mapping->direction,
- DMA_ATTR_SKIP_CPU_SYNC);
-
- host1x_bo_unpin(mapping->dev, mapping->bo, mapping->sgt);
+ host1x_bo_unpin(mapping->map);
host1x_bo_put(mapping->bo);
kfree(mapping);
@@ -159,6 +155,7 @@ int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_f
struct drm_tegra_channel_map *args = data;
struct tegra_drm_mapping *mapping;
struct tegra_drm_context *context;
+ enum dma_data_direction direction;
int err = 0;
if (args->flags & ~DRM_TEGRA_CHANNEL_MAP_READ_WRITE)
@@ -180,68 +177,53 @@ int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_f
kref_init(&mapping->ref);
- mapping->dev = context->client->base.dev;
mapping->bo = tegra_gem_lookup(file, args->handle);
if (!mapping->bo) {
err = -EINVAL;
- goto unlock;
+ goto free;
}
- if (context->client->base.group) {
- /* IOMMU domain managed directly using IOMMU API */
- host1x_bo_pin(mapping->dev, mapping->bo, &mapping->iova);
- } else {
- switch (args->flags & DRM_TEGRA_CHANNEL_MAP_READ_WRITE) {
- case DRM_TEGRA_CHANNEL_MAP_READ_WRITE:
- mapping->direction = DMA_BIDIRECTIONAL;
- break;
-
- case DRM_TEGRA_CHANNEL_MAP_WRITE:
- mapping->direction = DMA_FROM_DEVICE;
- break;
-
- case DRM_TEGRA_CHANNEL_MAP_READ:
- mapping->direction = DMA_TO_DEVICE;
- break;
+ switch (args->flags & DRM_TEGRA_CHANNEL_MAP_READ_WRITE) {
+ case DRM_TEGRA_CHANNEL_MAP_READ_WRITE:
+ direction = DMA_BIDIRECTIONAL;
+ break;
- default:
- return -EINVAL;
- }
+ case DRM_TEGRA_CHANNEL_MAP_WRITE:
+ direction = DMA_FROM_DEVICE;
+ break;
- mapping->sgt = host1x_bo_pin(mapping->dev, mapping->bo, NULL);
- if (IS_ERR(mapping->sgt)) {
- err = PTR_ERR(mapping->sgt);
- goto put_gem;
- }
+ case DRM_TEGRA_CHANNEL_MAP_READ:
+ direction = DMA_TO_DEVICE;
+ break;
- err = dma_map_sgtable(mapping->dev, mapping->sgt, mapping->direction,
- DMA_ATTR_SKIP_CPU_SYNC);
- if (err)
- goto unpin;
+ default:
+ err = -EINVAL;
+ goto put_gem;
+ }
- mapping->iova = sg_dma_address(mapping->sgt->sgl);
+ mapping->map = host1x_bo_pin(context->client->base.dev, mapping->bo, direction, NULL);
+ if (IS_ERR(mapping->map)) {
+ err = PTR_ERR(mapping->map);
+ goto put_gem;
}
+ mapping->iova = mapping->map->phys;
mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->gem.size;
err = xa_alloc(&context->mappings, &args->mapping, mapping, XA_LIMIT(1, U32_MAX),
GFP_KERNEL);
if (err < 0)
- goto unmap;
+ goto unpin;
mutex_unlock(&fpriv->lock);
return 0;
-unmap:
- if (mapping->sgt) {
- dma_unmap_sgtable(mapping->dev, mapping->sgt, mapping->direction,
- DMA_ATTR_SKIP_CPU_SYNC);
- }
unpin:
- host1x_bo_unpin(mapping->dev, mapping->bo, mapping->sgt);
+ host1x_bo_unpin(mapping->map);
put_gem:
host1x_bo_put(mapping->bo);
+free:
kfree(mapping);
unlock:
mutex_unlock(&fpriv->lock);
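
Note on the uapi.c change: tegra_drm_ioctl_channel_map() now translates the READ/WRITE flag bits into a DMA direction before pinning, instead of special-casing clients with an IOMMU group. A small standalone sketch of that flag-to-direction translation; the flag bits and direction constants are defined locally for illustration and do not reuse the kernel's numeric values (the authoritative flags live in the tegra_drm UAPI header):

    #include <stdio.h>

    /* Local stand-ins for the UAPI map flags and DMA directions (illustrative only). */
    enum { MAP_READ = 1 << 0, MAP_WRITE = 1 << 1, MAP_READ_WRITE = MAP_READ | MAP_WRITE };
    enum dir { DIR_TO_DEVICE, DIR_FROM_DEVICE, DIR_BIDIRECTIONAL, DIR_INVALID };

    static enum dir map_flags_to_direction(unsigned int flags)
    {
            switch (flags & MAP_READ_WRITE) {
            case MAP_READ_WRITE:
                    return DIR_BIDIRECTIONAL;  /* engine both reads and writes the BO */
            case MAP_WRITE:
                    return DIR_FROM_DEVICE;    /* engine writes, CPU reads back */
            case MAP_READ:
                    return DIR_TO_DEVICE;      /* engine only reads the BO */
            default:
                    return DIR_INVALID;        /* the ioctl rejects this with -EINVAL */
            }
    }

    int main(void)
    {
            printf("READ -> %d, WRITE -> %d, READ_WRITE -> %d\n",
                   map_flags_to_direction(MAP_READ),
                   map_flags_to_direction(MAP_WRITE),
                   map_flags_to_direction(MAP_READ_WRITE));
            return 0;
    }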
diff --git a/drivers/gpu/drm/tegra/uapi.h b/drivers/gpu/drm/tegra/uapi.h
index 12adad770ad3..92ff1e44ff15 100644
--- a/drivers/gpu/drm/tegra/uapi.h
+++ b/drivers/gpu/drm/tegra/uapi.h
@@ -27,10 +27,9 @@ struct tegra_drm_file {
struct tegra_drm_mapping {
struct kref ref;
- struct device *dev;
+ struct host1x_bo_mapping *map;
struct host1x_bo *bo;
- struct sg_table *sgt;
- enum dma_data_direction direction;
+
dma_addr_t iova;
dma_addr_t iova_end;
};
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index c02010ff2b7f..1e342fa3d27b 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/module.h>
@@ -151,9 +152,13 @@ static int vic_init(struct host1x_client *client)
goto free_channel;
}
+ pm_runtime_enable(client->dev);
+ pm_runtime_use_autosuspend(client->dev);
+ pm_runtime_set_autosuspend_delay(client->dev, 500);
+
err = tegra_drm_register_client(tegra, drm);
if (err < 0)
- goto free_syncpt;
+ goto disable_rpm;
/*
* Inherit the DMA parameters (such as maximum segment size) from the
@@ -163,7 +168,10 @@ static int vic_init(struct host1x_client *client)
return 0;
-free_syncpt:
+disable_rpm:
+ pm_runtime_dont_use_autosuspend(client->dev);
+ pm_runtime_force_suspend(client->dev);
+
host1x_syncpt_put(client->syncpts[0]);
free_channel:
host1x_channel_put(vic->channel);
@@ -188,10 +196,15 @@ static int vic_exit(struct host1x_client *client)
if (err < 0)
return err;
+ pm_runtime_dont_use_autosuspend(client->dev);
+ pm_runtime_force_suspend(client->dev);
+
host1x_syncpt_put(client->syncpts[0]);
host1x_channel_put(vic->channel);
host1x_client_iommu_detach(client);
+ vic->channel = NULL;
+
if (client->group) {
dma_unmap_single(vic->dev, vic->falcon.firmware.phys,
vic->falcon.firmware.size, DMA_TO_DEVICE);
@@ -232,12 +245,12 @@ static int vic_load_firmware(struct vic *vic)
if (!client->group) {
virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL);
-
- err = dma_mapping_error(vic->dev, iova);
- if (err < 0)
- return err;
+ if (!virt)
+ return -ENOMEM;
} else {
virt = tegra_drm_alloc(tegra, size, &iova);
+ if (IS_ERR(virt))
+ return PTR_ERR(virt);
}
vic->falcon.firmware.virt = virt;
@@ -315,6 +328,8 @@ static int vic_runtime_suspend(struct device *dev)
struct vic *vic = dev_get_drvdata(dev);
int err;
+ host1x_channel_stop(vic->channel);
+
err = reset_control_assert(vic->rst);
if (err < 0)
return err;
@@ -330,27 +345,17 @@ static int vic_open_channel(struct tegra_drm_client *client,
struct tegra_drm_context *context)
{
struct vic *vic = to_vic(client);
- int err;
-
- err = pm_runtime_resume_and_get(vic->dev);
- if (err < 0)
- return err;
context->channel = host1x_channel_get(vic->channel);
- if (!context->channel) {
- pm_runtime_put(vic->dev);
+ if (!context->channel)
return -ENOMEM;
- }
return 0;
}
static void vic_close_channel(struct tegra_drm_context *context)
{
- struct vic *vic = to_vic(context->client);
-
host1x_channel_put(context->channel);
- pm_runtime_put(vic->dev);
}
static const struct tegra_drm_client_ops vic_ops = {
@@ -441,6 +446,12 @@ static int vic_probe(struct platform_device *pdev)
return PTR_ERR(vic->clk);
}
+ err = clk_set_rate(vic->clk, ULONG_MAX);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set clock rate\n");
+ return err;
+ }
+
if (!dev->pm_domain) {
vic->rst = devm_reset_control_get(dev, "vic");
if (IS_ERR(vic->rst)) {
@@ -476,17 +487,8 @@ static int vic_probe(struct platform_device *pdev)
goto exit_falcon;
}
- pm_runtime_enable(&pdev->dev);
- if (!pm_runtime_enabled(&pdev->dev)) {
- err = vic_runtime_resume(&pdev->dev);
- if (err < 0)
- goto unregister_client;
- }
-
return 0;
-unregister_client:
- host1x_client_unregister(&vic->client.base);
exit_falcon:
falcon_exit(&vic->falcon);
@@ -505,11 +507,6 @@ static int vic_remove(struct platform_device *pdev)
return err;
}
- if (pm_runtime_enabled(&pdev->dev))
- pm_runtime_disable(&pdev->dev);
- else
- vic_runtime_suspend(&pdev->dev);
-
falcon_exit(&vic->falcon);
return 0;
@@ -517,6 +514,8 @@ static int vic_remove(struct platform_device *pdev)
static const struct dev_pm_ops vic_pm_ops = {
SET_RUNTIME_PM_OPS(vic_runtime_suspend, vic_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
struct platform_driver tegra_vic_driver = {
diff --git a/drivers/gpu/drm/tidss/Kconfig b/drivers/gpu/drm/tidss/Kconfig
index f790a5215302..bc4fa59b6fa9 100644
--- a/drivers/gpu/drm/tidss/Kconfig
+++ b/drivers/gpu/drm/tidss/Kconfig
@@ -3,7 +3,6 @@ config DRM_TIDSS
depends on DRM && OF
depends on ARM || ARM64 || COMPILE_TEST
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
help
The TI Keystone family SoCs introduced a new generation of
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index 4366b5c798e0..7c784e90e40e 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -88,7 +88,7 @@ static int __maybe_unused tidss_resume(struct device *dev)
return drm_mode_config_helper_resume(&tidss->ddev);
}
-static const struct dev_pm_ops tidss_pm_ops = {
+static __maybe_unused const struct dev_pm_ops tidss_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tidss_suspend, tidss_resume)
SET_RUNTIME_PM_OPS(tidss_pm_runtime_suspend, tidss_pm_runtime_resume, NULL)
};
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index 9f505a149990..e315591eb36b 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -3,7 +3,6 @@ config DRM_TILCDC
tristate "DRM Support for TI LCDC Display Controller"
depends on DRM && OF && ARM
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 3ddb7c710a3d..cc567c87057d 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -60,8 +60,6 @@ void tilcdc_module_cleanup(struct tilcdc_module *mod)
list_del(&mod->list);
}
-static struct of_device_id tilcdc_of_match[];
-
static int tilcdc_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
@@ -587,7 +585,7 @@ static int tilcdc_pdev_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id tilcdc_of_match[] = {
+static const struct of_device_id tilcdc_of_match[] = {
{ .compatible = "ti,am33xx-tilcdc", },
{ .compatible = "ti,da850-tilcdc", },
{ },
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index 4ca309f80dee..712e0004e96e 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -3,7 +3,7 @@
config DRM_ARCPGU
tristate "ARC PGU"
depends on DRM && OF
- select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
help
Choose this option if you have an ARC PGU controller.
@@ -71,7 +71,7 @@ config TINYDRM_HX8357D
tristate "DRM support for HX8357D display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
@@ -84,7 +84,7 @@ config TINYDRM_ILI9163
tristate "DRM support for ILI9163 display panels"
depends on DRM && SPI
select BACKLIGHT_CLASS_DEVICE
- select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
select DRM_MIPI_DBI
help
@@ -97,7 +97,7 @@ config TINYDRM_ILI9225
tristate "DRM support for ILI9225 display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
select DRM_MIPI_DBI
help
DRM driver for the following Ilitek ILI9225 panels:
@@ -109,7 +109,7 @@ config TINYDRM_ILI9341
tristate "DRM support for ILI9341 display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
@@ -122,7 +122,7 @@ config TINYDRM_ILI9486
tristate "DRM support for ILI9486 display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
@@ -136,7 +136,7 @@ config TINYDRM_MI0283QT
tristate "DRM support for MI0283QT"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
@@ -147,7 +147,7 @@ config TINYDRM_REPAPER
tristate "DRM support for Pervasive Displays RePaper panels (V231)"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
help
DRM driver for the following Pervasive Displays panels:
1.44" TFT EPD Panel (E1144CS021)
@@ -161,7 +161,7 @@ config TINYDRM_ST7586
tristate "DRM support for Sitronix ST7586 display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
select DRM_MIPI_DBI
help
DRM driver for the following Sitronix ST7586 panels:
@@ -173,7 +173,7 @@ config TINYDRM_ST7735R
tristate "DRM support for Sitronix ST7715R/ST7735R display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
index 2f999915b9aa..04146da2d1d8 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -459,7 +459,7 @@ static struct drm_display_mode simpledrm_mode(unsigned int width,
{
struct drm_display_mode mode = { SIMPLEDRM_MODE(width, height) };
- mode.clock = 60 /* Hz */ * mode.hdisplay * mode.vdisplay;
+ mode.clock = mode.hdisplay * mode.vdisplay * 60 / 1000 /* kHz */;
drm_mode_set_name(&mode);
return mode;
@@ -571,8 +571,8 @@ static const uint32_t simpledrm_default_formats[] = {
//DRM_FORMAT_XRGB1555,
//DRM_FORMAT_ARGB1555,
DRM_FORMAT_RGB888,
- //DRM_FORMAT_XRGB2101010,
- //DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_ARGB2101010,
};
static const uint64_t simpledrm_format_modifiers[] = {
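
Note on the simpledrm.c change: drm_display_mode.clock is expressed in kHz, so the refresh-based estimate has to be divided by 1000; the previous expression produced a value in Hz. A worked example of the corrected formula, assuming a hypothetical 1024x768 panel at 60 Hz:

    #include <stdio.h>

    /* Worked example of the simpledrm_mode() pixel-clock estimate, in kHz. */
    int main(void)
    {
            unsigned int hdisplay = 1024, vdisplay = 768; /* hypothetical panel size */
            unsigned int clock_khz = hdisplay * vdisplay * 60 / 1000;

            /* 1024 * 768 * 60 = 47185920 Hz -> 47185 kHz (~47.2 MHz) */
            printf("mode.clock = %u kHz\n", clock_khz);
            return 0;
    }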
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index e4a20a3a5d16..db3dc7ef5382 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1086,7 +1086,6 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
if (timeout == 0)
return -EBUSY;
- dma_resv_add_excl_fence(bo->base.resv, NULL);
return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
@@ -1105,7 +1104,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
* as an indication that we're about to swap out.
*/
memset(&place, 0, sizeof(place));
- place.mem_type = TTM_PL_SYSTEM;
+ place.mem_type = bo->resource->mem_type;
if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL))
return -EBUSY;
@@ -1137,6 +1136,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
struct ttm_place hop;
memset(&hop, 0, sizeof(hop));
+ place.mem_type = TTM_PL_SYSTEM;
ret = ttm_resource_alloc(bo, &place, &evict_mem);
if (unlikely(ret))
goto out;
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 7e83c00a3f48..79c870a3bef8 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -34,6 +34,7 @@
#include <linux/sched.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
+#include <linux/module.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
diff --git a/drivers/gpu/drm/tve200/Kconfig b/drivers/gpu/drm/tve200/Kconfig
index e2d163c74ed6..47a7dbe6c114 100644
--- a/drivers/gpu/drm/tve200/Kconfig
+++ b/drivers/gpu/drm/tve200/Kconfig
@@ -8,7 +8,6 @@ config DRM_TVE200
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
help
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index 0d9af62f69ad..6e3113f419f4 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -70,11 +70,11 @@ struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size)
struct drm_gem_object *obj;
if (size == 0)
- return NULL;
+ return ERR_PTR(-EINVAL);
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo)
- return NULL;
+ return ERR_PTR(-ENOMEM);
obj = &bo->base.base;
obj->funcs = &v3d_gem_funcs;
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index 345a5570a3da..de3424fed2fc 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -6,7 +6,6 @@ config DRM_VC4
depends on SND && SND_SOC
depends on COMMON_CLK
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select DRM_PANEL_BRIDGE
select SND_PCM
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index fddaeb0b09c1..6d1281a343e9 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -177,7 +177,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
bo->validated_shader = NULL;
}
- drm_gem_cma_free_object(obj);
+ drm_gem_cma_free(&bo->base);
}
static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
@@ -720,7 +720,7 @@ static int vc4_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
return -EINVAL;
}
- return drm_gem_cma_mmap(obj, vma);
+ return drm_gem_cma_mmap(&bo->base, vma);
}
static const struct vm_operations_struct vc4_vm_ops = {
@@ -732,8 +732,8 @@ static const struct vm_operations_struct vc4_vm_ops = {
static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
.free = vc4_free_object,
.export = vc4_prime_export,
- .get_sg_table = drm_gem_cma_get_sg_table,
- .vmap = drm_gem_cma_vmap,
+ .get_sg_table = drm_gem_cma_object_get_sg_table,
+ .vmap = drm_gem_cma_object_vmap,
.mmap = vc4_gem_object_mmap,
.vm_ops = &vc4_vm_ops,
};
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 79d4d9dd1394..24de29bc1cda 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -340,19 +340,19 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
struct drm_device *dev = state->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs;
- struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
struct vc4_hvs_state *new_hvs_state;
struct drm_crtc *crtc;
struct vc4_hvs_state *old_hvs_state;
+ unsigned int channel;
int i;
old_hvs_state = vc4_hvs_get_old_global_state(state);
- if (WARN_ON(!old_hvs_state))
+ if (WARN_ON(IS_ERR(old_hvs_state)))
return;
new_hvs_state = vc4_hvs_get_new_global_state(state);
- if (WARN_ON(!new_hvs_state))
+ if (WARN_ON(IS_ERR(new_hvs_state)))
return;
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -365,31 +365,32 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
}
- if (vc4->hvs->hvs5) {
- unsigned long core_rate = max_t(unsigned long,
- 500000000,
- new_hvs_state->core_clock_rate);
-
- clk_set_min_rate(hvs->core_clk, core_rate);
- }
-
- for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
- struct vc4_crtc_state *vc4_crtc_state =
- to_vc4_crtc_state(old_crtc_state);
- unsigned int channel = vc4_crtc_state->assigned_channel;
+ for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
+ struct drm_crtc_commit *commit;
int ret;
- if (channel == VC4_HVS_CHANNEL_DISABLED)
+ if (!old_hvs_state->fifo_state[channel].in_use)
continue;
- if (!old_hvs_state->fifo_state[channel].in_use)
+ commit = old_hvs_state->fifo_state[channel].pending_commit;
+ if (!commit)
continue;
- ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[channel].pending_commit);
+ ret = drm_crtc_commit_wait(commit);
if (ret)
drm_err(dev, "Timed out waiting for commit\n");
+
+ drm_crtc_commit_put(commit);
+ old_hvs_state->fifo_state[channel].pending_commit = NULL;
}
+ if (vc4->hvs->hvs5) {
+ unsigned long core_rate = max_t(unsigned long,
+ 500000000,
+ new_hvs_state->core_clock_rate);
+
+ clk_set_min_rate(hvs->core_clk, core_rate);
+ }
drm_atomic_helper_commit_modeset_disables(dev, state);
vc4_ctm_commit(vc4, state);
@@ -427,8 +428,8 @@ static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
unsigned int i;
hvs_state = vc4_hvs_get_new_global_state(state);
- if (!hvs_state)
- return -EINVAL;
+ if (WARN_ON(IS_ERR(hvs_state)))
+ return PTR_ERR(hvs_state);
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
struct vc4_crtc_state *vc4_crtc_state =
@@ -676,12 +677,6 @@ vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
for (i = 0; i < HVS_NUM_CHANNELS; i++) {
state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
state->fifo_state[i].fifo_load = old_state->fifo_state[i].fifo_load;
-
- if (!old_state->fifo_state[i].pending_commit)
- continue;
-
- state->fifo_state[i].pending_commit =
- drm_crtc_commit_get(old_state->fifo_state[i].pending_commit);
}
state->core_clock_rate = old_state->core_clock_rate;
@@ -772,8 +767,8 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
unsigned int i;
hvs_new_state = vc4_hvs_get_global_state(state);
- if (!hvs_new_state)
- return -EINVAL;
+ if (IS_ERR(hvs_new_state))
+ return PTR_ERR(hvs_new_state);
for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
if (!hvs_new_state->fifo_state[i].in_use)
@@ -862,8 +857,8 @@ vc4_core_clock_atomic_check(struct drm_atomic_state *state)
load_state = to_vc4_load_tracker_state(priv_state);
hvs_new_state = vc4_hvs_get_global_state(state);
- if (!hvs_new_state)
- return -EINVAL;
+ if (IS_ERR(hvs_new_state))
+ return PTR_ERR(hvs_new_state);
for_each_oldnew_crtc_in_state(state, crtc,
old_crtc_state,
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index ac761c683663..920a9eefe426 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -33,6 +33,7 @@ static const struct hvs_format {
u32 hvs; /* HVS_FORMAT_* */
u32 pixel_order;
u32 pixel_order_hvs5;
+ bool hvs5_only;
} hvs_formats[] = {
{
.drm = DRM_FORMAT_XRGB8888,
@@ -128,6 +129,12 @@ static const struct hvs_format {
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE,
.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
},
+ {
+ .drm = DRM_FORMAT_P030,
+ .hvs = HVS_PIXEL_FORMAT_YCBCR_10BIT,
+ .pixel_order = HVS_PIXEL_ORDER_XYCBCR,
+ .hvs5_only = true,
+ },
};
static const struct hvs_format *vc4_get_hvs_format(u32 drm_format)
@@ -616,6 +623,51 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
return 0;
}
+/*
+ * The colorspace conversion matrices are held in 3 entries in the dlist.
+ * Create an array of them, with entries for each full and limited mode, and
+ * each supported colorspace.
+ */
+static const u32 colorspace_coeffs[2][DRM_COLOR_ENCODING_MAX][3] = {
+ {
+ /* Limited range */
+ {
+ /* BT601 */
+ SCALER_CSC0_ITR_R_601_5,
+ SCALER_CSC1_ITR_R_601_5,
+ SCALER_CSC2_ITR_R_601_5,
+ }, {
+ /* BT709 */
+ SCALER_CSC0_ITR_R_709_3,
+ SCALER_CSC1_ITR_R_709_3,
+ SCALER_CSC2_ITR_R_709_3,
+ }, {
+ /* BT2020 */
+ SCALER_CSC0_ITR_R_2020,
+ SCALER_CSC1_ITR_R_2020,
+ SCALER_CSC2_ITR_R_2020,
+ }
+ }, {
+ /* Full range */
+ {
+ /* JFIF */
+ SCALER_CSC0_JPEG_JFIF,
+ SCALER_CSC1_JPEG_JFIF,
+ SCALER_CSC2_JPEG_JFIF,
+ }, {
+ /* BT709 */
+ SCALER_CSC0_ITR_R_709_3_FR,
+ SCALER_CSC1_ITR_R_709_3_FR,
+ SCALER_CSC2_ITR_R_709_3_FR,
+ }, {
+ /* BT2020 */
+ SCALER_CSC0_ITR_R_2020_FR,
+ SCALER_CSC1_ITR_R_2020_FR,
+ SCALER_CSC2_ITR_R_2020_FR,
+ }
+ }
+};
+
/* Writes out a full display list for an active plane to the plane's
* private dlist state.
*/
@@ -762,47 +814,90 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
case DRM_FORMAT_MOD_BROADCOM_SAND128:
case DRM_FORMAT_MOD_BROADCOM_SAND256: {
uint32_t param = fourcc_mod_broadcom_param(fb->modifier);
- u32 tile_w, tile, x_off, pix_per_tile;
-
- hvs_format = HVS_PIXEL_FORMAT_H264;
-
- switch (base_format_mod) {
- case DRM_FORMAT_MOD_BROADCOM_SAND64:
- tiling = SCALER_CTL0_TILING_64B;
- tile_w = 64;
- break;
- case DRM_FORMAT_MOD_BROADCOM_SAND128:
- tiling = SCALER_CTL0_TILING_128B;
- tile_w = 128;
- break;
- case DRM_FORMAT_MOD_BROADCOM_SAND256:
- tiling = SCALER_CTL0_TILING_256B_OR_T;
- tile_w = 256;
- break;
- default:
- break;
- }
if (param > SCALER_TILE_HEIGHT_MASK) {
- DRM_DEBUG_KMS("SAND height too large (%d)\n", param);
+ DRM_DEBUG_KMS("SAND height too large (%d)\n",
+ param);
return -EINVAL;
}
- pix_per_tile = tile_w / fb->format->cpp[0];
- tile = vc4_state->src_x / pix_per_tile;
- x_off = vc4_state->src_x % pix_per_tile;
+ if (fb->format->format == DRM_FORMAT_P030) {
+ hvs_format = HVS_PIXEL_FORMAT_YCBCR_10BIT;
+ tiling = SCALER_CTL0_TILING_128B;
+ } else {
+ hvs_format = HVS_PIXEL_FORMAT_H264;
+
+ switch (base_format_mod) {
+ case DRM_FORMAT_MOD_BROADCOM_SAND64:
+ tiling = SCALER_CTL0_TILING_64B;
+ break;
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ tiling = SCALER_CTL0_TILING_128B;
+ break;
+ case DRM_FORMAT_MOD_BROADCOM_SAND256:
+ tiling = SCALER_CTL0_TILING_256B_OR_T;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
/* Adjust the base pointer to the first pixel to be scanned
* out.
+ *
+ * For P030, y_ptr [31:4] is the 128bit word for the start pixel
+ * y_ptr [3:0] is the pixel (0-11) contained within that 128bit
+ * word that should be taken as the first pixel.
+ * Ditto uv_ptr [31:4] vs [3:0], however [3:0] contains the
+ * element within the 128bit word, eg for pixel 3 the value
+ * should be 6.
*/
for (i = 0; i < num_planes; i++) {
+ u32 tile_w, tile, x_off, pix_per_tile;
+
+ if (fb->format->format == DRM_FORMAT_P030) {
+ /*
+ * Spec says: bits [31:4] of the given address
+ * should point to the 128-bit word containing
+ * the desired starting pixel, and bits[3:0]
+ * should be between 0 and 11, indicating which
+ * of the 12-pixels in that 128-bit word is the
+ * first pixel to be used
+ */
+ u32 remaining_pixels = vc4_state->src_x % 96;
+ u32 aligned = remaining_pixels / 12;
+ u32 last_bits = remaining_pixels % 12;
+
+ x_off = aligned * 16 + last_bits;
+ tile_w = 128;
+ pix_per_tile = 96;
+ } else {
+ switch (base_format_mod) {
+ case DRM_FORMAT_MOD_BROADCOM_SAND64:
+ tile_w = 64;
+ break;
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ tile_w = 128;
+ break;
+ case DRM_FORMAT_MOD_BROADCOM_SAND256:
+ tile_w = 256;
+ break;
+ default:
+ return -EINVAL;
+ }
+ pix_per_tile = tile_w / fb->format->cpp[0];
+ x_off = (vc4_state->src_x % pix_per_tile) /
+ (i ? h_subsample : 1) *
+ fb->format->cpp[i];
+ }
+
+ tile = vc4_state->src_x / pix_per_tile;
+
vc4_state->offsets[i] += param * tile_w * tile;
vc4_state->offsets[i] += src_y /
(i ? v_subsample : 1) *
tile_w;
- vc4_state->offsets[i] += x_off /
- (i ? h_subsample : 1) *
- fb->format->cpp[i];
+ vc4_state->offsets[i] += x_off & ~(i ? 1 : 0);
}
pitch0 = VC4_SET_FIELD(param, SCALER_TILE_HEIGHT);
@@ -955,7 +1050,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
/* Pitch word 1/2 */
for (i = 1; i < num_planes; i++) {
- if (hvs_format != HVS_PIXEL_FORMAT_H264) {
+ if (hvs_format != HVS_PIXEL_FORMAT_H264 &&
+ hvs_format != HVS_PIXEL_FORMAT_YCBCR_10BIT) {
vc4_dlist_write(vc4_state,
VC4_SET_FIELD(fb->pitches[i],
SCALER_SRC_PITCH));
@@ -966,9 +1062,20 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
/* Colorspace conversion words */
if (vc4_state->is_yuv) {
- vc4_dlist_write(vc4_state, SCALER_CSC0_ITR_R_601_5);
- vc4_dlist_write(vc4_state, SCALER_CSC1_ITR_R_601_5);
- vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5);
+ enum drm_color_encoding color_encoding = state->color_encoding;
+ enum drm_color_range color_range = state->color_range;
+ const u32 *ccm;
+
+ if (color_encoding >= DRM_COLOR_ENCODING_MAX)
+ color_encoding = DRM_COLOR_YCBCR_BT601;
+ if (color_range >= DRM_COLOR_RANGE_MAX)
+ color_range = DRM_COLOR_YCBCR_LIMITED_RANGE;
+
+ ccm = colorspace_coeffs[color_range][color_encoding];
+
+ vc4_dlist_write(vc4_state, ccm[0]);
+ vc4_dlist_write(vc4_state, ccm[1]);
+ vc4_dlist_write(vc4_state, ccm[2]);
}
vc4_state->lbm_offset = 0;
@@ -1315,6 +1422,13 @@ static bool vc4_format_mod_supported(struct drm_plane *plane,
default:
return false;
}
+ case DRM_FORMAT_P030:
+ switch (fourcc_mod_broadcom_mod(modifier)) {
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ return true;
+ default:
+ return false;
+ }
case DRM_FORMAT_RGBX1010102:
case DRM_FORMAT_BGRX1010102:
case DRM_FORMAT_RGBA1010102:
@@ -1347,8 +1461,11 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
struct drm_plane *plane = NULL;
struct vc4_plane *vc4_plane;
u32 formats[ARRAY_SIZE(hvs_formats)];
+ int num_formats = 0;
int ret = 0;
unsigned i;
+ bool hvs5 = of_device_is_compatible(dev->dev->of_node,
+ "brcm,bcm2711-vc5");
static const uint64_t modifiers[] = {
DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
DRM_FORMAT_MOD_BROADCOM_SAND128,
@@ -1363,13 +1480,17 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
if (!vc4_plane)
return ERR_PTR(-ENOMEM);
- for (i = 0; i < ARRAY_SIZE(hvs_formats); i++)
- formats[i] = hvs_formats[i].drm;
+ for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
+ if (!hvs_formats[i].hvs5_only || hvs5) {
+ formats[num_formats] = hvs_formats[i].drm;
+ num_formats++;
+ }
+ }
plane = &vc4_plane->base;
ret = drm_universal_plane_init(dev, plane, 0,
&vc4_plane_funcs,
- formats, ARRAY_SIZE(formats),
+ formats, num_formats,
modifiers, type, NULL);
if (ret)
return ERR_PTR(ret);
@@ -1383,6 +1504,15 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
DRM_MODE_REFLECT_X |
DRM_MODE_REFLECT_Y);
+ drm_plane_create_color_properties(plane,
+ BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709) |
+ BIT(DRM_COLOR_YCBCR_BT2020),
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+ DRM_COLOR_YCBCR_BT709,
+ DRM_COLOR_YCBCR_LIMITED_RANGE);
+
return plane;
}
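
Note on the vc4_plane.c change: for P030 in a SAND128 column, each 128-bit word carries 12 pixels and a 128-byte column line holds 96, so vc4_plane_mode_set() splits the starting position into a 16-byte word offset plus a 0-11 pixel index in the low four bits, matching the y_ptr/uv_ptr layout described in the comment in the hunk above. A minimal sketch of that offset arithmetic, with a hypothetical src_x used only for illustration:

    #include <stdio.h>

    /*
     * Sketch of the P030/SAND128 start-offset math from vc4_plane_mode_set().
     * src_x = 100 is a hypothetical crop offset, not taken from a real use case.
     */
    int main(void)
    {
            unsigned int src_x = 100;
            unsigned int pix_per_tile = 96;  /* 128-byte column line = 8 x 128-bit words x 12 px */

            unsigned int remaining = src_x % pix_per_tile;  /* pixels into the column line */
            unsigned int word = remaining / 12;             /* which 128-bit word */
            unsigned int pixel = remaining % 12;            /* pixel 0-11 within that word */
            unsigned int x_off = word * 16 + pixel;         /* byte offset in [31:4], pixel in [3:0] */
            unsigned int tile = src_x / pix_per_tile;       /* which SAND column */

            printf("tile=%u x_off=0x%x (word %u, pixel %u)\n", tile, x_off, word, pixel);
            return 0;
    }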
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index 489f921ef44d..7538b84a6dca 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -975,7 +975,10 @@ enum hvs_pixel_format {
#define SCALER_CSC0_COEF_CR_OFS_SHIFT 0
#define SCALER_CSC0_ITR_R_601_5 0x00f00000
#define SCALER_CSC0_ITR_R_709_3 0x00f00000
+#define SCALER_CSC0_ITR_R_2020 0x00f00000
#define SCALER_CSC0_JPEG_JFIF 0x00000000
+#define SCALER_CSC0_ITR_R_709_3_FR 0x00000000
+#define SCALER_CSC0_ITR_R_2020_FR 0x00000000
/* S2.8 contribution of Cb to Green */
#define SCALER_CSC1_COEF_CB_GRN_MASK VC4_MASK(31, 22)
@@ -990,8 +993,11 @@ enum hvs_pixel_format {
#define SCALER_CSC1_COEF_CR_BLU_MASK VC4_MASK(1, 0)
#define SCALER_CSC1_COEF_CR_BLU_SHIFT 0
#define SCALER_CSC1_ITR_R_601_5 0xe73304a8
-#define SCALER_CSC1_ITR_R_709_3 0xf2b784a8
-#define SCALER_CSC1_JPEG_JFIF 0xea34a400
+#define SCALER_CSC1_ITR_R_709_3 0xf27784a8
+#define SCALER_CSC1_ITR_R_2020 0xf43594a8
+#define SCALER_CSC1_JPEG_JFIF 0xea349400
+#define SCALER_CSC1_ITR_R_709_3_FR 0xf4388400
+#define SCALER_CSC1_ITR_R_2020_FR 0xf5b6d400
/* S2.8 contribution of Cb to Red */
#define SCALER_CSC2_COEF_CB_RED_MASK VC4_MASK(29, 20)
@@ -1002,9 +1008,12 @@ enum hvs_pixel_format {
/* S2.8 contribution of Cb to Blue */
#define SCALER_CSC2_COEF_CB_BLU_MASK VC4_MASK(19, 10)
#define SCALER_CSC2_COEF_CB_BLU_SHIFT 10
-#define SCALER_CSC2_ITR_R_601_5 0x00066204
-#define SCALER_CSC2_ITR_R_709_3 0x00072a1c
-#define SCALER_CSC2_JPEG_JFIF 0x000599c5
+#define SCALER_CSC2_ITR_R_601_5 0x00066604
+#define SCALER_CSC2_ITR_R_709_3 0x00072e1d
+#define SCALER_CSC2_ITR_R_2020 0x0006b624
+#define SCALER_CSC2_JPEG_JFIF 0x00059dc6
+#define SCALER_CSC2_ITR_R_709_3_FR 0x00064ddb
+#define SCALER_CSC2_ITR_R_2020_FR 0x0005e5e2
#define SCALER_TPZ0_VERT_RECALC BIT(31)
#define SCALER_TPZ0_SCALE_MASK VC4_MASK(28, 8)
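
Note on the vc4_regs.h change: the CSC register words above are consumed three at a time through the colorspace_coeffs[color_range][color_encoding] table added in vc4_plane.c, with out-of-range plane properties falling back to BT601 limited range. A compact sketch of that lookup and clamping, using placeholder table values rather than the real SCALER_CSC* words:

    #include <stdio.h>

    /*
     * Sketch of the colorspace_coeffs[color_range][color_encoding] lookup from
     * vc4_plane_mode_set(). The table entries are placeholder values, not the
     * SCALER_CSC* register words defined above.
     */
    enum { ENC_BT601, ENC_BT709, ENC_BT2020, ENC_MAX };
    enum { RANGE_LIMITED, RANGE_FULL, RANGE_MAX };

    static const unsigned int coeffs[RANGE_MAX][ENC_MAX][3] = {
            { { 0x601, 0x601, 0x601 }, { 0x709, 0x709, 0x709 }, { 0x2020, 0x2020, 0x2020 } },
            { { 0x1601, 0x1601, 0x1601 }, { 0x1709, 0x1709, 0x1709 }, { 0x12020, 0x12020, 0x12020 } },
    };

    int main(void)
    {
            int encoding = 99, range = 99;  /* hypothetical out-of-range plane properties */

            /* Clamp unknown values to the BT601 limited-range defaults, as the driver does. */
            if (encoding >= ENC_MAX)
                    encoding = ENC_BT601;
            if (range >= RANGE_MAX)
                    range = RANGE_LIMITED;

            const unsigned int *ccm = coeffs[range][encoding];
            printf("CSC words: 0x%x 0x%x 0x%x\n", ccm[0], ccm[1], ccm[2]);
            return 0;
    }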
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index a87eafa89e9f..c5e3e5457737 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -97,7 +97,7 @@ static struct drm_gem_object *vgem_gem_create_object(struct drm_device *dev, siz
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
- return NULL;
+ return ERR_PTR(-ENOMEM);
/*
* vgem doesn't have any begin/end cpu access ioctls, therefore must use
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index c20c96a97a6c..5f25a8d15464 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -156,36 +156,6 @@ static void virtio_gpu_config_changed(struct virtio_device *vdev)
schedule_work(&vgdev->config_changed_work);
}
-static __poll_t virtio_gpu_poll(struct file *filp,
- struct poll_table_struct *wait)
-{
- struct drm_file *drm_file = filp->private_data;
- struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
- struct drm_device *dev = drm_file->minor->dev;
- struct virtio_gpu_device *vgdev = dev->dev_private;
- struct drm_pending_event *e = NULL;
- __poll_t mask = 0;
-
- if (!vgdev->has_virgl_3d || !vfpriv || !vfpriv->ring_idx_mask)
- return drm_poll(filp, wait);
-
- poll_wait(filp, &drm_file->event_wait, wait);
-
- if (!list_empty(&drm_file->event_list)) {
- spin_lock_irq(&dev->event_lock);
- e = list_first_entry(&drm_file->event_list,
- struct drm_pending_event, link);
- drm_file->event_space += e->event->length;
- list_del(&e->link);
- spin_unlock_irq(&dev->event_lock);
-
- kfree(e);
- mask |= EPOLLIN | EPOLLRDNORM;
- }
-
- return mask;
-}
-
static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_GPU, VIRTIO_DEV_ANY_ID },
{ 0 },
@@ -225,17 +195,7 @@ MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>");
MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_AUTHOR("Alon Levy");
-static const struct file_operations virtio_gpu_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .compat_ioctl = drm_compat_ioctl,
- .poll = virtio_gpu_poll,
- .read = drm_read,
- .llseek = noop_llseek,
- .mmap = drm_gem_mmap
-};
+DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);
static const struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index e0265fe74aa5..0a194aaad419 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -138,7 +138,6 @@ struct virtio_gpu_fence_driver {
spinlock_t lock;
};
-#define VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL 0x10000000
struct virtio_gpu_fence_event {
struct drm_pending_event base;
struct drm_event event;
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 0007e423d885..c708bab555c6 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -54,7 +54,7 @@ static int virtio_gpu_fence_event_create(struct drm_device *dev,
if (!e)
return -ENOMEM;
- e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL;
+ e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED;
e->event.length = sizeof(e->event);
ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 187e10da2f17..baef2c5f2aaf 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -139,7 +139,7 @@ struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
if (!shmem)
- return NULL;
+ return ERR_PTR(-ENOMEM);
dshmem = &shmem->base.base;
dshmem->base.funcs = &virtio_gpu_shmem_funcs;
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index c9ce47c448e0..a4fabe208d9f 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -4,6 +4,7 @@ config DRM_VMWGFX
depends on DRM && PCI && MMU
depends on X86 || ARM64
select DRM_TTM
+ select DRM_TTM_HELPER
select MAPPING_DIRTY_HELPERS
# Only needed for the transitional use of drm_crtc_init - can be removed
# again once vmwgfx sets up the primary plane itself.
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index bc323f7d4032..eee73b9aa404 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
+vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_hashtab.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
vmwgfx_cmd.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o vmwgfx_gmrid_manager.o vmwgfx_fence.o \
@@ -9,9 +9,9 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \
- vmwgfx_devcaps.o ttm_object.o ttm_memory.o
+ vmwgfx_devcaps.o ttm_object.o vmwgfx_system_manager.o \
+ vmwgfx_gem.o
vmwgfx-$(CONFIG_DRM_FBDEV_EMULATION) += vmwgfx_fb.o
-vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
index 945c84b27e81..d90d940ad3f4 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
@@ -1,6 +1,6 @@
-/**********************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
* Copyright 2012-2021 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -22,7 +22,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- **********************************************************/
+ */
/*
* svga3d_cmd.h --
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
index 379ec15c7758..815d0ab0053f 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
@@ -1,6 +1,6 @@
-/**********************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
* Copyright 1998-2021 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -22,7 +22,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- **********************************************************/
+ */
/*
* svga3d_devcaps.h --
@@ -347,6 +347,10 @@ typedef uint32 SVGA3dDevCapIndex;
#define SVGA3D_DEVCAP_SM5 258
#define SVGA3D_DEVCAP_MULTISAMPLE_8X 259
+#define SVGA3D_DEVCAP_MAX_FORCED_SAMPLE_COUNT 260
+
+#define SVGA3D_DEVCAP_GL43 261
+
#define SVGA3D_DEVCAP_MAX 262
#define SVGA3D_DXFMT_SUPPORTED (1 << 0)
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
index 5af442dad542..925bf4b93f01 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
@@ -1,6 +1,6 @@
-/**********************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
* Copyright 2012-2021 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -22,7 +22,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- **********************************************************/
+ */
/*
* svga3d_dx.h --
@@ -508,11 +508,11 @@ typedef struct SVGA3dCmdDXSetPredication {
#pragma pack(pop)
#pragma pack(push, 1)
-typedef struct MKS3dDXSOState {
+typedef struct SVGA3dDXSOState {
uint32 offset;
uint32 intOffset;
- uint32 vertexCount;
- uint32 dead;
+ uint32 dead1;
+ uint32 dead2;
} SVGA3dDXSOState;
#pragma pack(pop)
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
index 35494a728c7a..6103b41fe92b 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
@@ -1,6 +1,6 @@
-/**********************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
* Copyright 2012-2021 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -22,7 +22,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- **********************************************************/
+ */
/*
* svga3d_limits.h --
@@ -82,4 +82,6 @@
#define SVGA3D_MIN_SBX_DATA_SIZE (GBYTES_2_BYTES(1))
#define SVGA3D_MAX_SBX_DATA_SIZE (GBYTES_2_BYTES(4))
+#define SVGA3D_MIN_SBX_DATA_SIZE_DVM (MBYTES_2_BYTES(900))
+#define SVGA3D_MAX_SBX_DATA_SIZE_DVM (MBYTES_2_BYTES(910))
#endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
index 988d8509c472..b24b4f55c941 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
@@ -1,6 +1,6 @@
-/**********************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
* Copyright 1998-2015 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -22,7 +22,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- **********************************************************/
+ */
/*
* svga3d_reg.h --
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
index 70b88ee16cf6..e9219eb380a2 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
@@ -1,6 +1,6 @@
-/**********************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
* Copyright 2012-2021 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -22,7 +22,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- **********************************************************/
+ */
/*
* svga3d_types.h --
@@ -370,7 +370,6 @@ typedef enum SVGA3dSurfaceFormat {
#define SVGA3D_SURFACE_TRANSFER_FROM_BUFFER (CONST64U(1) << 30)
#define SVGA3D_SURFACE_RESERVED1 (CONST64U(1) << 31)
-#define SVGA3D_SURFACE_VADECODE SVGA3D_SURFACE_RESERVED1
#define SVGA3D_SURFACE_MULTISAMPLE (CONST64U(1) << 32)
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
index bf242c21f352..405f20fc26c2 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
@@ -1,6 +1,6 @@
-/**********************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
* Copyright 2007,2020 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -22,7 +22,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- **********************************************************/
+ */
/*
* svga_escape.h --
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
index aec17c3c6c29..691f48f77e33 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
@@ -1,6 +1,6 @@
-/**********************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
* Copyright 2007-2021 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -22,7 +22,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- **********************************************************/
+ */
/*
* svga_overlay.h --
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
index b3602557de2e..acabdb550c10 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
@@ -1,6 +1,6 @@
-/**********************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
* Copyright 1998-2021 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -22,7 +22,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- **********************************************************/
+ */
/*
* svga_reg.h --
@@ -442,6 +442,7 @@ typedef struct {
#define SVGA_CAP2_TRACE_FULL_FB 0x00002000
#define SVGA_CAP2_EXTRA_REGS 0x00004000
#define SVGA_CAP2_LO_STAGING 0x00008000
+#define SVGA_CAP2_VIDEO_BLT 0x00010000
#define SVGA_CAP2_RESERVED 0x80000000
typedef enum {
@@ -450,9 +451,10 @@ typedef enum {
SVGABackdoorCap3dHWVersion = 2,
SVGABackdoorCapDeviceCaps2 = 3,
SVGABackdoorCapDevelCaps = 4,
- SVGABackdoorDevelRenderer = 5,
- SVGABackdoorDevelUsingISB = 6,
- SVGABackdoorCapMax = 7,
+ SVGABackdoorCapDevCaps = 5,
+ SVGABackdoorDevelRenderer = 6,
+ SVGABackdoorDevelUsingISB = 7,
+ SVGABackdoorCapMax = 8,
} SVGABackdoorCapType;
enum {
diff --git a/drivers/gpu/drm/vmwgfx/ttm_memory.c b/drivers/gpu/drm/vmwgfx/ttm_memory.c
deleted file mode 100644
index 7f7fe35fc21d..000000000000
--- a/drivers/gpu/drm/vmwgfx/ttm_memory.c
+++ /dev/null
@@ -1,683 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR MIT */
-/**************************************************************************
- *
- * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#define pr_fmt(fmt) "[TTM] " fmt
-
-#include <linux/spinlock.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/swap.h>
-
-#include <drm/drm_device.h>
-#include <drm/drm_file.h>
-#include <drm/ttm/ttm_device.h>
-
-#include "ttm_memory.h"
-
-#define TTM_MEMORY_ALLOC_RETRIES 4
-
-struct ttm_mem_global ttm_mem_glob;
-EXPORT_SYMBOL(ttm_mem_glob);
-
-struct ttm_mem_zone {
- struct kobject kobj;
- struct ttm_mem_global *glob;
- const char *name;
- uint64_t zone_mem;
- uint64_t emer_mem;
- uint64_t max_mem;
- uint64_t swap_limit;
- uint64_t used_mem;
-};
-
-static struct attribute ttm_mem_sys = {
- .name = "zone_memory",
- .mode = S_IRUGO
-};
-static struct attribute ttm_mem_emer = {
- .name = "emergency_memory",
- .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_mem_max = {
- .name = "available_memory",
- .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_mem_swap = {
- .name = "swap_limit",
- .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_mem_used = {
- .name = "used_memory",
- .mode = S_IRUGO
-};
-
-static void ttm_mem_zone_kobj_release(struct kobject *kobj)
-{
- struct ttm_mem_zone *zone =
- container_of(kobj, struct ttm_mem_zone, kobj);
-
- pr_info("Zone %7s: Used memory at exit: %llu KiB\n",
- zone->name, (unsigned long long)zone->used_mem >> 10);
- kfree(zone);
-}
-
-static ssize_t ttm_mem_zone_show(struct kobject *kobj,
- struct attribute *attr,
- char *buffer)
-{
- struct ttm_mem_zone *zone =
- container_of(kobj, struct ttm_mem_zone, kobj);
- uint64_t val = 0;
-
- spin_lock(&zone->glob->lock);
- if (attr == &ttm_mem_sys)
- val = zone->zone_mem;
- else if (attr == &ttm_mem_emer)
- val = zone->emer_mem;
- else if (attr == &ttm_mem_max)
- val = zone->max_mem;
- else if (attr == &ttm_mem_swap)
- val = zone->swap_limit;
- else if (attr == &ttm_mem_used)
- val = zone->used_mem;
- spin_unlock(&zone->glob->lock);
-
- return snprintf(buffer, PAGE_SIZE, "%llu\n",
- (unsigned long long) val >> 10);
-}
-
-static void ttm_check_swapping(struct ttm_mem_global *glob);
-
-static ssize_t ttm_mem_zone_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t size)
-{
- struct ttm_mem_zone *zone =
- container_of(kobj, struct ttm_mem_zone, kobj);
- int chars;
- unsigned long val;
- uint64_t val64;
-
- chars = sscanf(buffer, "%lu", &val);
- if (chars == 0)
- return size;
-
- val64 = val;
- val64 <<= 10;
-
- spin_lock(&zone->glob->lock);
- if (val64 > zone->zone_mem)
- val64 = zone->zone_mem;
- if (attr == &ttm_mem_emer) {
- zone->emer_mem = val64;
- if (zone->max_mem > val64)
- zone->max_mem = val64;
- } else if (attr == &ttm_mem_max) {
- zone->max_mem = val64;
- if (zone->emer_mem < val64)
- zone->emer_mem = val64;
- } else if (attr == &ttm_mem_swap)
- zone->swap_limit = val64;
- spin_unlock(&zone->glob->lock);
-
- ttm_check_swapping(zone->glob);
-
- return size;
-}
-
-static struct attribute *ttm_mem_zone_attrs[] = {
- &ttm_mem_sys,
- &ttm_mem_emer,
- &ttm_mem_max,
- &ttm_mem_swap,
- &ttm_mem_used,
- NULL
-};
-
-static const struct sysfs_ops ttm_mem_zone_ops = {
- .show = &ttm_mem_zone_show,
- .store = &ttm_mem_zone_store
-};
-
-static struct kobj_type ttm_mem_zone_kobj_type = {
- .release = &ttm_mem_zone_kobj_release,
- .sysfs_ops = &ttm_mem_zone_ops,
- .default_attrs = ttm_mem_zone_attrs,
-};
-
-static struct attribute ttm_mem_global_lower_mem_limit = {
- .name = "lower_mem_limit",
- .mode = S_IRUGO | S_IWUSR
-};
-
-static ssize_t ttm_mem_global_show(struct kobject *kobj,
- struct attribute *attr,
- char *buffer)
-{
- struct ttm_mem_global *glob =
- container_of(kobj, struct ttm_mem_global, kobj);
- uint64_t val = 0;
-
- spin_lock(&glob->lock);
- val = glob->lower_mem_limit;
- spin_unlock(&glob->lock);
- /* convert from number of pages to KB */
- val <<= (PAGE_SHIFT - 10);
- return snprintf(buffer, PAGE_SIZE, "%llu\n",
- (unsigned long long) val);
-}
-
-static ssize_t ttm_mem_global_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t size)
-{
- int chars;
- uint64_t val64;
- unsigned long val;
- struct ttm_mem_global *glob =
- container_of(kobj, struct ttm_mem_global, kobj);
-
- chars = sscanf(buffer, "%lu", &val);
- if (chars == 0)
- return size;
-
- val64 = val;
- /* convert from KB to number of pages */
- val64 >>= (PAGE_SHIFT - 10);
-
- spin_lock(&glob->lock);
- glob->lower_mem_limit = val64;
- spin_unlock(&glob->lock);
-
- return size;
-}
-
-static struct attribute *ttm_mem_global_attrs[] = {
- &ttm_mem_global_lower_mem_limit,
- NULL
-};
-
-static const struct sysfs_ops ttm_mem_global_ops = {
- .show = &ttm_mem_global_show,
- .store = &ttm_mem_global_store,
-};
-
-static struct kobj_type ttm_mem_glob_kobj_type = {
- .sysfs_ops = &ttm_mem_global_ops,
- .default_attrs = ttm_mem_global_attrs,
-};
-
-static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
- bool from_wq, uint64_t extra)
-{
- unsigned int i;
- struct ttm_mem_zone *zone;
- uint64_t target;
-
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
-
- if (from_wq)
- target = zone->swap_limit;
- else if (capable(CAP_SYS_ADMIN))
- target = zone->emer_mem;
- else
- target = zone->max_mem;
-
- target = (extra > target) ? 0ULL : target;
-
- if (zone->used_mem > target)
- return true;
- }
- return false;
-}
-
-/*
- * At this point we only support a single shrink callback.
- * Extend this if needed, perhaps using a linked list of callbacks.
- * Note that this function is reentrant:
- * many threads may try to swap out at any given time.
- */
-
-static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
- uint64_t extra, struct ttm_operation_ctx *ctx)
-{
- int ret;
-
- spin_lock(&glob->lock);
-
- while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
- spin_unlock(&glob->lock);
- ret = ttm_global_swapout(ctx, GFP_KERNEL);
- spin_lock(&glob->lock);
- if (unlikely(ret <= 0))
- break;
- }
-
- spin_unlock(&glob->lock);
-}
-
-static void ttm_shrink_work(struct work_struct *work)
-{
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
- struct ttm_mem_global *glob =
- container_of(work, struct ttm_mem_global, work);
-
- ttm_shrink(glob, true, 0ULL, &ctx);
-}
-
-static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
- const struct sysinfo *si)
-{
- struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
- uint64_t mem;
- int ret;
-
- if (unlikely(!zone))
- return -ENOMEM;
-
- mem = si->totalram - si->totalhigh;
- mem *= si->mem_unit;
-
- zone->name = "kernel";
- zone->zone_mem = mem;
- zone->max_mem = mem >> 1;
- zone->emer_mem = (mem >> 1) + (mem >> 2);
- zone->swap_limit = zone->max_mem - (mem >> 3);
- zone->used_mem = 0;
- zone->glob = glob;
- glob->zone_kernel = zone;
- ret = kobject_init_and_add(
- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
- if (unlikely(ret != 0)) {
- kobject_put(&zone->kobj);
- return ret;
- }
- glob->zones[glob->num_zones++] = zone;
- return 0;
-}
-
-#ifdef CONFIG_HIGHMEM
-static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
- const struct sysinfo *si)
-{
- struct ttm_mem_zone *zone;
- uint64_t mem;
- int ret;
-
- if (si->totalhigh == 0)
- return 0;
-
- zone = kzalloc(sizeof(*zone), GFP_KERNEL);
- if (unlikely(!zone))
- return -ENOMEM;
-
- mem = si->totalram;
- mem *= si->mem_unit;
-
- zone->name = "highmem";
- zone->zone_mem = mem;
- zone->max_mem = mem >> 1;
- zone->emer_mem = (mem >> 1) + (mem >> 2);
- zone->swap_limit = zone->max_mem - (mem >> 3);
- zone->used_mem = 0;
- zone->glob = glob;
- glob->zone_highmem = zone;
- ret = kobject_init_and_add(
- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
- zone->name);
- if (unlikely(ret != 0)) {
- kobject_put(&zone->kobj);
- return ret;
- }
- glob->zones[glob->num_zones++] = zone;
- return 0;
-}
-#else
-static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
- const struct sysinfo *si)
-{
- struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
- uint64_t mem;
- int ret;
-
- if (unlikely(!zone))
- return -ENOMEM;
-
- mem = si->totalram;
- mem *= si->mem_unit;
-
- /**
- * No special dma32 zone needed.
- */
-
- if (mem <= ((uint64_t) 1ULL << 32)) {
- kfree(zone);
- return 0;
- }
-
- /*
- * Limit max dma32 memory to 4GB for now
- * until we can figure out how big this
- * zone really is.
- */
-
- mem = ((uint64_t) 1ULL << 32);
- zone->name = "dma32";
- zone->zone_mem = mem;
- zone->max_mem = mem >> 1;
- zone->emer_mem = (mem >> 1) + (mem >> 2);
- zone->swap_limit = zone->max_mem - (mem >> 3);
- zone->used_mem = 0;
- zone->glob = glob;
- glob->zone_dma32 = zone;
- ret = kobject_init_and_add(
- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
- if (unlikely(ret != 0)) {
- kobject_put(&zone->kobj);
- return ret;
- }
- glob->zones[glob->num_zones++] = zone;
- return 0;
-}
-#endif
-
-int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev)
-{
- struct sysinfo si;
- int ret;
- int i;
- struct ttm_mem_zone *zone;
-
- spin_lock_init(&glob->lock);
- glob->swap_queue = create_singlethread_workqueue("ttm_swap");
- INIT_WORK(&glob->work, ttm_shrink_work);
-
- ret = kobject_init_and_add(&glob->kobj, &ttm_mem_glob_kobj_type,
- &dev->kobj, "memory_accounting");
- if (unlikely(ret != 0)) {
- kobject_put(&glob->kobj);
- return ret;
- }
-
- si_meminfo(&si);
-
- spin_lock(&glob->lock);
- /* set it as 0 by default to keep original behavior of OOM */
- glob->lower_mem_limit = 0;
- spin_unlock(&glob->lock);
-
- ret = ttm_mem_init_kernel_zone(glob, &si);
- if (unlikely(ret != 0))
- goto out_no_zone;
-#ifdef CONFIG_HIGHMEM
- ret = ttm_mem_init_highmem_zone(glob, &si);
- if (unlikely(ret != 0))
- goto out_no_zone;
-#else
- ret = ttm_mem_init_dma32_zone(glob, &si);
- if (unlikely(ret != 0))
- goto out_no_zone;
-#endif
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- pr_info("Zone %7s: Available graphics memory: %llu KiB\n",
- zone->name, (unsigned long long)zone->max_mem >> 10);
- }
- return 0;
-out_no_zone:
- ttm_mem_global_release(glob);
- return ret;
-}
-
-void ttm_mem_global_release(struct ttm_mem_global *glob)
-{
- struct ttm_mem_zone *zone;
- unsigned int i;
-
- destroy_workqueue(glob->swap_queue);
- glob->swap_queue = NULL;
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- kobject_del(&zone->kobj);
- kobject_put(&zone->kobj);
- }
- kobject_del(&glob->kobj);
- kobject_put(&glob->kobj);
- memset(glob, 0, sizeof(*glob));
-}
-
-static void ttm_check_swapping(struct ttm_mem_global *glob)
-{
- bool needs_swapping = false;
- unsigned int i;
- struct ttm_mem_zone *zone;
-
- spin_lock(&glob->lock);
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- if (zone->used_mem > zone->swap_limit) {
- needs_swapping = true;
- break;
- }
- }
-
- spin_unlock(&glob->lock);
-
- if (unlikely(needs_swapping))
- (void)queue_work(glob->swap_queue, &glob->work);
-
-}
-
-static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
- struct ttm_mem_zone *single_zone,
- uint64_t amount)
-{
- unsigned int i;
- struct ttm_mem_zone *zone;
-
- spin_lock(&glob->lock);
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- if (single_zone && zone != single_zone)
- continue;
- zone->used_mem -= amount;
- }
- spin_unlock(&glob->lock);
-}
-
-void ttm_mem_global_free(struct ttm_mem_global *glob,
- uint64_t amount)
-{
- return ttm_mem_global_free_zone(glob, glob->zone_kernel, amount);
-}
-EXPORT_SYMBOL(ttm_mem_global_free);
-
-/*
- * check if the available mem is under lower memory limit
- *
- * a. if no swap disk at all or free swap space is under swap_mem_limit
- * but available system mem is bigger than sys_mem_limit, allow TTM
- * allocation;
- *
- * b. if the available system mem is less than sys_mem_limit but free
- * swap disk is bigger than swap_mem_limit, allow TTM allocation.
- */
-bool
-ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
- uint64_t num_pages,
- struct ttm_operation_ctx *ctx)
-{
- int64_t available;
-
- /* We allow over commit during suspend */
- if (ctx->force_alloc)
- return false;
-
- available = get_nr_swap_pages() + si_mem_available();
- available -= num_pages;
- if (available < glob->lower_mem_limit)
- return true;
-
- return false;
-}
-
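As an illustration of the check above: a request is rejected once free swap plus available system memory, minus the requested pages, would drop below the configured limit. The numbers below are invented for the example and assume 4 KiB pages:

	int64_t lower_mem_limit = 65536;            /* 256 MiB expressed in pages     */
	int64_t available       = 50000 + 30000;    /* swap pages + free system pages */
	int64_t num_pages       = 20000;            /* pages being requested          */
	bool under_limit = (available - num_pages) < lower_mem_limit;   /* true */
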
-static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
- struct ttm_mem_zone *single_zone,
- uint64_t amount, bool reserve)
-{
- uint64_t limit;
- int ret = -ENOMEM;
- unsigned int i;
- struct ttm_mem_zone *zone;
-
- spin_lock(&glob->lock);
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- if (single_zone && zone != single_zone)
- continue;
-
- limit = (capable(CAP_SYS_ADMIN)) ?
- zone->emer_mem : zone->max_mem;
-
- if (zone->used_mem > limit)
- goto out_unlock;
- }
-
- if (reserve) {
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- if (single_zone && zone != single_zone)
- continue;
- zone->used_mem += amount;
- }
- }
-
- ret = 0;
-out_unlock:
- spin_unlock(&glob->lock);
- ttm_check_swapping(glob);
-
- return ret;
-}
-
-
-static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
- struct ttm_mem_zone *single_zone,
- uint64_t memory,
- struct ttm_operation_ctx *ctx)
-{
- int count = TTM_MEMORY_ALLOC_RETRIES;
-
- while (unlikely(ttm_mem_global_reserve(glob,
- single_zone,
- memory, true)
- != 0)) {
- if (ctx->no_wait_gpu)
- return -ENOMEM;
- if (unlikely(count-- == 0))
- return -ENOMEM;
- ttm_shrink(glob, false, memory + (memory >> 2) + 16, ctx);
- }
-
- return 0;
-}
-
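The shrink target passed above asks for roughly 25% headroom on top of the failed reservation plus a small constant. For a hypothetical 1 MiB request:

	uint64_t memory = 1ULL << 20;                    /* 1 MiB reservation        */
	uint64_t extra  = memory + (memory >> 2) + 16;   /* 1310736 bytes to free up */
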
-int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
- struct ttm_operation_ctx *ctx)
-{
- /**
- * Normal allocations of kernel memory are registered in
- * the kernel zone.
- */
-
- return ttm_mem_global_alloc_zone(glob, glob->zone_kernel, memory, ctx);
-}
-EXPORT_SYMBOL(ttm_mem_global_alloc);
-
-int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
- struct page *page, uint64_t size,
- struct ttm_operation_ctx *ctx)
-{
- struct ttm_mem_zone *zone = NULL;
-
- /**
- * Page allocations may be registed in a single zone
- * only if highmem or !dma32.
- */
-
-#ifdef CONFIG_HIGHMEM
- if (PageHighMem(page) && glob->zone_highmem != NULL)
- zone = glob->zone_highmem;
-#else
- if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
- zone = glob->zone_kernel;
-#endif
- return ttm_mem_global_alloc_zone(glob, zone, size, ctx);
-}
-
-void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page,
- uint64_t size)
-{
- struct ttm_mem_zone *zone = NULL;
-
-#ifdef CONFIG_HIGHMEM
- if (PageHighMem(page) && glob->zone_highmem != NULL)
- zone = glob->zone_highmem;
-#else
- if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
- zone = glob->zone_kernel;
-#endif
- ttm_mem_global_free_zone(glob, zone, size);
-}
-
-size_t ttm_round_pot(size_t size)
-{
- if ((size & (size - 1)) == 0)
- return size;
- else if (size > PAGE_SIZE)
- return PAGE_ALIGN(size);
- else {
- size_t tmp_size = 4;
-
- while (tmp_size < size)
- tmp_size <<= 1;
-
- return tmp_size;
- }
- return 0;
-}
-EXPORT_SYMBOL(ttm_round_pot);
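For reference, the rounding performed by the helper above, assuming a 4 KiB PAGE_SIZE (illustrative inputs only):

	ttm_round_pot(96);    /* not a power of two, <= PAGE_SIZE -> 128  */
	ttm_round_pot(512);   /* already a power of two           -> 512  */
	ttm_round_pot(5000);  /* larger than PAGE_SIZE            -> 8192 */
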
diff --git a/drivers/gpu/drm/vmwgfx/ttm_memory.h b/drivers/gpu/drm/vmwgfx/ttm_memory.h
deleted file mode 100644
index c50dba774485..000000000000
--- a/drivers/gpu/drm/vmwgfx/ttm_memory.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#ifndef TTM_MEMORY_H
-#define TTM_MEMORY_H
-
-#include <linux/workqueue.h>
-#include <linux/spinlock.h>
-#include <linux/bug.h>
-#include <linux/wait.h>
-#include <linux/errno.h>
-#include <linux/kobject.h>
-#include <linux/mm.h>
-
-#include <drm/ttm/ttm_bo_api.h>
-
-/**
- * struct ttm_mem_global - Global memory accounting structure.
- *
- * @shrink: A single callback to shrink TTM memory usage. Extend this
- * to a linked list to be able to handle multiple callbacks when needed.
- * @swap_queue: A workqueue to handle shrinking in low memory situations. We
- * need a separate workqueue since it will spend a lot of time waiting
- * for the GPU, and this will otherwise block other workqueue tasks(?)
- * At this point we use only a single-threaded workqueue.
- * @work: The workqueue callback for the shrink queue.
- * @lock: Lock to protect the @shrink - and the memory accounting members,
- * that is, essentially the whole structure with some exceptions.
- * @lower_mem_limit: include lower limit of swap space and lower limit of
- * system memory.
- * @zones: Array of pointers to accounting zones.
- * @num_zones: Number of populated entries in the @zones array.
- * @zone_kernel: Pointer to the kernel zone.
- * @zone_highmem: Pointer to the highmem zone if there is one.
- * @zone_dma32: Pointer to the dma32 zone if there is one.
- *
- * Note that this structure is not per device. It should be global for all
- * graphics devices.
- */
-
-#define TTM_MEM_MAX_ZONES 2
-struct ttm_mem_zone;
-extern struct ttm_mem_global {
- struct kobject kobj;
- struct workqueue_struct *swap_queue;
- struct work_struct work;
- spinlock_t lock;
- uint64_t lower_mem_limit;
- struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
- unsigned int num_zones;
- struct ttm_mem_zone *zone_kernel;
-#ifdef CONFIG_HIGHMEM
- struct ttm_mem_zone *zone_highmem;
-#else
- struct ttm_mem_zone *zone_dma32;
-#endif
-} ttm_mem_glob;
-
-int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev);
-void ttm_mem_global_release(struct ttm_mem_global *glob);
-int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
- struct ttm_operation_ctx *ctx);
-void ttm_mem_global_free(struct ttm_mem_global *glob, uint64_t amount);
-int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
- struct page *page, uint64_t size,
- struct ttm_operation_ctx *ctx);
-void ttm_mem_global_free_page(struct ttm_mem_global *glob,
- struct page *page, uint64_t size);
-size_t ttm_round_pot(size_t size);
-bool ttm_check_under_lowerlimit(struct ttm_mem_global *glob, uint64_t num_pages,
- struct ttm_operation_ctx *ctx);
-#endif
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c
index 899945f54dc7..26a55fef1ab5 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.c
@@ -50,6 +50,7 @@
#include <linux/atomic.h>
#include <linux/module.h>
#include "ttm_object.h"
+#include "vmwgfx_drv.h"
MODULE_IMPORT_NS(DMA_BUF);
@@ -73,7 +74,7 @@ struct ttm_object_file {
struct ttm_object_device *tdev;
spinlock_t lock;
struct list_head ref_list;
- struct drm_open_hash ref_hash[TTM_REF_NUM];
+ struct vmwgfx_open_hash ref_hash;
struct kref refcount;
};
@@ -91,12 +92,10 @@ struct ttm_object_file {
struct ttm_object_device {
spinlock_t object_lock;
- struct drm_open_hash object_hash;
+ struct vmwgfx_open_hash object_hash;
atomic_t object_count;
- struct ttm_mem_global *mem_glob;
struct dma_buf_ops ops;
void (*dmabuf_release)(struct dma_buf *dma_buf);
- size_t dma_buf_size;
struct idr idr;
};
@@ -123,10 +122,9 @@ struct ttm_object_device {
struct ttm_ref_object {
struct rcu_head rcu_head;
- struct drm_hash_item hash;
+ struct vmwgfx_hash_item hash;
struct list_head head;
struct kref kref;
- enum ttm_ref_type ref_type;
struct ttm_base_object *obj;
struct ttm_object_file *tfile;
};
@@ -162,9 +160,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
struct ttm_base_object *base,
bool shareable,
enum ttm_object_type object_type,
- void (*refcount_release) (struct ttm_base_object **),
- void (*ref_obj_release) (struct ttm_base_object *,
- enum ttm_ref_type ref_type))
+ void (*refcount_release) (struct ttm_base_object **))
{
struct ttm_object_device *tdev = tfile->tdev;
int ret;
@@ -172,7 +168,6 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
base->shareable = shareable;
base->tfile = ttm_object_file_ref(tfile);
base->refcount_release = refcount_release;
- base->ref_obj_release = ref_obj_release;
base->object_type = object_type;
kref_init(&base->refcount);
idr_preload(GFP_KERNEL);
@@ -184,7 +179,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
return ret;
base->handle = ret;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
+ ret = ttm_ref_object_add(tfile, base, NULL, false);
if (unlikely(ret != 0))
goto out_err1;
@@ -247,12 +242,12 @@ void ttm_base_object_unref(struct ttm_base_object **p_base)
struct ttm_base_object *
ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key)
{
- struct drm_hash_item *hash;
- struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
+ struct vmwgfx_hash_item *hash;
+ struct vmwgfx_open_hash *ht = &tfile->ref_hash;
int ret;
rcu_read_lock();
- ret = drm_ht_find_item_rcu(ht, key, &hash);
+ ret = vmwgfx_ht_find_item_rcu(ht, key, &hash);
if (ret) {
rcu_read_unlock();
return NULL;
@@ -267,12 +262,12 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
uint32_t key)
{
struct ttm_base_object *base = NULL;
- struct drm_hash_item *hash;
- struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
+ struct vmwgfx_hash_item *hash;
+ struct vmwgfx_open_hash *ht = &tfile->ref_hash;
int ret;
rcu_read_lock();
- ret = drm_ht_find_item_rcu(ht, key, &hash);
+ ret = vmwgfx_ht_find_item_rcu(ht, key, &hash);
if (likely(ret == 0)) {
base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
@@ -299,64 +294,14 @@ ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
return base;
}
-/**
- * ttm_ref_object_exists - Check whether a caller has a valid ref object
- * (has opened) a base object.
- *
- * @tfile: Pointer to a struct ttm_object_file identifying the caller.
- * @base: Pointer to a struct base object.
- *
- * Checks wether the caller identified by @tfile has put a valid USAGE
- * reference object on the base object identified by @base.
- */
-bool ttm_ref_object_exists(struct ttm_object_file *tfile,
- struct ttm_base_object *base)
-{
- struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
- struct drm_hash_item *hash;
- struct ttm_ref_object *ref;
-
- rcu_read_lock();
- if (unlikely(drm_ht_find_item_rcu(ht, base->handle, &hash) != 0))
- goto out_false;
-
- /*
- * Verify that the ref object is really pointing to our base object.
- * Our base object could actually be dead, and the ref object pointing
- * to another base object with the same handle.
- */
- ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
- if (unlikely(base != ref->obj))
- goto out_false;
-
- /*
- * Verify that the ref->obj pointer was actually valid!
- */
- rmb();
- if (unlikely(kref_read(&ref->kref) == 0))
- goto out_false;
-
- rcu_read_unlock();
- return true;
-
- out_false:
- rcu_read_unlock();
- return false;
-}
-
int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base,
- enum ttm_ref_type ref_type, bool *existed,
+ bool *existed,
bool require_existed)
{
- struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+ struct vmwgfx_open_hash *ht = &tfile->ref_hash;
struct ttm_ref_object *ref;
- struct drm_hash_item *hash;
- struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
+ struct vmwgfx_hash_item *hash;
int ret = -EINVAL;
if (base->tfile != tfile && !base->shareable)
@@ -367,7 +312,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
while (ret == -EINVAL) {
rcu_read_lock();
- ret = drm_ht_find_item_rcu(ht, base->handle, &hash);
+ ret = vmwgfx_ht_find_item_rcu(ht, base->handle, &hash);
if (ret == 0) {
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
@@ -381,24 +326,18 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
if (require_existed)
return -EPERM;
- ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
- &ctx);
- if (unlikely(ret != 0))
- return ret;
ref = kmalloc(sizeof(*ref), GFP_KERNEL);
if (unlikely(ref == NULL)) {
- ttm_mem_global_free(mem_glob, sizeof(*ref));
return -ENOMEM;
}
ref->hash.key = base->handle;
ref->obj = base;
ref->tfile = tfile;
- ref->ref_type = ref_type;
kref_init(&ref->kref);
spin_lock(&tfile->lock);
- ret = drm_ht_insert_item_rcu(ht, &ref->hash);
+ ret = vmwgfx_ht_insert_item_rcu(ht, &ref->hash);
if (likely(ret == 0)) {
list_add_tail(&ref->head, &tfile->ref_list);
@@ -412,7 +351,6 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
spin_unlock(&tfile->lock);
BUG_ON(ret != -EINVAL);
- ttm_mem_global_free(mem_glob, sizeof(*ref));
kfree(ref);
}
@@ -424,35 +362,29 @@ ttm_ref_object_release(struct kref *kref)
{
struct ttm_ref_object *ref =
container_of(kref, struct ttm_ref_object, kref);
- struct ttm_base_object *base = ref->obj;
struct ttm_object_file *tfile = ref->tfile;
- struct drm_open_hash *ht;
- struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
+ struct vmwgfx_open_hash *ht;
- ht = &tfile->ref_hash[ref->ref_type];
- (void)drm_ht_remove_item_rcu(ht, &ref->hash);
+ ht = &tfile->ref_hash;
+ (void)vmwgfx_ht_remove_item_rcu(ht, &ref->hash);
list_del(&ref->head);
spin_unlock(&tfile->lock);
- if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
- base->ref_obj_release(base, ref->ref_type);
-
ttm_base_object_unref(&ref->obj);
- ttm_mem_global_free(mem_glob, sizeof(*ref));
kfree_rcu(ref, rcu_head);
spin_lock(&tfile->lock);
}
int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
- unsigned long key, enum ttm_ref_type ref_type)
+ unsigned long key)
{
- struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+ struct vmwgfx_open_hash *ht = &tfile->ref_hash;
struct ttm_ref_object *ref;
- struct drm_hash_item *hash;
+ struct vmwgfx_hash_item *hash;
int ret;
spin_lock(&tfile->lock);
- ret = drm_ht_find_item(ht, key, &hash);
+ ret = vmwgfx_ht_find_item(ht, key, &hash);
if (unlikely(ret != 0)) {
spin_unlock(&tfile->lock);
return -EINVAL;
@@ -467,7 +399,6 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
struct ttm_ref_object *ref;
struct list_head *list;
- unsigned int i;
struct ttm_object_file *tfile = *p_tfile;
*p_tfile = NULL;
@@ -485,8 +416,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
}
spin_unlock(&tfile->lock);
- for (i = 0; i < TTM_REF_NUM; ++i)
- drm_ht_remove(&tfile->ref_hash[i]);
+ vmwgfx_ht_remove(&tfile->ref_hash);
ttm_object_file_unref(&tfile);
}
@@ -495,8 +425,6 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
unsigned int hash_order)
{
struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
- unsigned int i;
- unsigned int j = 0;
int ret;
if (unlikely(tfile == NULL))
@@ -507,18 +435,13 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
kref_init(&tfile->refcount);
INIT_LIST_HEAD(&tfile->ref_list);
- for (i = 0; i < TTM_REF_NUM; ++i) {
- ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
- if (ret) {
- j = i;
- goto out_err;
- }
- }
+ ret = vmwgfx_ht_create(&tfile->ref_hash, hash_order);
+ if (ret)
+ goto out_err;
return tfile;
out_err:
- for (i = 0; i < j; ++i)
- drm_ht_remove(&tfile->ref_hash[i]);
+ vmwgfx_ht_remove(&tfile->ref_hash);
kfree(tfile);
@@ -526,8 +449,7 @@ out_err:
}
struct ttm_object_device *
-ttm_object_device_init(struct ttm_mem_global *mem_glob,
- unsigned int hash_order,
+ttm_object_device_init(unsigned int hash_order,
const struct dma_buf_ops *ops)
{
struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
@@ -536,19 +458,24 @@ ttm_object_device_init(struct ttm_mem_global *mem_glob,
if (unlikely(tdev == NULL))
return NULL;
- tdev->mem_glob = mem_glob;
spin_lock_init(&tdev->object_lock);
atomic_set(&tdev->object_count, 0);
- ret = drm_ht_create(&tdev->object_hash, hash_order);
+ ret = vmwgfx_ht_create(&tdev->object_hash, hash_order);
if (ret != 0)
goto out_no_object_hash;
- idr_init_base(&tdev->idr, 1);
+ /*
+ * Our base is at VMWGFX_NUM_MOB + 1 because we want to create
+	 * a separate namespace for GEM handles (which are
+	 * 1..VMWGFX_NUM_MOB) and the surface handles. Some ioctls
+ * can take either handle as an argument so we want to
+ * easily be able to tell whether the handle refers to a
+ * GEM buffer or a surface.
+ */
+ idr_init_base(&tdev->idr, VMWGFX_NUM_MOB + 1);
tdev->ops = *ops;
tdev->dmabuf_release = tdev->ops.release;
tdev->ops.release = ttm_prime_dmabuf_release;
- tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
- ttm_round_pot(sizeof(struct file));
return tdev;
out_no_object_hash:
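A minimal sketch of the handle-range split described in the comment above; handle_is_gem() is a hypothetical helper invented for illustration, not part of the patch:

	/* Hypothetical: GEM buffer handles occupy 1..VMWGFX_NUM_MOB, so any
	 * handle above that range must name a surface from this idr. */
	static inline bool handle_is_gem(u32 handle)
	{
		return handle >= 1 && handle <= VMWGFX_NUM_MOB;
	}
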
@@ -564,7 +491,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
idr_destroy(&tdev->idr);
- drm_ht_remove(&tdev->object_hash);
+ vmwgfx_ht_remove(&tdev->object_hash);
kfree(tdev);
}
@@ -633,7 +560,6 @@ static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
if (prime->dma_buf == dma_buf)
prime->dma_buf = NULL;
mutex_unlock(&prime->mutex);
- ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
ttm_base_object_unref(&base);
}
@@ -667,7 +593,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
prime = (struct ttm_prime_object *) dma_buf->priv;
base = &prime->base;
*handle = base->handle;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
+ ret = ttm_ref_object_add(tfile, base, NULL, false);
dma_buf_put(dma_buf);
@@ -715,30 +641,18 @@ int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
dma_buf = prime->dma_buf;
if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
- struct ttm_operation_ctx ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
exp_info.ops = &tdev->ops;
exp_info.size = prime->size;
exp_info.flags = flags;
exp_info.priv = prime;
/*
- * Need to create a new dma_buf, with memory accounting.
+ * Need to create a new dma_buf
*/
- ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
- &ctx);
- if (unlikely(ret != 0)) {
- mutex_unlock(&prime->mutex);
- goto out_unref;
- }
dma_buf = dma_buf_export(&exp_info);
if (IS_ERR(dma_buf)) {
ret = PTR_ERR(dma_buf);
- ttm_mem_global_free(tdev->mem_glob,
- tdev->dma_buf_size);
mutex_unlock(&prime->mutex);
goto out_unref;
}
@@ -773,7 +687,6 @@ out_unref:
* @shareable: See ttm_base_object_init
* @type: See ttm_base_object_init
* @refcount_release: See ttm_base_object_init
- * @ref_obj_release: See ttm_base_object_init
*
* Initializes an object which is compatible with the drm_prime model
* for data sharing between processes and devices.
@@ -781,9 +694,7 @@ out_unref:
int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
struct ttm_prime_object *prime, bool shareable,
enum ttm_object_type type,
- void (*refcount_release) (struct ttm_base_object **),
- void (*ref_obj_release) (struct ttm_base_object *,
- enum ttm_ref_type ref_type))
+ void (*refcount_release) (struct ttm_base_object **))
{
mutex_init(&prime->mutex);
prime->size = PAGE_ALIGN(size);
@@ -792,6 +703,5 @@ int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
prime->refcount_release = refcount_release;
return ttm_base_object_init(tfile, &prime->base, shareable,
ttm_prime_type,
- ttm_prime_refcount_release,
- ref_obj_release);
+ ttm_prime_refcount_release);
}
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.h b/drivers/gpu/drm/vmwgfx/ttm_object.h
index 49b064f0cb19..4c8700027c6d 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.h
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.h
@@ -42,31 +42,7 @@
#include <linux/list.h>
#include <linux/rcupdate.h>
-#include <drm/drm_hashtab.h>
-
-#include "ttm_memory.h"
-
-/**
- * enum ttm_ref_type
- *
- * Describes what type of reference a ref object holds.
- *
- * TTM_REF_USAGE is a simple refcount on a base object.
- *
- * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
- * buffer object.
- *
- * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
- * buffer object.
- *
- */
-
-enum ttm_ref_type {
- TTM_REF_USAGE,
- TTM_REF_SYNCCPU_READ,
- TTM_REF_SYNCCPU_WRITE,
- TTM_REF_NUM
-};
+#include "vmwgfx_hashtab.h"
/**
* enum ttm_object_type
@@ -78,7 +54,6 @@ enum ttm_ref_type {
enum ttm_object_type {
ttm_fence_type,
- ttm_buffer_type,
ttm_lock_type,
ttm_prime_type,
ttm_driver_type0 = 256,
@@ -129,8 +104,6 @@ struct ttm_base_object {
struct ttm_object_file *tfile;
struct kref refcount;
void (*refcount_release) (struct ttm_base_object **base);
- void (*ref_obj_release) (struct ttm_base_object *base,
- enum ttm_ref_type ref_type);
u32 handle;
enum ttm_object_type object_type;
u32 shareable;
@@ -179,11 +152,7 @@ extern int ttm_base_object_init(struct ttm_object_file *tfile,
bool shareable,
enum ttm_object_type type,
void (*refcount_release) (struct ttm_base_object
- **),
- void (*ref_obj_release) (struct ttm_base_object
- *,
- enum ttm_ref_type
- ref_type));
+ **));
/**
* ttm_base_object_lookup
@@ -247,12 +216,9 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
*/
extern int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base,
- enum ttm_ref_type ref_type, bool *existed,
+ bool *existed,
bool require_existed);
-extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
- struct ttm_base_object *base);
-
/**
* ttm_ref_object_base_unref
*
@@ -265,8 +231,7 @@ extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
* will be unreferenced.
*/
extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
- unsigned long key,
- enum ttm_ref_type ref_type);
+ unsigned long key);
/**
* ttm_object_file_init - initialize a struct ttm_object file
@@ -297,7 +262,6 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
/**
* ttm_object device init - initialize a struct ttm_object_device
*
- * @mem_glob: struct ttm_mem_global for memory accounting.
* @hash_order: Order of hash table used to hash the base objects.
* @ops: DMA buf ops for prime objects of this device.
*
@@ -306,8 +270,7 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
*/
extern struct ttm_object_device *
-ttm_object_device_init(struct ttm_mem_global *mem_glob,
- unsigned int hash_order,
+ttm_object_device_init(unsigned int hash_order,
const struct dma_buf_ops *ops);
/**
@@ -332,10 +295,7 @@ extern int ttm_prime_object_init(struct ttm_object_file *tfile,
bool shareable,
enum ttm_object_type type,
void (*refcount_release)
- (struct ttm_base_object **),
- void (*ref_obj_release)
- (struct ttm_base_object *,
- enum ttm_ref_type ref_type));
+ (struct ttm_base_object **));
static inline enum ttm_object_type
ttm_base_object_type(struct ttm_base_object *base)
@@ -353,13 +313,6 @@ extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
#define ttm_prime_object_kfree(__obj, __prime) \
kfree_rcu(__obj, __prime.base.rhead)
-/*
- * Extra memory required by the base object's idr storage, which is allocated
- * separately from the base object itself. We estimate an on-average 128 bytes
- * per idr.
- */
-#define TTM_OBJ_EXTRA_SIZE 128
-
struct ttm_base_object *
ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
index 6f27d69bad0e..ae2de914eb89 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -354,6 +354,27 @@ void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
}
/**
+ * vmw_binding_cb_offset_update - Update the offset of a cb binding
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ * @shader_slot: The shader slot of the binding.
+ * @slot: The slot of the binding.
+ * @offsetInBytes: The new offset of the binding.
+ *
+ * Updates the offset of an existing cb binding in the context binding
+ * state structure @cbs.
+ */
+void vmw_binding_cb_offset_update(struct vmw_ctx_binding_state *cbs,
+ u32 shader_slot, u32 slot, u32 offsetInBytes)
+{
+ struct vmw_ctx_bindinfo *loc =
+ vmw_binding_loc(cbs, vmw_ctx_binding_cb, shader_slot, slot);
+ struct vmw_ctx_bindinfo_cb *loc_cb =
+ (struct vmw_ctx_bindinfo_cb *)((u8 *) loc);
+ loc_cb->offset = offsetInBytes;
+}
+
+/**
* vmw_binding_add_uav_index - Add UAV index for tracking.
* @cbs: Pointer to the context binding state tracker.
* @slot: UAV type to which bind this index.
@@ -1070,7 +1091,7 @@ static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs)
size_t cmd_size, view_id_size;
const struct vmw_resource *ctx = vmw_cbs_context(cbs);
- vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS);
+ vmw_collect_view_ids(cbs, loc, vmw_max_num_uavs(cbs->dev_priv));
view_id_size = cbs->bind_cmd_count*sizeof(uint32);
cmd_size = sizeof(*cmd) + view_id_size;
cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
@@ -1100,7 +1121,7 @@ static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs)
size_t cmd_size, view_id_size;
const struct vmw_resource *ctx = vmw_cbs_context(cbs);
- vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS);
+ vmw_collect_view_ids(cbs, loc, vmw_max_num_uavs(cbs->dev_priv));
view_id_size = cbs->bind_cmd_count*sizeof(uint32);
cmd_size = sizeof(*cmd) + view_id_size;
cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
@@ -1327,8 +1348,7 @@ static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
}
/**
- * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with
- * memory accounting.
+ * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state.
*
* @dev_priv: Pointer to a device private structure.
*
@@ -1338,20 +1358,9 @@ struct vmw_ctx_binding_state *
vmw_binding_state_alloc(struct vmw_private *dev_priv)
{
struct vmw_ctx_binding_state *cbs;
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
- int ret;
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs),
- &ctx);
- if (ret)
- return ERR_PTR(ret);
cbs = vzalloc(sizeof(*cbs));
if (!cbs) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
return ERR_PTR(-ENOMEM);
}
@@ -1362,17 +1371,13 @@ vmw_binding_state_alloc(struct vmw_private *dev_priv)
}
/**
- * vmw_binding_state_free - Free a struct vmw_ctx_binding_state and its
- * memory accounting info.
+ * vmw_binding_state_free - Free a struct vmw_ctx_binding_state.
*
* @cbs: Pointer to the struct vmw_ctx_binding_state to be freed.
*/
void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs)
{
- struct vmw_private *dev_priv = cbs->dev_priv;
-
vfree(cbs);
- ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
index dcb71fd0bb3b..85b90f7d398d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
@@ -200,7 +200,7 @@ struct vmw_dx_shader_bindings {
* @splice_index: The device splice index set by user-space.
*/
struct vmw_ctx_bindinfo_uav {
- struct vmw_ctx_bindinfo_view views[SVGA3D_MAX_UAVIEWS];
+ struct vmw_ctx_bindinfo_view views[SVGA3D_DX11_1_MAX_UAVIEWS];
uint32 index;
};
@@ -217,6 +217,8 @@ struct vmw_ctx_bindinfo_so {
extern void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
const struct vmw_ctx_bindinfo *ci,
u32 shader_slot, u32 slot);
+extern void vmw_binding_cb_offset_update(struct vmw_ctx_binding_state *cbs,
+ u32 shader_slot, u32 slot, u32 offsetInBytes);
extern void vmw_binding_add_uav_index(struct vmw_ctx_binding_state *cbs,
uint32 slot, uint32 splice_index);
extern void
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index fd007f1c1776..31aecc46624b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -33,18 +33,6 @@
/**
- * struct vmw_user_buffer_object - User-space-visible buffer object
- *
- * @prime: The prime object providing user visibility.
- * @vbo: The struct vmw_buffer_object
- */
-struct vmw_user_buffer_object {
- struct ttm_prime_object prime;
- struct vmw_buffer_object vbo;
-};
-
-
-/**
* vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
* vmw_buffer_object.
*
@@ -60,23 +48,6 @@ vmw_buffer_object(struct ttm_buffer_object *bo)
/**
- * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
- * vmw_user_buffer_object.
- *
- * @bo: Pointer to the TTM buffer object.
- * Return: Pointer to the struct vmw_buffer_object embedding the TTM buffer
- * object.
- */
-static struct vmw_user_buffer_object *
-vmw_user_buffer_object(struct ttm_buffer_object *bo)
-{
- struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
-
- return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
-}
-
-
-/**
* vmw_bo_pin_in_placement - Validate a buffer to placement.
*
* @dev_priv: Driver private.
@@ -392,39 +363,6 @@ void vmw_bo_unmap(struct vmw_buffer_object *vbo)
/**
- * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
- *
- * @dev_priv: Pointer to a struct vmw_private identifying the device.
- * @size: The requested buffer size.
- * @user: Whether this is an ordinary dma buffer or a user dma buffer.
- */
-static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
- bool user)
-{
- static size_t struct_size, user_struct_size;
- size_t num_pages = PFN_UP(size);
- size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
-
- if (unlikely(struct_size == 0)) {
- size_t backend_size = ttm_round_pot(vmw_tt_size);
-
- struct_size = backend_size +
- ttm_round_pot(sizeof(struct vmw_buffer_object));
- user_struct_size = backend_size +
- ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
- TTM_OBJ_EXTRA_SIZE;
- }
-
- if (dev_priv->map_mode == vmw_dma_alloc_coherent)
- page_array_size +=
- ttm_round_pot(num_pages * sizeof(dma_addr_t));
-
- return ((user) ? user_struct_size : struct_size) +
- page_array_size;
-}
-
-
-/**
* vmw_bo_bo_free - vmw buffer object destructor
*
* @bo: Pointer to the embedded struct ttm_buffer_object
@@ -436,27 +374,10 @@ void vmw_bo_bo_free(struct ttm_buffer_object *bo)
WARN_ON(vmw_bo->dirty);
WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
vmw_bo_unmap(vmw_bo);
- dma_resv_fini(&bo->base._resv);
+ drm_gem_object_release(&bo->base);
kfree(vmw_bo);
}
-
-/**
- * vmw_user_bo_destroy - vmw buffer object destructor
- *
- * @bo: Pointer to the embedded struct ttm_buffer_object
- */
-static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
-{
- struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
- struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;
-
- WARN_ON(vbo->dirty);
- WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
- vmw_bo_unmap(vbo);
- ttm_prime_object_kfree(vmw_user_bo, prime);
-}
-
/**
* vmw_bo_create_kernel - Create a pinned BO for internal kernel use.
*
@@ -471,33 +392,27 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
struct ttm_placement *placement,
struct ttm_buffer_object **p_bo)
{
- struct ttm_operation_ctx ctx = { false, false };
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
struct ttm_buffer_object *bo;
- size_t acc_size;
+ struct drm_device *vdev = &dev_priv->drm;
int ret;
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (unlikely(!bo))
return -ENOMEM;
- acc_size = ttm_round_pot(sizeof(*bo));
- acc_size += ttm_round_pot(PFN_UP(size) * sizeof(void *));
- acc_size += ttm_round_pot(sizeof(struct ttm_tt));
-
- ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
- if (unlikely(ret))
- goto error_free;
-
+ size = ALIGN(size, PAGE_SIZE);
- bo->base.size = size;
- dma_resv_init(&bo->base._resv);
- drm_vma_node_reset(&bo->base.vma_node);
+ drm_gem_private_object_init(vdev, &bo->base, size);
ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
- ttm_bo_type_device, placement, 0,
+ ttm_bo_type_kernel, placement, 0,
&ctx, NULL, NULL, NULL);
if (unlikely(ret))
- goto error_account;
+ goto error_free;
ttm_bo_pin(bo);
ttm_bo_unreserve(bo);
@@ -505,14 +420,38 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
return 0;
-error_account:
- ttm_mem_global_free(&ttm_mem_glob, acc_size);
-
error_free:
kfree(bo);
return ret;
}
+int vmw_bo_create(struct vmw_private *vmw,
+ size_t size, struct ttm_placement *placement,
+ bool interruptible, bool pin,
+ void (*bo_free)(struct ttm_buffer_object *bo),
+ struct vmw_buffer_object **p_bo)
+{
+ int ret;
+
+ *p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL);
+ if (unlikely(!*p_bo)) {
+ DRM_ERROR("Failed to allocate a buffer.\n");
+ return -ENOMEM;
+ }
+
+ ret = vmw_bo_init(vmw, *p_bo, size,
+ placement, interruptible, pin,
+ bo_free);
+ if (unlikely(ret != 0))
+ goto out_error;
+
+ return ret;
+out_error:
+ kfree(*p_bo);
+ *p_bo = NULL;
+ return ret;
+}
+
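A hedged usage sketch of the new vmw_bo_create() helper; dev_priv and size stand in for caller state, and the placement and destructor mirror what the removed user-BO path used:

	struct vmw_buffer_object *vbo;
	int err;

	/* Allocate an unpinned, interruptible system-memory BO. */
	err = vmw_bo_create(dev_priv, size, &vmw_sys_placement,
			    true /* interruptible */, false /* pin */,
			    vmw_bo_bo_free, &vbo);
	if (!err)
		vmw_bo_unreference(&vbo);   /* drop the reference when done */
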
/**
* vmw_bo_init - Initialize a vmw buffer object
*
@@ -533,192 +472,44 @@ int vmw_bo_init(struct vmw_private *dev_priv,
bool interruptible, bool pin,
void (*bo_free)(struct ttm_buffer_object *bo))
{
- struct ttm_operation_ctx ctx = { interruptible, false };
+ struct ttm_operation_ctx ctx = {
+ .interruptible = interruptible,
+ .no_wait_gpu = false
+ };
struct ttm_device *bdev = &dev_priv->bdev;
- size_t acc_size;
+ struct drm_device *vdev = &dev_priv->drm;
int ret;
- bool user = (bo_free == &vmw_user_bo_destroy);
-
- WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));
- acc_size = vmw_bo_acc_size(dev_priv, size, user);
+ WARN_ON_ONCE(!bo_free);
memset(vmw_bo, 0, sizeof(*vmw_bo));
BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
vmw_bo->base.priority = 3;
vmw_bo->res_tree = RB_ROOT;
- ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
- if (unlikely(ret))
- return ret;
-
- vmw_bo->base.base.size = size;
- dma_resv_init(&vmw_bo->base.base._resv);
- drm_vma_node_reset(&vmw_bo->base.base.vma_node);
+ size = ALIGN(size, PAGE_SIZE);
+ drm_gem_private_object_init(vdev, &vmw_bo->base.base, size);
ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
- ttm_bo_type_device, placement,
+ ttm_bo_type_device,
+ placement,
0, &ctx, NULL, NULL, bo_free);
if (unlikely(ret)) {
- ttm_mem_global_free(&ttm_mem_glob, acc_size);
return ret;
}
if (pin)
ttm_bo_pin(&vmw_bo->base);
ttm_bo_unreserve(&vmw_bo->base);
- return 0;
-}
-
-
-/**
- * vmw_user_bo_release - TTM reference base object release callback for
- * vmw user buffer objects
- *
- * @p_base: The TTM base object pointer about to be unreferenced.
- *
- * Clears the TTM base object pointer and drops the reference the
- * base object has on the underlying struct vmw_buffer_object.
- */
-static void vmw_user_bo_release(struct ttm_base_object **p_base)
-{
- struct vmw_user_buffer_object *vmw_user_bo;
- struct ttm_base_object *base = *p_base;
-
- *p_base = NULL;
-
- if (unlikely(base == NULL))
- return;
-
- vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
- prime.base);
- ttm_bo_put(&vmw_user_bo->vbo.base);
-}
-
-
-/**
- * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
- * for vmw user buffer objects
- *
- * @base: Pointer to the TTM base object
- * @ref_type: Reference type of the reference reaching zero.
- *
- * Called when user-space drops its last synccpu reference on the buffer
- * object, Either explicitly or as part of a cleanup file close.
- */
-static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
- enum ttm_ref_type ref_type)
-{
- struct vmw_user_buffer_object *user_bo;
-
- user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);
-
- switch (ref_type) {
- case TTM_REF_SYNCCPU_WRITE:
- atomic_dec(&user_bo->vbo.cpu_writers);
- break;
- default:
- WARN_ONCE(true, "Undefined buffer object reference release.\n");
- }
-}
-
-
-/**
- * vmw_user_bo_alloc - Allocate a user buffer object
- *
- * @dev_priv: Pointer to a struct device private.
- * @tfile: Pointer to a struct ttm_object_file on which to register the user
- * object.
- * @size: Size of the buffer object.
- * @shareable: Boolean whether the buffer is shareable with other open files.
- * @handle: Pointer to where the handle value should be assigned.
- * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
- * should be assigned.
- * @p_base: The TTM base object pointer about to be allocated.
- * Return: Zero on success, negative error code on error.
- */
-int vmw_user_bo_alloc(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t size,
- bool shareable,
- uint32_t *handle,
- struct vmw_buffer_object **p_vbo,
- struct ttm_base_object **p_base)
-{
- struct vmw_user_buffer_object *user_bo;
- int ret;
-
- user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
- if (unlikely(!user_bo)) {
- DRM_ERROR("Failed to allocate a buffer.\n");
- return -ENOMEM;
- }
-
- ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
- (dev_priv->has_mob) ?
- &vmw_sys_placement :
- &vmw_vram_sys_placement, true, false,
- &vmw_user_bo_destroy);
- if (unlikely(ret != 0))
- return ret;
-
- ttm_bo_get(&user_bo->vbo.base);
- ret = ttm_prime_object_init(tfile,
- size,
- &user_bo->prime,
- shareable,
- ttm_buffer_type,
- &vmw_user_bo_release,
- &vmw_user_bo_ref_obj_release);
- if (unlikely(ret != 0)) {
- ttm_bo_put(&user_bo->vbo.base);
- goto out_no_base_object;
- }
-
- *p_vbo = &user_bo->vbo;
- if (p_base) {
- *p_base = &user_bo->prime.base;
- kref_get(&(*p_base)->refcount);
- }
- *handle = user_bo->prime.base.handle;
-
-out_no_base_object:
- return ret;
-}
-
-/**
- * vmw_user_bo_verify_access - verify access permissions on this
- * buffer object.
- *
- * @bo: Pointer to the buffer object being accessed
- * @tfile: Identifying the caller.
- */
-int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
- struct ttm_object_file *tfile)
-{
- struct vmw_user_buffer_object *vmw_user_bo;
-
- if (unlikely(bo->destroy != vmw_user_bo_destroy))
- return -EPERM;
-
- vmw_user_bo = vmw_user_buffer_object(bo);
-
- /* Check that the caller has opened the object. */
- if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
- return 0;
-
- DRM_ERROR("Could not grant buffer access.\n");
- return -EPERM;
+ return 0;
}
-
/**
- * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
+ * vmw_user_bo_synccpu_grab - Grab a struct vmw_buffer_object for cpu
* access, idling previous GPU operations on the buffer and optionally
* blocking it for further command submissions.
*
- * @user_bo: Pointer to the buffer object being grabbed for CPU access
- * @tfile: Identifying the caller.
+ * @vmw_bo: Pointer to the buffer object being grabbed for CPU access
* @flags: Flags indicating how the grab should be performed.
* Return: Zero on success, Negative error code on error. In particular,
* -EBUSY will be returned if a dontblock operation is requested and the
@@ -727,13 +518,11 @@ int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
*
* A blocking grab will be automatically released when @tfile is closed.
*/
-static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
- struct ttm_object_file *tfile,
+static int vmw_user_bo_synccpu_grab(struct vmw_buffer_object *vmw_bo,
uint32_t flags)
{
bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
- struct ttm_buffer_object *bo = &user_bo->vbo.base;
- bool existed;
+ struct ttm_buffer_object *bo = &vmw_bo->base;
int ret;
if (flags & drm_vmw_synccpu_allow_cs) {
@@ -755,17 +544,12 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
ret = ttm_bo_wait(bo, true, nonblock);
if (likely(ret == 0))
- atomic_inc(&user_bo->vbo.cpu_writers);
+ atomic_inc(&vmw_bo->cpu_writers);
ttm_bo_unreserve(bo);
if (unlikely(ret != 0))
return ret;
- ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
- TTM_REF_SYNCCPU_WRITE, &existed, false);
- if (ret != 0 || existed)
- atomic_dec(&user_bo->vbo.cpu_writers);
-
return ret;
}
@@ -773,19 +557,25 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
* vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
* and unblock command submission on the buffer if blocked.
*
+ * @filp: Identifying the caller.
* @handle: Handle identifying the buffer object.
- * @tfile: Identifying the caller.
* @flags: Flags indicating the type of release.
*/
-static int vmw_user_bo_synccpu_release(uint32_t handle,
- struct ttm_object_file *tfile,
- uint32_t flags)
+static int vmw_user_bo_synccpu_release(struct drm_file *filp,
+ uint32_t handle,
+ uint32_t flags)
{
- if (!(flags & drm_vmw_synccpu_allow_cs))
- return ttm_ref_object_base_unref(tfile, handle,
- TTM_REF_SYNCCPU_WRITE);
+ struct vmw_buffer_object *vmw_bo;
+ int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo);
- return 0;
+ if (!ret) {
+ if (!(flags & drm_vmw_synccpu_allow_cs)) {
+ atomic_dec(&vmw_bo->cpu_writers);
+ }
+ ttm_bo_put(&vmw_bo->base);
+ }
+
+ return ret;
}
@@ -807,9 +597,6 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_synccpu_arg *arg =
(struct drm_vmw_synccpu_arg *) data;
struct vmw_buffer_object *vbo;
- struct vmw_user_buffer_object *user_bo;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct ttm_base_object *buffer_base;
int ret;
if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
@@ -822,16 +609,12 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
switch (arg->op) {
case drm_vmw_synccpu_grab:
- ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
- &buffer_base);
+ ret = vmw_user_bo_lookup(file_priv, arg->handle, &vbo);
if (unlikely(ret != 0))
return ret;
- user_bo = container_of(vbo, struct vmw_user_buffer_object,
- vbo);
- ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
+ ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
vmw_bo_unreference(&vbo);
- ttm_base_object_unref(&buffer_base);
if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
ret != -EBUSY)) {
DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
@@ -840,7 +623,8 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
}
break;
case drm_vmw_synccpu_release:
- ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
+ ret = vmw_user_bo_synccpu_release(file_priv,
+ arg->handle,
arg->flags);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
@@ -856,50 +640,6 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
return 0;
}
-
-/**
- * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
- * allocation functionality.
- *
- * @dev: Identifies the drm device.
- * @data: Pointer to the ioctl argument.
- * @file_priv: Identifies the caller.
- * Return: Zero on success, negative error code on error.
- *
- * This function checks the ioctl arguments for validity and allocates a
- * struct vmw_user_buffer_object bo.
- */
-int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct vmw_private *dev_priv = vmw_priv(dev);
- union drm_vmw_alloc_dmabuf_arg *arg =
- (union drm_vmw_alloc_dmabuf_arg *)data;
- struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
- struct drm_vmw_dmabuf_rep *rep = &arg->rep;
- struct vmw_buffer_object *vbo;
- uint32_t handle;
- int ret;
-
- ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
- req->size, false, &handle, &vbo,
- NULL);
- if (unlikely(ret != 0))
- goto out_no_bo;
-
- rep->handle = handle;
- rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
- rep->cur_gmr_id = handle;
- rep->cur_gmr_offset = 0;
-
- vmw_bo_unreference(&vbo);
-
-out_no_bo:
-
- return ret;
-}
-
-
/**
* vmw_bo_unref_ioctl - Generic handle close ioctl.
*
@@ -917,65 +657,48 @@ int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_unref_dmabuf_arg *arg =
(struct drm_vmw_unref_dmabuf_arg *)data;
- return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- arg->handle,
- TTM_REF_USAGE);
+ drm_gem_handle_delete(file_priv, arg->handle);
+ return 0;
}
/**
* vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
*
- * @tfile: The TTM object file the handle is registered with.
+ * @filp: The file the handle is registered with.
* @handle: The user buffer object handle
* @out: Pointer to a where a pointer to the embedded
* struct vmw_buffer_object should be placed.
- * @p_base: Pointer to where a pointer to the TTM base object should be
- * placed, or NULL if no such pointer is required.
* Return: Zero on success, Negative error code on error.
*
- * Both the output base object pointer and the vmw buffer object pointer
- * will be refcounted.
+ * The vmw buffer object pointer will be refcounted.
*/
-int vmw_user_bo_lookup(struct ttm_object_file *tfile,
- uint32_t handle, struct vmw_buffer_object **out,
- struct ttm_base_object **p_base)
+int vmw_user_bo_lookup(struct drm_file *filp,
+ uint32_t handle,
+ struct vmw_buffer_object **out)
{
- struct vmw_user_buffer_object *vmw_user_bo;
- struct ttm_base_object *base;
+ struct drm_gem_object *gobj;
- base = ttm_base_object_lookup(tfile, handle);
- if (unlikely(base == NULL)) {
+ gobj = drm_gem_object_lookup(filp, handle);
+ if (!gobj) {
DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
(unsigned long)handle);
return -ESRCH;
}
- if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
- ttm_base_object_unref(&base);
- DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
- (unsigned long)handle);
- return -EINVAL;
- }
-
- vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
- prime.base);
- ttm_bo_get(&vmw_user_bo->vbo.base);
- if (p_base)
- *p_base = base;
- else
- ttm_base_object_unref(&base);
- *out = &vmw_user_bo->vbo;
+ *out = gem_to_vmw_bo(gobj);
+ ttm_bo_get(&(*out)->base);
+ drm_gem_object_put(gobj);
return 0;
}
/**
* vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
- * @tfile: The TTM object file the handle is registered with.
+ * @filp: The drm file the handle is registered with.
* @handle: The user buffer object handle.
*
- * This function looks up a struct vmw_user_bo and returns a pointer to the
+ * This function looks up a struct vmw_bo and returns a pointer to the
* struct vmw_buffer_object it derives from without refcounting the pointer.
* The returned pointer is only valid until vmw_user_bo_noref_release() is
* called, and the object pointed to by the returned pointer may be doomed.
@@ -988,52 +711,23 @@ int vmw_user_bo_lookup(struct ttm_object_file *tfile,
* error pointer on failure.
*/
struct vmw_buffer_object *
-vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
+vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle)
{
- struct vmw_user_buffer_object *vmw_user_bo;
- struct ttm_base_object *base;
+ struct vmw_buffer_object *vmw_bo;
+ struct ttm_buffer_object *bo;
+ struct drm_gem_object *gobj = drm_gem_object_lookup(filp, handle);
- base = ttm_base_object_noref_lookup(tfile, handle);
- if (!base) {
+ if (!gobj) {
DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
(unsigned long)handle);
return ERR_PTR(-ESRCH);
}
+ vmw_bo = gem_to_vmw_bo(gobj);
+ bo = ttm_bo_get_unless_zero(&vmw_bo->base);
+ vmw_bo = vmw_buffer_object(bo);
+ drm_gem_object_put(gobj);
- if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
- ttm_base_object_noref_release();
- DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
- (unsigned long)handle);
- return ERR_PTR(-EINVAL);
- }
-
- vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
- prime.base);
- return &vmw_user_bo->vbo;
-}
-
-/**
- * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
- *
- * @tfile: The TTM object file to register the handle with.
- * @vbo: The embedded vmw buffer object.
- * @handle: Pointer to where the new handle should be placed.
- * Return: Zero on success, Negative error code on error.
- */
-int vmw_user_bo_reference(struct ttm_object_file *tfile,
- struct vmw_buffer_object *vbo,
- uint32_t *handle)
-{
- struct vmw_user_buffer_object *user_bo;
-
- if (vbo->base.destroy != vmw_user_bo_destroy)
- return -EINVAL;
-
- user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
-
- *handle = user_bo->prime.base.handle;
- return ttm_ref_object_add(tfile, &user_bo->prime.base,
- TTM_REF_USAGE, NULL, false);
+ return vmw_bo;
}
@@ -1087,68 +781,15 @@ int vmw_dumb_create(struct drm_file *file_priv,
int ret;
args->pitch = args->width * ((args->bpp + 7) / 8);
- args->size = args->pitch * args->height;
+ args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);
- ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
- args->size, false, &args->handle,
- &vbo, NULL);
- if (unlikely(ret != 0))
- goto out_no_bo;
+ ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
+ args->size, &args->handle,
+ &vbo);
- vmw_bo_unreference(&vbo);
-out_no_bo:
return ret;
}
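Worked example of the dumb-buffer sizing above, using illustrative values (1024x768 at 32 bpp, 4 KiB pages):

	u32 pitch = 1024 * ((32 + 7) / 8);       /* 4096 bytes per scanline             */
	u64 size  = ALIGN(pitch * 768, 4096);    /* 3145728 bytes, already page-aligned */
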
-
-/**
- * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
- *
- * @file_priv: Pointer to a struct drm_file identifying the caller.
- * @dev: Pointer to the drm device.
- * @handle: Handle identifying the dumb buffer.
- * @offset: The address space offset returned.
- * Return: Zero on success, negative error code on failure.
- *
- * This is a driver callback for the core drm dumb_map_offset functionality.
- */
-int vmw_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle,
- uint64_t *offset)
-{
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_buffer_object *out_buf;
- int ret;
-
- ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
- if (ret != 0)
- return -EINVAL;
-
- *offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
- vmw_bo_unreference(&out_buf);
- return 0;
-}
-
-
-/**
- * vmw_dumb_destroy - Destroy a dumb boffer
- *
- * @file_priv: Pointer to a struct drm_file identifying the caller.
- * @dev: Pointer to the drm device.
- * @handle: Handle identifying the dumb buffer.
- * Return: Zero on success, negative error code on failure.
- *
- * This is a driver callback for the core drm dumb_destroy functionality.
- */
-int vmw_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle)
-{
- return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- handle, TTM_REF_USAGE);
-}
-
-
/**
* vmw_bo_swap_notify - swapout notify callback.
*
@@ -1157,8 +798,7 @@ int vmw_dumb_destroy(struct drm_file *file_priv,
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
/* Is @bo embedded in a struct vmw_buffer_object? */
- if (bo->destroy != vmw_bo_bo_free &&
- bo->destroy != vmw_user_bo_destroy)
+	if (!vmw_bo_is_vmw_bo(bo))
return;
/* Kill any cached kernel maps before swapout */
@@ -1182,8 +822,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct vmw_buffer_object *vbo;
/* Make sure @bo is embedded in a struct vmw_buffer_object? */
- if (bo->destroy != vmw_bo_bo_free &&
- bo->destroy != vmw_user_bo_destroy)
+	if (!vmw_bo_is_vmw_bo(bo))
return;
vbo = container_of(bo, struct vmw_buffer_object, base);
@@ -1204,3 +843,22 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
vmw_resource_unbind_list(vbo);
}
+
+/**
+ * vmw_bo_is_vmw_bo - check if the buffer object is a &vmw_buffer_object
+ * @bo: buffer object to be checked
+ *
+ * Uses destroy function associated with the object to determine if this is
+ * a &vmw_buffer_object.
+ *
+ * Returns:
+ * true if the object is of &vmw_buffer_object type, false if not.
+ */
+bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &vmw_bo_bo_free ||
+ bo->destroy == &vmw_gem_destroy)
+ return true;
+
+ return false;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
index 67db472d3493..a3bfbb6c3e14 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -145,6 +145,13 @@ struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv)
(unsigned int) max,
(unsigned int) min,
(unsigned int) fifo->capabilities);
+
+ if (unlikely(min >= max)) {
+ drm_warn(&dev_priv->drm,
+ "FIFO memory is not usable. Driver failed to initialize.");
+ return ERR_PTR(-ENXIO);
+ }
+
return fifo;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 8381750db81b..415774fde796 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -42,7 +42,7 @@
*/
struct vmw_cmdbuf_res {
struct vmw_resource *res;
- struct drm_hash_item hash;
+ struct vmwgfx_hash_item hash;
struct list_head head;
enum vmw_cmdbuf_res_state state;
struct vmw_cmdbuf_res_manager *man;
@@ -59,7 +59,7 @@ struct vmw_cmdbuf_res {
* @resources and @list are protected by the cmdbuf mutex for now.
*/
struct vmw_cmdbuf_res_manager {
- struct drm_open_hash resources;
+ struct vmwgfx_open_hash resources;
struct list_head list;
struct vmw_private *dev_priv;
};
@@ -81,11 +81,11 @@ vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
enum vmw_cmdbuf_res_type res_type,
u32 user_key)
{
- struct drm_hash_item *hash;
+ struct vmwgfx_hash_item *hash;
int ret;
unsigned long key = user_key | (res_type << 24);
- ret = drm_ht_find_item(&man->resources, key, &hash);
+ ret = vmwgfx_ht_find_item(&man->resources, key, &hash);
if (unlikely(ret != 0))
return ERR_PTR(ret);
@@ -105,7 +105,7 @@ static void vmw_cmdbuf_res_free(struct vmw_cmdbuf_res_manager *man,
struct vmw_cmdbuf_res *entry)
{
list_del(&entry->head);
- WARN_ON(drm_ht_remove_item(&man->resources, &entry->hash));
+ WARN_ON(vmwgfx_ht_remove_item(&man->resources, &entry->hash));
vmw_resource_unreference(&entry->res);
kfree(entry);
}
@@ -167,7 +167,7 @@ void vmw_cmdbuf_res_revert(struct list_head *list)
vmw_cmdbuf_res_free(entry->man, entry);
break;
case VMW_CMDBUF_RES_DEL:
- ret = drm_ht_insert_item(&entry->man->resources, &entry->hash);
+ ret = vmwgfx_ht_insert_item(&entry->man->resources, &entry->hash);
BUG_ON(ret);
list_move_tail(&entry->head, &entry->man->list);
entry->state = VMW_CMDBUF_RES_COMMITTED;
@@ -206,7 +206,7 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
return -ENOMEM;
cres->hash.key = user_key | (res_type << 24);
- ret = drm_ht_insert_item(&man->resources, &cres->hash);
+ ret = vmwgfx_ht_insert_item(&man->resources, &cres->hash);
if (unlikely(ret != 0)) {
kfree(cres);
goto out_invalid_key;
@@ -244,10 +244,10 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
struct vmw_resource **res_p)
{
struct vmw_cmdbuf_res *entry;
- struct drm_hash_item *hash;
+ struct vmwgfx_hash_item *hash;
int ret;
- ret = drm_ht_find_item(&man->resources, user_key | (res_type << 24),
+ ret = vmwgfx_ht_find_item(&man->resources, user_key | (res_type << 24),
&hash);
if (likely(ret != 0))
return -EINVAL;
@@ -260,7 +260,7 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
*res_p = NULL;
break;
case VMW_CMDBUF_RES_COMMITTED:
- (void) drm_ht_remove_item(&man->resources, &entry->hash);
+ (void) vmwgfx_ht_remove_item(&man->resources, &entry->hash);
list_del(&entry->head);
entry->state = VMW_CMDBUF_RES_DEL;
list_add_tail(&entry->head, list);
@@ -295,7 +295,7 @@ vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
man->dev_priv = dev_priv;
INIT_LIST_HEAD(&man->list);
- ret = drm_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER);
+ ret = vmwgfx_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER);
if (ret == 0)
return man;
@@ -320,26 +320,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
list_for_each_entry_safe(entry, next, &man->list, head)
vmw_cmdbuf_res_free(man, entry);
- drm_ht_remove(&man->resources);
+ vmwgfx_ht_remove(&man->resources);
kfree(man);
}
-/**
- * vmw_cmdbuf_res_man_size - Return the size of a command buffer managed
- * resource manager
- *
- * Returns the approximate allocation size of a command buffer managed
- * resource manager.
- */
-size_t vmw_cmdbuf_res_man_size(void)
-{
- static size_t res_man_size;
-
- if (unlikely(res_man_size == 0))
- res_man_size =
- ttm_round_pot(sizeof(struct vmw_cmdbuf_res_manager)) +
- ttm_round_pot(sizeof(struct hlist_head) <<
- VMW_CMDBUF_RES_MAN_HT_ORDER);
-
- return res_man_size;
-}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 4446758b6880..e0f48cd9529b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -60,8 +60,6 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);
-static uint64_t vmw_user_context_size;
-
static const struct vmw_user_resource_conv user_context_conv = {
.object_type = VMW_RES_CONTEXT,
.base_obj_to_res = vmw_user_context_base_to_res,
@@ -686,7 +684,6 @@ static void vmw_user_context_free(struct vmw_resource *res)
{
struct vmw_user_context *ctx =
container_of(res, struct vmw_user_context, res);
- struct vmw_private *dev_priv = res->dev_priv;
if (ctx->cbs)
vmw_binding_state_free(ctx->cbs);
@@ -694,8 +691,6 @@ static void vmw_user_context_free(struct vmw_resource *res)
(void) vmw_context_bind_dx_query(res, NULL);
ttm_base_object_kfree(ctx, base);
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_user_context_size);
}
/*
@@ -720,7 +715,7 @@ int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
+ return ttm_ref_object_base_unref(tfile, arg->cid);
}
static int vmw_context_define(struct drm_device *dev, void *data,
@@ -732,10 +727,6 @@ static int vmw_context_define(struct drm_device *dev, void *data,
struct vmw_resource *tmp;
struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct ttm_operation_ctx ttm_opt_ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
int ret;
if (!has_sm4_context(dev_priv) && dx) {
@@ -743,25 +734,8 @@ static int vmw_context_define(struct drm_device *dev, void *data,
return -EINVAL;
}
- if (unlikely(vmw_user_context_size == 0))
- vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) +
- ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) +
- + VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- vmw_user_context_size,
- &ttm_opt_ctx);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for context"
- " creation.\n");
- goto out_ret;
- }
-
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (unlikely(!ctx)) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_user_context_size);
ret = -ENOMEM;
goto out_ret;
}
@@ -780,7 +754,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
tmp = vmw_resource_reference(&ctx->res);
ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
- &vmw_user_context_base_release, NULL);
+ &vmw_user_context_base_release);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 17a98db00017..16f986b6cbea 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -407,12 +407,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
* for the new COTable. Initially pin the buffer object to make sure
* we can use tryreserve without failure.
*/
- buf = kzalloc(sizeof(*buf), GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_placement,
- true, true, vmw_bo_bo_free);
+ ret = vmw_bo_create(dev_priv, new_size, &vmw_mob_placement,
+ true, true, vmw_bo_bo_free, &buf);
if (ret) {
DRM_ERROR("Failed initializing new cotable MOB.\n");
return ret;
@@ -546,8 +542,6 @@ static void vmw_hw_cotable_destroy(struct vmw_resource *res)
(void) vmw_cotable_destroy(res);
}
-static size_t cotable_acc_size;
-
/**
* vmw_cotable_free - Cotable resource destructor
*
@@ -555,10 +549,7 @@ static size_t cotable_acc_size;
*/
static void vmw_cotable_free(struct vmw_resource *res)
{
- struct vmw_private *dev_priv = res->dev_priv;
-
kfree(res);
- ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
}
/**
@@ -574,21 +565,9 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
u32 type)
{
struct vmw_cotable *vcotbl;
- struct ttm_operation_ctx ttm_opt_ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
int ret;
u32 num_entries;
- if (unlikely(cotable_acc_size == 0))
- cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable));
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- cotable_acc_size, &ttm_opt_ctx);
- if (unlikely(ret))
- return ERR_PTR(ret);
-
vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
if (unlikely(!vcotbl)) {
ret = -ENOMEM;
@@ -622,7 +601,6 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
out_no_init:
kfree(vcotbl);
out_no_alloc:
- ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 75d1e032cf16..fe36efdb7ff5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -34,6 +34,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_sysfs.h>
+#include <drm/drm_gem_ttm_helper.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_placement.h>
@@ -50,9 +51,6 @@
#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600
-#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
-
-
/*
* Fully encoded drm commands. Might move to vmw_drm.h
*/
@@ -165,7 +163,7 @@
static const struct drm_ioctl_desc vmw_ioctls[] = {
DRM_IOCTL_DEF_DRV(VMW_GET_PARAM, vmw_getparam_ioctl,
DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
+ DRM_IOCTL_DEF_DRV(VMW_ALLOC_DMABUF, vmw_gem_object_create_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
DRM_RENDER_ALLOW),
@@ -257,8 +255,8 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
};
static const struct pci_device_id vmw_pci_id_list[] = {
- { PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA2) },
- { PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, VMWGFX_PCI_ID_SVGA2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, VMWGFX_PCI_ID_SVGA3) },
{ }
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
@@ -366,6 +364,7 @@ static void vmw_print_sm_type(struct vmw_private *dev_priv)
[VMW_SM_4] = "SM4",
[VMW_SM_4_1] = "SM4_1",
[VMW_SM_5] = "SM_5",
+ [VMW_SM_5_1X] = "SM_5_1X",
[VMW_SM_MAX] = "Invalid"
};
BUILD_BUG_ON(ARRAY_SIZE(names) != (VMW_SM_MAX + 1));
@@ -399,13 +398,9 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
* immediately succeed. This is because we're the only
* user of the bo currently.
*/
- vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
- if (!vbo)
- return -ENOMEM;
-
- ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
- &vmw_sys_placement, false, true,
- &vmw_bo_bo_free);
+ ret = vmw_bo_create(dev_priv, PAGE_SIZE,
+ &vmw_sys_placement, false, true,
+ &vmw_bo_bo_free, &vbo);
if (unlikely(ret != 0))
return ret;
@@ -706,23 +701,15 @@ static int vmw_dma_masks(struct vmw_private *dev_priv)
static int vmw_vram_manager_init(struct vmw_private *dev_priv)
{
int ret;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- ret = vmw_thp_init(dev_priv);
-#else
ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, false,
dev_priv->vram_size >> PAGE_SHIFT);
-#endif
ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false);
return ret;
}
static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
{
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- vmw_thp_fini(dev_priv);
-#else
ttm_range_man_fini(&dev_priv->bdev, TTM_PL_VRAM);
-#endif
}
static int vmw_setup_pci_resources(struct vmw_private *dev,
@@ -986,8 +973,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
goto out_err0;
}
- dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
- &vmw_prime_dmabuf_ops);
+ dev_priv->tdev = ttm_object_device_init(12, &vmw_prime_dmabuf_ops);
if (unlikely(dev_priv->tdev == NULL)) {
drm_err(&dev_priv->drm,
@@ -1070,6 +1056,12 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
"3D will be disabled.\n");
dev_priv->has_mob = false;
}
+ if (vmw_sys_man_init(dev_priv) != 0) {
+ drm_info(&dev_priv->drm,
+ "No MOB page table memory available. "
+ "3D will be disabled.\n");
+ dev_priv->has_mob = false;
+ }
}
if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) {
@@ -1077,8 +1069,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
dev_priv->sm_type = VMW_SM_4;
}
- vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
-
/* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */
if (has_sm4_context(dev_priv) &&
(dev_priv->capabilities2 & SVGA_CAP2_DX2)) {
@@ -1086,8 +1076,11 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
dev_priv->sm_type = VMW_SM_4_1;
if (has_sm4_1_context(dev_priv) &&
(dev_priv->capabilities2 & SVGA_CAP2_DX3)) {
- if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM5))
+ if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM5)) {
dev_priv->sm_type = VMW_SM_5;
+ if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_GL43))
+ dev_priv->sm_type = VMW_SM_5_1X;
+ }
}
}
@@ -1120,8 +1113,10 @@ out_no_fifo:
vmw_overlay_close(dev_priv);
vmw_kms_close(dev_priv);
out_no_kms:
- if (dev_priv->has_mob)
+ if (dev_priv->has_mob) {
vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
+ vmw_sys_man_fini(dev_priv);
+ }
if (dev_priv->has_gmr)
vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
vmw_devcaps_destroy(dev_priv);
@@ -1155,7 +1150,7 @@ static void vmw_driver_unload(struct drm_device *dev)
unregister_pm_notifier(&dev_priv->pm_nb);
if (dev_priv->ctx.res_ht_initialized)
- drm_ht_remove(&dev_priv->ctx.res_ht);
+ vmwgfx_ht_remove(&dev_priv->ctx.res_ht);
vfree(dev_priv->ctx.cmd_bounce);
if (dev_priv->enable_fb) {
vmw_fb_off(dev_priv);
@@ -1171,8 +1166,10 @@ static void vmw_driver_unload(struct drm_device *dev)
vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
vmw_release_device_early(dev_priv);
- if (dev_priv->has_mob)
+ if (dev_priv->has_mob) {
vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
+ vmw_sys_man_fini(dev_priv);
+ }
vmw_devcaps_destroy(dev_priv);
vmw_vram_manager_fini(dev_priv);
ttm_device_fini(&dev_priv->bdev);
@@ -1387,7 +1384,6 @@ static void vmw_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- ttm_mem_global_release(&ttm_mem_glob);
drm_dev_unregister(dev);
vmw_driver_unload(dev);
}
@@ -1575,7 +1571,7 @@ static const struct file_operations vmwgfx_driver_fops = {
static const struct drm_driver driver = {
.driver_features =
- DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
+ DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM,
.ioctls = vmw_ioctls,
.num_ioctls = ARRAY_SIZE(vmw_ioctls),
.master_set = vmw_master_set,
@@ -1584,8 +1580,7 @@ static const struct drm_driver driver = {
.postclose = vmw_postclose,
.dumb_create = vmw_dumb_create,
- .dumb_map_offset = vmw_dumb_map_offset,
- .dumb_destroy = vmw_dumb_destroy,
+ .dumb_map_offset = drm_gem_ttm_dumb_map_offset,
.prime_fd_to_handle = vmw_prime_fd_to_handle,
.prime_handle_to_fd = vmw_prime_handle_to_fd,
@@ -1616,34 +1611,36 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
if (ret)
- return ret;
+ goto out_error;
ret = pcim_enable_device(pdev);
if (ret)
- return ret;
+ goto out_error;
vmw = devm_drm_dev_alloc(&pdev->dev, &driver,
struct vmw_private, drm);
- if (IS_ERR(vmw))
- return PTR_ERR(vmw);
+ if (IS_ERR(vmw)) {
+ ret = PTR_ERR(vmw);
+ goto out_error;
+ }
pci_set_drvdata(pdev, &vmw->drm);
- ret = ttm_mem_global_init(&ttm_mem_glob, &pdev->dev);
- if (ret)
- return ret;
-
ret = vmw_driver_load(vmw, ent->device);
if (ret)
- return ret;
+ goto out_error;
ret = drm_dev_register(&vmw->drm, 0);
- if (ret) {
- vmw_driver_unload(&vmw->drm);
- return ret;
- }
+ if (ret)
+ goto out_unload;
+
+ vmw_debugfs_gem_init(vmw);
return 0;
+out_unload:
+ vmw_driver_unload(&vmw->drm);
+out_error:
+ return ret;
}
static int __init vmwgfx_init(void)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 858aff99a3fe..d6b66636a19b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -34,7 +34,6 @@
#include <drm/drm_auth.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
-#include <drm/drm_hashtab.h>
#include <drm/drm_rect.h>
#include <drm/ttm/ttm_bo_driver.h>
@@ -43,6 +42,7 @@
#include "ttm_object.h"
#include "vmwgfx_fence.h"
+#include "vmwgfx_hashtab.h"
#include "vmwgfx_reg.h"
#include "vmwgfx_validation.h"
@@ -54,16 +54,13 @@
#define VMWGFX_DRIVER_NAME "vmwgfx"
-#define VMWGFX_DRIVER_DATE "20210722"
+#define VMWGFX_DRIVER_DATE "20211206"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 19
+#define VMWGFX_DRIVER_MINOR 20
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
-#define VMWGFX_MAX_RELOCATIONS 2048
-#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
-#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1
#define VMWGFX_PCI_ID_SVGA2 0x0405
#define VMWGFX_PCI_ID_SVGA3 0x0406
@@ -82,8 +79,9 @@
VMWGFX_NUM_GB_SURFACE +\
VMWGFX_NUM_GB_SCREEN_TARGET)
-#define VMW_PL_GMR (TTM_PL_PRIV + 0)
-#define VMW_PL_MOB (TTM_PL_PRIV + 1)
+#define VMW_PL_GMR (TTM_PL_PRIV + 0)
+#define VMW_PL_MOB (TTM_PL_PRIV + 1)
+#define VMW_PL_SYSTEM (TTM_PL_PRIV + 2)
#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
@@ -133,7 +131,7 @@ struct vmw_buffer_object {
*/
struct vmw_validate_buffer {
struct ttm_validate_buffer base;
- struct drm_hash_item hash;
+ struct vmwgfx_hash_item hash;
bool validate_as_mob;
};
@@ -332,7 +330,6 @@ struct vmw_sg_table {
struct page **pages;
const dma_addr_t *addrs;
struct sg_table *sgt;
- unsigned long num_regions;
unsigned long num_pages;
};
@@ -360,6 +357,19 @@ struct vmw_piter {
dma_addr_t (*dma_address)(struct vmw_piter *);
};
+
+struct vmw_ttm_tt {
+ struct ttm_tt dma_ttm;
+ struct vmw_private *dev_priv;
+ int gmr_id;
+ struct vmw_mob *mob;
+ int mem_type;
+ struct sg_table sgt;
+ struct vmw_sg_table vsgt;
+ bool mapped;
+ bool bound;
+};
+
/*
* enum vmw_display_unit_type - Describes the display unit
*/
@@ -406,10 +416,11 @@ struct vmw_ctx_validation_info;
* @ctx: The validation context
*/
struct vmw_sw_context{
- struct drm_open_hash res_ht;
+ struct vmwgfx_open_hash res_ht;
bool res_ht_initialized;
bool kernel;
struct vmw_fpriv *fp;
+ struct drm_file *filp;
uint32_t *cmd_bounce;
uint32_t cmd_bounce_size;
struct vmw_buffer_object *cur_query_bo;
@@ -473,6 +484,7 @@ enum {
* @VMW_SM_4: Context support upto SM4.
* @VMW_SM_4_1: Context support upto SM4_1.
* @VMW_SM_5: Context support up to SM5.
+ * @VMW_SM_5_1X: Adds support for sm5_1 and gl43 extensions.
* @VMW_SM_MAX: Should be the last.
*/
enum vmw_sm_type {
@@ -480,6 +492,7 @@ enum vmw_sm_type {
VMW_SM_4,
VMW_SM_4_1,
VMW_SM_5,
+ VMW_SM_5_1X,
VMW_SM_MAX
};
@@ -627,9 +640,6 @@ struct vmw_private {
struct vmw_cmdbuf_man *cman;
DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
- /* Validation memory reservation */
- struct vmw_validation_mem vvm;
-
uint32 *devcaps;
/*
@@ -645,6 +655,11 @@ struct vmw_private {
#endif
};
+static inline struct vmw_buffer_object *gem_to_vmw_bo(struct drm_gem_object *gobj)
+{
+ return container_of((gobj), struct vmw_buffer_object, base.base);
+}
+
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
return container_of(res, struct vmw_surface, res);
@@ -738,6 +753,24 @@ static inline bool has_sm5_context(const struct vmw_private *dev_priv)
return (dev_priv->sm_type >= VMW_SM_5);
}
+/**
+ * has_gl43_context - Does the device support a GL43 context.
+ * @dev_priv: Device private.
+ *
+ * Return: True if the device supports a GL43 context, false otherwise.
+ */
+static inline bool has_gl43_context(const struct vmw_private *dev_priv)
+{
+ return (dev_priv->sm_type >= VMW_SM_5_1X);
+}
+
+
+static inline u32 vmw_max_num_uavs(struct vmw_private *dev_priv)
+{
+ return (has_gl43_context(dev_priv) ?
+ SVGA3D_DX11_1_MAX_UAVIEWS : SVGA3D_MAX_UAVIEWS);
+}
+
extern void vmw_svga_enable(struct vmw_private *dev_priv);
extern void vmw_svga_disable(struct vmw_private *dev_priv);
@@ -767,7 +800,7 @@ extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
+ struct drm_file *filp,
uint32_t handle,
struct vmw_surface **out_surf,
struct vmw_buffer_object **out_buf);
@@ -833,6 +866,7 @@ static inline void vmw_user_resource_noref_release(void)
/**
* Buffer object helper functions - vmwgfx_bo.c
*/
+extern bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo);
extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
struct vmw_buffer_object *bo,
struct ttm_placement *placement,
@@ -857,32 +891,23 @@ extern int vmw_bo_create_kernel(struct vmw_private *dev_priv,
unsigned long size,
struct ttm_placement *placement,
struct ttm_buffer_object **p_bo);
+extern int vmw_bo_create(struct vmw_private *dev_priv,
+ size_t size, struct ttm_placement *placement,
+ bool interruptible, bool pin,
+ void (*bo_free)(struct ttm_buffer_object *bo),
+ struct vmw_buffer_object **p_bo);
extern int vmw_bo_init(struct vmw_private *dev_priv,
struct vmw_buffer_object *vmw_bo,
size_t size, struct ttm_placement *placement,
bool interruptible, bool pin,
void (*bo_free)(struct ttm_buffer_object *bo));
-extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
- struct ttm_object_file *tfile);
-extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t size,
- bool shareable,
- uint32_t *handle,
- struct vmw_buffer_object **p_dma_buf,
- struct ttm_base_object **p_base);
-extern int vmw_user_bo_reference(struct ttm_object_file *tfile,
- struct vmw_buffer_object *dma_buf,
- uint32_t *handle);
-extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern int vmw_user_bo_lookup(struct ttm_object_file *tfile,
- uint32_t id, struct vmw_buffer_object **out,
- struct ttm_base_object **base);
+extern int vmw_user_bo_lookup(struct drm_file *filp,
+ uint32_t handle,
+ struct vmw_buffer_object **out);
extern void vmw_bo_fence_single(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence);
extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo);
@@ -891,16 +916,7 @@ extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *mem);
extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
extern struct vmw_buffer_object *
-vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle);
-
-/**
- * vmw_user_bo_noref_release - release a buffer object pointer looked up
- * without reference
- */
-static inline void vmw_user_bo_noref_release(void)
-{
- ttm_base_object_noref_release();
-}
+vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle);
/**
* vmw_bo_adjust_prio - Adjust the buffer object eviction priority
@@ -952,6 +968,19 @@ static inline void vmw_bo_prio_del(struct vmw_buffer_object *vbo, int prio)
}
/**
+ * GEM related functionality - vmwgfx_gem.c
+ */
+extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
+ struct drm_file *filp,
+ uint32_t size,
+ uint32_t *handle,
+ struct vmw_buffer_object **p_vbo);
+extern int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+extern void vmw_gem_destroy(struct ttm_buffer_object *bo);
+extern void vmw_debugfs_gem_init(struct vmw_private *vdev);
+
+/**
* Misc Ioctl functionality - vmwgfx_ioctl.c
*/
@@ -1027,9 +1056,6 @@ vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv)
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
-extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
- size_t gran);
-
/**
* TTM buffer object driver - vmwgfx_ttm_buffer.c
*/
@@ -1039,7 +1065,6 @@ extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_sys_placement;
-extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_nonfixed_placement;
@@ -1218,13 +1243,6 @@ void vmw_kms_lost_device(struct drm_device *dev);
int vmw_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-
-int vmw_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle,
- uint64_t *offset);
-int vmw_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle);
extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
@@ -1252,6 +1270,12 @@ int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type);
void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type);
/**
+ * System memory manager
+ */
+int vmw_sys_man_init(struct vmw_private *dev_priv);
+void vmw_sys_man_fini(struct vmw_private *dev_priv);
+
+/**
* Prime - vmwgfx_prime.c
*/
@@ -1322,18 +1346,6 @@ extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int vmw_surface_gb_priv_define(struct drm_device *dev,
- uint32_t user_accounting_size,
- SVGA3dSurfaceAllFlags svga3d_flags,
- SVGA3dSurfaceFormat format,
- bool for_scanout,
- uint32_t num_mip_levels,
- uint32_t multisample_count,
- uint32_t array_size,
- struct drm_vmw_size size,
- SVGA3dMSPattern multisample_pattern,
- SVGA3dMSQualityLevel quality_level,
- struct vmw_surface **srf_out);
extern int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev,
void *data,
struct drm_file *file_priv);
@@ -1342,7 +1354,6 @@ extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
struct drm_file *file_priv);
int vmw_gb_surface_define(struct vmw_private *dev_priv,
- uint32_t user_accounting_size,
const struct vmw_surface_metadata *req,
struct vmw_surface **srf_out);
@@ -1403,7 +1414,6 @@ void vmw_dx_streamoutput_cotable_list_scrub(struct vmw_private *dev_priv,
extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
-extern size_t vmw_cmdbuf_res_man_size(void);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
enum vmw_cmdbuf_res_type res_type,
@@ -1551,11 +1561,6 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
-/* Transparent hugepage support - vmwgfx_thp.c */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern int vmw_thp_init(struct vmw_private *dev_priv);
-void vmw_thp_fini(struct vmw_private *dev_priv);
-#endif
/**
* VMW_DEBUG_KMS - Debug output for kernel mode-setting
@@ -1600,11 +1605,6 @@ vmw_bo_reference(struct vmw_buffer_object *buf)
return buf;
}
-static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
-{
- return &ttm_mem_glob;
-}
-
static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
atomic_inc(&dev_priv->num_fifo_resources);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 5f2ffa9de5c8..44ca23b0ea4e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1171,14 +1171,13 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
int ret;
vmw_validation_preload_bo(sw_context->ctx);
- vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
- if (IS_ERR(vmw_bo)) {
+ vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle);
+ if (IS_ERR_OR_NULL(vmw_bo)) {
VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
return PTR_ERR(vmw_bo);
}
-
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
- vmw_user_bo_noref_release();
+ ttm_bo_put(&vmw_bo->base);
if (unlikely(ret != 0))
return ret;
@@ -1226,14 +1225,13 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
int ret;
vmw_validation_preload_bo(sw_context->ctx);
- vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
- if (IS_ERR(vmw_bo)) {
+ vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle);
+ if (IS_ERR_OR_NULL(vmw_bo)) {
VMW_DEBUG_USER("Could not find or use GMR region.\n");
return PTR_ERR(vmw_bo);
}
-
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
- vmw_user_bo_noref_release();
+ ttm_bo_put(&vmw_bo->base);
if (unlikely(ret != 0))
return ret;
@@ -2166,6 +2164,44 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
}
/**
+ * vmw_cmd_dx_set_constant_buffer_offset - Validate
+ * SVGA_3D_CMD_DX_SET_VS/PS/GS/HS/DS/CS_CONSTANT_BUFFER_OFFSET command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int
+vmw_cmd_dx_set_constant_buffer_offset(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetConstantBufferOffset);
+
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
+ u32 shader_slot;
+
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
+ if (!ctx_node)
+ return -EINVAL;
+
+ cmd = container_of(header, typeof(*cmd), header);
+ if (cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
+ VMW_DEBUG_USER("Illegal const buffer slot %u.\n",
+ (unsigned int) cmd->body.slot);
+ return -EINVAL;
+ }
+
+ shader_slot = cmd->header.id - SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET;
+ vmw_binding_cb_offset_update(ctx_node->staged, shader_slot,
+ cmd->body.slot, cmd->body.offsetInBytes);
+
+ return 0;
+}
+
+/**
* vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
* command
*
@@ -2918,7 +2954,7 @@ static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
if (!has_sm5_context(dev_priv))
return -EINVAL;
- if (num_uav > SVGA3D_MAX_UAVIEWS) {
+ if (num_uav > vmw_max_num_uavs(dev_priv)) {
VMW_DEBUG_USER("Invalid UAV binding.\n");
return -EINVAL;
}
@@ -2950,7 +2986,7 @@ static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
if (!has_sm5_context(dev_priv))
return -EINVAL;
- if (num_uav > SVGA3D_MAX_UAVIEWS) {
+ if (num_uav > vmw_max_num_uavs(dev_priv)) {
VMW_DEBUG_USER("Invalid UAV binding.\n");
return -EINVAL;
}
@@ -3528,6 +3564,24 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
&vmw_cmd_dx_transfer_from_buffer,
true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET,
+ &vmw_cmd_dx_set_constant_buffer_offset,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET,
+ &vmw_cmd_dx_set_constant_buffer_offset,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET,
+ &vmw_cmd_dx_set_constant_buffer_offset,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET,
+ &vmw_cmd_dx_set_constant_buffer_offset,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET,
+ &vmw_cmd_dx_set_constant_buffer_offset,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET,
+ &vmw_cmd_dx_set_constant_buffer_offset,
+ true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
true, false, true),
@@ -3561,6 +3615,8 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
&vmw_cmd_dx_define_streamoutput, true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
&vmw_cmd_dx_bind_streamoutput, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2,
+ &vmw_cmd_dx_so_define, true, false, true),
};
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
@@ -3869,8 +3925,7 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
fence_rep.fd = -1;
}
- ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
- TTM_REF_USAGE);
+ ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle);
VMW_DEBUG_USER("Fence copy error. Syncing.\n");
(void) vmw_fence_obj_wait(fence, false, false,
VMW_FENCE_WAIT_TIMEOUT);
@@ -4054,8 +4109,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
struct sync_file *sync_file = NULL;
DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
- vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
-
if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
if (out_fence_fd < 0) {
@@ -4101,6 +4154,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
sw_context->kernel = true;
}
+ sw_context->filp = file_priv;
sw_context->fp = vmw_fpriv(file_priv);
INIT_LIST_HEAD(&sw_context->ctx_list);
sw_context->cur_query_bo = dev_priv->pinned_bo;
@@ -4117,7 +4171,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
vmw_binding_state_reset(sw_context->staged_bindings);
if (!sw_context->res_ht_initialized) {
- ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
+ ret = vmwgfx_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
if (unlikely(ret != 0))
goto out_unlock;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index d18c6a56e3dc..8ee34576c7d0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -394,22 +394,15 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
struct vmw_buffer_object *vmw_bo;
int ret;
- vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
- if (!vmw_bo) {
- ret = -ENOMEM;
- goto err_unlock;
- }
-
- ret = vmw_bo_init(vmw_priv, vmw_bo, size,
+ ret = vmw_bo_create(vmw_priv, size,
&vmw_sys_placement,
false, false,
- &vmw_bo_bo_free);
+ &vmw_bo_bo_free, &vmw_bo);
if (unlikely(ret != 0))
- goto err_unlock; /* init frees the buffer on failure */
+ return ret;
*out = vmw_bo;
-err_unlock:
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 9fe12329a4d5..c60d395f9e2e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -37,9 +37,6 @@ struct vmw_fence_manager {
spinlock_t lock;
struct list_head fence_list;
struct work_struct work;
- u32 user_fence_size;
- u32 fence_size;
- u32 event_fence_action_size;
bool fifo_down;
struct list_head cleanup_list;
uint32_t pending_actions[VMW_ACTION_MAX];
@@ -304,11 +301,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
INIT_LIST_HEAD(&fman->cleanup_list);
INIT_WORK(&fman->work, &vmw_fence_work_func);
fman->fifo_down = true;
- fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
- TTM_OBJ_EXTRA_SIZE;
- fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
- fman->event_fence_action_size =
- ttm_round_pot(sizeof(struct vmw_event_fence_action));
mutex_init(&fman->goal_irq_mutex);
fman->ctx = dma_fence_context_alloc(1);
@@ -560,14 +552,8 @@ static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
struct vmw_user_fence *ufence =
container_of(fence, struct vmw_user_fence, fence);
- struct vmw_fence_manager *fman = fman_from_fence(fence);
ttm_base_object_kfree(ufence, base);
- /*
- * Free kernel space accounting.
- */
- ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
- fman->user_fence_size);
}
static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
@@ -590,23 +576,8 @@ int vmw_user_fence_create(struct drm_file *file_priv,
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_user_fence *ufence;
struct vmw_fence_obj *tmp;
- struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
int ret;
- /*
- * Kernel memory space accounting, since this object may
- * be created by a user-space request.
- */
-
- ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
- &ctx);
- if (unlikely(ret != 0))
- return ret;
-
ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
if (unlikely(!ufence)) {
ret = -ENOMEM;
@@ -625,9 +596,10 @@ int vmw_user_fence_create(struct drm_file *file_priv,
* vmw_user_fence_base_release.
*/
tmp = vmw_fence_obj_reference(&ufence->fence);
+
ret = ttm_base_object_init(tfile, &ufence->base, false,
VMW_RES_FENCE,
- &vmw_user_fence_base_release, NULL);
+ &vmw_user_fence_base_release);
if (unlikely(ret != 0)) {
@@ -646,7 +618,6 @@ out_err:
tmp = &ufence->fence;
vmw_fence_obj_unreference(&tmp);
out_no_object:
- ttm_mem_global_free(mem_glob, fman->user_fence_size);
return ret;
}
@@ -831,8 +802,7 @@ out:
*/
if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
- return ttm_ref_object_base_unref(tfile, arg->handle,
- TTM_REF_USAGE);
+ return ttm_ref_object_base_unref(tfile, arg->handle);
return ret;
}
@@ -874,8 +844,7 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
(struct drm_vmw_fence_arg *) data;
return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- arg->handle,
- TTM_REF_USAGE);
+ arg->handle);
}
/**
@@ -1121,7 +1090,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
if (user_fence_rep != NULL) {
ret = ttm_ref_object_add(vmw_fp->tfile, base,
- TTM_REF_USAGE, NULL, false);
+ NULL, false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to reference a fence "
"object.\n");
@@ -1164,7 +1133,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
return 0;
out_no_create:
if (user_fence_rep != NULL)
- ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+ ttm_ref_object_base_unref(tfile, handle);
out_no_ref_obj:
vmw_fence_obj_unreference(&fence);
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
new file mode 100644
index 000000000000..ce609e7d758f
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright 2021 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include "vmwgfx_drv.h"
+
+#include "drm/drm_prime.h"
+#include "drm/drm_gem_ttm_helper.h"
+
+/**
+ * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
+ * vmw_buffer_object.
+ *
+ * @bo: Pointer to the TTM buffer object.
+ * Return: Pointer to the struct vmw_buffer_object embedding the
+ * TTM buffer object.
+ */
+static struct vmw_buffer_object *
+vmw_buffer_object(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct vmw_buffer_object, base);
+}
+
+static void vmw_gem_object_free(struct drm_gem_object *gobj)
+{
+ struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gobj);
+
+ if (bo)
+ ttm_bo_put(bo);
+}
+
+static int vmw_gem_object_open(struct drm_gem_object *obj,
+ struct drm_file *file_priv)
+{
+ return 0;
+}
+
+static void vmw_gem_object_close(struct drm_gem_object *obj,
+ struct drm_file *file_priv)
+{
+}
+
+static int vmw_gem_pin_private(struct drm_gem_object *obj, bool do_pin)
+{
+ struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
+ struct vmw_buffer_object *vbo = vmw_buffer_object(bo);
+ int ret;
+
+ ret = ttm_bo_reserve(bo, false, false, NULL);
+ if (unlikely(ret != 0))
+ goto err;
+
+ vmw_bo_pin_reserved(vbo, do_pin);
+
+ ttm_bo_unreserve(bo);
+
+err:
+ return ret;
+}
+
+
+static int vmw_gem_object_pin(struct drm_gem_object *obj)
+{
+ return vmw_gem_pin_private(obj, true);
+}
+
+static void vmw_gem_object_unpin(struct drm_gem_object *obj)
+{
+ vmw_gem_pin_private(obj, false);
+}
+
+static struct sg_table *vmw_gem_object_get_sg_table(struct drm_gem_object *obj)
+{
+ struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
+ struct vmw_ttm_tt *vmw_tt =
+ container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
+
+ if (vmw_tt->vsgt.sgt)
+ return vmw_tt->vsgt.sgt;
+
+ return drm_prime_pages_to_sg(obj->dev, vmw_tt->dma_ttm.pages, vmw_tt->dma_ttm.num_pages);
+}
+
+
+static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
+ .free = vmw_gem_object_free,
+ .open = vmw_gem_object_open,
+ .close = vmw_gem_object_close,
+ .print_info = drm_gem_ttm_print_info,
+ .pin = vmw_gem_object_pin,
+ .unpin = vmw_gem_object_unpin,
+ .get_sg_table = vmw_gem_object_get_sg_table,
+ .vmap = drm_gem_ttm_vmap,
+ .vunmap = drm_gem_ttm_vunmap,
+ .mmap = drm_gem_ttm_mmap,
+};
+
+/**
+ * vmw_gem_destroy - vmw buffer object destructor
+ *
+ * @bo: Pointer to the embedded struct ttm_buffer_object
+ */
+void vmw_gem_destroy(struct ttm_buffer_object *bo)
+{
+ struct vmw_buffer_object *vbo = vmw_buffer_object(bo);
+
+ WARN_ON(vbo->dirty);
+ WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
+ vmw_bo_unmap(vbo);
+ drm_gem_object_release(&vbo->base.base);
+ kfree(vbo);
+}
+
+int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
+ struct drm_file *filp,
+ uint32_t size,
+ uint32_t *handle,
+ struct vmw_buffer_object **p_vbo)
+{
+ int ret;
+
+ ret = vmw_bo_create(dev_priv, size,
+ (dev_priv->has_mob) ?
+ &vmw_sys_placement :
+ &vmw_vram_sys_placement,
+ true, false, &vmw_gem_destroy, p_vbo);
+
+ if (ret != 0)
+ goto out_no_bo;
+
+ (*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
+
+ ret = drm_gem_handle_create(filp, &(*p_vbo)->base.base, handle);
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_put(&(*p_vbo)->base.base);
+out_no_bo:
+ return ret;
+}
+
+
+int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ union drm_vmw_alloc_dmabuf_arg *arg =
+ (union drm_vmw_alloc_dmabuf_arg *)data;
+ struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
+ struct drm_vmw_dmabuf_rep *rep = &arg->rep;
+ struct vmw_buffer_object *vbo;
+ uint32_t handle;
+ int ret;
+
+ ret = vmw_gem_object_create_with_handle(dev_priv, filp,
+ req->size, &handle, &vbo);
+ if (ret)
+ goto out_no_bo;
+
+ rep->handle = handle;
+ rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
+ rep->cur_gmr_id = handle;
+ rep->cur_gmr_offset = 0;
+out_no_bo:
+ return ret;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static void vmw_bo_print_info(int id, struct vmw_buffer_object *bo, struct seq_file *m)
+{
+ const char *placement;
+ const char *type;
+
+ switch (bo->base.resource->mem_type) {
+ case TTM_PL_SYSTEM:
+ placement = " CPU";
+ break;
+ case VMW_PL_GMR:
+ placement = " GMR";
+ break;
+ case VMW_PL_MOB:
+ placement = " MOB";
+ break;
+ case VMW_PL_SYSTEM:
+ placement = "VCPU";
+ break;
+ case TTM_PL_VRAM:
+ placement = "VRAM";
+ break;
+ default:
+ placement = "None";
+ break;
+ }
+
+ switch (bo->base.type) {
+ case ttm_bo_type_device:
+ type = "device";
+ break;
+ case ttm_bo_type_kernel:
+ type = "kernel";
+ break;
+ case ttm_bo_type_sg:
+ type = "sg ";
+ break;
+ default:
+ type = "none ";
+ break;
+ }
+
+ seq_printf(m, "\t\t0x%08x: %12zu bytes %s, type = %s",
+ id, bo->base.base.size, placement, type);
+ seq_printf(m, ", priority = %u, pin_count = %u, GEM refs = %d, TTM refs = %d",
+ bo->base.priority,
+ bo->base.pin_count,
+ kref_read(&bo->base.base.refcount),
+ kref_read(&bo->base.kref));
+ seq_puts(m, "\n");
+}
+
+static int vmw_debugfs_gem_info_show(struct seq_file *m, void *unused)
+{
+ struct vmw_private *vdev = (struct vmw_private *)m->private;
+ struct drm_device *dev = &vdev->drm;
+ struct drm_file *file;
+ int r;
+
+ r = mutex_lock_interruptible(&dev->filelist_mutex);
+ if (r)
+ return r;
+
+ list_for_each_entry(file, &dev->filelist, lhead) {
+ struct task_struct *task;
+ struct drm_gem_object *gobj;
+ int id;
+
+ /*
+ * Although we have a valid reference on file->pid, that does
+ * not guarantee that the task_struct who called get_pid() is
+ * still alive (e.g. get_pid(current) => fork() => exit()).
+ * Therefore, we need to protect this ->comm access using RCU.
+ */
+ rcu_read_lock();
+ task = pid_task(file->pid, PIDTYPE_PID);
+ seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
+ task ? task->comm : "<unknown>");
+ rcu_read_unlock();
+
+ spin_lock(&file->table_lock);
+ idr_for_each_entry(&file->object_idr, gobj, id) {
+ struct vmw_buffer_object *bo = gem_to_vmw_bo(gobj);
+
+ vmw_bo_print_info(id, bo, m);
+ }
+ spin_unlock(&file->table_lock);
+ }
+
+ mutex_unlock(&dev->filelist_mutex);
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(vmw_debugfs_gem_info);
+
+#endif
+
+void vmw_debugfs_gem_init(struct vmw_private *vdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = vdev->drm.primary;
+ struct dentry *root = minor->debugfs_root;
+
+ debugfs_create_file("vmwgfx_gem_info", 0444, root, vdev,
+ &vmw_debugfs_gem_info_fops);
+#endif
+}
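The GEM-backed object created above is later retrieved by handle through the reworked vmw_user_bo_lookup() whose new prototype appears in the vmwgfx_drv.h hunk. One plausible implementation on top of the GEM handle table, sketched here under the assumption that callers keep receiving a TTM-style reference, is:

int vmw_user_bo_lookup(struct drm_file *filp, uint32_t handle,
		       struct vmw_buffer_object **out)
{
	struct drm_gem_object *gobj = drm_gem_object_lookup(filp, handle);

	if (!gobj)
		return -ESRCH;

	*out = gem_to_vmw_bo(gobj);
	/* Hand the caller a TTM reference and drop the GEM lookup reference. */
	ttm_bo_get(&(*out)->base);
	drm_gem_object_put(gobj);
	return 0;
}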
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index b2c4af331c9d..ebb4505a31a3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -42,6 +42,7 @@ struct vmwgfx_gmrid_man {
uint32_t max_gmr_ids;
uint32_t max_gmr_pages;
uint32_t used_gmr_pages;
+ uint8_t type;
};
static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_resource_manager *man)
@@ -132,6 +133,18 @@ static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
kfree(res);
}
+static void vmw_gmrid_man_debug(struct ttm_resource_manager *man,
+ struct drm_printer *printer)
+{
+ struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
+
+ BUG_ON(gman->type != VMW_PL_GMR && gman->type != VMW_PL_MOB);
+
+ drm_printf(printer, "%s's used: %u pages, max: %u pages, %u ids\n",
+ (gman->type == VMW_PL_MOB) ? "Mob" : "GMR",
+ gman->used_gmr_pages, gman->max_gmr_pages, gman->max_gmr_ids);
+}
+
static const struct ttm_resource_manager_func vmw_gmrid_manager_func;
int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type)
@@ -146,12 +159,12 @@ int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type)
man = &gman->manager;
man->func = &vmw_gmrid_manager_func;
- /* TODO: This is most likely not correct */
man->use_tt = true;
ttm_resource_manager_init(man, 0);
spin_lock_init(&gman->lock);
gman->used_gmr_pages = 0;
ida_init(&gman->gmr_ida);
+ gman->type = type;
switch (type) {
case VMW_PL_GMR:
@@ -190,4 +203,5 @@ void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type)
static const struct ttm_resource_manager_func vmw_gmrid_manager_func = {
.alloc = vmw_gmrid_man_get_node,
.free = vmw_gmrid_man_put_node,
+ .debug = vmw_gmrid_man_debug
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c b/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c
new file mode 100644
index 000000000000..06aebc12774e
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Simple open hash tab implementation.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include <linux/export.h>
+#include <linux/hash.h>
+#include <linux/mm.h>
+#include <linux/rculist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <drm/drm_print.h>
+
+#include "vmwgfx_hashtab.h"
+
+int vmwgfx_ht_create(struct vmwgfx_open_hash *ht, unsigned int order)
+{
+ unsigned int size = 1 << order;
+
+ ht->order = order;
+ ht->table = NULL;
+ if (size <= PAGE_SIZE / sizeof(*ht->table))
+ ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL);
+ else
+ ht->table = vzalloc(array_size(size, sizeof(*ht->table)));
+ if (!ht->table) {
+ DRM_ERROR("Out of memory for hash table\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void vmwgfx_ht_verbose_list(struct vmwgfx_open_hash *ht, unsigned long key)
+{
+ struct vmwgfx_hash_item *entry;
+ struct hlist_head *h_list;
+ unsigned int hashed_key;
+ int count = 0;
+
+ hashed_key = hash_long(key, ht->order);
+ DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
+ h_list = &ht->table[hashed_key];
+ hlist_for_each_entry(entry, h_list, head)
+ DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
+}
+
+static struct hlist_node *vmwgfx_ht_find_key(struct vmwgfx_open_hash *ht, unsigned long key)
+{
+ struct vmwgfx_hash_item *entry;
+ struct hlist_head *h_list;
+ unsigned int hashed_key;
+
+ hashed_key = hash_long(key, ht->order);
+ h_list = &ht->table[hashed_key];
+ hlist_for_each_entry(entry, h_list, head) {
+ if (entry->key == key)
+ return &entry->head;
+ if (entry->key > key)
+ break;
+ }
+ return NULL;
+}
+
+static struct hlist_node *vmwgfx_ht_find_key_rcu(struct vmwgfx_open_hash *ht, unsigned long key)
+{
+ struct vmwgfx_hash_item *entry;
+ struct hlist_head *h_list;
+ unsigned int hashed_key;
+
+ hashed_key = hash_long(key, ht->order);
+ h_list = &ht->table[hashed_key];
+ hlist_for_each_entry_rcu(entry, h_list, head) {
+ if (entry->key == key)
+ return &entry->head;
+ if (entry->key > key)
+ break;
+ }
+ return NULL;
+}
+
+int vmwgfx_ht_insert_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item)
+{
+ struct vmwgfx_hash_item *entry;
+ struct hlist_head *h_list;
+ struct hlist_node *parent;
+ unsigned int hashed_key;
+ unsigned long key = item->key;
+
+ hashed_key = hash_long(key, ht->order);
+ h_list = &ht->table[hashed_key];
+ parent = NULL;
+ hlist_for_each_entry(entry, h_list, head) {
+ if (entry->key == key)
+ return -EINVAL;
+ if (entry->key > key)
+ break;
+ parent = &entry->head;
+ }
+ if (parent)
+ hlist_add_behind_rcu(&item->head, parent);
+ else
+ hlist_add_head_rcu(&item->head, h_list);
+ return 0;
+}
+
+/*
+ * Just insert an item and return any "bits" bit key that hasn't been
+ * used before.
+ */
+int vmwgfx_ht_just_insert_please(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item,
+ unsigned long seed, int bits, int shift,
+ unsigned long add)
+{
+ int ret;
+ unsigned long mask = (1UL << bits) - 1;
+ unsigned long first, unshifted_key;
+
+ unshifted_key = hash_long(seed, bits);
+ first = unshifted_key;
+ do {
+ item->key = (unshifted_key << shift) + add;
+ ret = vmwgfx_ht_insert_item(ht, item);
+ if (ret)
+ unshifted_key = (unshifted_key + 1) & mask;
+ } while (ret && (unshifted_key != first));
+
+ if (ret) {
+ DRM_ERROR("Available key bit space exhausted\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int vmwgfx_ht_find_item(struct vmwgfx_open_hash *ht, unsigned long key,
+ struct vmwgfx_hash_item **item)
+{
+ struct hlist_node *list;
+
+ list = vmwgfx_ht_find_key_rcu(ht, key);
+ if (!list)
+ return -EINVAL;
+
+ *item = hlist_entry(list, struct vmwgfx_hash_item, head);
+ return 0;
+}
+
+int vmwgfx_ht_remove_key(struct vmwgfx_open_hash *ht, unsigned long key)
+{
+ struct hlist_node *list;
+
+ list = vmwgfx_ht_find_key(ht, key);
+ if (list) {
+ hlist_del_init_rcu(list);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+int vmwgfx_ht_remove_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item)
+{
+ hlist_del_init_rcu(&item->head);
+ return 0;
+}
+
+void vmwgfx_ht_remove(struct vmwgfx_open_hash *ht)
+{
+ if (ht->table) {
+ kvfree(ht->table);
+ ht->table = NULL;
+ }
+}
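The table above is consumed the same way drm_hashtab was before the rename; the command buffer resource manager earlier in this patch is the main user. A minimal, illustrative sketch follows, with made-up names and the table assumed to have been set up with vmwgfx_ht_create():

struct example_entry {
	struct vmwgfx_hash_item hash;
	void *payload;
};

static int example_track(struct vmwgfx_open_hash *ht,
			 struct example_entry *entry, unsigned long key)
{
	struct vmwgfx_hash_item *hit;
	int ret;

	entry->hash.key = key;
	ret = vmwgfx_ht_insert_item(ht, &entry->hash); /* -EINVAL on duplicate key */
	if (ret)
		return ret;

	/* Lookups return the embedded item; container_of() recovers the entry. */
	if (vmwgfx_ht_find_item(ht, key, &hit) == 0)
		WARN_ON(container_of(hit, struct example_entry, hash) != entry);

	return vmwgfx_ht_remove_item(ht, &entry->hash);
}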
diff --git a/include/drm/drm_hashtab.h b/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.h
index bb95ff011baf..a9ce12922e21 100644
--- a/include/drm/drm_hashtab.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.h
@@ -1,5 +1,4 @@
-/**************************************************************************
- *
+/*
* Copyright 2006 Tungsten Graphics, Inc., Bismack, ND. USA.
* All Rights Reserved.
*
@@ -22,9 +21,8 @@
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- **************************************************************************/
+ */
+
/*
* Simple open hash tab implementation.
*
@@ -32,48 +30,54 @@
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
-#ifndef DRM_HASHTAB_H
-#define DRM_HASHTAB_H
+/*
+ * TODO: Replace this hashtable with Linux' generic implementation
+ * from <linux/hashtable.h>.
+ */
+
+#ifndef VMWGFX_HASHTAB_H
+#define VMWGFX_HASHTAB_H
#include <linux/list.h>
#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
-struct drm_hash_item {
+struct vmwgfx_hash_item {
struct hlist_node head;
unsigned long key;
};
-struct drm_open_hash {
+struct vmwgfx_open_hash {
struct hlist_head *table;
u8 order;
};
-int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
-int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
-int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
- unsigned long seed, int bits, int shift,
- unsigned long add);
-int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
+int vmwgfx_ht_create(struct vmwgfx_open_hash *ht, unsigned int order);
+int vmwgfx_ht_insert_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item);
+int vmwgfx_ht_just_insert_please(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item,
+ unsigned long seed, int bits, int shift,
+ unsigned long add);
+int vmwgfx_ht_find_item(struct vmwgfx_open_hash *ht, unsigned long key,
+ struct vmwgfx_hash_item **item);
-void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
-int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
-int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
-void drm_ht_remove(struct drm_open_hash *ht);
+void vmwgfx_ht_verbose_list(struct vmwgfx_open_hash *ht, unsigned long key);
+int vmwgfx_ht_remove_key(struct vmwgfx_open_hash *ht, unsigned long key);
+int vmwgfx_ht_remove_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item);
+void vmwgfx_ht_remove(struct vmwgfx_open_hash *ht);
/*
* RCU-safe interface
*
* The user of this API needs to make sure that two or more instances of the
* hash table manipulation functions are never run simultaneously.
- * The lookup function drm_ht_find_item_rcu may, however, run simultaneously
+ * The lookup function vmwgfx_ht_find_item_rcu may, however, run simultaneously
* with any of the manipulation functions as long as it's called from within
* an RCU read-locked section.
*/
-#define drm_ht_insert_item_rcu drm_ht_insert_item
-#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please
-#define drm_ht_remove_key_rcu drm_ht_remove_key
-#define drm_ht_remove_item_rcu drm_ht_remove_item
-#define drm_ht_find_item_rcu drm_ht_find_item
+#define vmwgfx_ht_insert_item_rcu vmwgfx_ht_insert_item
+#define vmwgfx_ht_just_insert_please_rcu vmwgfx_ht_just_insert_please
+#define vmwgfx_ht_remove_key_rcu vmwgfx_ht_remove_key
+#define vmwgfx_ht_remove_item_rcu vmwgfx_ht_remove_item
+#define vmwgfx_ht_find_item_rcu vmwgfx_ht_find_item
#endif
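
The comment block above pins down the concurrency contract for the renamed table: the manipulation functions must be serialised externally, while lookups may run concurrently inside an RCU read-side section. A minimal usage sketch under that contract — the mutex, the wrapper type and the key value are illustrative assumptions, not part of the vmwgfx API (assumed headers: <linux/mutex.h>, <linux/rcupdate.h> and vmwgfx_hashtab.h):

struct my_obj {
	struct vmwgfx_hash_item hash;	/* embeds the hlist_node and the key */
	int payload;
};

static DEFINE_MUTEX(ht_lock);		/* assumed external lock for all writers */

static int my_obj_register(struct vmwgfx_open_hash *ht, struct my_obj *obj,
			   unsigned long key)
{
	int ret;

	obj->hash.key = key;
	mutex_lock(&ht_lock);		/* manipulation functions never run in parallel */
	ret = vmwgfx_ht_insert_item_rcu(ht, &obj->hash);
	mutex_unlock(&ht_lock);
	return ret;
}

static struct my_obj *my_obj_lookup(struct vmwgfx_open_hash *ht, unsigned long key)
{
	struct vmwgfx_hash_item *item;
	struct my_obj *obj = NULL;

	rcu_read_lock();		/* lookup may race with a single writer */
	if (vmwgfx_ht_find_item_rcu(ht, key, &item) == 0)	/* 0 == found, as in the old drm_ht API */
		obj = container_of(item, struct my_obj, hash);
	rcu_read_unlock();
	return obj;			/* caller must guarantee the object outlives the lookup */
}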
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 28af34ab6ed6..471da2b4c177 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -105,6 +105,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_SM5:
param->value = has_sm5_context(dev_priv);
break;
+ case DRM_VMW_PARAM_GL43:
+ param->value = has_gl43_context(dev_priv);
+ break;
default:
return -EINVAL;
}
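
The new case follows the established getparam pattern: user space names a parameter and reads back a single 64-bit value. A hedged user-space sketch, assuming libdrm's drmCommandWriteRead() together with struct drm_vmw_getparam_arg and DRM_VMW_GET_PARAM from the vmwgfx UAPI header; DRM_VMW_PARAM_GL43 is the value this hunk handles:

#include <stdint.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"		/* struct drm_vmw_getparam_arg, DRM_VMW_GET_PARAM */

/* Returns 1 if the device exposes a GL 4.3 capable context, 0 otherwise. */
static int vmw_has_gl43(int fd)
{
	struct drm_vmw_getparam_arg arg = {
		.param = DRM_VMW_PARAM_GL43,
	};

	if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &arg, sizeof(arg)))
		return 0;	/* older kernels reject the unknown param with -EINVAL */
	return arg.value != 0;
}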
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 74fa41909213..4e693e8de2c3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -843,8 +843,6 @@ static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
drm_framebuffer_cleanup(framebuffer);
vmw_surface_unreference(&vfbs->surface);
- if (vfbs->base.user_obj)
- ttm_base_object_unref(&vfbs->base.user_obj);
kfree(vfbs);
}
@@ -989,6 +987,16 @@ out_err1:
* Buffer-object framebuffer code
*/
+static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct vmw_framebuffer_bo *vfbd =
+ vmw_framebuffer_to_vfbd(fb);
+
+ return drm_gem_handle_create(file_priv, &vfbd->buffer->base.base, handle);
+}
+
static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_bo *vfbd =
@@ -996,8 +1004,6 @@ static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
drm_framebuffer_cleanup(framebuffer);
vmw_bo_unreference(&vfbd->buffer);
- if (vfbd->base.user_obj)
- ttm_base_object_unref(&vfbd->base.user_obj);
kfree(vfbd);
}
@@ -1063,6 +1069,7 @@ static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer,
}
static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
+ .create_handle = vmw_framebuffer_bo_create_handle,
.destroy = vmw_framebuffer_bo_destroy,
.dirty = vmw_framebuffer_bo_dirty_ext,
};
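
Hooking .create_handle up to drm_gem_handle_create() lets generic KMS queries such as getfb report a GEM handle for the buffer behind a framebuffer. A small user-space sketch of the effect, using libdrm's drmModeGetFB(); the kernel only fills in the handle for the DRM master or a privileged caller, and error handling is trimmed:

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Fetch the GEM handle backing framebuffer fb_id (0 on failure). */
static uint32_t fb_backing_handle(int fd, uint32_t fb_id)
{
	drmModeFBPtr fb = drmModeGetFB(fd, fb_id);
	uint32_t handle = 0;

	if (fb) {
		handle = fb->handle;	/* produced by the .create_handle hook above */
		drmModeFreeFB(fb);
	}
	return handle;
}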
@@ -1188,7 +1195,7 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
metadata.base_size.depth = 1;
metadata.scanout = true;
- ret = vmw_gb_surface_define(vmw_priv(dev), 0, &metadata, srf_out);
+ ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
if (ret) {
DRM_ERROR("Failed to allocate proxy content buffer\n");
return ret;
@@ -1251,6 +1258,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
goto out_err1;
}
+ vfbd->base.base.obj[0] = &bo->base.base;
drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
vfbd->base.bo = true;
vfbd->buffer = vmw_bo_reference(bo);
@@ -1368,34 +1376,13 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_framebuffer *vfb = NULL;
struct vmw_surface *surface = NULL;
struct vmw_buffer_object *bo = NULL;
- struct ttm_base_object *user_obj;
int ret;
- /*
- * Take a reference on the user object of the resource
- * backing the kms fb. This ensures that user-space handle
- * lookups on that resource will always work as long as
- * it's registered with a kms framebuffer. This is important,
- * since vmw_execbuf_process identifies resources in the
- * command stream using user-space handles.
- */
-
- user_obj = ttm_base_object_lookup(tfile, mode_cmd->handles[0]);
- if (unlikely(user_obj == NULL)) {
- DRM_ERROR("Could not locate requested kms frame buffer.\n");
- return ERR_PTR(-ENOENT);
- }
-
- /**
- * End conditioned code.
- */
-
/* returns either a bo or surface */
- ret = vmw_user_lookup_handle(dev_priv, tfile,
+ ret = vmw_user_lookup_handle(dev_priv, file_priv,
mode_cmd->handles[0],
&surface, &bo);
if (ret)
@@ -1428,10 +1415,8 @@ err_out:
if (ret) {
DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
- ttm_base_object_unref(&user_obj);
return ERR_PTR(ret);
- } else
- vfb->user_obj = user_obj;
+ }
return &vfb->base;
}
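
With the TTM user-object reference gone, the handle in drm_mode_fb_cmd2 is resolved purely through the DRM file — vmw_user_lookup_handle() now takes file_priv — so it is an ordinary per-client GEM or surface handle. A user-space sketch of wrapping such a handle in a framebuffer with libdrm's drmModeAddFB2(); the handle, width, height and stride are placeholders, and the format choice is only an example:

#include <stdint.h>
#include <drm_fourcc.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Wrap an existing per-client GEM handle in a KMS framebuffer. */
static int create_fb_from_handle(int fd, uint32_t handle,
				 uint32_t width, uint32_t height,
				 uint32_t stride, uint32_t *fb_id)
{
	uint32_t handles[4] = { handle };
	uint32_t pitches[4] = { stride };
	uint32_t offsets[4] = { 0 };

	/* The driver resolves 'handle' through the calling DRM file, as above. */
	return drmModeAddFB2(fd, width, height, DRM_FORMAT_XRGB8888,
			     handles, pitches, offsets, fb_id, 0);
}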
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index bbc809f7bd8a..4d36e8507380 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -219,7 +219,6 @@ struct vmw_framebuffer {
int (*pin)(struct vmw_framebuffer *fb);
int (*unpin)(struct vmw_framebuffer *fb);
bool bo;
- struct ttm_base_object *user_obj;
uint32_t user_handle;
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index f9394207dd3c..0a8cc28d6606 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2012-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2012-2021 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -29,12 +29,6 @@
#include "vmwgfx_drv.h"
-/*
- * If we set up the screen target otable, screen objects stop working.
- */
-
-#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE ? 0 : 1))
-
#ifdef CONFIG_64BIT
#define VMW_PPN_SIZE 8
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PT64_0
@@ -75,7 +69,7 @@ static const struct vmw_otable pre_dx_tables[] = {
{VMWGFX_NUM_GB_CONTEXT * sizeof(SVGAOTableContextEntry), NULL, true},
{VMWGFX_NUM_GB_SHADER * sizeof(SVGAOTableShaderEntry), NULL, true},
{VMWGFX_NUM_GB_SCREEN_TARGET * sizeof(SVGAOTableScreenTargetEntry),
- NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}
+ NULL, true}
};
static const struct vmw_otable dx_tables[] = {
@@ -84,7 +78,7 @@ static const struct vmw_otable dx_tables[] = {
{VMWGFX_NUM_GB_CONTEXT * sizeof(SVGAOTableContextEntry), NULL, true},
{VMWGFX_NUM_GB_SHADER * sizeof(SVGAOTableShaderEntry), NULL, true},
{VMWGFX_NUM_GB_SCREEN_TARGET * sizeof(SVGAOTableScreenTargetEntry),
- NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE},
+ NULL, true},
{VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true},
};
@@ -146,9 +140,6 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
if (otable->size <= PAGE_SIZE) {
mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
mob->pt_root_page = vmw_piter_dma_addr(&iter);
- } else if (vsgt->num_regions == 1) {
- mob->pt_level = SVGA3D_MOBFMT_RANGE;
- mob->pt_root_page = vmw_piter_dma_addr(&iter);
} else {
ret = vmw_mob_pt_populate(dev_priv, mob);
if (unlikely(ret != 0))
@@ -413,10 +404,9 @@ struct vmw_mob *vmw_mob_create(unsigned long data_pages)
* @mob: Pointer to the mob the pagetable of which we want to
* populate.
*
- * This function allocates memory to be used for the pagetable, and
- * adjusts TTM memory accounting accordingly. Returns ENOMEM if
- * memory resources aren't sufficient and may cause TTM buffer objects
- * to be swapped out by using the TTM memory accounting function.
+ * This function allocates memory to be used for the pagetable.
+ * Returns ENOMEM if memory resources aren't sufficient and may
+ * cause TTM buffer objects to be swapped out.
*/
static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
struct vmw_mob *mob)
@@ -624,9 +614,6 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
if (likely(num_data_pages == 1)) {
mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
- } else if (vsgt->num_regions == 1) {
- mob->pt_level = SVGA3D_MOBFMT_RANGE;
- mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
} else if (unlikely(mob->pt_bo == NULL)) {
ret = vmw_mob_pt_populate(dev_priv, mob);
if (unlikely(ret != 0))
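
Dropping the SVGA3D_MOBFMT_RANGE special case leaves two paths in the otable setup: a MOB that fits one page points its root directly at the data page (PTDEPTH_0), anything larger gets a real page table from vmw_mob_pt_populate(). A hypothetical helper sketching that choice — the function and its return convention are illustrative; only PAGE_SIZE and VMW_PPN_SIZE come from the code above:

/* Illustrative only: how many page-table levels a MOB of this size needs. */
static int mob_pt_levels_needed(unsigned long num_data_pages)
{
	const unsigned long ppns_per_page = PAGE_SIZE / VMW_PPN_SIZE;

	if (num_data_pages <= 1)
		return 0;	/* root page number points at the data page itself */
	if (num_data_pages <= ppns_per_page)
		return 1;	/* one page of PPNs covers the whole object */
	return 2;		/* a second page-table level is required */
}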
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 54c5d16eb3b7..e9f5c89b4ca6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -451,7 +451,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
- ret = vmw_user_bo_lookup(tfile, arg->handle, &buf, NULL);
+ ret = vmw_user_bo_lookup(file_priv, arg->handle, &buf);
if (ret)
goto out_unlock;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index 922317d1acc8..7bc99b1279f7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -57,7 +57,6 @@ enum vmw_bo_dirty_method {
* @ref_count: Reference count for this structure
* @bitmap_size: The size of the bitmap in bits. Typically equal to the
 * number of pages in the bo.
- * @size: The accounting size for this struct.
* @bitmap: A bitmap where each bit represents a page. A set bit means a
* dirty page.
*/
@@ -68,7 +67,6 @@ struct vmw_bo_dirty {
unsigned int change_count;
unsigned int ref_count;
unsigned long bitmap_size;
- size_t size;
unsigned long bitmap[];
};
@@ -233,12 +231,8 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
pgoff_t num_pages = vbo->base.resource->num_pages;
- size_t size, acc_size;
+ size_t size;
int ret;
- static struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
if (dirty) {
dirty->ref_count++;
@@ -246,20 +240,12 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
}
size = sizeof(*dirty) + BITS_TO_LONGS(num_pages) * sizeof(long);
- acc_size = ttm_round_pot(size);
- ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
- if (ret) {
- VMW_DEBUG_USER("Out of graphics memory for buffer object "
- "dirty tracker.\n");
- return ret;
- }
dirty = kvzalloc(size, GFP_KERNEL);
if (!dirty) {
ret = -ENOMEM;
goto out_no_dirty;
}
- dirty->size = acc_size;
dirty->bitmap_size = num_pages;
dirty->start = dirty->bitmap_size;
dirty->end = 0;
@@ -285,7 +271,6 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
return 0;
out_no_dirty:
- ttm_mem_global_free(&ttm_mem_glob, acc_size);
return ret;
}
@@ -304,10 +289,7 @@ void vmw_bo_dirty_release(struct vmw_buffer_object *vbo)
struct vmw_bo_dirty *dirty = vbo->dirty;
if (dirty && --dirty->ref_count == 0) {
- size_t acc_size = dirty->size;
-
kvfree(dirty);
- ttm_mem_global_free(&ttm_mem_glob, acc_size);
vbo->dirty = NULL;
}
}
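
The dirty tracker now takes its bitmap straight from kvzalloc() with no TTM accounting: one bit per page, with start/end caching the span of dirty pages. A simplified kernel-style sketch of the same bookkeeping; the names are illustrative, not the driver's (assumed headers: <linux/bitops.h>, <linux/overflow.h>, <linux/minmax.h>, <linux/slab.h>):

struct dirty_map {
	unsigned long start;		/* first dirty page, == bitmap_size when empty */
	unsigned long end;		/* one past the last dirty page, 0 when empty */
	unsigned long bitmap_size;	/* number of tracked pages */
	unsigned long bitmap[];		/* one bit per page */
};

static struct dirty_map *dirty_map_alloc(unsigned long num_pages)
{
	struct dirty_map *d;

	d = kvzalloc(struct_size(d, bitmap, BITS_TO_LONGS(num_pages)), GFP_KERNEL);
	if (!d)
		return NULL;
	d->bitmap_size = num_pages;
	d->start = num_pages;		/* empty range, same convention as vmw_bo_dirty_add() */
	d->end = 0;
	return d;
}

static void dirty_map_mark(struct dirty_map *d, unsigned long page)
{
	if (!test_and_set_bit(page, d->bitmap)) {
		d->start = min(d->start, page);
		d->end = max(d->end, page + 1);
	}
}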
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
index d9552a1efd13..2d72a5ee7c0c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -85,6 +85,5 @@ int vmw_prime_handle_to_fd(struct drm_device *dev,
int *prime_fd)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-
return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 8d1e869cc196..708899ba2102 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -320,11 +320,12 @@ vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
 * The pointers pointed at by out_surf and out_buf need to be NULL.
*/
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
+ struct drm_file *filp,
uint32_t handle,
struct vmw_surface **out_surf,
struct vmw_buffer_object **out_buf)
{
+ struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
struct vmw_resource *res;
int ret;
@@ -339,7 +340,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
}
*out_surf = NULL;
- ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
+ ret = vmw_user_bo_lookup(filp, handle, out_buf);
return ret;
}
@@ -362,14 +363,10 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
return 0;
}
- backup = kzalloc(sizeof(*backup), GFP_KERNEL);
- if (unlikely(!backup))
- return -ENOMEM;
-
- ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
- res->func->backup_placement,
- interruptible, false,
- &vmw_bo_bo_free);
+ ret = vmw_bo_create(res->dev_priv, res->backup_size,
+ res->func->backup_placement,
+ interruptible, false,
+ &vmw_bo_bo_free, &backup);
if (unlikely(ret != 0))
goto out_no_bo;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index bd157fb21b45..3004c7a719e9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -442,19 +442,15 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
vps->bo_size = 0;
}
- vps->bo = kzalloc(sizeof(*vps->bo), GFP_KERNEL);
- if (!vps->bo)
- return -ENOMEM;
-
vmw_svga_enable(dev_priv);
 /* After we have alloced the backing store we might not be able to
 * resume the overlays; this is preferred to failing to alloc.
*/
vmw_overlay_pause_all(dev_priv);
- ret = vmw_bo_init(dev_priv, vps->bo, size,
- &vmw_vram_placement,
- false, true, &vmw_bo_bo_free);
+ ret = vmw_bo_create(dev_priv, size,
+ &vmw_vram_placement,
+ false, true, &vmw_bo_bo_free, &vps->bo);
vmw_overlay_resume_all(dev_priv);
if (ret) {
vps->bo = NULL; /* vmw_bo_init frees on error */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index b8dd62529104..108a496b5d18 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -53,10 +53,6 @@ struct vmw_dx_shader {
struct list_head cotable_head;
};
-static uint64_t vmw_user_shader_size;
-static uint64_t vmw_shader_size;
-static size_t vmw_shader_dx_size;
-
static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base);
@@ -79,7 +75,6 @@ static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
enum vmw_cmdbuf_res_state state);
static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type);
static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type);
-static uint64_t vmw_user_shader_size;
static const struct vmw_user_resource_conv user_shader_conv = {
.object_type = VMW_RES_SHADER,
@@ -563,16 +558,14 @@ void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
*
* @res: The shader resource
*
- * Frees the DX shader resource and updates memory accounting.
+ * Frees the DX shader resource.
*/
static void vmw_dx_shader_res_free(struct vmw_resource *res)
{
- struct vmw_private *dev_priv = res->dev_priv;
struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
vmw_resource_unreference(&shader->cotable);
kfree(shader);
- ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
}
/**
@@ -594,30 +587,13 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
struct vmw_dx_shader *shader;
struct vmw_resource *res;
struct vmw_private *dev_priv = ctx->dev_priv;
- struct ttm_operation_ctx ttm_opt_ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
int ret;
- if (!vmw_shader_dx_size)
- vmw_shader_dx_size = ttm_round_pot(sizeof(*shader));
-
if (!vmw_shader_id_ok(user_key, shader_type))
return -EINVAL;
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_shader_dx_size,
- &ttm_opt_ctx);
- if (ret) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for shader "
- "creation.\n");
- return ret;
- }
-
shader = kmalloc(sizeof(*shader), GFP_KERNEL);
if (!shader) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
return -ENOMEM;
}
@@ -669,21 +645,15 @@ static void vmw_user_shader_free(struct vmw_resource *res)
{
struct vmw_user_shader *ushader =
container_of(res, struct vmw_user_shader, shader.res);
- struct vmw_private *dev_priv = res->dev_priv;
ttm_base_object_kfree(ushader, base);
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_user_shader_size);
}
static void vmw_shader_free(struct vmw_resource *res)
{
struct vmw_shader *shader = vmw_res_to_shader(res);
- struct vmw_private *dev_priv = res->dev_priv;
kfree(shader);
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_shader_size);
}
/*
@@ -706,8 +676,7 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- return ttm_ref_object_base_unref(tfile, arg->handle,
- TTM_REF_USAGE);
+ return ttm_ref_object_base_unref(tfile, arg->handle);
}
static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
@@ -722,31 +691,10 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
{
struct vmw_user_shader *ushader;
struct vmw_resource *res, *tmp;
- struct ttm_operation_ctx ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
int ret;
- if (unlikely(vmw_user_shader_size == 0))
- vmw_user_shader_size =
- ttm_round_pot(sizeof(struct vmw_user_shader)) +
- VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- vmw_user_shader_size,
- &ctx);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for shader "
- "creation.\n");
- goto out;
- }
-
ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
if (unlikely(!ushader)) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_user_shader_size);
ret = -ENOMEM;
goto out;
}
@@ -769,7 +717,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
tmp = vmw_resource_reference(res);
ret = ttm_base_object_init(tfile, &ushader->base, false,
VMW_RES_SHADER,
- &vmw_user_shader_base_release, NULL);
+ &vmw_user_shader_base_release);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
@@ -793,31 +741,10 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
{
struct vmw_shader *shader;
struct vmw_resource *res;
- struct ttm_operation_ctx ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
int ret;
- if (unlikely(vmw_shader_size == 0))
- vmw_shader_size =
- ttm_round_pot(sizeof(struct vmw_shader)) +
- VMW_IDA_ACC_SIZE;
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- vmw_shader_size,
- &ctx);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for shader "
- "creation.\n");
- goto out_err;
- }
-
shader = kzalloc(sizeof(*shader), GFP_KERNEL);
if (unlikely(!shader)) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_shader_size);
ret = -ENOMEM;
goto out_err;
}
@@ -849,8 +776,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
int ret;
if (buffer_handle != SVGA3D_INVALID_ID) {
- ret = vmw_user_bo_lookup(tfile, buffer_handle,
- &buffer, NULL);
+ ret = vmw_user_bo_lookup(file_priv, buffer_handle, &buffer);
if (unlikely(ret != 0)) {
VMW_DEBUG_USER("Couldn't find buffer for shader creation.\n");
return ret;
@@ -966,13 +892,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
if (!vmw_shader_id_ok(user_key, shader_type))
return -EINVAL;
- /* Allocate and pin a DMA buffer */
- buf = kzalloc(sizeof(*buf), GFP_KERNEL);
- if (unlikely(!buf))
- return -ENOMEM;
-
- ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_placement,
- true, true, vmw_bo_bo_free);
+ ret = vmw_bo_create(dev_priv, size, &vmw_sys_placement,
+ true, true, vmw_bo_bo_free, &buf);
if (unlikely(ret != 0))
goto out;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
index 33b69a70cfe3..483ad544ea54 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
@@ -32,12 +32,10 @@
* struct vmw_user_simple_resource - User-space simple resource struct
*
* @base: The TTM base object implementing user-space visibility.
- * @account_size: How much memory was accounted for this object.
* @simple: The embedded struct vmw_simple_resource.
*/
struct vmw_user_simple_resource {
struct ttm_base_object base;
- size_t account_size;
struct vmw_simple_resource simple;
/*
* Nothing to be placed after @simple, since size of @simple is
@@ -91,18 +89,15 @@ static int vmw_simple_resource_init(struct vmw_private *dev_priv,
*
* @res: The struct vmw_resource member of the simple resource object.
*
- * Frees memory and memory accounting for the object.
+ * Frees memory for the object.
*/
static void vmw_simple_resource_free(struct vmw_resource *res)
{
struct vmw_user_simple_resource *usimple =
container_of(res, struct vmw_user_simple_resource,
simple.res);
- struct vmw_private *dev_priv = res->dev_priv;
- size_t size = usimple->account_size;
ttm_base_object_kfree(usimple, base);
- ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
/**
@@ -149,39 +144,19 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
struct vmw_resource *res;
struct vmw_resource *tmp;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct ttm_operation_ctx ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
size_t alloc_size;
- size_t account_size;
int ret;
alloc_size = offsetof(struct vmw_user_simple_resource, simple) +
func->size;
- account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE +
- TTM_OBJ_EXTRA_SIZE;
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), account_size,
- &ctx);
- if (ret) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for %s"
- " creation.\n", func->res_func.type_name);
-
- goto out_ret;
- }
usimple = kzalloc(alloc_size, GFP_KERNEL);
if (!usimple) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- account_size);
ret = -ENOMEM;
goto out_ret;
}
usimple->simple.func = func;
- usimple->account_size = account_size;
res = &usimple->simple.res;
usimple->base.shareable = false;
usimple->base.tfile = NULL;
@@ -197,7 +172,7 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
tmp = vmw_resource_reference(res);
ret = ttm_base_object_init(tfile, &usimple->base, false,
func->ttm_res_type,
- &vmw_simple_resource_base_release, NULL);
+ &vmw_simple_resource_base_release);
if (ret) {
vmw_resource_unreference(&tmp);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
index 9efb4463ce99..4ea32b01efc0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -279,18 +279,15 @@ static bool vmw_view_id_ok(u32 user_key, enum vmw_view_type view_type)
*
* @res: Pointer to a struct vmw_resource
*
- * Frees memory and memory accounting held by a struct vmw_view.
+ * Frees memory held by the struct vmw_view.
*/
static void vmw_view_res_free(struct vmw_resource *res)
{
struct vmw_view *view = vmw_view(res);
- size_t size = offsetof(struct vmw_view, cmd) + view->cmd_size;
- struct vmw_private *dev_priv = res->dev_priv;
vmw_resource_unreference(&view->cotable);
vmw_resource_unreference(&view->srf);
kfree_rcu(view, rcu);
- ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
/**
@@ -327,10 +324,6 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
struct vmw_private *dev_priv = ctx->dev_priv;
struct vmw_resource *res;
struct vmw_view *view;
- struct ttm_operation_ctx ttm_opt_ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
size_t size;
int ret;
@@ -347,16 +340,8 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
size = offsetof(struct vmw_view, cmd) + cmd_size;
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ttm_opt_ctx);
- if (ret) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for view creation\n");
- return ret;
- }
-
view = kmalloc(size, GFP_KERNEL);
if (!view) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
return -ENOMEM;
}
@@ -582,4 +567,8 @@ static void vmw_so_build_asserts(void)
offsetof(SVGA3dCmdDXDefineRenderTargetView, sid));
BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
offsetof(SVGA3dCmdDXDefineDepthStencilView, sid));
+ BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
+ offsetof(SVGA3dCmdDXDefineUAView, sid));
+ BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
+ offsetof(SVGA3dCmdDXDefineDepthStencilView_v2, sid));
}
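
The added BUILD_BUG_ON()s extend the existing compile-time guarantee that every view-define command keeps its sid at the same offset, which is what allows a single struct vmw_view_define to overlay all of them. The pattern in isolation, with stand-in types instead of the SVGA command structs:

#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct generic_view_define {		/* stand-in for struct vmw_view_define */
	u32 cid;
	u32 sid;
};

struct specific_view_define {		/* stand-in for an SVGA3dCmdDXDefine*View */
	u32 cid;
	u32 sid;
	u32 format;
};

static void view_layout_asserts(void)
{
	/* Compilation fails if the overlay assumption is ever broken. */
	BUILD_BUG_ON(offsetof(struct generic_view_define, sid) !=
		     offsetof(struct specific_view_define, sid));
}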
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
index f48b84bfeeac..01c701e7466e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
@@ -93,7 +93,10 @@ static inline enum vmw_view_type vmw_view_cmd_to_type(u32 id)
id == SVGA_3D_CMD_DX_DESTROY_UA_VIEW)
return vmw_view_ua;
- if (tmp > (u32)vmw_view_max)
+ if (id == SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2)
+ return vmw_view_ds;
+
+ if (tmp > (u32)vmw_view_ds)
return vmw_view_max;
return (enum vmw_view_type) tmp;
@@ -123,6 +126,7 @@ static inline enum vmw_so_type vmw_so_cmd_to_type(u32 id)
case SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE:
return vmw_so_ds;
case SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE:
+ case SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2:
case SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE:
return vmw_so_rs;
case SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index d85310b2608d..ae9a6044d448 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1123,7 +1123,7 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
}
if (!vps->surf) {
- ret = vmw_gb_surface_define(dev_priv, 0, &metadata,
+ ret = vmw_gb_surface_define(dev_priv, &metadata,
&vps->surf);
if (ret != 0) {
DRM_ERROR("Couldn't allocate STDU surface.\n");
@@ -1872,8 +1872,8 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
int i, ret;
- /* Do nothing if Screen Target support is turned off */
- if (!VMWGFX_ENABLE_SCREEN_TARGET_OTABLE || !dev_priv->has_mob)
+ /* Do nothing if there's no support for MOBs */
+ if (!dev_priv->has_mob)
return -ENOSYS;
if (!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
index c8efa4a6c995..2de97419d5c9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
@@ -60,8 +60,6 @@ static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback,
static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res,
enum vmw_cmdbuf_res_state state);
-static size_t vmw_streamoutput_size;
-
static const struct vmw_res_func vmw_dx_streamoutput_func = {
.res_type = vmw_res_streamoutput,
.needs_backup = true,
@@ -254,12 +252,10 @@ vmw_dx_streamoutput_lookup(struct vmw_cmdbuf_res_manager *man,
static void vmw_dx_streamoutput_res_free(struct vmw_resource *res)
{
- struct vmw_private *dev_priv = res->dev_priv;
struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
vmw_resource_unreference(&so->cotable);
kfree(so);
- ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_streamoutput_size);
}
static void vmw_dx_streamoutput_hw_destroy(struct vmw_resource *res)
@@ -284,27 +280,10 @@ int vmw_dx_streamoutput_add(struct vmw_cmdbuf_res_manager *man,
struct vmw_dx_streamoutput *so;
struct vmw_resource *res;
struct vmw_private *dev_priv = ctx->dev_priv;
- struct ttm_operation_ctx ttm_opt_ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
int ret;
- if (!vmw_streamoutput_size)
- vmw_streamoutput_size = ttm_round_pot(sizeof(*so));
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- vmw_streamoutput_size, &ttm_opt_ctx);
- if (ret) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for streamout.\n");
- return ret;
- }
-
so = kmalloc(sizeof(*so), GFP_KERNEL);
if (!so) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_streamoutput_size);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 5d53a5f9d123..00e8e27e4884 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -45,16 +45,12 @@
* @prime: The TTM prime object.
* @base: The TTM base object handling user-space visibility.
* @srf: The surface metadata.
- * @size: TTM accounting size for the surface.
* @master: Master of the creating client. Used for security check.
- * @backup_base: The TTM base object of the backup buffer.
*/
struct vmw_user_surface {
struct ttm_prime_object prime;
struct vmw_surface srf;
- uint32_t size;
struct drm_master *master;
- struct ttm_base_object *backup_base;
};
/**
@@ -74,13 +70,11 @@ struct vmw_surface_offset {
/**
* struct vmw_surface_dirty - Surface dirty-tracker
* @cache: Cached layout information of the surface.
- * @size: Accounting size for the struct vmw_surface_dirty.
* @num_subres: Number of subresources.
* @boxes: Array of SVGA3dBoxes indicating dirty regions. One per subresource.
*/
struct vmw_surface_dirty {
struct vmw_surface_cache cache;
- size_t size;
u32 num_subres;
SVGA3dBox boxes[];
};
@@ -129,9 +123,6 @@ static const struct vmw_user_resource_conv user_surface_conv = {
const struct vmw_user_resource_conv *user_surface_converter =
&user_surface_conv;
-
-static uint64_t vmw_user_surface_size;
-
static const struct vmw_res_func vmw_legacy_surface_func = {
.res_type = vmw_res_surface,
.needs_backup = false,
@@ -359,7 +350,7 @@ static void vmw_surface_dma_encode(struct vmw_surface *srf,
* vmw_surface.
*
 * Destroys the device surface associated with a struct vmw_surface if
- * any, and adjusts accounting and resource count accordingly.
+ * any, and adjusts resource count accordingly.
*/
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
@@ -666,8 +657,6 @@ static void vmw_user_surface_free(struct vmw_resource *res)
struct vmw_surface *srf = vmw_res_to_srf(res);
struct vmw_user_surface *user_srf =
container_of(srf, struct vmw_user_surface, srf);
- struct vmw_private *dev_priv = srf->res.dev_priv;
- uint32_t size = user_srf->size;
WARN_ON_ONCE(res->dirty);
if (user_srf->master)
@@ -676,7 +665,6 @@ static void vmw_user_surface_free(struct vmw_resource *res)
kfree(srf->metadata.sizes);
kfree(srf->snooper.image);
ttm_prime_object_kfree(user_srf, prime);
- ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
/**
@@ -696,8 +684,6 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
struct vmw_resource *res = &user_srf->srf.res;
*p_base = NULL;
- if (user_srf->backup_base)
- ttm_base_object_unref(&user_srf->backup_base);
vmw_resource_unreference(&res);
}
@@ -715,7 +701,7 @@ int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
+ return ttm_ref_object_base_unref(tfile, arg->sid);
}
/**
@@ -740,23 +726,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_surface_create_req *req = &arg->req;
struct drm_vmw_surface_arg *rep = &arg->rep;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct ttm_operation_ctx ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
int ret;
int i, j;
uint32_t cur_bo_offset;
struct drm_vmw_size *cur_size;
struct vmw_surface_offset *cur_offset;
uint32_t num_sizes;
- uint32_t size;
const SVGA3dSurfaceDesc *desc;
- if (unlikely(vmw_user_surface_size == 0))
- vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
- VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
-
num_sizes = 0;
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
@@ -768,10 +745,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
num_sizes == 0)
return -EINVAL;
- size = vmw_user_surface_size +
- ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
- ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
-
desc = vmw_surface_get_desc(req->format);
if (unlikely(desc->blockDesc == SVGA3DBLOCKDESC_NONE)) {
VMW_DEBUG_USER("Invalid format %d for surface creation.\n",
@@ -779,18 +752,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- size, &ctx);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for surface.\n");
- goto out_unlock;
- }
-
user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
if (unlikely(!user_srf)) {
ret = -ENOMEM;
- goto out_no_user_srf;
+ goto out_unlock;
}
srf = &user_srf->srf;
@@ -805,7 +770,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
memcpy(metadata->mip_levels, req->mip_levels,
sizeof(metadata->mip_levels));
metadata->num_sizes = num_sizes;
- user_srf->size = size;
metadata->sizes =
memdup_user((struct drm_vmw_size __user *)(unsigned long)
req->size_addr,
@@ -883,22 +847,22 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
if (dev_priv->has_mob && req->shareable) {
uint32_t backup_handle;
- ret = vmw_user_bo_alloc(dev_priv, tfile,
- res->backup_size,
- true,
- &backup_handle,
- &res->backup,
- &user_srf->backup_base);
+ ret = vmw_gem_object_create_with_handle(dev_priv,
+ file_priv,
+ res->backup_size,
+ &backup_handle,
+ &res->backup);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
goto out_unlock;
}
+ vmw_bo_reference(res->backup);
}
tmp = vmw_resource_reference(&srf->res);
ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
req->shareable, VMW_RES_SURFACE,
- &vmw_user_surface_base_release, NULL);
+ &vmw_user_surface_base_release);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
@@ -916,8 +880,6 @@ out_no_offsets:
kfree(metadata->sizes);
out_no_sizes:
ttm_prime_object_kfree(user_srf, prime);
-out_no_user_srf:
- ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
return ret;
}
@@ -955,7 +917,6 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
VMW_DEBUG_USER("Referenced object is not a surface.\n");
goto out_bad_resource;
}
-
if (handle_type != DRM_VMW_HANDLE_PRIME) {
bool require_exist = false;
@@ -980,8 +941,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
if (unlikely(drm_is_render_client(file_priv)))
require_exist = true;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
- require_exist);
+ ret = ttm_ref_object_add(tfile, base, NULL, require_exist);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a surface.\n");
goto out_bad_resource;
@@ -995,7 +955,7 @@ out_bad_resource:
ttm_base_object_unref(&base);
out_no_lookup:
if (handle_type == DRM_VMW_HANDLE_PRIME)
- (void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+ (void) ttm_ref_object_base_unref(tfile, handle);
return ret;
}
@@ -1045,7 +1005,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0)) {
VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes,
srf->metadata.num_sizes);
- ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE);
+ ttm_ref_object_base_unref(tfile, base->handle);
ret = -EFAULT;
}
@@ -1459,7 +1419,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
struct vmw_resource *res;
struct vmw_resource *tmp;
int ret = 0;
- uint32_t size;
uint32_t backup_handle = 0;
SVGA3dSurfaceAllFlags svga3d_flags_64 =
SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
@@ -1506,12 +1465,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
return -EINVAL;
}
- if (unlikely(vmw_user_surface_size == 0))
- vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
- VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
-
- size = vmw_user_surface_size;
-
metadata.flags = svga3d_flags_64;
metadata.format = req->base.format;
metadata.mip_levels[0] = req->base.mip_levels;
@@ -1526,7 +1479,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
drm_vmw_surface_flag_scanout;
/* Define a surface based on the parameters. */
- ret = vmw_gb_surface_define(dev_priv, size, &metadata, &srf);
+ ret = vmw_gb_surface_define(dev_priv, &metadata, &srf);
if (ret != 0) {
VMW_DEBUG_USER("Failed to define surface.\n");
return ret;
@@ -1539,9 +1492,8 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
res = &user_srf->srf.res;
if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
- ret = vmw_user_bo_lookup(tfile, req->base.buffer_handle,
- &res->backup,
- &user_srf->backup_base);
+ ret = vmw_user_bo_lookup(file_priv, req->base.buffer_handle,
+ &res->backup);
if (ret == 0) {
if (res->backup->base.base.size < res->backup_size) {
VMW_DEBUG_USER("Surface backup buffer too small.\n");
@@ -1554,14 +1506,15 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
}
} else if (req->base.drm_surface_flags &
(drm_vmw_surface_flag_create_buffer |
- drm_vmw_surface_flag_coherent))
- ret = vmw_user_bo_alloc(dev_priv, tfile,
- res->backup_size,
- req->base.drm_surface_flags &
- drm_vmw_surface_flag_shareable,
- &backup_handle,
- &res->backup,
- &user_srf->backup_base);
+ drm_vmw_surface_flag_coherent)) {
+ ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
+ res->backup_size,
+ &backup_handle,
+ &res->backup);
+ if (ret == 0)
+ vmw_bo_reference(res->backup);
+
+ }
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
@@ -1593,7 +1546,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
req->base.drm_surface_flags &
drm_vmw_surface_flag_shareable,
VMW_RES_SURFACE,
- &vmw_user_surface_base_release, NULL);
+ &vmw_user_surface_base_release);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
@@ -1613,7 +1566,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
rep->buffer_size = 0;
rep->buffer_handle = SVGA3D_INVALID_ID;
}
-
vmw_resource_unreference(&res);
out_unlock:
@@ -1636,12 +1588,11 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_surface *srf;
struct vmw_user_surface *user_srf;
struct vmw_surface_metadata *metadata;
struct ttm_base_object *base;
- uint32_t backup_handle;
+ u32 backup_handle;
int ret;
ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
@@ -1658,14 +1609,12 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
metadata = &srf->metadata;
mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
- ret = vmw_user_bo_reference(tfile, srf->res.backup, &backup_handle);
+ ret = drm_gem_handle_create(file_priv, &srf->res.backup->base.base,
+ &backup_handle);
mutex_unlock(&dev_priv->cmdbuf_mutex);
-
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could not add a reference to a GB surface "
- "backup buffer.\n");
- (void) ttm_ref_object_base_unref(tfile, base->handle,
- TTM_REF_USAGE);
+ if (ret != 0) {
+ drm_err(dev, "Wasn't able to create a backing handle for surface sid = %u.\n",
+ req->sid);
goto out_bad_resource;
}
@@ -1955,11 +1904,7 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)
u32 num_mip;
u32 num_subres;
u32 num_samples;
- size_t dirty_size, acc_size;
- static struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
+ size_t dirty_size;
int ret;
if (metadata->array_size)
@@ -1973,14 +1918,6 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)
num_subres = num_layers * num_mip;
dirty_size = struct_size(dirty, boxes, num_subres);
- acc_size = ttm_round_pot(dirty_size);
- ret = ttm_mem_global_alloc(vmw_mem_glob(res->dev_priv),
- acc_size, &ctx);
- if (ret) {
- VMW_DEBUG_USER("Out of graphics memory for surface "
- "dirty tracker.\n");
- return ret;
- }
dirty = kvzalloc(dirty_size, GFP_KERNEL);
if (!dirty) {
@@ -1990,13 +1927,12 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)
num_samples = max_t(u32, 1, metadata->multisample_count);
ret = vmw_surface_setup_cache(&metadata->base_size, metadata->format,
- num_mip, num_layers, num_samples,
- &dirty->cache);
+ num_mip, num_layers, num_samples,
+ &dirty->cache);
if (ret)
goto out_no_cache;
dirty->num_subres = num_subres;
- dirty->size = acc_size;
res->dirty = (struct vmw_resource_dirty *) dirty;
return 0;
@@ -2004,7 +1940,6 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)
out_no_cache:
kvfree(dirty);
out_no_dirty:
- ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size);
return ret;
}
@@ -2015,10 +1950,8 @@ static void vmw_surface_dirty_free(struct vmw_resource *res)
{
struct vmw_surface_dirty *dirty =
(struct vmw_surface_dirty *) res->dirty;
- size_t acc_size = dirty->size;
kvfree(dirty);
- ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size);
res->dirty = NULL;
}
@@ -2051,8 +1984,6 @@ static int vmw_surface_clean(struct vmw_resource *res)
* vmw_gb_surface_define - Define a private GB surface
*
* @dev_priv: Pointer to a device private.
- * @user_accounting_size: Used to track user-space memory usage, set
- * to 0 for kernel mode only memory
* @metadata: Metadata representing the surface to create.
* @user_srf_out: allocated user_srf. Set to NULL on failure.
*
@@ -2062,17 +1993,12 @@ static int vmw_surface_clean(struct vmw_resource *res)
* it available to user mode drivers.
*/
int vmw_gb_surface_define(struct vmw_private *dev_priv,
- uint32_t user_accounting_size,
const struct vmw_surface_metadata *req,
struct vmw_surface **srf_out)
{
struct vmw_surface_metadata *metadata;
struct vmw_user_surface *user_srf;
struct vmw_surface *srf;
- struct ttm_operation_ctx ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
u32 sample_count = 1;
u32 num_layers = 1;
int ret;
@@ -2113,22 +2039,13 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv,
if (req->sizes != NULL)
return -EINVAL;
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- user_accounting_size, &ctx);
- if (ret != 0) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for surface.\n");
- goto out_unlock;
- }
-
user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
if (unlikely(!user_srf)) {
ret = -ENOMEM;
- goto out_no_user_srf;
+ goto out_unlock;
}
*srf_out = &user_srf->srf;
- user_srf->size = user_accounting_size;
user_srf->prime.base.shareable = false;
user_srf->prime.base.tfile = NULL;
@@ -2179,9 +2096,6 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv,
return ret;
-out_no_user_srf:
- ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);
-
out_unlock:
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c
new file mode 100644
index 000000000000..b0005b03a617
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright 2021 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include "vmwgfx_drv.h"
+
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_resource.h>
+#include <linux/slab.h>
+
+
+static int vmw_sys_man_alloc(struct ttm_resource_manager *man,
+ struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource **res)
+{
+ *res = kzalloc(sizeof(**res), GFP_KERNEL);
+ if (!*res)
+ return -ENOMEM;
+
+ ttm_resource_init(bo, place, *res);
+ return 0;
+}
+
+static void vmw_sys_man_free(struct ttm_resource_manager *man,
+ struct ttm_resource *res)
+{
+ kfree(res);
+}
+
+static const struct ttm_resource_manager_func vmw_sys_manager_func = {
+ .alloc = vmw_sys_man_alloc,
+ .free = vmw_sys_man_free,
+};
+
+int vmw_sys_man_init(struct vmw_private *dev_priv)
+{
+ struct ttm_device *bdev = &dev_priv->bdev;
+ struct ttm_resource_manager *man =
+ kzalloc(sizeof(*man), GFP_KERNEL);
+
+ if (!man)
+ return -ENOMEM;
+
+ man->use_tt = true;
+ man->func = &vmw_sys_manager_func;
+
+ ttm_resource_manager_init(man, 0);
+ ttm_set_driver_manager(bdev, VMW_PL_SYSTEM, man);
+ ttm_resource_manager_set_used(man, true);
+ return 0;
+}
+
+void vmw_sys_man_fini(struct vmw_private *dev_priv)
+{
+ struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev,
+ VMW_PL_SYSTEM);
+
+ ttm_resource_manager_evict_all(&dev_priv->bdev, man);
+
+ ttm_resource_manager_set_used(man, false);
+ ttm_resource_manager_cleanup(man);
+
+ ttm_set_driver_manager(&dev_priv->bdev, VMW_PL_SYSTEM, NULL);
+ kfree(man);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
deleted file mode 100644
index 2a3d3468e4e0..000000000000
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
+++ /dev/null
@@ -1,184 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/*
- * Huge page-table-entry support for IO memory.
- *
- * Copyright (C) 2007-2019 Vmware, Inc. All rights reservedd.
- */
-#include "vmwgfx_drv.h"
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_range_manager.h>
-
-/**
- * struct vmw_thp_manager - Range manager implementing huge page alignment
- *
- * @manager: TTM resource manager.
- * @mm: The underlying range manager. Protected by @lock.
- * @lock: Manager lock.
- */
-struct vmw_thp_manager {
- struct ttm_resource_manager manager;
- struct drm_mm mm;
- spinlock_t lock;
-};
-
-static struct vmw_thp_manager *to_thp_manager(struct ttm_resource_manager *man)
-{
- return container_of(man, struct vmw_thp_manager, manager);
-}
-
-static const struct ttm_resource_manager_func vmw_thp_func;
-
-static int vmw_thp_insert_aligned(struct ttm_buffer_object *bo,
- struct drm_mm *mm, struct drm_mm_node *node,
- unsigned long align_pages,
- const struct ttm_place *place,
- struct ttm_resource *mem,
- unsigned long lpfn,
- enum drm_mm_insert_mode mode)
-{
- if (align_pages >= bo->page_alignment &&
- (!bo->page_alignment || align_pages % bo->page_alignment == 0)) {
- return drm_mm_insert_node_in_range(mm, node,
- mem->num_pages,
- align_pages, 0,
- place->fpfn, lpfn, mode);
- }
-
- return -ENOSPC;
-}
-
-static int vmw_thp_get_node(struct ttm_resource_manager *man,
- struct ttm_buffer_object *bo,
- const struct ttm_place *place,
- struct ttm_resource **res)
-{
- struct vmw_thp_manager *rman = to_thp_manager(man);
- struct drm_mm *mm = &rman->mm;
- struct ttm_range_mgr_node *node;
- unsigned long align_pages;
- unsigned long lpfn;
- enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
- int ret;
-
- node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
- if (!node)
- return -ENOMEM;
-
- ttm_resource_init(bo, place, &node->base);
-
- lpfn = place->lpfn;
- if (!lpfn)
- lpfn = man->size;
-
- mode = DRM_MM_INSERT_BEST;
- if (place->flags & TTM_PL_FLAG_TOPDOWN)
- mode = DRM_MM_INSERT_HIGH;
-
- spin_lock(&rman->lock);
- if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
- align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
- if (node->base.num_pages >= align_pages) {
- ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
- align_pages, place,
- &node->base, lpfn, mode);
- if (!ret)
- goto found_unlock;
- }
- }
-
- align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
- if (node->base.num_pages >= align_pages) {
- ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
- align_pages, place, &node->base,
- lpfn, mode);
- if (!ret)
- goto found_unlock;
- }
-
- ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
- node->base.num_pages,
- bo->page_alignment, 0,
- place->fpfn, lpfn, mode);
-found_unlock:
- spin_unlock(&rman->lock);
-
- if (unlikely(ret)) {
- kfree(node);
- } else {
- node->base.start = node->mm_nodes[0].start;
- *res = &node->base;
- }
-
- return ret;
-}
-
-static void vmw_thp_put_node(struct ttm_resource_manager *man,
- struct ttm_resource *res)
-{
- struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
- struct vmw_thp_manager *rman = to_thp_manager(man);
-
- spin_lock(&rman->lock);
- drm_mm_remove_node(&node->mm_nodes[0]);
- spin_unlock(&rman->lock);
-
- kfree(node);
-}
-
-int vmw_thp_init(struct vmw_private *dev_priv)
-{
- struct vmw_thp_manager *rman;
-
- rman = kzalloc(sizeof(*rman), GFP_KERNEL);
- if (!rman)
- return -ENOMEM;
-
- ttm_resource_manager_init(&rman->manager,
- dev_priv->vram_size >> PAGE_SHIFT);
-
- rman->manager.func = &vmw_thp_func;
- drm_mm_init(&rman->mm, 0, rman->manager.size);
- spin_lock_init(&rman->lock);
-
- ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, &rman->manager);
- ttm_resource_manager_set_used(&rman->manager, true);
- return 0;
-}
-
-void vmw_thp_fini(struct vmw_private *dev_priv)
-{
- struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
- struct vmw_thp_manager *rman = to_thp_manager(man);
- struct drm_mm *mm = &rman->mm;
- int ret;
-
- ttm_resource_manager_set_used(man, false);
-
- ret = ttm_resource_manager_evict_all(&dev_priv->bdev, man);
- if (ret)
- return;
- spin_lock(&rman->lock);
- drm_mm_clean(mm);
- drm_mm_takedown(mm);
- spin_unlock(&rman->lock);
- ttm_resource_manager_cleanup(man);
- ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, NULL);
- kfree(rman);
-}
-
-static void vmw_thp_debug(struct ttm_resource_manager *man,
- struct drm_printer *printer)
-{
- struct vmw_thp_manager *rman = to_thp_manager(man);
-
- spin_lock(&rman->lock);
- drm_mm_print(&rman->mm, printer);
- spin_unlock(&rman->lock);
-}
-
-static const struct ttm_resource_manager_func vmw_thp_func = {
- .alloc = vmw_thp_get_node,
- .free = vmw_thp_put_node,
- .debug = vmw_thp_debug
-};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index e899a936a42a..b84ecc6d6611 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -92,6 +92,13 @@ static const struct ttm_place gmr_vram_placement_flags[] = {
}
};
+static const struct ttm_place vmw_sys_placement_flags = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .mem_type = VMW_PL_SYSTEM,
+ .flags = 0
+};
+
struct ttm_placement vmw_vram_gmr_placement = {
.num_placement = 2,
.placement = vram_gmr_placement_flags,
@@ -113,28 +120,11 @@ struct ttm_placement vmw_sys_placement = {
.busy_placement = &sys_placement_flags
};
-static const struct ttm_place evictable_placement_flags[] = {
- {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = TTM_PL_SYSTEM,
- .flags = 0
- }, {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = TTM_PL_VRAM,
- .flags = 0
- }, {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_GMR,
- .flags = 0
- }, {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_MOB,
- .flags = 0
- }
+struct ttm_placement vmw_pt_sys_placement = {
+ .num_placement = 1,
+ .placement = &vmw_sys_placement_flags,
+ .num_busy_placement = 1,
+ .busy_placement = &vmw_sys_placement_flags
};
static const struct ttm_place nonfixed_placement_flags[] = {
@@ -156,13 +146,6 @@ static const struct ttm_place nonfixed_placement_flags[] = {
}
};
-struct ttm_placement vmw_evictable_placement = {
- .num_placement = 4,
- .placement = evictable_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &sys_placement_flags
-};
-
struct ttm_placement vmw_srf_placement = {
.num_placement = 1,
.num_busy_placement = 2,
@@ -184,19 +167,6 @@ struct ttm_placement vmw_nonfixed_placement = {
.busy_placement = &sys_placement_flags
};
-struct vmw_ttm_tt {
- struct ttm_tt dma_ttm;
- struct vmw_private *dev_priv;
- int gmr_id;
- struct vmw_mob *mob;
- int mem_type;
- struct sg_table sgt;
- struct vmw_sg_table vsgt;
- uint64_t sg_alloc_size;
- bool mapped;
- bool bound;
-};
-
const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
/**
@@ -317,17 +287,8 @@ static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
struct vmw_private *dev_priv = vmw_tt->dev_priv;
- struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
- struct ttm_operation_ctx ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
- struct vmw_piter iter;
- dma_addr_t old;
int ret = 0;
- static size_t sgl_size;
- static size_t sgt_size;
if (vmw_tt->mapped)
return 0;
@@ -336,20 +297,12 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
vsgt->pages = vmw_tt->dma_ttm.pages;
vsgt->num_pages = vmw_tt->dma_ttm.num_pages;
vsgt->addrs = vmw_tt->dma_ttm.dma_address;
- vsgt->sgt = &vmw_tt->sgt;
+ vsgt->sgt = NULL;
switch (dev_priv->map_mode) {
case vmw_dma_map_bind:
case vmw_dma_map_populate:
- if (unlikely(!sgl_size)) {
- sgl_size = ttm_round_pot(sizeof(struct scatterlist));
- sgt_size = ttm_round_pot(sizeof(struct sg_table));
- }
- vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
- ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
- if (unlikely(ret != 0))
- return ret;
-
+ vsgt->sgt = &vmw_tt->sgt;
ret = sg_alloc_table_from_pages_segment(
&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
(unsigned long)vsgt->num_pages << PAGE_SHIFT,
@@ -357,15 +310,6 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
if (ret)
goto out_sg_alloc_fail;
- if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
- uint64_t over_alloc =
- sgl_size * (vsgt->num_pages -
- vmw_tt->sgt.orig_nents);
-
- ttm_mem_global_free(glob, over_alloc);
- vmw_tt->sg_alloc_size -= over_alloc;
- }
-
ret = vmw_ttm_map_for_dma(vmw_tt);
if (unlikely(ret != 0))
goto out_map_fail;
@@ -375,16 +319,6 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
break;
}
- old = ~((dma_addr_t) 0);
- vmw_tt->vsgt.num_regions = 0;
- for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
- dma_addr_t cur = vmw_piter_dma_addr(&iter);
-
- if (cur != old + PAGE_SIZE)
- vmw_tt->vsgt.num_regions++;
- old = cur;
- }
-
vmw_tt->mapped = true;
return 0;
@@ -392,7 +326,6 @@ out_map_fail:
sg_free_table(vmw_tt->vsgt.sgt);
vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
- ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
return ret;
}
@@ -418,8 +351,6 @@ static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
vmw_ttm_unmap_from_dma(vmw_tt);
sg_free_table(vmw_tt->vsgt.sgt);
vmw_tt->vsgt.sgt = NULL;
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_tt->sg_alloc_size);
break;
default:
break;
@@ -484,6 +415,9 @@ static int vmw_ttm_bind(struct ttm_device *bdev,
&vmw_be->vsgt, ttm->num_pages,
vmw_be->gmr_id);
break;
+ case VMW_PL_SYSTEM:
+ /* Nothing to be done for a system bind */
+ break;
default:
BUG();
}
@@ -507,6 +441,8 @@ static void vmw_ttm_unbind(struct ttm_device *bdev,
case VMW_PL_MOB:
vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
break;
+ case VMW_PL_SYSTEM:
+ break;
default:
BUG();
}
@@ -534,7 +470,6 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
static int vmw_ttm_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
- unsigned int i;
int ret;
/* TODO: maybe completely drop this ? */
@@ -542,22 +477,7 @@ static int vmw_ttm_populate(struct ttm_device *bdev,
return 0;
ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
- if (ret)
- return ret;
-
- for (i = 0; i < ttm->num_pages; ++i) {
- ret = ttm_mem_global_alloc_page(&ttm_mem_glob, ttm->pages[i],
- PAGE_SIZE, ctx);
- if (ret)
- goto error;
- }
- return 0;
-error:
- while (i--)
- ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
- PAGE_SIZE);
- ttm_pool_free(&bdev->pool, ttm);
return ret;
}
@@ -566,7 +486,6 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
{
struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
dma_ttm);
- unsigned int i;
vmw_ttm_unbind(bdev, ttm);
@@ -577,10 +496,6 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
vmw_ttm_unmap_dma(vmw_tt);
- for (i = 0; i < ttm->num_pages; ++i)
- ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
- PAGE_SIZE);
-
ttm_pool_free(&bdev->pool, ttm);
}
@@ -624,6 +539,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
+ case VMW_PL_SYSTEM:
case VMW_PL_GMR:
case VMW_PL_MOB:
return 0;
@@ -670,6 +586,11 @@ static void vmw_swap_notify(struct ttm_buffer_object *bo)
(void) ttm_bo_wait(bo, false, false);
}
+static bool vmw_memtype_is_system(uint32_t mem_type)
+{
+ return mem_type == TTM_PL_SYSTEM || mem_type == VMW_PL_SYSTEM;
+}
+
static int vmw_move(struct ttm_buffer_object *bo,
bool evict,
struct ttm_operation_ctx *ctx,
@@ -680,7 +601,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
int ret;
- if (new_man->use_tt && new_mem->mem_type != TTM_PL_SYSTEM) {
+ if (new_man->use_tt && !vmw_memtype_is_system(new_mem->mem_type)) {
ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem);
if (ret)
return ret;
@@ -689,7 +610,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
vmw_move_notify(bo, bo->resource, new_mem);
if (old_man->use_tt && new_man->use_tt) {
- if (bo->resource->mem_type == TTM_PL_SYSTEM) {
+ if (vmw_memtype_is_system(bo->resource->mem_type)) {
ttm_bo_move_null(bo, new_mem);
return 0;
}
@@ -736,7 +657,7 @@ int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
int ret;
ret = vmw_bo_create_kernel(dev_priv, bo_size,
- &vmw_sys_placement,
+ &vmw_pt_sys_placement,
&bo);
if (unlikely(ret != 0))
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 0a4c340252ec..265f7c48d856 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -27,30 +27,44 @@
#include "vmwgfx_drv.h"
-static struct ttm_buffer_object *vmw_bo_vm_lookup(struct ttm_device *bdev,
- unsigned long offset,
- unsigned long pages)
+static int vmw_bo_vm_lookup(struct ttm_device *bdev,
+ struct drm_file *filp,
+ unsigned long offset,
+ unsigned long pages,
+ struct ttm_buffer_object **p_bo)
{
struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
struct drm_device *drm = &dev_priv->drm;
struct drm_vma_offset_node *node;
- struct ttm_buffer_object *bo = NULL;
+ int ret;
+
+ *p_bo = NULL;
drm_vma_offset_lock_lookup(bdev->vma_manager);
node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
if (likely(node)) {
- bo = container_of(node, struct ttm_buffer_object,
+ *p_bo = container_of(node, struct ttm_buffer_object,
base.vma_node);
- bo = ttm_bo_get_unless_zero(bo);
+ *p_bo = ttm_bo_get_unless_zero(*p_bo);
}
drm_vma_offset_unlock_lookup(bdev->vma_manager);
- if (!bo)
+ if (!*p_bo) {
drm_err(drm, "Could not find buffer object to map\n");
+ return -EINVAL;
+ }
+
+ if (!drm_vma_node_is_allowed(node, filp)) {
+ ret = -EACCES;
+ goto out_no_access;
+ }
- return bo;
+ return 0;
+out_no_access:
+ ttm_bo_put(*p_bo);
+ return ret;
}
int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -64,7 +78,6 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
};
struct drm_file *file_priv = filp->private_data;
struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct ttm_device *bdev = &dev_priv->bdev;
struct ttm_buffer_object *bo;
int ret;
@@ -72,13 +85,9 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
return -EINVAL;
- bo = vmw_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
- if (unlikely(!bo))
- return -EINVAL;
-
- ret = vmw_user_bo_verify_access(bo, tfile);
+ ret = vmw_bo_vm_lookup(bdev, file_priv, vma->vm_pgoff, vma_pages(vma), &bo);
if (unlikely(ret != 0))
- goto out_unref;
+ return ret;
ret = ttm_bo_mmap_obj(vma, bo);
if (unlikely(ret != 0))
@@ -99,38 +108,3 @@ out_unref:
return ret;
}
-/* struct vmw_validation_mem callback */
-static int vmw_vmt_reserve(struct vmw_validation_mem *m, size_t size)
-{
- static struct ttm_operation_ctx ctx = {.interruptible = false,
- .no_wait_gpu = false};
- struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
-
- return ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ctx);
-}
-
-/* struct vmw_validation_mem callback */
-static void vmw_vmt_unreserve(struct vmw_validation_mem *m, size_t size)
-{
- struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
-
- return ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
-}
-
-/**
- * vmw_validation_mem_init_ttm - Interface the validation memory tracker
- * to ttm.
- * @dev_priv: Pointer to struct vmw_private. The reason we choose a vmw private
- * rather than a struct vmw_validation_mem is to make sure assumption in the
- * callbacks that struct vmw_private derives from struct vmw_validation_mem
- * holds true.
- * @gran: The recommended allocation granularity
- */
-void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran)
-{
- struct vmw_validation_mem *vvm = &dev_priv->vvm;
-
- vvm->reserve_mem = vmw_vmt_reserve;
- vvm->unreserve_mem = vmw_vmt_unreserve;
- vvm->gran = gran;
-}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_va.c b/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
index ebc1d83c34b4..6ad744ae07f5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
@@ -117,7 +117,7 @@ int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- arg->stream_id, TTM_REF_USAGE);
+ arg->stream_id);
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index b09094b50c5d..f46891012be3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -29,6 +29,9 @@
#include "vmwgfx_validation.h"
#include "vmwgfx_drv.h"
+
+#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
+
/**
* struct vmw_validation_bo_node - Buffer object validation metadata.
* @base: Metadata used for TTM reservation- and validation.
@@ -43,7 +46,7 @@
*/
struct vmw_validation_bo_node {
struct ttm_validate_buffer base;
- struct drm_hash_item hash;
+ struct vmwgfx_hash_item hash;
unsigned int coherent_count;
u32 as_mob : 1;
u32 cpu_blit : 1;
@@ -72,7 +75,7 @@ struct vmw_validation_bo_node {
*/
struct vmw_validation_res_node {
struct list_head head;
- struct drm_hash_item hash;
+ struct vmwgfx_hash_item hash;
struct vmw_resource *res;
struct vmw_buffer_object *new_backup;
unsigned long new_backup_offset;
@@ -113,13 +116,8 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
struct page *page;
if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
- int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);
-
- if (ret)
- return NULL;
-
- ctx->vm_size_left += ctx->vm->gran;
- ctx->total_mem += ctx->vm->gran;
+ ctx->vm_size_left += VMWGFX_VALIDATION_MEM_GRAN;
+ ctx->total_mem += VMWGFX_VALIDATION_MEM_GRAN;
}
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
@@ -159,7 +157,6 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
ctx->mem_size_left = 0;
if (ctx->vm && ctx->total_mem) {
- ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
ctx->total_mem = 0;
ctx->vm_size_left = 0;
}
@@ -184,9 +181,9 @@ vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
return NULL;
if (ctx->ht) {
- struct drm_hash_item *hash;
+ struct vmwgfx_hash_item *hash;
- if (!drm_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
+ if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
bo_node = container_of(hash, typeof(*bo_node), hash);
} else {
struct vmw_validation_bo_node *entry;
@@ -221,9 +218,9 @@ vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
return NULL;
if (ctx->ht) {
- struct drm_hash_item *hash;
+ struct vmwgfx_hash_item *hash;
- if (!drm_ht_find_item(ctx->ht, (unsigned long) res, &hash))
+ if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) res, &hash))
res_node = container_of(hash, typeof(*res_node), hash);
} else {
struct vmw_validation_res_node *entry;
@@ -280,7 +277,7 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
if (ctx->ht) {
bo_node->hash.key = (unsigned long) vbo;
- ret = drm_ht_insert_item(ctx->ht, &bo_node->hash);
+ ret = vmwgfx_ht_insert_item(ctx->ht, &bo_node->hash);
if (ret) {
DRM_ERROR("Failed to initialize a buffer "
"validation entry.\n");
@@ -335,7 +332,7 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx,
if (ctx->ht) {
node->hash.key = (unsigned long) res;
- ret = drm_ht_insert_item(ctx->ht, &node->hash);
+ ret = vmwgfx_ht_insert_item(ctx->ht, &node->hash);
if (ret) {
DRM_ERROR("Failed to initialize a resource validation "
"entry.\n");
@@ -688,13 +685,13 @@ void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
return;
list_for_each_entry(entry, &ctx->bo_list, base.head)
- (void) drm_ht_remove_item(ctx->ht, &entry->hash);
+ (void) vmwgfx_ht_remove_item(ctx->ht, &entry->hash);
list_for_each_entry(val, &ctx->resource_list, head)
- (void) drm_ht_remove_item(ctx->ht, &val->hash);
+ (void) vmwgfx_ht_remove_item(ctx->ht, &val->hash);
list_for_each_entry(val, &ctx->resource_ctx_list, head)
- (void) drm_ht_remove_item(ctx->ht, &val->hash);
+ (void) vmwgfx_ht_remove_item(ctx->ht, &val->hash);
ctx->ht = NULL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index 739906d1b3eb..f21df053882b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -31,29 +31,15 @@
#include <linux/list.h>
#include <linux/ww_mutex.h>
-#include <drm/drm_hashtab.h>
#include <drm/ttm/ttm_execbuf_util.h>
+#include "vmwgfx_hashtab.h"
+
#define VMW_RES_DIRTY_NONE 0
#define VMW_RES_DIRTY_SET BIT(0)
#define VMW_RES_DIRTY_CLEAR BIT(1)
/**
- * struct vmw_validation_mem - Custom interface to provide memory reservations
- * for the validation code.
- * @reserve_mem: Callback to reserve memory
- * @unreserve_mem: Callback to unreserve memory
- * @gran: Reservation granularity. Contains a hint how much memory should
- * be reserved in each call to @reserve_mem(). A slow implementation may want
- * reservation to be done in large batches.
- */
-struct vmw_validation_mem {
- int (*reserve_mem)(struct vmw_validation_mem *m, size_t size);
- void (*unreserve_mem)(struct vmw_validation_mem *m, size_t size);
- size_t gran;
-};
-
-/**
* struct vmw_validation_context - Per command submission validation context
* @ht: Hash table used to find resource- or buffer object duplicates
* @resource_list: List head for resource validation metadata
@@ -73,7 +59,7 @@ struct vmw_validation_mem {
* @total_mem: Amount of reserved memory.
*/
struct vmw_validation_context {
- struct drm_open_hash *ht;
+ struct vmwgfx_open_hash *ht;
struct list_head resource_list;
struct list_head resource_ctx_list;
struct list_head bo_list;
@@ -129,21 +115,6 @@ vmw_validation_has_bos(struct vmw_validation_context *ctx)
}
/**
- * vmw_validation_set_val_mem - Register a validation mem object for
- * validation memory reservation
- * @ctx: The validation context
- * @vm: Pointer to a struct vmw_validation_mem
- *
- * Must be set before the first attempt to allocate validation memory.
- */
-static inline void
-vmw_validation_set_val_mem(struct vmw_validation_context *ctx,
- struct vmw_validation_mem *vm)
-{
- ctx->vm = vm;
-}
-
-/**
* vmw_validation_set_ht - Register a hash table for duplicate finding
* @ctx: The validation context
* @ht: Pointer to a hash table to use for duplicate finding
@@ -151,7 +122,7 @@ vmw_validation_set_val_mem(struct vmw_validation_context *ctx,
* available at validation context declaration time
*/
static inline void vmw_validation_set_ht(struct vmw_validation_context *ctx,
- struct drm_open_hash *ht)
+ struct vmwgfx_open_hash *ht)
{
ctx->ht = ht;
}
@@ -190,22 +161,6 @@ vmw_validation_bo_fence(struct vmw_validation_context *ctx,
}
/**
- * vmw_validation_context_init - Initialize a validation context
- * @ctx: Pointer to the validation context to initialize
- *
- * This function initializes a validation context with @merge_dups set
- * to false
- */
-static inline void
-vmw_validation_context_init(struct vmw_validation_context *ctx)
-{
- memset(ctx, 0, sizeof(*ctx));
- INIT_LIST_HEAD(&ctx->resource_list);
- INIT_LIST_HEAD(&ctx->resource_ctx_list);
- INIT_LIST_HEAD(&ctx->bo_list);
-}
-
-/**
* vmw_validation_align - Align a validation memory allocation
* @val: The size to be aligned
*
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 434064c820e8..e63088c2121d 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -761,6 +761,7 @@ static struct xenbus_driver xen_driver = {
.probe = xen_drv_probe,
.remove = xen_drv_remove,
.otherend_changed = displback_changed,
+ .not_essential = true,
};
static int __init xen_drv_init(void)
diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig
index c3d08269faa9..d8d38d86d5c6 100644
--- a/drivers/gpu/drm/xlnx/Kconfig
+++ b/drivers/gpu/drm/xlnx/Kconfig
@@ -7,7 +7,6 @@ config DRM_ZYNQMP_DPSUB
depends on XILINX_ZYNQMP_DPDMA
select DMA_ENGINE
select DRM_GEM_CMA_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_KMS_HELPER
select GENERIC_PHY
help
diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig
index 6dab94adf25e..6815b4db17c1 100644
--- a/drivers/gpu/host1x/Kconfig
+++ b/drivers/gpu/host1x/Kconfig
@@ -2,6 +2,7 @@
config TEGRA_HOST1X
tristate "NVIDIA Tegra host1x driver"
depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
+ select DMA_SHARED_BUFFER
select IOMMU_IOVA
help
Driver for the NVIDIA Tegra host1x hardware.
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 218e3718fd68..bdee16a0bb8e 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -5,6 +5,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/of.h>
#include <linux/seq_file.h>
@@ -742,6 +743,7 @@ EXPORT_SYMBOL(host1x_driver_unregister);
*/
void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key)
{
+ host1x_bo_cache_init(&client->cache);
INIT_LIST_HEAD(&client->list);
__mutex_init(&client->lock, "host1x client lock", key);
client->usecount = 0;
@@ -761,7 +763,6 @@ EXPORT_SYMBOL(host1x_client_exit);
/**
* __host1x_client_register() - register a host1x client
* @client: host1x client
- * @key: lock class key for the client-specific mutex
*
* Registers a host1x client with each host1x controller instance. Note that
* each client will only match their parent host1x controller and will only be
@@ -830,6 +831,8 @@ int host1x_client_unregister(struct host1x_client *client)
mutex_unlock(&clients_lock);
+ host1x_bo_cache_destroy(&client->cache);
+
return 0;
}
EXPORT_SYMBOL(host1x_client_unregister);
@@ -904,3 +907,78 @@ unlock:
return err;
}
EXPORT_SYMBOL(host1x_client_resume);
+
+struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
+ enum dma_data_direction dir,
+ struct host1x_bo_cache *cache)
+{
+ struct host1x_bo_mapping *mapping;
+
+ if (cache) {
+ mutex_lock(&cache->lock);
+
+ list_for_each_entry(mapping, &cache->mappings, entry) {
+ if (mapping->bo == bo && mapping->direction == dir) {
+ kref_get(&mapping->ref);
+ goto unlock;
+ }
+ }
+ }
+
+ mapping = bo->ops->pin(dev, bo, dir);
+ if (IS_ERR(mapping))
+ goto unlock;
+
+ spin_lock(&mapping->bo->lock);
+ list_add_tail(&mapping->list, &bo->mappings);
+ spin_unlock(&mapping->bo->lock);
+
+ if (cache) {
+ INIT_LIST_HEAD(&mapping->entry);
+ mapping->cache = cache;
+
+ list_add_tail(&mapping->entry, &cache->mappings);
+
+ /* bump reference count to track the copy in the cache */
+ kref_get(&mapping->ref);
+ }
+
+unlock:
+ if (cache)
+ mutex_unlock(&cache->lock);
+
+ return mapping;
+}
+EXPORT_SYMBOL(host1x_bo_pin);
+
+static void __host1x_bo_unpin(struct kref *ref)
+{
+ struct host1x_bo_mapping *mapping = to_host1x_bo_mapping(ref);
+
+ /*
+ * When the last reference of the mapping goes away, make sure to remove the mapping from
+ * the cache.
+ */
+ if (mapping->cache)
+ list_del(&mapping->entry);
+
+ spin_lock(&mapping->bo->lock);
+ list_del(&mapping->list);
+ spin_unlock(&mapping->bo->lock);
+
+ mapping->bo->ops->unpin(mapping);
+}
+
+void host1x_bo_unpin(struct host1x_bo_mapping *mapping)
+{
+ struct host1x_bo_cache *cache = mapping->cache;
+
+ if (cache)
+ mutex_lock(&cache->lock);
+
+ kref_put(&mapping->ref, __host1x_bo_unpin);
+
+ if (cache)
+ mutex_unlock(&cache->lock);
+}
+EXPORT_SYMBOL(host1x_bo_unpin);
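
A minimal usage sketch of the reworked pin interface introduced above, assuming a client that programs a single DMA address into its engine. The hypothetical my_client_map_bo() helper is illustrative only; the host1x_bo_pin()/host1x_bo_unpin() signatures, the per-client mapping cache and the map->phys/map->chunks fields all come from the hunks in this series.

	#include <linux/err.h>
	#include <linux/dma-direction.h>
	#include <linux/host1x.h>

	/* Sketch: pin @bo through the client's mapping cache and hand its DMA
	 * address back to the caller. Error handling is trimmed for brevity. */
	static int my_client_map_bo(struct host1x_client *client,
				    struct host1x_bo *bo, dma_addr_t *dma)
	{
		struct host1x_bo_mapping *map;

		map = host1x_bo_pin(client->dev, bo, DMA_TO_DEVICE, &client->cache);
		if (IS_ERR(map))
			return PTR_ERR(map);

		/* As in the job.c hunk further down, assume a single contiguous chunk. */
		if (map->chunks > 1) {
			host1x_bo_unpin(map);
			return -EINVAL;
		}

		*dma = map->phys;	/* caller drops the mapping later via host1x_bo_unpin(map) */
		return 0;
	}
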
diff --git a/drivers/gpu/host1x/channel.c b/drivers/gpu/host1x/channel.c
index 4cd212bb570d..2a9a3a8d5931 100644
--- a/drivers/gpu/host1x/channel.c
+++ b/drivers/gpu/host1x/channel.c
@@ -75,6 +75,14 @@ struct host1x_channel *host1x_channel_get_index(struct host1x *host,
return ch;
}
+void host1x_channel_stop(struct host1x_channel *channel)
+{
+ struct host1x *host = dev_get_drvdata(channel->dev->parent);
+
+ host1x_hw_cdma_stop(host, &channel->cdma);
+}
+EXPORT_SYMBOL(host1x_channel_stop);
+
static void release_channel(struct kref *kref)
{
struct host1x_channel *channel =
diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c
index 8a14880c61bb..18d9c8d206e3 100644
--- a/drivers/gpu/host1x/debug.c
+++ b/drivers/gpu/host1x/debug.c
@@ -7,6 +7,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
@@ -52,6 +53,11 @@ static int show_channel(struct host1x_channel *ch, void *data, bool show_fifo)
{
struct host1x *m = dev_get_drvdata(ch->dev->parent);
struct output *o = data;
+ int err;
+
+ err = pm_runtime_resume_and_get(m->dev);
+ if (err < 0)
+ return err;
mutex_lock(&ch->cdma.lock);
mutex_lock(&debug_lock);
@@ -64,6 +70,8 @@ static int show_channel(struct host1x_channel *ch, void *data, bool show_fifo)
mutex_unlock(&debug_lock);
mutex_unlock(&ch->cdma.lock);
+ pm_runtime_put(m->dev);
+
return 0;
}
@@ -71,9 +79,14 @@ static void show_syncpts(struct host1x *m, struct output *o)
{
struct list_head *pos;
unsigned int i;
+ int err;
host1x_debug_output(o, "---- syncpts ----\n");
+ err = pm_runtime_resume_and_get(m->dev);
+ if (err < 0)
+ return;
+
for (i = 0; i < host1x_syncpt_nb_pts(m); i++) {
u32 max = host1x_syncpt_read_max(m->syncpt + i);
u32 min = host1x_syncpt_load(m->syncpt + i);
@@ -101,6 +114,8 @@ static void show_syncpts(struct host1x *m, struct output *o)
base_val);
}
+ pm_runtime_put(m->dev);
+
host1x_debug_output(o, "\n");
}
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index fbb6447b8659..6994f8c0e02e 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -6,18 +6,26 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <soc/tegra/common.h>
+
#define CREATE_TRACE_POINTS
#include <trace/events/host1x.h>
#undef CREATE_TRACE_POINTS
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+#endif
+
#include "bus.h"
#include "channel.h"
#include "debug.h"
@@ -132,6 +140,12 @@ static const struct host1x_sid_entry tegra186_sid_table[] = {
.offset = 0x30,
.limit = 0x34
},
+ {
+ /* NVDEC */
+ .base = 0x1b00,
+ .offset = 0x30,
+ .limit = 0x34
+ },
};
static const struct host1x_info host1x06_info = {
@@ -156,6 +170,18 @@ static const struct host1x_sid_entry tegra194_sid_table[] = {
.offset = 0x30,
.limit = 0x34
},
+ {
+ /* NVDEC */
+ .base = 0x1b00,
+ .offset = 0x30,
+ .limit = 0x34
+ },
+ {
+ /* NVDEC1 */
+ .base = 0x1bc0,
+ .offset = 0x30,
+ .limit = 0x34
+ },
};
static const struct host1x_info host1x07_info = {
@@ -190,6 +216,9 @@ static void host1x_setup_sid_table(struct host1x *host)
const struct host1x_info *info = host->info;
unsigned int i;
+ if (!info->has_hypervisor)
+ return;
+
for (i = 0; i < info->num_sid_entries; i++) {
const struct host1x_sid_entry *entry = &info->sid_table[i];
@@ -238,6 +267,17 @@ static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
int err;
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+ if (host->dev->archdata.mapping) {
+ struct dma_iommu_mapping *mapping =
+ to_dma_iommu_mapping(host->dev);
+ arm_iommu_detach_device(host->dev);
+ arm_iommu_release_mapping(mapping);
+
+ domain = iommu_get_domain_for_dev(host->dev);
+ }
+#endif
+
/*
* We may not always want to enable IOMMU support (for example if the
* host1x firewall is already enabled and we don't support addressing
@@ -347,6 +387,27 @@ static void host1x_iommu_exit(struct host1x *host)
}
}
+static int host1x_get_resets(struct host1x *host)
+{
+ int err;
+
+ host->resets[0].id = "mc";
+ host->resets[1].id = "host1x";
+ host->nresets = ARRAY_SIZE(host->resets);
+
+ err = devm_reset_control_bulk_get_optional_exclusive_released(
+ host->dev, host->nresets, host->resets);
+ if (err) {
+ dev_err(host->dev, "failed to get reset: %d\n", err);
+ return err;
+ }
+
+ if (WARN_ON(!host->resets[1].rstc))
+ return -ENOENT;
+
+ return 0;
+}
+
static int host1x_probe(struct platform_device *pdev)
{
struct host1x *host;
@@ -386,6 +447,7 @@ static int host1x_probe(struct platform_device *pdev)
if (syncpt_irq < 0)
return syncpt_irq;
+ host1x_bo_cache_init(&host->cache);
mutex_init(&host->devices_lock);
INIT_LIST_HEAD(&host->devices);
INIT_LIST_HEAD(&host->list);
@@ -423,12 +485,9 @@ static int host1x_probe(struct platform_device *pdev)
return err;
}
- host->rst = devm_reset_control_get(&pdev->dev, "host1x");
- if (IS_ERR(host->rst)) {
- err = PTR_ERR(host->rst);
- dev_err(&pdev->dev, "failed to get reset: %d\n", err);
+ err = host1x_get_resets(host);
+ if (err)
return err;
- }
err = host1x_iommu_init(host);
if (err < 0) {
@@ -443,22 +502,10 @@ static int host1x_probe(struct platform_device *pdev)
goto iommu_exit;
}
- err = clk_prepare_enable(host->clk);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to enable clock\n");
- goto free_channels;
- }
-
- err = reset_control_deassert(host->rst);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to deassert reset: %d\n", err);
- goto unprepare_disable;
- }
-
err = host1x_syncpt_init(host);
if (err) {
dev_err(&pdev->dev, "failed to initialize syncpts\n");
- goto reset_assert;
+ goto free_channels;
}
err = host1x_intr_init(host, syncpt_irq);
@@ -467,10 +514,18 @@ static int host1x_probe(struct platform_device *pdev)
goto deinit_syncpt;
}
- host1x_debug_init(host);
+ pm_runtime_enable(&pdev->dev);
+
+ err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+ if (err)
+ goto pm_disable;
- if (host->info->has_hypervisor)
- host1x_setup_sid_table(host);
+ /* the driver's code isn't ready yet for the dynamic RPM */
+ err = pm_runtime_resume_and_get(&pdev->dev);
+ if (err)
+ goto pm_disable;
+
+ host1x_debug_init(host);
err = host1x_register(host);
if (err < 0)
@@ -486,13 +541,14 @@ unregister:
host1x_unregister(host);
deinit_debugfs:
host1x_debug_deinit(host);
+
+ pm_runtime_put_sync_suspend(&pdev->dev);
+pm_disable:
+ pm_runtime_disable(&pdev->dev);
+
host1x_intr_deinit(host);
deinit_syncpt:
host1x_syncpt_deinit(host);
-reset_assert:
- reset_control_assert(host->rst);
-unprepare_disable:
- clk_disable_unprepare(host->clk);
free_channels:
host1x_channel_list_free(&host->channel_list);
iommu_exit:
@@ -507,19 +563,94 @@ static int host1x_remove(struct platform_device *pdev)
host1x_unregister(host);
host1x_debug_deinit(host);
+
+ pm_runtime_force_suspend(&pdev->dev);
+
host1x_intr_deinit(host);
host1x_syncpt_deinit(host);
- reset_control_assert(host->rst);
- clk_disable_unprepare(host->clk);
host1x_iommu_exit(host);
+ host1x_bo_cache_destroy(&host->cache);
return 0;
}
+static int __maybe_unused host1x_runtime_suspend(struct device *dev)
+{
+ struct host1x *host = dev_get_drvdata(dev);
+ int err;
+
+ host1x_intr_stop(host);
+ host1x_syncpt_save(host);
+
+ err = reset_control_bulk_assert(host->nresets, host->resets);
+ if (err) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ goto resume_host1x;
+ }
+
+ usleep_range(1000, 2000);
+
+ clk_disable_unprepare(host->clk);
+ reset_control_bulk_release(host->nresets, host->resets);
+
+ return 0;
+
+resume_host1x:
+ host1x_setup_sid_table(host);
+ host1x_syncpt_restore(host);
+ host1x_intr_start(host);
+
+ return err;
+}
+
+static int __maybe_unused host1x_runtime_resume(struct device *dev)
+{
+ struct host1x *host = dev_get_drvdata(dev);
+ int err;
+
+ err = reset_control_bulk_acquire(host->nresets, host->resets);
+ if (err) {
+ dev_err(dev, "failed to acquire reset: %d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(host->clk);
+ if (err) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ goto release_reset;
+ }
+
+ err = reset_control_bulk_deassert(host->nresets, host->resets);
+ if (err < 0) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ goto disable_clk;
+ }
+
+ host1x_setup_sid_table(host);
+ host1x_syncpt_restore(host);
+ host1x_intr_start(host);
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(host->clk);
+release_reset:
+ reset_control_bulk_release(host->nresets, host->resets);
+
+ return err;
+}
+
+static const struct dev_pm_ops host1x_pm_ops = {
+ SET_RUNTIME_PM_OPS(host1x_runtime_suspend, host1x_runtime_resume,
+ NULL)
+ /* TODO: add system suspend-resume once driver will be ready for that */
+};
+
static struct platform_driver tegra_host1x_driver = {
.driver = {
.name = "tegra-host1x",
.of_match_table = host1x_of_match,
+ .pm = &host1x_pm_ops,
},
.probe = host1x_probe,
.remove = host1x_remove,
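
With clock and reset handling moved into the runtime PM callbacks above, any path that touches host1x registers needs to hold a runtime PM reference. A minimal sketch of that pattern, mirroring the debug.c hunk earlier in this diff (my_show_state() is a hypothetical caller; the comment marks where register access would go):

	#include <linux/pm_runtime.h>

	/* Sketch: bracket register access with a runtime PM reference so the
	 * clock is enabled and the resets are deasserted while the hardware
	 * is poked. */
	static int my_show_state(struct host1x *host)
	{
		int err;

		err = pm_runtime_resume_and_get(host->dev);
		if (err < 0)
			return err;

		/* ... read host1x registers here ... */

		pm_runtime_put(host->dev);
		return 0;
	}
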
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index fa6d4bc46e98..ca4b082f0cd4 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -118,7 +118,8 @@ struct host1x {
struct host1x_syncpt_base *bases;
struct device *dev;
struct clk *clk;
- struct reset_control *rst;
+ struct reset_control_bulk_data resets[2];
+ unsigned int nresets;
struct iommu_group *group;
struct iommu_domain *domain;
@@ -149,6 +150,8 @@ struct host1x {
struct list_head list;
struct device_dma_parameters dma_parms;
+
+ struct host1x_bo_cache cache;
};
void host1x_hypervisor_writel(struct host1x *host1x, u32 r, u32 v);
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index 1999780a7203..6b40e9af1e88 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -159,6 +159,27 @@ static void host1x_channel_set_streamid(struct host1x_channel *channel)
#endif
}
+static void host1x_enable_gather_filter(struct host1x_channel *ch)
+{
+#if HOST1X_HW >= 6
+ struct host1x *host = dev_get_drvdata(ch->dev->parent);
+ u32 val;
+
+ if (!host->hv_regs)
+ return;
+
+ val = host1x_hypervisor_readl(
+ host, HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(ch->id / 32));
+ val |= BIT(ch->id % 32);
+ host1x_hypervisor_writel(
+ host, val, HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(ch->id / 32));
+#elif HOST1X_HW >= 4
+ host1x_ch_writel(ch,
+ HOST1X_CHANNEL_CHANNELCTRL_KERNEL_FILTER_GBUFFER(1),
+ HOST1X_CHANNEL_CHANNELCTRL);
+#endif
+}
+
static int channel_submit(struct host1x_job *job)
{
struct host1x_channel *ch = job->channel;
@@ -190,6 +211,7 @@ static int channel_submit(struct host1x_job *job)
}
host1x_channel_set_streamid(ch);
+ host1x_enable_gather_filter(ch);
/* begin a CDMA submit */
err = host1x_cdma_begin(&ch->cdma, job);
@@ -249,27 +271,6 @@ error:
return err;
}
-static void enable_gather_filter(struct host1x *host,
- struct host1x_channel *ch)
-{
-#if HOST1X_HW >= 6
- u32 val;
-
- if (!host->hv_regs)
- return;
-
- val = host1x_hypervisor_readl(
- host, HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(ch->id / 32));
- val |= BIT(ch->id % 32);
- host1x_hypervisor_writel(
- host, val, HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(ch->id / 32));
-#elif HOST1X_HW >= 4
- host1x_ch_writel(ch,
- HOST1X_CHANNEL_CHANNELCTRL_KERNEL_FILTER_GBUFFER(1),
- HOST1X_CHANNEL_CHANNELCTRL);
-#endif
-}
-
static int host1x_channel_init(struct host1x_channel *ch, struct host1x *dev,
unsigned int index)
{
@@ -278,7 +279,6 @@ static int host1x_channel_init(struct host1x_channel *ch, struct host1x *dev,
#else
ch->regs = dev->regs + index * 0x100;
#endif
- enable_gather_filter(dev, ch);
return 0;
}
diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
index 45b6be927ec4..965ba21818b1 100644
--- a/drivers/gpu/host1x/intr.c
+++ b/drivers/gpu/host1x/intr.c
@@ -297,14 +297,11 @@ int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
"host1x_sp_%02u", id);
}
- host1x_intr_start(host);
-
return 0;
}
void host1x_intr_deinit(struct host1x *host)
{
- host1x_intr_stop(host);
}
void host1x_intr_start(struct host1x *host)
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index 0eef6df7c89e..5e8c183167b7 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -134,20 +134,20 @@ EXPORT_SYMBOL(host1x_job_add_wait);
static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
{
+ unsigned long mask = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE;
struct host1x_client *client = job->client;
struct device *dev = client->dev;
struct host1x_job_gather *g;
- struct iommu_domain *domain;
- struct sg_table *sgt;
unsigned int i;
int err;
- domain = iommu_get_domain_for_dev(dev);
job->num_unpins = 0;
for (i = 0; i < job->num_relocs; i++) {
struct host1x_reloc *reloc = &job->relocs[i];
- dma_addr_t phys_addr, *phys;
+ enum dma_data_direction direction;
+ struct host1x_bo_mapping *map;
+ struct host1x_bo *bo;
reloc->target.bo = host1x_bo_get(reloc->target.bo);
if (!reloc->target.bo) {
@@ -155,64 +155,44 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
goto unpin;
}
- /*
- * If the client device is not attached to an IOMMU, the
- * physical address of the buffer object can be used.
- *
- * Similarly, when an IOMMU domain is shared between all
- * host1x clients, the IOVA is already available, so no
- * need to map the buffer object again.
- *
- * XXX Note that this isn't always safe to do because it
- * relies on an assumption that no cache maintenance is
- * needed on the buffer objects.
- */
- if (!domain || client->group)
- phys = &phys_addr;
- else
- phys = NULL;
-
- sgt = host1x_bo_pin(dev, reloc->target.bo, phys);
- if (IS_ERR(sgt)) {
- err = PTR_ERR(sgt);
- goto unpin;
- }
+ bo = reloc->target.bo;
- if (sgt) {
- unsigned long mask = HOST1X_RELOC_READ |
- HOST1X_RELOC_WRITE;
- enum dma_data_direction dir;
-
- switch (reloc->flags & mask) {
- case HOST1X_RELOC_READ:
- dir = DMA_TO_DEVICE;
- break;
+ switch (reloc->flags & mask) {
+ case HOST1X_RELOC_READ:
+ direction = DMA_TO_DEVICE;
+ break;
- case HOST1X_RELOC_WRITE:
- dir = DMA_FROM_DEVICE;
- break;
+ case HOST1X_RELOC_WRITE:
+ direction = DMA_FROM_DEVICE;
+ break;
- case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
- dir = DMA_BIDIRECTIONAL;
- break;
+ case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
+ direction = DMA_BIDIRECTIONAL;
+ break;
- default:
- err = -EINVAL;
- goto unpin;
- }
+ default:
+ err = -EINVAL;
+ goto unpin;
+ }
- err = dma_map_sgtable(dev, sgt, dir, 0);
- if (err)
- goto unpin;
+ map = host1x_bo_pin(dev, bo, direction, &client->cache);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto unpin;
+ }
- job->unpins[job->num_unpins].dev = dev;
- job->unpins[job->num_unpins].dir = dir;
- phys_addr = sg_dma_address(sgt->sgl);
+ /*
+ * host1x clients are generally not able to do scatter-gather themselves, so fail
+ * if the buffer is discontiguous and we fail to map its SG table to a single
+ * contiguous chunk of I/O virtual memory.
+ */
+ if (map->chunks > 1) {
+ err = -EINVAL;
+ goto unpin;
}
- job->addr_phys[job->num_unpins] = phys_addr;
- job->unpins[job->num_unpins].bo = reloc->target.bo;
- job->unpins[job->num_unpins].sgt = sgt;
+ job->addr_phys[job->num_unpins] = map->phys;
+ job->unpins[job->num_unpins].map = map;
job->num_unpins++;
}
@@ -224,12 +204,11 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
return 0;
for (i = 0; i < job->num_cmds; i++) {
+ struct host1x_bo_mapping *map;
size_t gather_size = 0;
struct scatterlist *sg;
- dma_addr_t phys_addr;
unsigned long shift;
struct iova *alloc;
- dma_addr_t *phys;
unsigned int j;
if (job->cmds[i].is_wait)
@@ -243,25 +222,16 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
goto unpin;
}
- /**
- * If the host1x is not attached to an IOMMU, there is no need
- * to map the buffer object for the host1x, since the physical
- * address can simply be used.
- */
- if (!iommu_get_domain_for_dev(host->dev))
- phys = &phys_addr;
- else
- phys = NULL;
-
- sgt = host1x_bo_pin(host->dev, g->bo, phys);
- if (IS_ERR(sgt)) {
- err = PTR_ERR(sgt);
- goto put;
+ map = host1x_bo_pin(host->dev, g->bo, DMA_TO_DEVICE, &host->cache);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto unpin;
}
if (host->domain) {
- for_each_sgtable_sg(sgt, sg, j)
+ for_each_sgtable_sg(map->sgt, sg, j)
gather_size += sg->length;
+
gather_size = iova_align(&host->iova, gather_size);
shift = iova_shift(&host->iova);
@@ -272,33 +242,23 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
goto put;
}
- err = iommu_map_sgtable(host->domain,
- iova_dma_addr(&host->iova, alloc),
- sgt, IOMMU_READ);
+ err = iommu_map_sgtable(host->domain, iova_dma_addr(&host->iova, alloc),
+ map->sgt, IOMMU_READ);
if (err == 0) {
__free_iova(&host->iova, alloc);
err = -EINVAL;
goto put;
}
- job->unpins[job->num_unpins].size = gather_size;
- phys_addr = iova_dma_addr(&host->iova, alloc);
- } else if (sgt) {
- err = dma_map_sgtable(host->dev, sgt, DMA_TO_DEVICE, 0);
- if (err)
- goto put;
-
- job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
- job->unpins[job->num_unpins].dev = host->dev;
- phys_addr = sg_dma_address(sgt->sgl);
+ map->phys = iova_dma_addr(&host->iova, alloc);
+ map->size = gather_size;
}
- job->addr_phys[job->num_unpins] = phys_addr;
- job->gather_addr_phys[i] = phys_addr;
-
- job->unpins[job->num_unpins].bo = g->bo;
- job->unpins[job->num_unpins].sgt = sgt;
+ job->addr_phys[job->num_unpins] = map->phys;
+ job->unpins[job->num_unpins].map = map;
job->num_unpins++;
+
+ job->gather_addr_phys[i] = map->phys;
}
return 0;
@@ -690,22 +650,16 @@ void host1x_job_unpin(struct host1x_job *job)
unsigned int i;
for (i = 0; i < job->num_unpins; i++) {
- struct host1x_job_unpin_data *unpin = &job->unpins[i];
- struct device *dev = unpin->dev ?: host->dev;
- struct sg_table *sgt = unpin->sgt;
-
- if (!job->enable_firewall && unpin->size && host->domain) {
- iommu_unmap(host->domain, job->addr_phys[i],
- unpin->size);
- free_iova(&host->iova,
- iova_pfn(&host->iova, job->addr_phys[i]));
- }
+ struct host1x_bo_mapping *map = job->unpins[i].map;
+ struct host1x_bo *bo = map->bo;
- if (unpin->dev && sgt)
- dma_unmap_sgtable(unpin->dev, sgt, unpin->dir, 0);
+ if (!job->enable_firewall && map->size && host->domain) {
+ iommu_unmap(host->domain, job->addr_phys[i], map->size);
+ free_iova(&host->iova, iova_pfn(&host->iova, job->addr_phys[i]));
+ }
- host1x_bo_unpin(dev, unpin->bo, sgt);
- host1x_bo_put(unpin->bo);
+ host1x_bo_unpin(map);
+ host1x_bo_put(bo);
}
job->num_unpins = 0;
diff --git a/drivers/gpu/host1x/job.h b/drivers/gpu/host1x/job.h
index b4428c5495c9..dad5a1946693 100644
--- a/drivers/gpu/host1x/job.h
+++ b/drivers/gpu/host1x/job.h
@@ -35,11 +35,7 @@ struct host1x_job_cmd {
};
struct host1x_job_unpin_data {
- struct host1x_bo *bo;
- struct sg_table *sgt;
- struct device *dev;
- size_t size;
- enum dma_data_direction dir;
+ struct host1x_bo_mapping *map;
};
/*
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index d198a10848c6..e08e331e46ae 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -143,6 +143,8 @@ void host1x_syncpt_restore(struct host1x *host)
for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
host1x_hw_syncpt_restore_wait_base(host, sp_base + i);
+ host1x_hw_syncpt_enable_protection(host);
+
wmb();
}
@@ -366,9 +368,6 @@ int host1x_syncpt_init(struct host1x *host)
host->syncpt = syncpt;
host->bases = bases;
- host1x_syncpt_restore(host);
- host1x_hw_syncpt_enable_protection(host);
-
/* Allocate sync point to use for clearing waits for expired fences */
host->nop_sp = host1x_syncpt_alloc(host, 0, "reserved-nop");
if (!host->nop_sp)
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 9f5435b55949..a7c78ac96270 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -207,14 +207,14 @@ config HID_CHERRY
config HID_CHICONY
tristate "Chicony devices"
- depends on HID
+ depends on USB_HID
default !EXPERT
help
Support for Chicony Tactical pad and special keys on Chicony keyboards.
config HID_CORSAIR
tristate "Corsair devices"
- depends on HID && USB && LEDS_CLASS
+ depends on USB_HID && LEDS_CLASS
help
Support for Corsair devices that are not fully compliant with the
HID standard.
@@ -245,7 +245,7 @@ config HID_MACALLY
config HID_PRODIKEYS
tristate "Prodikeys PC-MIDI Keyboard support"
- depends on HID && SND
+ depends on USB_HID && SND
select SND_RAWMIDI
help
Support for Prodikeys PC-MIDI Keyboard device support.
@@ -560,7 +560,7 @@ config HID_LENOVO
config HID_LOGITECH
tristate "Logitech devices"
- depends on HID
+ depends on USB_HID
depends on LEDS_CLASS
default !EXPERT
help
@@ -951,7 +951,7 @@ config HID_SAITEK
config HID_SAMSUNG
tristate "Samsung InfraRed remote control or keyboards"
- depends on HID
+ depends on USB_HID
help
Support for Samsung InfraRed remote control or keyboards.
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 5d57214d8dee..08c9a9a60ae4 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -854,7 +854,7 @@ static int asus_input_mapping(struct hid_device *hdev,
switch (usage->hid & HID_USAGE) {
case 0x10: asus_map_key_clear(KEY_BRIGHTNESSDOWN); break;
case 0x20: asus_map_key_clear(KEY_BRIGHTNESSUP); break;
- case 0x35: asus_map_key_clear(KEY_SCREENLOCK); break;
+ case 0x35: asus_map_key_clear(KEY_DISPLAY_OFF); break;
case 0x6c: asus_map_key_clear(KEY_SLEEP); break;
case 0x7c: asus_map_key_clear(KEY_MICMUTE); break;
case 0x82: asus_map_key_clear(KEY_CAMERA); break;
@@ -1028,8 +1028,7 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (drvdata->quirks & QUIRK_IS_MULTITOUCH)
drvdata->tp = &asus_i2c_tp;
- if ((drvdata->quirks & QUIRK_T100_KEYBOARD) &&
- hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+ if ((drvdata->quirks & QUIRK_T100_KEYBOARD) && hid_is_usb(hdev)) {
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
if (intf->altsetting->desc.bInterfaceNumber == T100_TPAD_INTF) {
@@ -1057,8 +1056,7 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
drvdata->tp = &asus_t100chi_tp;
}
- if ((drvdata->quirks & QUIRK_MEDION_E1239T) &&
- hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+ if ((drvdata->quirks & QUIRK_MEDION_E1239T) && hid_is_usb(hdev)) {
struct usb_host_interface *alt =
to_usb_interface(hdev->dev.parent)->altsetting;
diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c
index db6da21ade06..74ad8bf98bfd 100644
--- a/drivers/hid/hid-bigbenff.c
+++ b/drivers/hid/hid-bigbenff.c
@@ -191,7 +191,7 @@ static void bigben_worker(struct work_struct *work)
struct bigben_device, worker);
struct hid_field *report_field = bigben->report->field[0];
- if (bigben->removed)
+ if (bigben->removed || !report_field)
return;
if (bigben->work_led) {
diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
index ca556d39da2a..f04d2aa23efe 100644
--- a/drivers/hid/hid-chicony.c
+++ b/drivers/hid/hid-chicony.c
@@ -114,6 +114,9 @@ static int ch_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
ret = hid_parse(hdev);
if (ret) {
diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c
index 902a60e249ed..8c895c820b67 100644
--- a/drivers/hid/hid-corsair.c
+++ b/drivers/hid/hid-corsair.c
@@ -553,7 +553,12 @@ static int corsair_probe(struct hid_device *dev, const struct hid_device_id *id)
int ret;
unsigned long quirks = id->driver_data;
struct corsair_drvdata *drvdata;
- struct usb_interface *usbif = to_usb_interface(dev->dev.parent);
+ struct usb_interface *usbif;
+
+ if (!hid_is_usb(dev))
+ return -EINVAL;
+
+ usbif = to_usb_interface(dev->dev.parent);
drvdata = devm_kzalloc(&dev->dev, sizeof(struct corsair_drvdata),
GFP_KERNEL);
diff --git a/drivers/hid/hid-elan.c b/drivers/hid/hid-elan.c
index 021049805bb7..3091355d48df 100644
--- a/drivers/hid/hid-elan.c
+++ b/drivers/hid/hid-elan.c
@@ -50,7 +50,7 @@ struct elan_drvdata {
static int is_not_elan_touchpad(struct hid_device *hdev)
{
- if (hdev->bus == BUS_USB) {
+ if (hid_is_usb(hdev)) {
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
return (intf->altsetting->desc.bInterfaceNumber !=
diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
index 383dfda8c12f..8e960d7b233b 100644
--- a/drivers/hid/hid-elo.c
+++ b/drivers/hid/hid-elo.c
@@ -230,6 +230,9 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
int ret;
struct usb_device *udev;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
diff --git a/drivers/hid/hid-ft260.c b/drivers/hid/hid-ft260.c
index 4ef1c3b8094e..79505c64dbfe 100644
--- a/drivers/hid/hid-ft260.c
+++ b/drivers/hid/hid-ft260.c
@@ -915,6 +915,9 @@ static int ft260_probe(struct hid_device *hdev, const struct hid_device_id *id)
struct ft260_get_chip_version_report version;
int ret;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
@@ -966,24 +969,23 @@ static int ft260_probe(struct hid_device *hdev, const struct hid_device_id *id)
mutex_init(&dev->lock);
init_completion(&dev->wait);
+ ret = ft260_xfer_status(dev);
+ if (ret)
+ ft260_i2c_reset(hdev);
+
+ i2c_set_adapdata(&dev->adap, dev);
ret = i2c_add_adapter(&dev->adap);
if (ret) {
hid_err(hdev, "failed to add i2c adapter\n");
goto err_hid_close;
}
- i2c_set_adapdata(&dev->adap, dev);
-
ret = sysfs_create_group(&hdev->dev.kobj, &ft260_attr_group);
if (ret < 0) {
hid_err(hdev, "failed to create sysfs attrs\n");
goto err_i2c_free;
}
- ret = ft260_xfer_status(dev);
- if (ret)
- ft260_i2c_reset(hdev);
-
return 0;
err_i2c_free:
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index 8123b871a3eb..0403beb3104b 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -586,6 +586,8 @@ static const struct hid_device_id hammer_devices[] = {
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_EEL) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) },
diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c
index 0a38e8e9bc78..403506b9697e 100644
--- a/drivers/hid/hid-holtek-kbd.c
+++ b/drivers/hid/hid-holtek-kbd.c
@@ -140,12 +140,17 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
static int holtek_kbd_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
- struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
- int ret = hid_parse(hdev);
+ struct usb_interface *intf;
+ int ret;
+
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+ ret = hid_parse(hdev);
if (!ret)
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ intf = to_usb_interface(hdev->dev.parent);
if (!ret && intf->cur_altsetting->desc.bInterfaceNumber == 1) {
struct hid_input *hidinput;
list_for_each_entry(hidinput, &hdev->inputs, list) {
diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c
index 195b735b001d..7c907939bfae 100644
--- a/drivers/hid/hid-holtek-mouse.c
+++ b/drivers/hid/hid-holtek-mouse.c
@@ -62,6 +62,29 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
return rdesc;
}
+static int holtek_mouse_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int ret;
+
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "hid parse failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ hid_err(hdev, "hw start failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct hid_device_id holtek_mouse_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
@@ -83,6 +106,7 @@ static struct hid_driver holtek_mouse_driver = {
.name = "holtek_mouse",
.id_table = holtek_mouse_devices,
.report_fixup = holtek_mouse_report_fixup,
+ .probe = holtek_mouse_probe,
};
module_hid_driver(holtek_mouse_driver);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 95037a3e2e6e..19da07777d62 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -397,7 +397,9 @@
#define USB_DEVICE_ID_TOSHIBA_CLICK_L9W 0x0401
#define USB_DEVICE_ID_HP_X2 0x074d
#define USB_DEVICE_ID_HP_X2_10_COVER 0x0755
+#define I2C_DEVICE_ID_HP_ENVY_X360_15 0x2d05
#define I2C_DEVICE_ID_HP_SPECTRE_X360_15 0x2817
+#define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544
#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
#define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A
@@ -500,6 +502,7 @@
#define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
#define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044
#define USB_DEVICE_ID_GOOGLE_DON 0x5050
+#define USB_DEVICE_ID_GOOGLE_EEL 0x5057
#define USB_VENDOR_ID_GOTOP 0x08f2
#define USB_DEVICE_ID_SUPER_Q2 0x007f
@@ -885,6 +888,7 @@
#define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7
#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9
#define USB_DEVICE_ID_MS_POWER_COVER 0x07da
+#define USB_DEVICE_ID_MS_SURFACE3_COVER 0x07de
#define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd
#define USB_DEVICE_ID_MS_PIXART_MOUSE 0x00cb
#define USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS 0x02e0
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 2c72ce4147b1..03f994541981 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -160,6 +160,7 @@ static int hidinput_setkeycode(struct input_dev *dev,
if (usage) {
*old_keycode = usage->type == EV_KEY ?
usage->code : KEY_RESERVED;
+ usage->type = EV_KEY;
usage->code = ke->keycode;
clear_bit(*old_keycode, dev->keybit);
@@ -324,6 +325,10 @@ static const struct hid_device_id hid_battery_quirks[] = {
HID_BATTERY_QUIRK_IGNORE },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN),
+ HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15),
+ HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15),
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN),
@@ -650,10 +655,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
code += KEY_MACRO1;
else
code += BTN_TRIGGER_HAPPY - 0x1e;
- } else {
- goto ignore;
+ break;
}
- break;
+ fallthrough;
default:
switch (field->physical) {
case HID_GD_MOUSE:
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index d40af911df63..fb3f7258009c 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -749,12 +749,18 @@ static int lg_raw_event(struct hid_device *hdev, struct hid_report *report,
static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
- struct usb_interface *iface = to_usb_interface(hdev->dev.parent);
- __u8 iface_num = iface->cur_altsetting->desc.bInterfaceNumber;
+ struct usb_interface *iface;
+ __u8 iface_num;
unsigned int connect_mask = HID_CONNECT_DEFAULT;
struct lg_drv_data *drv_data;
int ret;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
+ iface = to_usb_interface(hdev->dev.parent);
+ iface_num = iface->cur_altsetting->desc.bInterfaceNumber;
+
/* G29 only work with the 1st interface */
if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) &&
(iface_num != 0)) {
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index a0017b010c34..7106b921b53c 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -1777,7 +1777,7 @@ static int logi_dj_probe(struct hid_device *hdev,
case recvr_type_bluetooth: no_dj_interfaces = 2; break;
case recvr_type_dinovo: no_dj_interfaces = 2; break;
}
- if (hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+ if (hid_is_usb(hdev)) {
intf = to_usb_interface(hdev->dev.parent);
if (intf && intf->altsetting->desc.bInterfaceNumber >=
no_dj_interfaces) {
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 686788ebf3e1..d7687ce70614 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -256,8 +256,11 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
unsigned long now = jiffies;
int step_x = msc->touches[id].scroll_x - x;
int step_y = msc->touches[id].scroll_y - y;
- int step_hr = ((64 - (int)scroll_speed) * msc->scroll_accel) /
- SCROLL_HR_STEPS;
+ int step_hr =
+ max_t(int,
+ ((64 - (int)scroll_speed) * msc->scroll_accel) /
+ SCROLL_HR_STEPS,
+ 1);
int step_x_hr = msc->touches[id].scroll_x_hr - x;
int step_y_hr = msc->touches[id].scroll_y_hr - y;
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index e1afddb7b33d..082376a6cb3d 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1888,6 +1888,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_CVTOUCH,
USB_DEVICE_ID_CVTOUCH_SCREEN) },
+ /* eGalax devices (SAW) */
+ { .driver_data = MT_CLS_EXPORT_ALL_INPUTS,
+ MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER) },
+
/* eGalax devices (resistive) */
{ .driver_data = MT_CLS_EGALAX,
MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
index a1e0f6849875..b6a9a0f3966e 100644
--- a/drivers/hid/hid-nintendo.c
+++ b/drivers/hid/hid-nintendo.c
@@ -189,6 +189,7 @@ struct joycon_rumble_amp_data {
u16 amp;
};
+#if IS_ENABLED(CONFIG_NINTENDO_FF)
/*
* These tables are from
* https://github.com/dekuNukem/Nintendo_Switch_Reverse_Engineering/blob/master/rumble_data_table.md
@@ -289,6 +290,10 @@ static const struct joycon_rumble_amp_data joycon_rumble_amplitudes[] = {
{ 0xc2, 0x8070, 940 }, { 0xc4, 0x0071, 960 }, { 0xc6, 0x8071, 981 },
{ 0xc8, 0x0072, joycon_max_rumble_amp }
};
+static const u16 JC_RUMBLE_DFLT_LOW_FREQ = 160;
+static const u16 JC_RUMBLE_DFLT_HIGH_FREQ = 320;
+#endif /* IS_ENABLED(CONFIG_NINTENDO_FF) */
+static const u16 JC_RUMBLE_PERIOD_MS = 50;
/* States for controller state machine */
enum joycon_ctlr_state {
@@ -397,9 +402,6 @@ struct joycon_input_report {
#define JC_RUMBLE_DATA_SIZE 8
#define JC_RUMBLE_QUEUE_SIZE 8
-static const u16 JC_RUMBLE_DFLT_LOW_FREQ = 160;
-static const u16 JC_RUMBLE_DFLT_HIGH_FREQ = 320;
-static const u16 JC_RUMBLE_PERIOD_MS = 50;
static const unsigned short JC_RUMBLE_ZERO_AMP_PKT_CNT = 5;
static const char * const joycon_player_led_names[] = {
@@ -1850,8 +1852,10 @@ static int joycon_leds_create(struct joycon_ctlr *ctlr)
d_name,
"green",
joycon_player_led_names[i]);
- if (!name)
+ if (!name) {
+ mutex_unlock(&joycon_input_num_mutex);
return -ENOMEM;
+ }
led = &ctlr->leds[i];
led->name = name;
@@ -1864,6 +1868,7 @@ static int joycon_leds_create(struct joycon_ctlr *ctlr)
ret = devm_led_classdev_register(&hdev->dev, led);
if (ret) {
hid_err(hdev, "Failed registering %s LED\n", led->name);
+ mutex_unlock(&joycon_input_num_mutex);
return ret;
}
}
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index 2666af02d5c1..e4e9471d0f1e 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -798,12 +798,18 @@ static int pk_raw_event(struct hid_device *hdev, struct hid_report *report,
static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret;
- struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
- unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
+ struct usb_interface *intf;
+ unsigned short ifnum;
unsigned long quirks = id->driver_data;
struct pk_device *pk;
struct pcmidi_snd *pm = NULL;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
+ intf = to_usb_interface(hdev->dev.parent);
+ ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
+
pk = kzalloc(sizeof(*pk), GFP_KERNEL);
if (pk == NULL) {
hid_err(hdev, "can't alloc descriptor\n");
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 06b7908c874c..ee7e504e7279 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -124,6 +124,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PIXART_MOUSE), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE3_COVER), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/hid-roccat-arvo.c b/drivers/hid/hid-roccat-arvo.c
index 4556d2a50f75..d94ee0539421 100644
--- a/drivers/hid/hid-roccat-arvo.c
+++ b/drivers/hid/hid-roccat-arvo.c
@@ -344,6 +344,9 @@ static int arvo_probe(struct hid_device *hdev,
{
int retval;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");
diff --git a/drivers/hid/hid-roccat-isku.c b/drivers/hid/hid-roccat-isku.c
index ce5f22519956..e95d59cd8d07 100644
--- a/drivers/hid/hid-roccat-isku.c
+++ b/drivers/hid/hid-roccat-isku.c
@@ -324,6 +324,9 @@ static int isku_probe(struct hid_device *hdev,
{
int retval;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index ea17abc7ad52..76da04801ca9 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -749,6 +749,9 @@ static int kone_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int retval;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");
diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c
index 0316edf8c5bb..1896c69ea512 100644
--- a/drivers/hid/hid-roccat-koneplus.c
+++ b/drivers/hid/hid-roccat-koneplus.c
@@ -431,6 +431,9 @@ static int koneplus_probe(struct hid_device *hdev,
{
int retval;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");
diff --git a/drivers/hid/hid-roccat-konepure.c b/drivers/hid/hid-roccat-konepure.c
index 5248b3c7cf78..cf8eeb33a125 100644
--- a/drivers/hid/hid-roccat-konepure.c
+++ b/drivers/hid/hid-roccat-konepure.c
@@ -133,6 +133,9 @@ static int konepure_probe(struct hid_device *hdev,
{
int retval;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index 960012881570..6fb9b9563769 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -501,6 +501,9 @@ static int kovaplus_probe(struct hid_device *hdev,
{
int retval;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");
diff --git a/drivers/hid/hid-roccat-lua.c b/drivers/hid/hid-roccat-lua.c
index 4a88a76d5c62..d5ddf0d68346 100644
--- a/drivers/hid/hid-roccat-lua.c
+++ b/drivers/hid/hid-roccat-lua.c
@@ -160,6 +160,9 @@ static int lua_probe(struct hid_device *hdev,
{
int retval;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index 989927defe8d..4fcc8e7d276f 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -449,6 +449,9 @@ static int pyra_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int retval;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");
diff --git a/drivers/hid/hid-roccat-ryos.c b/drivers/hid/hid-roccat-ryos.c
index 3956a6c9c521..5bf1971a2b14 100644
--- a/drivers/hid/hid-roccat-ryos.c
+++ b/drivers/hid/hid-roccat-ryos.c
@@ -141,6 +141,9 @@ static int ryos_probe(struct hid_device *hdev,
{
int retval;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");
diff --git a/drivers/hid/hid-roccat-savu.c b/drivers/hid/hid-roccat-savu.c
index 818701f7a028..a784bb4ee651 100644
--- a/drivers/hid/hid-roccat-savu.c
+++ b/drivers/hid/hid-roccat-savu.c
@@ -113,6 +113,9 @@ static int savu_probe(struct hid_device *hdev,
{
int retval;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");
diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c
index 2e1c31156eca..cf5992e97094 100644
--- a/drivers/hid/hid-samsung.c
+++ b/drivers/hid/hid-samsung.c
@@ -152,6 +152,9 @@ static int samsung_probe(struct hid_device *hdev,
int ret;
unsigned int cmask = HID_CONNECT_DEFAULT;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index d1b107d547f5..60ec2b29d54d 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -3000,7 +3000,6 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
sc->quirks = quirks;
hid_set_drvdata(hdev, sc);
sc->hdev = hdev;
- usbdev = to_usb_device(sc->hdev->dev.parent->parent);
ret = hid_parse(hdev);
if (ret) {
@@ -3038,14 +3037,23 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
*/
if (!(hdev->claimed & HID_CLAIMED_INPUT)) {
hid_err(hdev, "failed to claim input\n");
- hid_hw_stop(hdev);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err;
}
if (sc->quirks & (GHL_GUITAR_PS3WIIU | GHL_GUITAR_PS4)) {
+ if (!hid_is_usb(hdev)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ usbdev = to_usb_device(sc->hdev->dev.parent->parent);
+
sc->ghl_urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!sc->ghl_urb)
- return -ENOMEM;
+ if (!sc->ghl_urb) {
+ ret = -ENOMEM;
+ goto err;
+ }
if (sc->quirks & GHL_GUITAR_PS3WIIU)
ret = ghl_init_urb(sc, usbdev, ghl_ps3wiiu_magic_data,
@@ -3055,7 +3063,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
ARRAY_SIZE(ghl_ps4_magic_data));
if (ret) {
hid_err(hdev, "error preparing URB\n");
- return ret;
+ goto err;
}
timer_setup(&sc->ghl_poke_timer, ghl_magic_poke, 0);
@@ -3064,6 +3072,10 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
return ret;
+
+err:
+ hid_hw_stop(hdev);
+ return ret;
}
static void sony_remove(struct hid_device *hdev)
diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
index d44550aa8805..03b935ff02d5 100644
--- a/drivers/hid/hid-thrustmaster.c
+++ b/drivers/hid/hid-thrustmaster.c
@@ -205,7 +205,7 @@ static void thrustmaster_model_handler(struct urb *urb)
struct tm_wheel *tm_wheel = hid_get_drvdata(hdev);
uint16_t model = 0;
int i, ret;
- const struct tm_wheel_info *twi = 0;
+ const struct tm_wheel_info *twi = NULL;
if (urb->status) {
hid_err(hdev, "URB to get model id failed with error %d\n", urb->status);
@@ -238,7 +238,7 @@ static void thrustmaster_model_handler(struct urb *urb)
tm_wheel->usb_dev,
usb_sndctrlpipe(tm_wheel->usb_dev, 0),
(char *)tm_wheel->change_request,
- 0, 0, // We do not expect any response from the wheel
+ NULL, 0, // We do not expect any response from the wheel
thrustmaster_change_handler,
hdev
);
@@ -272,7 +272,10 @@ static void thrustmaster_remove(struct hid_device *hdev)
static int thrustmaster_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret = 0;
- struct tm_wheel *tm_wheel = 0;
+ struct tm_wheel *tm_wheel = NULL;
+
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
ret = hid_parse(hdev);
if (ret) {
diff --git a/drivers/hid/hid-u2fzero.c b/drivers/hid/hid-u2fzero.c
index 31ea7fc69916..ad489caf53ad 100644
--- a/drivers/hid/hid-u2fzero.c
+++ b/drivers/hid/hid-u2fzero.c
@@ -311,7 +311,7 @@ static int u2fzero_probe(struct hid_device *hdev,
unsigned int minor;
int ret;
- if (!hid_is_using_ll_driver(hdev, &usb_hid_driver))
+ if (!hid_is_usb(hdev))
return -EINVAL;
dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
index 6a9865dd703c..d8ab0139e5cd 100644
--- a/drivers/hid/hid-uclogic-core.c
+++ b/drivers/hid/hid-uclogic-core.c
@@ -164,6 +164,9 @@ static int uclogic_probe(struct hid_device *hdev,
struct uclogic_drvdata *drvdata = NULL;
bool params_initialized = false;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
/*
* libinput requires the pad interface to be on a different node
* than the pen, so use QUIRK_MULTI_INPUT for all tablets.
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index 3d67b748a3b9..adff1bd68d9f 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -843,8 +843,7 @@ int uclogic_params_init(struct uclogic_params *params,
struct uclogic_params p = {0, };
/* Check arguments */
- if (params == NULL || hdev == NULL ||
- !hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+ if (params == NULL || hdev == NULL || !hid_is_usb(hdev)) {
rc = -EINVAL;
goto cleanup;
}
diff --git a/drivers/hid/hid-vivaldi.c b/drivers/hid/hid-vivaldi.c
index cd7ada48b1d9..72957a9f7117 100644
--- a/drivers/hid/hid-vivaldi.c
+++ b/drivers/hid/hid-vivaldi.c
@@ -57,6 +57,9 @@ static int vivaldi_probe(struct hid_device *hdev,
int ret;
drvdata = devm_kzalloc(&hdev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
hid_set_drvdata(hdev, drvdata);
ret = hid_parse(hdev);
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 1c5039081db2..8e9d9450cb83 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -266,7 +266,8 @@ static void __maybe_unused ish_resume_handler(struct work_struct *work)
if (ish_should_leave_d0i3(pdev) && !dev->suspend_flag
&& IPC_IS_ISH_ILUP(fwsts)) {
- disable_irq_wake(pdev->irq);
+ if (device_may_wakeup(&pdev->dev))
+ disable_irq_wake(pdev->irq);
ish_set_host_ready(dev);
@@ -337,7 +338,8 @@ static int __maybe_unused ish_suspend(struct device *device)
*/
pci_save_state(pdev);
- enable_irq_wake(pdev->irq);
+ if (device_may_wakeup(&pdev->dev))
+ enable_irq_wake(pdev->irq);
}
} else {
/*
diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
index 1b486f262747..0e1183e96147 100644
--- a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
+++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
@@ -76,9 +76,12 @@ enum ish_loader_commands {
#define LOADER_XFER_MODE_ISHTP BIT(1)
/* ISH Transport Loader client unique GUID */
-static const guid_t loader_ishtp_guid =
- GUID_INIT(0xc804d06a, 0x55bd, 0x4ea7,
- 0xad, 0xed, 0x1e, 0x31, 0x22, 0x8c, 0x76, 0xdc);
+static const struct ishtp_device_id loader_ishtp_id_table[] = {
+ { .guid = GUID_INIT(0xc804d06a, 0x55bd, 0x4ea7,
+ 0xad, 0xed, 0x1e, 0x31, 0x22, 0x8c, 0x76, 0xdc) },
+ { }
+};
+MODULE_DEVICE_TABLE(ishtp, loader_ishtp_id_table);
#define FILENAME_SIZE 256
@@ -880,7 +883,7 @@ static int loader_init(struct ishtp_cl *loader_ishtp_cl, int reset)
fw_client =
ishtp_fw_cl_get_client(ishtp_get_ishtp_device(loader_ishtp_cl),
- &loader_ishtp_guid);
+ &loader_ishtp_id_table[0].guid);
if (!fw_client) {
dev_err(cl_data_to_dev(client_data),
"ISH client uuid not found\n");
@@ -1057,7 +1060,7 @@ static int loader_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
static struct ishtp_cl_driver loader_ishtp_cl_driver = {
.name = "ish-loader",
- .guid = &loader_ishtp_guid,
+ .id = loader_ishtp_id_table,
.probe = loader_ishtp_cl_probe,
.remove = loader_ishtp_cl_remove,
.reset = loader_ishtp_cl_reset,
@@ -1083,4 +1086,3 @@ MODULE_DESCRIPTION("ISH ISH-TP Host firmware Loader Client Driver");
MODULE_AUTHOR("Rushikesh S Kadam <rushikesh.s.kadam@intel.com>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("ishtp:*");
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
index 91bf4d01e91a..4338c9b68a43 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -12,9 +12,12 @@
#include "ishtp-hid.h"
/* ISH Transport protocol (ISHTP in short) GUID */
-static const guid_t hid_ishtp_guid =
- GUID_INIT(0x33AECD58, 0xB679, 0x4E54,
- 0x9B, 0xD9, 0xA0, 0x4D, 0x34, 0xF0, 0xC2, 0x26);
+static const struct ishtp_device_id hid_ishtp_id_table[] = {
+ { .guid = GUID_INIT(0x33AECD58, 0xB679, 0x4E54,
+ 0x9B, 0xD9, 0xA0, 0x4D, 0x34, 0xF0, 0xC2, 0x26), },
+ { }
+};
+MODULE_DEVICE_TABLE(ishtp, hid_ishtp_id_table);
/* Rx ring buffer pool size */
#define HID_CL_RX_RING_SIZE 32
@@ -662,7 +665,7 @@ static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset)
ishtp_set_tx_ring_size(hid_ishtp_cl, HID_CL_TX_RING_SIZE);
ishtp_set_rx_ring_size(hid_ishtp_cl, HID_CL_RX_RING_SIZE);
- fw_client = ishtp_fw_cl_get_client(dev, &hid_ishtp_guid);
+ fw_client = ishtp_fw_cl_get_client(dev, &hid_ishtp_id_table[0].guid);
if (!fw_client) {
dev_err(cl_data_to_dev(client_data),
"ish client uuid not found\n");
@@ -945,7 +948,7 @@ static const struct dev_pm_ops hid_ishtp_pm_ops = {
static struct ishtp_cl_driver hid_ishtp_cl_driver = {
.name = "ish-hid",
- .guid = &hid_ishtp_guid,
+ .id = hid_ishtp_id_table,
.probe = hid_ishtp_cl_probe,
.remove = hid_ishtp_cl_remove,
.reset = hid_ishtp_cl_reset,
@@ -981,4 +984,3 @@ MODULE_AUTHOR("Daniel Drubin <daniel.drubin@intel.com>");
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("ishtp:*");
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index 334eac611774..f68aba8794fe 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -241,7 +241,7 @@ static int ishtp_cl_bus_match(struct device *dev, struct device_driver *drv)
struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
struct ishtp_cl_driver *driver = to_ishtp_cl_driver(drv);
- return guid_equal(driver->guid,
+ return guid_equal(&driver->id[0].guid,
&device->fw_client->props.protocol_name);
}
@@ -350,7 +350,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
{
int len;
- len = snprintf(buf, PAGE_SIZE, "ishtp:%s\n", dev_name(dev));
+ len = snprintf(buf, PAGE_SIZE, ISHTP_MODULE_PREFIX "%s\n", dev_name(dev));
return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
}
static DEVICE_ATTR_RO(modalias);
@@ -363,7 +363,7 @@ ATTRIBUTE_GROUPS(ishtp_cl_dev);
static int ishtp_cl_uevent(struct device *dev, struct kobj_uevent_env *env)
{
- if (add_uevent_var(env, "MODALIAS=ishtp:%s", dev_name(dev)))
+ if (add_uevent_var(env, "MODALIAS=" ISHTP_MODULE_PREFIX "%s", dev_name(dev)))
return -ENOMEM;
return 0;
}
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 2717d39600b4..066c567dbaa2 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -726,7 +726,7 @@ static void wacom_retrieve_hid_descriptor(struct hid_device *hdev,
* Skip the query for this type and modify defaults based on
* interface number.
*/
- if (features->type == WIRELESS) {
+ if (features->type == WIRELESS && intf) {
if (intf->cur_altsetting->desc.bInterfaceNumber == 0)
features->device_type = WACOM_DEVICETYPE_WL_MONITOR;
else
@@ -2214,7 +2214,7 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) {
char *product_name = wacom->hdev->name;
- if (hid_is_using_ll_driver(wacom->hdev, &usb_hid_driver)) {
+ if (hid_is_usb(wacom->hdev)) {
struct usb_interface *intf = to_usb_interface(wacom->hdev->dev.parent);
struct usb_device *dev = interface_to_usbdev(intf);
product_name = dev->product;
@@ -2451,6 +2451,9 @@ static void wacom_wireless_work(struct work_struct *work)
wacom_destroy_battery(wacom);
+ if (!usbdev)
+ return;
+
/* Stylus interface */
hdev1 = usb_get_intfdata(usbdev->config->interface[1]);
wacom1 = hid_get_drvdata(hdev1);
@@ -2730,8 +2733,6 @@ static void wacom_mode_change_work(struct work_struct *work)
static int wacom_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
- struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
- struct usb_device *dev = interface_to_usbdev(intf);
struct wacom *wacom;
struct wacom_wac *wacom_wac;
struct wacom_features *features;
@@ -2766,8 +2767,14 @@ static int wacom_probe(struct hid_device *hdev,
wacom_wac->hid_data.inputmode = -1;
wacom_wac->mode_report = -1;
- wacom->usbdev = dev;
- wacom->intf = intf;
+ if (hid_is_usb(hdev)) {
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct usb_device *dev = interface_to_usbdev(intf);
+
+ wacom->usbdev = dev;
+ wacom->intf = intf;
+ }
+
mutex_init(&wacom->lock);
INIT_DELAYED_WORK(&wacom->init_work, wacom_init_work);
INIT_WORK(&wacom->wireless_work, wacom_wireless_work);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 33a6908995b1..2a4cc39962e7 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2603,6 +2603,9 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
return;
switch (equivalent_usage) {
+ case HID_DG_CONFIDENCE:
+ wacom_wac->hid_data.confidence = value;
+ break;
case HID_GD_X:
wacom_wac->hid_data.x = value;
break;
@@ -2635,7 +2638,8 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
}
if (usage->usage_index + 1 == field->report_count) {
- if (equivalent_usage == wacom_wac->hid_data.last_slot_field)
+ if (equivalent_usage == wacom_wac->hid_data.last_slot_field &&
+ wacom_wac->hid_data.confidence)
wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
}
}
@@ -2653,6 +2657,8 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
wacom_wac->is_invalid_bt_frame = false;
+ hid_data->confidence = true;
+
for (i = 0; i < report->maxfield; i++) {
struct hid_field *field = report->field[i];
int j;
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 8b2d4e5b2303..466b62cc16dc 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -301,6 +301,7 @@ struct hid_data {
bool barrelswitch;
bool barrelswitch2;
bool serialhi;
+ bool confidence;
int x;
int y;
int pressure;
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index dd12af20e467..0747a8f1fcee 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -19,6 +19,7 @@ config HYPERV_TIMER
config HYPERV_UTILS
tristate "Microsoft Hyper-V Utilities driver"
depends on HYPERV && CONNECTOR && NLS
+ depends on PTP_1588_CLOCK_OPTIONAL
help
Select this option to enable the Hyper-V Utilities.
diff --git a/drivers/hwmon/corsair-psu.c b/drivers/hwmon/corsair-psu.c
index 731d5117f9f1..14389fd7afb8 100644
--- a/drivers/hwmon/corsair-psu.c
+++ b/drivers/hwmon/corsair-psu.c
@@ -729,7 +729,7 @@ static int corsairpsu_probe(struct hid_device *hdev, const struct hid_device_id
corsairpsu_check_cmd_support(priv);
priv->hwmon_dev = hwmon_device_register_with_info(&hdev->dev, "corsairpsu", priv,
- &corsairpsu_chip_info, 0);
+ &corsairpsu_chip_info, NULL);
if (IS_ERR(priv->hwmon_dev)) {
ret = PTR_ERR(priv->hwmon_dev);
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index eaace478f508..5596c211f38d 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -627,10 +627,9 @@ static void __init i8k_init_procfs(struct device *dev)
{
struct dell_smm_data *data = dev_get_drvdata(dev);
- /* Register the proc entry */
- proc_create_data("i8k", 0, NULL, &i8k_proc_ops, data);
-
- devm_add_action_or_reset(dev, i8k_exit_procfs, NULL);
+ /* Only register exit function if creation was successful */
+ if (proc_create_data("i8k", 0, NULL, &i8k_proc_ops, data))
+ devm_add_action_or_reset(dev, i8k_exit_procfs, NULL);
}
#else
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 618052c6cdb6..74019dff2550 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -35,13 +35,14 @@
* explicitly as max6659, or if its address is not 0x4c.
* These chips lack the remote temperature offset feature.
*
- * This driver also supports the MAX6654 chip made by Maxim. This chip can
- * be at 9 different addresses, similar to MAX6680/MAX6681. The MAX6654 is
- * otherwise similar to MAX6657/MAX6658/MAX6659. Extended range is available
- * by setting the configuration register accordingly, and is done during
- * initialization. Extended precision is only available at conversion rates
- * of 1 Hz and slower. Note that extended precision is not enabled by
- * default, as this driver initializes all chips to 2 Hz by design.
+ * This driver also supports the MAX6654 chip made by Maxim. This chip can be
+ * at 9 different addresses, similar to MAX6680/MAX6681. The MAX6654 is similar
+ * to MAX6657/MAX6658/MAX6659, but does not support critical temperature
+ * limits. Extended range is available by setting the configuration register
+ * accordingly, and is done during initialization. Extended precision is only
+ * available at conversion rates of 1 Hz and slower. Note that extended
+ * precision is not enabled by default, as this driver initializes all chips
+ * to 2 Hz by design.
*
* This driver also supports the MAX6646, MAX6647, MAX6648, MAX6649 and
* MAX6692 chips made by Maxim. These are again similar to the LM86,
@@ -188,6 +189,8 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
#define LM90_HAVE_BROKEN_ALERT (1 << 7) /* Broken alert */
#define LM90_HAVE_EXTENDED_TEMP (1 << 8) /* extended temperature support*/
#define LM90_PAUSE_FOR_CONFIG (1 << 9) /* Pause conversion for config */
+#define LM90_HAVE_CRIT (1 << 10)/* Chip supports CRIT/OVERT register */
+#define LM90_HAVE_CRIT_ALRM_SWP (1 << 11)/* critical alarm bits swapped */
/* LM90 status */
#define LM90_STATUS_LTHRM (1 << 0) /* local THERM limit tripped */
@@ -197,6 +200,7 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
#define LM90_STATUS_RHIGH (1 << 4) /* remote high temp limit tripped */
#define LM90_STATUS_LLOW (1 << 5) /* local low temp limit tripped */
#define LM90_STATUS_LHIGH (1 << 6) /* local high temp limit tripped */
+#define LM90_STATUS_BUSY (1 << 7) /* conversion is ongoing */
#define MAX6696_STATUS2_R2THRM (1 << 1) /* remote2 THERM limit tripped */
#define MAX6696_STATUS2_R2OPEN (1 << 2) /* remote2 is an open circuit */
@@ -354,38 +358,43 @@ struct lm90_params {
static const struct lm90_params lm90_params[] = {
[adm1032] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
- | LM90_HAVE_BROKEN_ALERT,
+ | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
.max_convrate = 10,
},
[adt7461] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
- | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP,
+ | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP
+ | LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
.max_convrate = 10,
},
[g781] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
- | LM90_HAVE_BROKEN_ALERT,
+ | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
.max_convrate = 8,
},
[lm86] = {
- .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
+ | LM90_HAVE_CRIT,
.alert_alarms = 0x7b,
.max_convrate = 9,
},
[lm90] = {
- .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
+ | LM90_HAVE_CRIT,
.alert_alarms = 0x7b,
.max_convrate = 9,
},
[lm99] = {
- .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
+ | LM90_HAVE_CRIT,
.alert_alarms = 0x7b,
.max_convrate = 9,
},
[max6646] = {
+ .flags = LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
.max_convrate = 6,
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
@@ -396,50 +405,51 @@ static const struct lm90_params lm90_params[] = {
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
[max6657] = {
- .flags = LM90_PAUSE_FOR_CONFIG,
+ .flags = LM90_PAUSE_FOR_CONFIG | LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
.max_convrate = 8,
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
[max6659] = {
- .flags = LM90_HAVE_EMERGENCY,
+ .flags = LM90_HAVE_EMERGENCY | LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
.max_convrate = 8,
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
[max6680] = {
- .flags = LM90_HAVE_OFFSET,
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_CRIT
+ | LM90_HAVE_CRIT_ALRM_SWP,
.alert_alarms = 0x7c,
.max_convrate = 7,
},
[max6696] = {
.flags = LM90_HAVE_EMERGENCY
- | LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3,
+ | LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3 | LM90_HAVE_CRIT,
.alert_alarms = 0x1c7c,
.max_convrate = 6,
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
[w83l771] = {
- .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
.max_convrate = 8,
},
[sa56004] = {
- .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_CRIT,
.alert_alarms = 0x7b,
.max_convrate = 9,
.reg_local_ext = SA56004_REG_R_LOCAL_TEMPL,
},
[tmp451] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
- | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP,
+ | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP | LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
.max_convrate = 9,
.reg_local_ext = TMP451_REG_R_LOCAL_TEMPL,
},
[tmp461] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
- | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP,
+ | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP | LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
.max_convrate = 9,
.reg_local_ext = TMP451_REG_R_LOCAL_TEMPL,
@@ -668,20 +678,22 @@ static int lm90_update_limits(struct device *dev)
struct i2c_client *client = data->client;
int val;
- val = lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT);
- if (val < 0)
- return val;
- data->temp8[LOCAL_CRIT] = val;
+ if (data->flags & LM90_HAVE_CRIT) {
+ val = lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT);
+ if (val < 0)
+ return val;
+ data->temp8[LOCAL_CRIT] = val;
- val = lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT);
- if (val < 0)
- return val;
- data->temp8[REMOTE_CRIT] = val;
+ val = lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT);
+ if (val < 0)
+ return val;
+ data->temp8[REMOTE_CRIT] = val;
- val = lm90_read_reg(client, LM90_REG_R_TCRIT_HYST);
- if (val < 0)
- return val;
- data->temp_hyst = val;
+ val = lm90_read_reg(client, LM90_REG_R_TCRIT_HYST);
+ if (val < 0)
+ return val;
+ data->temp_hyst = val;
+ }
val = lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH);
if (val < 0)
@@ -809,7 +821,7 @@ static int lm90_update_device(struct device *dev)
val = lm90_read_reg(client, LM90_REG_R_STATUS);
if (val < 0)
return val;
- data->alarms = val; /* lower 8 bit of alarms */
+ data->alarms = val & ~LM90_STATUS_BUSY;
if (data->kind == max6696) {
val = lm90_select_remote_channel(data, 1);
@@ -1160,8 +1172,8 @@ static int lm90_set_temphyst(struct lm90_data *data, long val)
else
temp = temp_from_s8(data->temp8[LOCAL_CRIT]);
- /* prevent integer underflow */
- val = max(val, -128000l);
+ /* prevent integer overflow/underflow */
+ val = clamp_val(val, -128000l, 255000l);
data->temp_hyst = hyst_to_reg(temp - val);
err = i2c_smbus_write_byte_data(client, LM90_REG_W_TCRIT_HYST,
@@ -1192,6 +1204,7 @@ static const u8 lm90_temp_emerg_index[3] = {
static const u8 lm90_min_alarm_bits[3] = { 5, 3, 11 };
static const u8 lm90_max_alarm_bits[3] = { 6, 4, 12 };
static const u8 lm90_crit_alarm_bits[3] = { 0, 1, 9 };
+static const u8 lm90_crit_alarm_bits_swapped[3] = { 1, 0, 9 };
static const u8 lm90_emergency_alarm_bits[3] = { 15, 13, 14 };
static const u8 lm90_fault_bits[3] = { 0, 2, 10 };
@@ -1217,7 +1230,10 @@ static int lm90_temp_read(struct device *dev, u32 attr, int channel, long *val)
*val = (data->alarms >> lm90_max_alarm_bits[channel]) & 1;
break;
case hwmon_temp_crit_alarm:
- *val = (data->alarms >> lm90_crit_alarm_bits[channel]) & 1;
+ if (data->flags & LM90_HAVE_CRIT_ALRM_SWP)
+ *val = (data->alarms >> lm90_crit_alarm_bits_swapped[channel]) & 1;
+ else
+ *val = (data->alarms >> lm90_crit_alarm_bits[channel]) & 1;
break;
case hwmon_temp_emergency_alarm:
*val = (data->alarms >> lm90_emergency_alarm_bits[channel]) & 1;
@@ -1465,12 +1481,11 @@ static int lm90_detect(struct i2c_client *client,
if (man_id < 0 || chip_id < 0 || config1 < 0 || convrate < 0)
return -ENODEV;
- if (man_id == 0x01 || man_id == 0x5C || man_id == 0x41) {
+ if (man_id == 0x01 || man_id == 0x5C || man_id == 0xA1) {
config2 = i2c_smbus_read_byte_data(client, LM90_REG_R_CONFIG2);
if (config2 < 0)
return -ENODEV;
- } else
- config2 = 0; /* Make compiler happy */
+ }
if ((address == 0x4C || address == 0x4D)
&& man_id == 0x01) { /* National Semiconductor */
@@ -1903,11 +1918,14 @@ static int lm90_probe(struct i2c_client *client)
info->config = data->channel_config;
data->channel_config[0] = HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
- HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM |
- HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM;
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM;
data->channel_config[1] = HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
- HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM |
- HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | HWMON_T_FAULT;
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | HWMON_T_FAULT;
+
+ if (data->flags & LM90_HAVE_CRIT) {
+ data->channel_config[0] |= HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_CRIT_HYST;
+ data->channel_config[1] |= HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_CRIT_HYST;
+ }
if (data->flags & LM90_HAVE_OFFSET)
data->channel_config[1] |= HWMON_T_OFFSET;
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 93dca471972e..57ce8633a725 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -1527,7 +1527,7 @@ static u16 nct6775_wmi_read_value(struct nct6775_data *data, u16 reg)
nct6775_wmi_set_bank(data, reg);
- err = nct6775_asuswmi_read(data->bank, reg, &tmp);
+ err = nct6775_asuswmi_read(data->bank, reg & 0xff, &tmp);
if (err)
return 0;
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 17518b4cab1b..f12b9a28a232 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -336,8 +336,6 @@ static int pwm_fan_probe(struct platform_device *pdev)
return ret;
}
- ctx->pwm_value = MAX_PWM;
-
pwm_init_state(ctx->pwm, &ctx->pwm_state);
/*
diff --git a/drivers/hwmon/sht4x.c b/drivers/hwmon/sht4x.c
index 09c2a0b06444..3415d7a0e0fc 100644
--- a/drivers/hwmon/sht4x.c
+++ b/drivers/hwmon/sht4x.c
@@ -23,7 +23,7 @@
/*
* I2C command delays (in microseconds)
*/
-#define SHT4X_MEAS_DELAY 1000
+#define SHT4X_MEAS_DELAY_HPM 8200 /* see t_MEAS,h in datasheet */
#define SHT4X_DELAY_EXTRA 10000
/*
@@ -90,7 +90,7 @@ static int sht4x_read_values(struct sht4x_data *data)
if (ret < 0)
goto unlock;
- usleep_range(SHT4X_MEAS_DELAY, SHT4X_MEAS_DELAY + SHT4X_DELAY_EXTRA);
+ usleep_range(SHT4X_MEAS_DELAY_HPM, SHT4X_MEAS_DELAY_HPM + SHT4X_DELAY_EXTRA);
ret = i2c_master_recv(client, raw_data, SHT4X_RESPONSE_LENGTH);
if (ret != SHT4X_RESPONSE_LENGTH) {
diff --git a/drivers/i2c/busses/i2c-cbus-gpio.c b/drivers/i2c/busses/i2c-cbus-gpio.c
index 72df563477b1..f8639a4457d2 100644
--- a/drivers/i2c/busses/i2c-cbus-gpio.c
+++ b/drivers/i2c/busses/i2c-cbus-gpio.c
@@ -195,8 +195,9 @@ static u32 cbus_i2c_func(struct i2c_adapter *adapter)
}
static const struct i2c_algorithm cbus_i2c_algo = {
- .smbus_xfer = cbus_i2c_smbus_xfer,
- .functionality = cbus_i2c_func,
+ .smbus_xfer = cbus_i2c_smbus_xfer,
+ .smbus_xfer_atomic = cbus_i2c_smbus_xfer,
+ .functionality = cbus_i2c_func,
};
static int cbus_i2c_remove(struct platform_device *pdev)
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 05187457f88a..41446f9cc52d 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -191,6 +191,7 @@
#define SMBSLVSTS_HST_NTFY_STS BIT(0)
/* Host Notify Command register bits */
+#define SMBSLVCMD_SMBALERT_DISABLE BIT(2)
#define SMBSLVCMD_HST_NTFY_INTREN BIT(0)
#define STATUS_ERROR_FLAGS (SMBHSTSTS_FAILED | SMBHSTSTS_BUS_ERR | \
@@ -259,6 +260,7 @@ struct i801_priv {
struct i2c_adapter adapter;
unsigned long smba;
unsigned char original_hstcfg;
+ unsigned char original_hstcnt;
unsigned char original_slvcmd;
struct pci_dev *pci_dev;
unsigned int features;
@@ -641,12 +643,20 @@ static irqreturn_t i801_isr(int irq, void *dev_id)
i801_isr_byte_done(priv);
/*
- * Clear irq sources and report transaction result.
+ * Clear remaining IRQ sources: Completion of last command, errors
+ * and the SMB_ALERT signal. SMB_ALERT status is set after signal
+ * assertion independently of the interrupt generation being blocked
+ * or not so clear it always when the status is set.
+ */
+ status &= SMBHSTSTS_INTR | STATUS_ERROR_FLAGS | SMBHSTSTS_SMBALERT_STS;
+ if (status)
+ outb_p(status, SMBHSTSTS(priv));
+ status &= ~SMBHSTSTS_SMBALERT_STS; /* SMB_ALERT not reported */
+ /*
+ * Report transaction result.
* ->status must be cleared before the next transaction is started.
*/
- status &= SMBHSTSTS_INTR | STATUS_ERROR_FLAGS;
if (status) {
- outb_p(status, SMBHSTSTS(priv));
priv->status = status;
complete(&priv->done);
}
@@ -974,9 +984,13 @@ static void i801_enable_host_notify(struct i2c_adapter *adapter)
if (!(priv->features & FEATURE_HOST_NOTIFY))
return;
- if (!(SMBSLVCMD_HST_NTFY_INTREN & priv->original_slvcmd))
- outb_p(SMBSLVCMD_HST_NTFY_INTREN | priv->original_slvcmd,
- SMBSLVCMD(priv));
+ /*
+ * Enable host notify interrupt and block the generation of interrupt
+ * from the SMB_ALERT signal because the driver does not support
+ * SMBus Alert.
+ */
+ outb_p(SMBSLVCMD_HST_NTFY_INTREN | SMBSLVCMD_SMBALERT_DISABLE |
+ priv->original_slvcmd, SMBSLVCMD(priv));
/* clear Host Notify bit to allow a new notification */
outb_p(SMBSLVSTS_HST_NTFY_STS, SMBSLVSTS(priv));
@@ -1805,7 +1819,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
outb_p(inb_p(SMBAUXCTL(priv)) &
~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
- /* Remember original Host Notify setting */
+ /* Remember original Interrupt and Host Notify settings */
+ priv->original_hstcnt = inb_p(SMBHSTCNT(priv)) & ~SMBHSTCNT_KILL;
if (priv->features & FEATURE_HOST_NOTIFY)
priv->original_slvcmd = inb_p(SMBSLVCMD(priv));
@@ -1869,6 +1884,7 @@ static void i801_remove(struct pci_dev *dev)
{
struct i801_priv *priv = pci_get_drvdata(dev);
+ outb_p(priv->original_hstcnt, SMBHSTCNT(priv));
i801_disable_host_notify(priv);
i801_del_mux(priv);
i2c_del_adapter(&priv->adapter);
@@ -1892,6 +1908,7 @@ static void i801_shutdown(struct pci_dev *dev)
struct i801_priv *priv = pci_get_drvdata(dev);
/* Restore config registers to avoid hard hang on some systems */
+ outb_p(priv->original_hstcnt, SMBHSTCNT(priv));
i801_disable_host_notify(priv);
pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
}
@@ -1901,6 +1918,7 @@ static int i801_suspend(struct device *dev)
{
struct i801_priv *priv = dev_get_drvdata(dev);
+ outb_p(priv->original_hstcnt, SMBHSTCNT(priv));
pci_write_config_byte(priv->pci_dev, SMBHSTCFG, priv->original_hstcfg);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index a6ea1eb1394e..53b8da6dbb23 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -636,7 +636,7 @@ static irqreturn_t mpc_i2c_isr(int irq, void *dev_id)
status = readb(i2c->base + MPC_I2C_SR);
if (status & CSR_MIF) {
/* Wait up to 100us for transfer to properly complete */
- readb_poll_timeout(i2c->base + MPC_I2C_SR, status, !(status & CSR_MCF), 0, 100);
+ readb_poll_timeout_atomic(i2c->base + MPC_I2C_SR, status, status & CSR_MCF, 0, 100);
writeb(0, i2c->base + MPC_I2C_SR);
mpc_i2c_do_intr(i2c, status);
return IRQ_HANDLED;
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 819ab4ee517e..02ddb237f69a 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -423,8 +423,8 @@ static void rk3x_i2c_handle_read(struct rk3x_i2c *i2c, unsigned int ipd)
if (!(ipd & REG_INT_MBRF))
return;
- /* ack interrupt */
- i2c_writel(i2c, REG_INT_MBRF, REG_IPD);
+ /* ack interrupt (read also produces a spurious START flag, clear it too) */
+ i2c_writel(i2c, REG_INT_MBRF | REG_INT_START, REG_IPD);
/* Can only handle a maximum of 32 bytes at a time */
if (len > 32)
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index b9b19a2a2ffa..66145d2b9b55 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -1493,6 +1493,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
{
struct stm32f7_i2c_dev *i2c_dev = data;
struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
+ struct stm32_i2c_dma *dma = i2c_dev->dma;
void __iomem *base = i2c_dev->base;
u32 status, mask;
int ret = IRQ_HANDLED;
@@ -1518,6 +1519,10 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
dev_dbg(i2c_dev->dev, "<%s>: Receive NACK (addr %x)\n",
__func__, f7_msg->addr);
writel_relaxed(STM32F7_I2C_ICR_NACKCF, base + STM32F7_I2C_ICR);
+ if (i2c_dev->use_dma) {
+ stm32f7_i2c_disable_dma_req(i2c_dev);
+ dmaengine_terminate_async(dma->chan_using);
+ }
f7_msg->result = -ENXIO;
}
@@ -1533,7 +1538,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
/* Clear STOP flag */
writel_relaxed(STM32F7_I2C_ICR_STOPCF, base + STM32F7_I2C_ICR);
- if (i2c_dev->use_dma) {
+ if (i2c_dev->use_dma && !f7_msg->result) {
ret = IRQ_WAKE_THREAD;
} else {
i2c_dev->master_mode = false;
@@ -1546,7 +1551,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
if (f7_msg->stop) {
mask = STM32F7_I2C_CR2_STOP;
stm32f7_i2c_set_bits(base + STM32F7_I2C_CR2, mask);
- } else if (i2c_dev->use_dma) {
+ } else if (i2c_dev->use_dma && !f7_msg->result) {
ret = IRQ_WAKE_THREAD;
} else if (f7_msg->smbus) {
stm32f7_i2c_smbus_rep_start(i2c_dev);
@@ -1583,7 +1588,7 @@ static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data)
if (!ret) {
dev_dbg(i2c_dev->dev, "<%s>: Timed out\n", __func__);
stm32f7_i2c_disable_dma_req(i2c_dev);
- dmaengine_terminate_all(dma->chan_using);
+ dmaengine_terminate_async(dma->chan_using);
f7_msg->result = -ETIMEDOUT;
}
@@ -1660,7 +1665,7 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
/* Disable dma */
if (i2c_dev->use_dma) {
stm32f7_i2c_disable_dma_req(i2c_dev);
- dmaengine_terminate_all(dma->chan_using);
+ dmaengine_terminate_async(dma->chan_using);
}
i2c_dev->master_mode = false;
@@ -1696,12 +1701,26 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
time_left = wait_for_completion_timeout(&i2c_dev->complete,
i2c_dev->adap.timeout);
ret = f7_msg->result;
+ if (ret) {
+ if (i2c_dev->use_dma)
+ dmaengine_synchronize(dma->chan_using);
+
+ /*
+ * It is possible that some unsent data have already been
+ * written into TXDR. To avoid sending old data in a
+ * further transfer, flush TXDR in case of any error
+ */
+ writel_relaxed(STM32F7_I2C_ISR_TXE,
+ i2c_dev->base + STM32F7_I2C_ISR);
+ goto pm_free;
+ }
if (!time_left) {
dev_dbg(i2c_dev->dev, "Access to slave 0x%x timed out\n",
i2c_dev->msg->addr);
if (i2c_dev->use_dma)
- dmaengine_terminate_all(dma->chan_using);
+ dmaengine_terminate_sync(dma->chan_using);
+ stm32f7_i2c_wait_free_bus(i2c_dev);
ret = -ETIMEDOUT;
}
@@ -1744,13 +1763,25 @@ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
timeout = wait_for_completion_timeout(&i2c_dev->complete,
i2c_dev->adap.timeout);
ret = f7_msg->result;
- if (ret)
+ if (ret) {
+ if (i2c_dev->use_dma)
+ dmaengine_synchronize(dma->chan_using);
+
+ /*
+ * It is possible that some unsent data have already been
+ * written into TXDR. To avoid sending old data in a
+ * further transfer, flush TXDR in case of any error
+ */
+ writel_relaxed(STM32F7_I2C_ISR_TXE,
+ i2c_dev->base + STM32F7_I2C_ISR);
goto pm_free;
+ }
if (!timeout) {
dev_dbg(dev, "Access to slave 0x%x timed out\n", f7_msg->addr);
if (i2c_dev->use_dma)
- dmaengine_terminate_all(dma->chan_using);
+ dmaengine_terminate_sync(dma->chan_using);
+ stm32f7_i2c_wait_free_bus(i2c_dev);
ret = -ETIMEDOUT;
goto pm_free;
}
diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c
index 1ed4daa918a0..41eb0dcc3204 100644
--- a/drivers/i2c/busses/i2c-virtio.c
+++ b/drivers/i2c/busses/i2c-virtio.c
@@ -22,24 +22,24 @@
/**
* struct virtio_i2c - virtio I2C data
* @vdev: virtio device for this controller
- * @completion: completion of virtio I2C message
* @adap: I2C adapter for this controller
* @vq: the virtio virtqueue for communication
*/
struct virtio_i2c {
struct virtio_device *vdev;
- struct completion completion;
struct i2c_adapter adap;
struct virtqueue *vq;
};
/**
* struct virtio_i2c_req - the virtio I2C request structure
+ * @completion: completion of virtio I2C message
* @out_hdr: the OUT header of the virtio I2C message
* @buf: the buffer into which data is read, or from which it's written
* @in_hdr: the IN header of the virtio I2C message
*/
struct virtio_i2c_req {
+ struct completion completion;
struct virtio_i2c_out_hdr out_hdr ____cacheline_aligned;
uint8_t *buf ____cacheline_aligned;
struct virtio_i2c_in_hdr in_hdr ____cacheline_aligned;
@@ -47,9 +47,11 @@ struct virtio_i2c_req {
static void virtio_i2c_msg_done(struct virtqueue *vq)
{
- struct virtio_i2c *vi = vq->vdev->priv;
+ struct virtio_i2c_req *req;
+ unsigned int len;
- complete(&vi->completion);
+ while ((req = virtqueue_get_buf(vq, &len)))
+ complete(&req->completion);
}
static int virtio_i2c_prepare_reqs(struct virtqueue *vq,
@@ -62,6 +64,8 @@ static int virtio_i2c_prepare_reqs(struct virtqueue *vq,
for (i = 0; i < num; i++) {
int outcnt = 0, incnt = 0;
+ init_completion(&reqs[i].completion);
+
/*
* Only 7-bit mode supported for this moment. For the address
* format, Please check the Virtio I2C Specification.
@@ -104,24 +108,17 @@ static int virtio_i2c_prepare_reqs(struct virtqueue *vq,
static int virtio_i2c_complete_reqs(struct virtqueue *vq,
struct virtio_i2c_req *reqs,
- struct i2c_msg *msgs, int num,
- bool timedout)
+ struct i2c_msg *msgs, int num)
{
- struct virtio_i2c_req *req;
- bool failed = timedout;
- unsigned int len;
+ bool failed = false;
int i, j = 0;
for (i = 0; i < num; i++) {
- /* Detach the ith request from the vq */
- req = virtqueue_get_buf(vq, &len);
+ struct virtio_i2c_req *req = &reqs[i];
- /*
- * Condition req == &reqs[i] should always meet since we have
- * total num requests in the vq. reqs[i] can never be NULL here.
- */
- if (!failed && (WARN_ON(req != &reqs[i]) ||
- req->in_hdr.status != VIRTIO_I2C_MSG_OK))
+ wait_for_completion(&req->completion);
+
+ if (!failed && req->in_hdr.status != VIRTIO_I2C_MSG_OK)
failed = true;
i2c_put_dma_safe_msg_buf(reqs[i].buf, &msgs[i], !failed);
@@ -130,7 +127,7 @@ static int virtio_i2c_complete_reqs(struct virtqueue *vq,
j++;
}
- return timedout ? -ETIMEDOUT : j;
+ return j;
}
static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
@@ -139,7 +136,6 @@ static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
struct virtio_i2c *vi = i2c_get_adapdata(adap);
struct virtqueue *vq = vi->vq;
struct virtio_i2c_req *reqs;
- unsigned long time_left;
int count;
reqs = kcalloc(num, sizeof(*reqs), GFP_KERNEL);
@@ -158,15 +154,9 @@ static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
* remote here to clear the virtqueue, so we can try another set of
* messages later on.
*/
-
- reinit_completion(&vi->completion);
virtqueue_kick(vq);
- time_left = wait_for_completion_timeout(&vi->completion, adap->timeout);
- if (!time_left)
- dev_err(&adap->dev, "virtio i2c backend timeout.\n");
-
- count = virtio_i2c_complete_reqs(vq, reqs, msgs, count, !time_left);
+ count = virtio_i2c_complete_reqs(vq, reqs, msgs, count);
err_free:
kfree(reqs);
@@ -214,8 +204,6 @@ static int virtio_i2c_probe(struct virtio_device *vdev)
vdev->priv = vi;
vi->vdev = vdev;
- init_completion(&vi->completion);
-
ret = virtio_i2c_setup_vqs(vi);
if (ret)
return ret;
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index bce0e8bb7852..cf5d049342ea 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -535,6 +535,9 @@ static long compat_i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned lo
sizeof(rdwr_arg)))
return -EFAULT;
+ if (!rdwr_arg.msgs || rdwr_arg.nmsgs == 0)
+ return -EINVAL;
+
if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
return -EINVAL;
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index a51fdd3c9b5b..24c9387c2968 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -1595,8 +1595,7 @@ static int kxcjk1013_probe(struct i2c_client *client,
return 0;
err_buffer_cleanup:
- if (data->dready_trig)
- iio_triggered_buffer_cleanup(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
err_trigger_unregister:
if (data->dready_trig)
iio_trigger_unregister(data->dready_trig);
@@ -1618,8 +1617,8 @@ static int kxcjk1013_remove(struct i2c_client *client)
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
+ iio_triggered_buffer_cleanup(indio_dev);
if (data->dready_trig) {
- iio_triggered_buffer_cleanup(indio_dev);
iio_trigger_unregister(data->dready_trig);
iio_trigger_unregister(data->motion_trig);
}
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index 2faf85ca996e..552eba5e8b4f 100644
--- a/drivers/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -224,14 +224,14 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p)
hw_values.chan,
sizeof(hw_values.chan));
if (ret) {
- dev_err(st->dev,
- "error reading data\n");
- return ret;
+ dev_err(st->dev, "error reading data: %d\n", ret);
+ goto out;
}
iio_push_to_buffers_with_timestamp(indio_dev,
&hw_values,
iio_get_time_ns(indio_dev));
+out:
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 715b8138fb71..09c7f10fefb6 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -1470,7 +1470,7 @@ static int mma8452_trigger_setup(struct iio_dev *indio_dev)
if (ret)
return ret;
- indio_dev->trig = trig;
+ indio_dev->trig = iio_trigger_get(trig);
return 0;
}
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 8bf5b62a73f4..3363af15a43f 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -532,7 +532,7 @@ config IMX7D_ADC
config IMX8QXP_ADC
tristate "NXP IMX8QXP ADC driver"
- depends on ARCH_MXC_ARM64 || COMPILE_TEST
+ depends on ARCH_MXC || COMPILE_TEST
depends on HAS_IOMEM
help
Say yes here to build support for IMX8QXP ADC.
diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
index 2c5c8a3672b2..aa42ba759fa1 100644
--- a/drivers/iio/adc/ad7768-1.c
+++ b/drivers/iio/adc/ad7768-1.c
@@ -480,8 +480,8 @@ static irqreturn_t ad7768_trigger_handler(int irq, void *p)
iio_push_to_buffers_with_timestamp(indio_dev, &st->data.scan,
iio_get_time_ns(indio_dev));
- iio_trigger_notify_done(indio_dev->trig);
err_unlock:
+ iio_trigger_notify_done(indio_dev->trig);
mutex_unlock(&st->lock);
return IRQ_HANDLED;
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 4c922ef634f8..92a57cf10fba 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -1586,7 +1586,8 @@ static int at91_adc_read_info_raw(struct iio_dev *indio_dev,
*val = st->conversion_value;
ret = at91_adc_adjust_val_osr(st, val);
if (chan->scan_type.sign == 's')
- *val = sign_extend32(*val, 11);
+ *val = sign_extend32(*val,
+ chan->scan_type.realbits - 1);
st->conversion_done = false;
}
diff --git a/drivers/iio/adc/axp20x_adc.c b/drivers/iio/adc/axp20x_adc.c
index 3e0c0233b431..df99f1365c39 100644
--- a/drivers/iio/adc/axp20x_adc.c
+++ b/drivers/iio/adc/axp20x_adc.c
@@ -251,19 +251,8 @@ static int axp22x_adc_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val)
{
struct axp20x_adc_iio *info = iio_priv(indio_dev);
- int size;
- /*
- * N.B.: Unlike the Chinese datasheets tell, the charging current is
- * stored on 12 bits, not 13 bits. Only discharging current is on 13
- * bits.
- */
- if (chan->type == IIO_CURRENT && chan->channel == AXP22X_BATT_DISCHRG_I)
- size = 13;
- else
- size = 12;
-
- *val = axp20x_read_variable_width(info->regmap, chan->address, size);
+ *val = axp20x_read_variable_width(info->regmap, chan->address, 12);
if (*val < 0)
return *val;
@@ -386,9 +375,8 @@ static int axp22x_adc_scale(struct iio_chan_spec const *chan, int *val,
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CURRENT:
- *val = 0;
- *val2 = 500000;
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = 1;
+ return IIO_VAL_INT;
case IIO_TEMP:
*val = 100;
diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
index 16407664182c..97d162a3cba4 100644
--- a/drivers/iio/adc/dln2-adc.c
+++ b/drivers/iio/adc/dln2-adc.c
@@ -248,7 +248,6 @@ static int dln2_adc_set_chan_period(struct dln2_adc *dln2,
static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel)
{
int ret, i;
- struct iio_dev *indio_dev = platform_get_drvdata(dln2->pdev);
u16 conflict;
__le16 value;
int olen = sizeof(value);
@@ -257,13 +256,9 @@ static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel)
.chan = channel,
};
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret < 0)
- return ret;
-
ret = dln2_adc_set_chan_enabled(dln2, channel, true);
if (ret < 0)
- goto release_direct;
+ return ret;
ret = dln2_adc_set_port_enabled(dln2, true, &conflict);
if (ret < 0) {
@@ -300,8 +295,6 @@ disable_port:
dln2_adc_set_port_enabled(dln2, false, NULL);
disable_chan:
dln2_adc_set_chan_enabled(dln2, channel, false);
-release_direct:
- iio_device_release_direct_mode(indio_dev);
return ret;
}
@@ -337,10 +330,16 @@ static int dln2_adc_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret < 0)
+ return ret;
+
mutex_lock(&dln2->mutex);
ret = dln2_adc_read(dln2, chan->channel);
mutex_unlock(&dln2->mutex);
+ iio_device_release_direct_mode(indio_dev);
+
if (ret < 0)
return ret;
@@ -656,7 +655,11 @@ static int dln2_adc_probe(struct platform_device *pdev)
return -ENOMEM;
}
iio_trigger_set_drvdata(dln2->trig, dln2);
- devm_iio_trigger_register(dev, dln2->trig);
+ ret = devm_iio_trigger_register(dev, dln2->trig);
+ if (ret) {
+ dev_err(dev, "failed to register trigger: %d\n", ret);
+ return ret;
+ }
iio_trigger_set_immutable(indio_dev, dln2->trig);
ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 6245434f8377..8cd258cb2682 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -1117,6 +1117,7 @@ static void stm32h7_adc_unprepare(struct iio_dev *indio_dev)
{
struct stm32_adc *adc = iio_priv(indio_dev);
+ stm32_adc_writel(adc, STM32H7_ADC_PCSEL, 0);
stm32h7_adc_disable(indio_dev);
stm32_adc_int_ch_disable(adc);
stm32h7_adc_enter_pwr_down(adc);
@@ -1986,7 +1987,7 @@ static int stm32_adc_populate_int_ch(struct iio_dev *indio_dev, const char *ch_n
/* Get calibration data for vrefint channel */
ret = nvmem_cell_read_u16(&indio_dev->dev, "vrefint", &vrefint);
if (ret && ret != -ENOENT) {
- return dev_err_probe(&indio_dev->dev, ret,
+ return dev_err_probe(indio_dev->dev.parent, ret,
"nvmem access error\n");
}
if (ret == -ENOENT)
diff --git a/drivers/iio/gyro/adxrs290.c b/drivers/iio/gyro/adxrs290.c
index 3e0734ddafe3..600e9725da78 100644
--- a/drivers/iio/gyro/adxrs290.c
+++ b/drivers/iio/gyro/adxrs290.c
@@ -7,6 +7,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/kernel.h>
@@ -124,7 +125,7 @@ static int adxrs290_get_rate_data(struct iio_dev *indio_dev, const u8 cmd, int *
goto err_unlock;
}
- *val = temp;
+ *val = sign_extend32(temp, 15);
err_unlock:
mutex_unlock(&st->lock);
@@ -146,7 +147,7 @@ static int adxrs290_get_temp_data(struct iio_dev *indio_dev, int *val)
}
/* extract lower 12 bits temperature reading */
- *val = temp & 0x0FFF;
+ *val = sign_extend32(temp, 11);
err_unlock:
mutex_unlock(&st->lock);
diff --git a/drivers/iio/gyro/itg3200_buffer.c b/drivers/iio/gyro/itg3200_buffer.c
index 04dd6a7969ea..4cfa0d439560 100644
--- a/drivers/iio/gyro/itg3200_buffer.c
+++ b/drivers/iio/gyro/itg3200_buffer.c
@@ -61,9 +61,9 @@ static irqreturn_t itg3200_trigger_handler(int irq, void *p)
iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp);
+error_ret:
iio_trigger_notify_done(indio_dev->trig);
-error_ret:
return IRQ_HANDLED;
}
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index b23caa2f2aa1..93990ff1dfe3 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -556,7 +556,6 @@ struct iio_trigger *viio_trigger_alloc(struct device *parent,
irq_modify_status(trig->subirq_base + i,
IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
}
- get_device(&trig->dev);
return trig;
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 7e51aaac0bf8..b2983b1a9ed1 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -1275,7 +1275,7 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
ret = regmap_bulk_read(data->regmap, LTR501_ALS_DATA1,
als_buf, sizeof(als_buf));
if (ret < 0)
- return ret;
+ goto done;
if (test_bit(0, indio_dev->active_scan_mask))
scan.channels[j++] = le16_to_cpu(als_buf[1]);
if (test_bit(1, indio_dev->active_scan_mask))
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index 07e91846307c..fc63856ed54d 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -546,9 +546,8 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private)
mutex_lock(&data->lock);
ret = regmap_field_read(data->reg_flag_nf, &dir);
if (ret < 0) {
- dev_err(&data->client->dev, "register read failed\n");
- mutex_unlock(&data->lock);
- return ret;
+ dev_err(&data->client->dev, "register read failed: %d\n", ret);
+ goto out;
}
event = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 1,
IIO_EV_TYPE_THRESH,
@@ -560,6 +559,7 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private)
ret = regmap_field_write(data->reg_flag_psint, 0);
if (ret < 0)
dev_err(&data->client->dev, "failed to reset interrupts\n");
+out:
mutex_unlock(&data->lock);
return IRQ_HANDLED;
diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
index 33083877cd19..4353b749ecef 100644
--- a/drivers/iio/trigger/stm32-timer-trigger.c
+++ b/drivers/iio/trigger/stm32-timer-trigger.c
@@ -912,6 +912,6 @@ static struct platform_driver stm32_timer_trigger_driver = {
};
module_platform_driver(stm32_timer_trigger_driver);
-MODULE_ALIAS("platform: stm32-timer-trigger");
+MODULE_ALIAS("platform:stm32-timer-trigger");
MODULE_DESCRIPTION("STMicroelectronics STM32 Timer Trigger driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index ec37f4fd8e96..f1245c94ae26 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -8415,6 +8415,8 @@ static void receive_interrupt_common(struct hfi1_ctxtdata *rcd)
*/
static void __hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
{
+ if (!rcd->rcvhdrq)
+ return;
clear_recv_intr(rcd);
if (check_packet_present(rcd))
force_recv_intr(rcd);
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 61f341c3005c..e2c634af40e9 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -1012,6 +1012,8 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
struct hfi1_packet packet;
int skip_pkt = 0;
+ if (!rcd->rcvhdrq)
+ return RCV_PKT_OK;
/* Control context will always use the slow path interrupt handler */
needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 0 : 1;
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index dbd1c31830b9..4436ed41547c 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -113,7 +113,6 @@ static int hfi1_create_kctxt(struct hfi1_devdata *dd,
rcd->fast_handler = get_dma_rtail_setting(rcd) ?
handle_receive_interrupt_dma_rtail :
handle_receive_interrupt_nodma_rtail;
- rcd->slow_handler = handle_receive_interrupt;
hfi1_set_seq_cnt(rcd, 1);
@@ -334,6 +333,8 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
rcd->numa_id = numa;
rcd->rcv_array_groups = dd->rcv_entries.ngroups;
rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;
+ rcd->slow_handler = handle_receive_interrupt;
+ rcd->do_interrupt = rcd->slow_handler;
rcd->msix_intr = CCE_NUM_MSIX_VECTORS;
mutex_init(&rcd->exp_mutex);
@@ -874,18 +875,6 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
if (ret)
goto done;
- /* allocate dummy tail memory for all receive contexts */
- dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
- sizeof(u64),
- &dd->rcvhdrtail_dummy_dma,
- GFP_KERNEL);
-
- if (!dd->rcvhdrtail_dummy_kvaddr) {
- dd_dev_err(dd, "cannot allocate dummy tail memory\n");
- ret = -ENOMEM;
- goto done;
- }
-
/* dd->rcd can be NULL if early initialization failed */
for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
/*
@@ -898,8 +887,6 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
if (!rcd)
continue;
- rcd->do_interrupt = &handle_receive_interrupt;
-
lastfail = hfi1_create_rcvhdrq(dd, rcd);
if (!lastfail)
lastfail = hfi1_setup_eagerbufs(rcd);
@@ -1120,7 +1107,7 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
rcd->egrbufs.rcvtids = NULL;
for (e = 0; e < rcd->egrbufs.alloced; e++) {
- if (rcd->egrbufs.buffers[e].dma)
+ if (rcd->egrbufs.buffers[e].addr)
dma_free_coherent(&dd->pcidev->dev,
rcd->egrbufs.buffers[e].len,
rcd->egrbufs.buffers[e].addr,
@@ -1201,6 +1188,11 @@ void hfi1_free_devdata(struct hfi1_devdata *dd)
dd->tx_opstats = NULL;
kfree(dd->comp_vect);
dd->comp_vect = NULL;
+ if (dd->rcvhdrtail_dummy_kvaddr)
+ dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
+ (void *)dd->rcvhdrtail_dummy_kvaddr,
+ dd->rcvhdrtail_dummy_dma);
+ dd->rcvhdrtail_dummy_kvaddr = NULL;
sdma_clean(dd, dd->num_sdma);
rvt_dealloc_device(&dd->verbs_dev.rdi);
}
@@ -1298,6 +1290,15 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
goto bail;
}
+ /* allocate dummy tail memory for all receive contexts */
+ dd->rcvhdrtail_dummy_kvaddr =
+ dma_alloc_coherent(&dd->pcidev->dev, sizeof(u64),
+ &dd->rcvhdrtail_dummy_dma, GFP_KERNEL);
+ if (!dd->rcvhdrtail_dummy_kvaddr) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
atomic_set(&dd->ipoib_rsm_usr_num, 0);
return dd;
@@ -1505,13 +1506,6 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
free_credit_return(dd);
- if (dd->rcvhdrtail_dummy_kvaddr) {
- dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
- (void *)dd->rcvhdrtail_dummy_kvaddr,
- dd->rcvhdrtail_dummy_dma);
- dd->rcvhdrtail_dummy_kvaddr = NULL;
- }
-
/*
* Free any resources still in use (usually just kernel contexts)
* at unload; we do for ctxtcnt, because that's what we allocate.
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 2b6c24b7b586..f07d328689d3 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -838,8 +838,8 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
if (current->nr_cpus_allowed != 1)
goto out;
- cpu_id = smp_processor_id();
rcu_read_lock();
+ cpu_id = smp_processor_id();
rht_node = rhashtable_lookup(dd->sdma_rht, &cpu_id,
sdma_rht_params);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 9bfbaddd1763..eb0defa80d0d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -33,6 +33,7 @@
#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <net/addrconf.h>
@@ -1050,9 +1051,14 @@ static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
unsigned long instance_stage,
unsigned long reset_stage)
{
+#define HW_RESET_TIMEOUT_US 1000000
+#define HW_RESET_SLEEP_US 1000
+
struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ unsigned long val;
+ int ret;
/* When hardware reset is detected, we should stop sending mailbox&cmq&
* doorbell to hardware. If now in .init_instance() function, we should
@@ -1064,7 +1070,11 @@ static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
* again.
*/
hr_dev->dis_db = true;
- if (!ops->get_hw_reset_stat(handle))
+
+ ret = read_poll_timeout(ops->ae_dev_reset_cnt, val,
+ val > hr_dev->reset_cnt, HW_RESET_SLEEP_US,
+ HW_RESET_TIMEOUT_US, false, handle);
+ if (!ret)
hr_dev->is_reset = true;
if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
@@ -1584,11 +1594,17 @@ static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cmq_desc desc;
struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+ u32 clock_cycles_of_1us;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
false);
- hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, 0x3e8);
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
+ clock_cycles_of_1us = HNS_ROCE_1NS_CFG;
+ else
+ clock_cycles_of_1us = HNS_ROCE_1US_CFG;
+
+ hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, clock_cycles_of_1us);
hr_reg_write(req, CFG_GLOBAL_PARAM_UDP_PORT, ROCE_V2_UDP_DPORT);
return hns_roce_cmq_send(hr_dev, &desc, 1);
@@ -4792,6 +4808,30 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
return ret;
}
+static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout)
+{
+#define QP_ACK_TIMEOUT_MAX_HIP08 20
+#define QP_ACK_TIMEOUT_OFFSET 10
+#define QP_ACK_TIMEOUT_MAX 31
+
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+ if (*timeout > QP_ACK_TIMEOUT_MAX_HIP08) {
+ ibdev_warn(&hr_dev->ib_dev,
+ "Local ACK timeout shall be 0 to 20.\n");
+ return false;
+ }
+ *timeout += QP_ACK_TIMEOUT_OFFSET;
+ } else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) {
+ if (*timeout > QP_ACK_TIMEOUT_MAX) {
+ ibdev_warn(&hr_dev->ib_dev,
+ "Local ACK timeout shall be 0 to 31.\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask,
@@ -4801,6 +4841,7 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
int ret = 0;
+ u8 timeout;
if (attr_mask & IB_QP_AV) {
ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
@@ -4810,12 +4851,10 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
}
if (attr_mask & IB_QP_TIMEOUT) {
- if (attr->timeout < 31) {
- hr_reg_write(context, QPC_AT, attr->timeout);
+ timeout = attr->timeout;
+ if (check_qp_timeout_cfg_range(hr_dev, &timeout)) {
+ hr_reg_write(context, QPC_AT, timeout);
hr_reg_clear(qpc_mask, QPC_AT);
- } else {
- ibdev_warn(&hr_dev->ib_dev,
- "Local ACK timeout shall be 0 to 30.\n");
}
}
@@ -4872,7 +4911,9 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
if (attr_mask & IB_QP_MIN_RNR_TIMER) {
- hr_reg_write(context, QPC_MIN_RNR_TIME, attr->min_rnr_timer);
+ hr_reg_write(context, QPC_MIN_RNR_TIME,
+ hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ?
+ HNS_ROCE_RNR_TIMER_10NS : attr->min_rnr_timer);
hr_reg_clear(qpc_mask, QPC_MIN_RNR_TIME);
}
@@ -5489,6 +5530,16 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
hr_reg_write(cq_context, CQC_CQ_MAX_CNT, cq_count);
hr_reg_clear(cqc_mask, CQC_CQ_MAX_CNT);
+
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+ if (cq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
+ dev_info(hr_dev->dev,
+ "cq_period(%u) reached the upper limit, adjusted to 65.\n",
+ cq_period);
+ cq_period = HNS_ROCE_MAX_CQ_PERIOD;
+ }
+ cq_period *= HNS_ROCE_CLOCK_ADJUST;
+ }
hr_reg_write(cq_context, CQC_CQ_PERIOD, cq_period);
hr_reg_clear(cqc_mask, CQC_CQ_PERIOD);
@@ -5884,6 +5935,15 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
hr_reg_write(eqc, EQC_EQ_PROD_INDX, HNS_ROCE_EQ_INIT_PROD_IDX);
hr_reg_write(eqc, EQC_EQ_MAX_CNT, eq->eq_max_cnt);
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+ if (eq->eq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
+ dev_info(hr_dev->dev, "eq_period(%u) reached the upper limit, adjusted to 65.\n",
+ eq->eq_period);
+ eq->eq_period = HNS_ROCE_MAX_EQ_PERIOD;
+ }
+ eq->eq_period *= HNS_ROCE_CLOCK_ADJUST;
+ }
+
hr_reg_write(eqc, EQC_EQ_PERIOD, eq->eq_period);
hr_reg_write(eqc, EQC_EQE_REPORT_TIMER, HNS_ROCE_EQ_INIT_REPORT_TIMER);
hr_reg_write(eqc, EQC_EQE_BA_L, bt_ba >> 3);
@@ -6387,10 +6447,8 @@ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
if (!hr_dev)
return 0;
- hr_dev->is_reset = true;
hr_dev->active = false;
hr_dev->dis_db = true;
-
hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;
return 0;
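
(Editorial note, not part of the patch: the hunk above replaces an open-coded reset-status check with read_poll_timeout() from <linux/iopoll.h>. A minimal sketch of that polling pattern follows; my_device and my_read_counter are made-up names standing in for the driver's accessor.)

#include <linux/iopoll.h>

struct my_device;                                    /* hypothetical type */
unsigned long my_read_counter(struct my_device *d);  /* hypothetical accessor */

static int wait_for_counter_bump(struct my_device *dev, unsigned long snapshot)
{
	unsigned long val;

	/* Poll every 1 ms, give up after 1 s; returns 0 or -ETIMEDOUT. */
	return read_poll_timeout(my_read_counter, val, val > snapshot,
				 1000, 1000000, false, dev);
}
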
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 4d904d5e82be..35c61da7ba15 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -1444,6 +1444,14 @@ struct hns_roce_dip {
struct list_head node; /* all dips are on a list */
};
+/* only for RNR timeout issue of HIP08 */
+#define HNS_ROCE_CLOCK_ADJUST 1000
+#define HNS_ROCE_MAX_CQ_PERIOD 65
+#define HNS_ROCE_MAX_EQ_PERIOD 65
+#define HNS_ROCE_RNR_TIMER_10NS 1
+#define HNS_ROCE_1US_CFG 999
+#define HNS_ROCE_1NS_CFG 0
+
#define HNS_ROCE_AEQ_DEFAULT_BURST_NUM 0x0
#define HNS_ROCE_AEQ_DEFAULT_INTERVAL 0x0
#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x0
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 6eee9deadd12..e64ef6903fb4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -259,7 +259,7 @@ static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
static void free_srq_wrid(struct hns_roce_srq *srq)
{
- kfree(srq->wrid);
+ kvfree(srq->wrid);
srq->wrid = NULL;
}
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index 4108dcabece2..b4c657f5f2f9 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -60,6 +60,8 @@ static void irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq)
{
struct irdma_cq *cq = iwcq->back_cq;
+ if (!cq->user_mode)
+ cq->armed = false;
if (cq->ibcq.comp_handler)
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
@@ -146,6 +148,7 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
qp->flush_code = FLUSH_PROT_ERR;
break;
case IRDMA_AE_AMP_BAD_QP:
+ case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
qp->flush_code = FLUSH_LOC_QP_OP_ERR;
break;
case IRDMA_AE_AMP_BAD_STAG_KEY:
@@ -156,7 +159,6 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
case IRDMA_AE_PRIV_OPERATION_DENIED:
case IRDMA_AE_IB_INVALID_REQUEST:
case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
- case IRDMA_AE_IB_REMOTE_OP_ERROR:
qp->flush_code = FLUSH_REM_ACCESS_ERR;
qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
break;
@@ -184,6 +186,9 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
qp->flush_code = FLUSH_MW_BIND_ERR;
break;
+ case IRDMA_AE_IB_REMOTE_OP_ERROR:
+ qp->flush_code = FLUSH_REM_OP_ERR;
+ break;
default:
qp->flush_code = FLUSH_FATAL_ERR;
break;
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
index 91a497139ba3..cb218cab79ac 100644
--- a/drivers/infiniband/hw/irdma/main.h
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -542,6 +542,7 @@ int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
void (*callback_fcn)(struct irdma_cqp_request *cqp_request),
void *cb_param);
void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request);
+bool irdma_cq_empty(struct irdma_cq *iwcq);
int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event,
void *ptr);
int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event,
diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c
index aeeb1c310965..fed49da770f3 100644
--- a/drivers/infiniband/hw/irdma/pble.c
+++ b/drivers/infiniband/hw/irdma/pble.c
@@ -25,8 +25,7 @@ void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
list_del(&chunk->list);
if (chunk->type == PBLE_SD_PAGED)
irdma_pble_free_paged_mem(chunk);
- if (chunk->bitmapbuf)
- kfree(chunk->bitmapmem.va);
+ bitmap_free(chunk->bitmapbuf);
kfree(chunk->chunkmem.va);
}
}
@@ -283,7 +282,6 @@ add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
"PBLE: next_fpm_addr = %llx chunk_size[%llu] = 0x%llx\n",
pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
pble_rsrc->unallocated_pble -= (u32)(chunk->size >> 3);
- list_add(&chunk->list, &pble_rsrc->pinfo.clist);
sd_reg_val = (sd_entry_type == IRDMA_SD_TYPE_PAGED) ?
sd_entry->u.pd_table.pd_page_addr.pa :
sd_entry->u.bp.addr.pa;
@@ -295,12 +293,12 @@ add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
goto error;
}
+ list_add(&chunk->list, &pble_rsrc->pinfo.clist);
sd_entry->valid = true;
return 0;
error:
- if (chunk->bitmapbuf)
- kfree(chunk->bitmapmem.va);
+ bitmap_free(chunk->bitmapbuf);
kfree(chunk->chunkmem.va);
return ret_code;
diff --git a/drivers/infiniband/hw/irdma/pble.h b/drivers/infiniband/hw/irdma/pble.h
index e1b3b8118a2c..aa20827dcc9d 100644
--- a/drivers/infiniband/hw/irdma/pble.h
+++ b/drivers/infiniband/hw/irdma/pble.h
@@ -78,7 +78,6 @@ struct irdma_chunk {
u32 pg_cnt;
enum irdma_alloc_type type;
struct irdma_sc_dev *dev;
- struct irdma_virt_mem bitmapmem;
struct irdma_virt_mem chunkmem;
};
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index 8b42c43fc14f..398736d8c78a 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -2239,15 +2239,10 @@ enum irdma_status_code irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift;
- pchunk->bitmapmem.size = sizeofbitmap >> 3;
- pchunk->bitmapmem.va = kzalloc(pchunk->bitmapmem.size, GFP_KERNEL);
-
- if (!pchunk->bitmapmem.va)
+ pchunk->bitmapbuf = bitmap_zalloc(sizeofbitmap, GFP_KERNEL);
+ if (!pchunk->bitmapbuf)
return IRDMA_ERR_NO_MEMORY;
- pchunk->bitmapbuf = pchunk->bitmapmem.va;
- bitmap_zero(pchunk->bitmapbuf, sizeofbitmap);
-
pchunk->sizeofbitmap = sizeofbitmap;
/* each pble is 8 bytes hence shift by 3 */
pprm->total_pble_alloc += pchunk->size >> 3;
@@ -2491,3 +2486,18 @@ void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
ibevent.element.qp = &iwqp->ibqp;
iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
}
+
+bool irdma_cq_empty(struct irdma_cq *iwcq)
+{
+ struct irdma_cq_uk *ukcq;
+ u64 qword3;
+ __le64 *cqe;
+ u8 polarity;
+
+ ukcq = &iwcq->sc_cq.cq_uk;
+ cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
+ get_64bit_val(cqe, 24, &qword3);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+
+ return polarity != ukcq->polarity;
+}
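
(Editorial note, not part of the patch: the irdma hunks above switch the PBLE chunk bitmap from a hand-rolled kzalloc()/bitmap_zero() pair to bitmap_zalloc()/bitmap_free(). A minimal sketch of that API, with hypothetical names, for reference.)

#include <linux/bitmap.h>
#include <linux/slab.h>

/* Hypothetical helper: allocate a zeroed bitmap tracking `nbits` resources. */
static unsigned long *demo_alloc_map(unsigned int nbits)
{
	/* Sizes and zeroes the map in one call. */
	return bitmap_zalloc(nbits, GFP_KERNEL);
}

static void demo_free_map(unsigned long *map)
{
	bitmap_free(map);	/* safe to call with NULL, like kfree() */
}
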
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 0f66e809d418..8cd5f9261692 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -3584,18 +3584,31 @@ static int irdma_req_notify_cq(struct ib_cq *ibcq,
struct irdma_cq *iwcq;
struct irdma_cq_uk *ukcq;
unsigned long flags;
- enum irdma_cmpl_notify cq_notify = IRDMA_CQ_COMPL_EVENT;
+ enum irdma_cmpl_notify cq_notify;
+ bool promo_event = false;
+ int ret = 0;
+ cq_notify = notify_flags == IB_CQ_SOLICITED ?
+ IRDMA_CQ_COMPL_SOLICITED : IRDMA_CQ_COMPL_EVENT;
iwcq = to_iwcq(ibcq);
ukcq = &iwcq->sc_cq.cq_uk;
- if (notify_flags == IB_CQ_SOLICITED)
- cq_notify = IRDMA_CQ_COMPL_SOLICITED;
spin_lock_irqsave(&iwcq->lock, flags);
- irdma_uk_cq_request_notification(ukcq, cq_notify);
+ /* Only promote to arm the CQ for any event if the last arm event was solicited. */
+ if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED && notify_flags != IB_CQ_SOLICITED)
+ promo_event = true;
+
+ if (!iwcq->armed || promo_event) {
+ iwcq->armed = true;
+ iwcq->last_notify = cq_notify;
+ irdma_uk_cq_request_notification(ukcq, cq_notify);
+ }
+
+ if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && !irdma_cq_empty(iwcq))
+ ret = 1;
spin_unlock_irqrestore(&iwcq->lock, flags);
- return 0;
+ return ret;
}
static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
index 5c244cd321a3..d0fdef8d09ea 100644
--- a/drivers/infiniband/hw/irdma/verbs.h
+++ b/drivers/infiniband/hw/irdma/verbs.h
@@ -110,6 +110,8 @@ struct irdma_cq {
u16 cq_size;
u16 cq_num;
bool user_mode;
+ bool armed;
+ enum irdma_cmpl_notify last_notify;
u32 polled_cmpls;
u32 cq_mem_size;
struct irdma_dma_mem kmem;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index e636e954f6bf..4a7a56ed740b 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -664,7 +664,6 @@ struct mlx5_ib_mr {
/* User MR data */
struct mlx5_cache_ent *cache_ent;
- struct ib_umem *umem;
/* This is zero'd when the MR is allocated */
union {
@@ -676,7 +675,7 @@ struct mlx5_ib_mr {
struct list_head list;
};
- /* Used only by kernel MRs (umem == NULL) */
+ /* Used only by kernel MRs */
struct {
void *descs;
void *descs_alloc;
@@ -697,8 +696,9 @@ struct mlx5_ib_mr {
int data_length;
};
- /* Used only by User MRs (umem != NULL) */
+ /* Used only by User MRs */
struct {
+ struct ib_umem *umem;
unsigned int page_shift;
/* Current access_flags */
int access_flags;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 157d862fb864..63e2129f1142 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1904,19 +1904,18 @@ err:
return ret;
}
-static void
-mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
+static void mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
- if (!mr->umem && mr->descs) {
- struct ib_device *device = mr->ibmr.device;
- int size = mr->max_descs * mr->desc_size;
- struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+ int size = mr->max_descs * mr->desc_size;
- dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
- DMA_TO_DEVICE);
- kfree(mr->descs_alloc);
- mr->descs = NULL;
- }
+ if (!mr->descs)
+ return;
+
+ dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
+ DMA_TO_DEVICE);
+ kfree(mr->descs_alloc);
+ mr->descs = NULL;
}
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
@@ -1992,7 +1991,8 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
if (mr->cache_ent) {
mlx5_mr_cache_free(dev, mr);
} else {
- mlx5_free_priv_descs(mr);
+ if (!udata)
+ mlx5_free_priv_descs(mr);
kfree(mr);
}
return 0;
@@ -2079,7 +2079,6 @@ static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
if (err)
goto err_free_in;
- mr->umem = NULL;
kfree(in);
return mr;
@@ -2206,7 +2205,6 @@ static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
}
mr->ibmr.device = pd->device;
- mr->umem = NULL;
switch (mr_type) {
case IB_MR_TYPE_MEM_REG:
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index ac11943a5ddb..bf2f30d67949 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -941,7 +941,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
&addrlimit) ||
addrlimit > type_max(typeof(pkt->addrlimit))) {
ret = -EINVAL;
- goto free_pbc;
+ goto free_pkt;
}
pkt->addrlimit = addrlimit;
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 975321812c87..54b8711321c1 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -359,6 +359,7 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
err2:
rxe_queue_cleanup(qp->sq.queue);
+ qp->sq.queue = NULL;
err1:
qp->pd = NULL;
qp->rcq = NULL;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
index f7e459fe68be..76e4352fe3f6 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
@@ -19,7 +19,7 @@ void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
int cpu;
cpu = raw_smp_processor_id();
- s = this_cpu_ptr(stats->pcpu_stats);
+ s = get_cpu_ptr(stats->pcpu_stats);
if (con->cpu != cpu) {
s->cpu_migr.to++;
@@ -27,14 +27,16 @@ void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
s = per_cpu_ptr(stats->pcpu_stats, con->cpu);
atomic_inc(&s->cpu_migr.from);
}
+ put_cpu_ptr(stats->pcpu_stats);
}
void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *stats)
{
struct rtrs_clt_stats_pcpu *s;
- s = this_cpu_ptr(stats->pcpu_stats);
+ s = get_cpu_ptr(stats->pcpu_stats);
s->rdma.failover_cnt++;
+ put_cpu_ptr(stats->pcpu_stats);
}
int rtrs_clt_stats_migration_from_cnt_to_str(struct rtrs_clt_stats *stats, char *buf)
@@ -169,9 +171,10 @@ static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats,
{
struct rtrs_clt_stats_pcpu *s;
- s = this_cpu_ptr(stats->pcpu_stats);
+ s = get_cpu_ptr(stats->pcpu_stats);
s->rdma.dir[d].cnt++;
s->rdma.dir[d].size_total += size;
+ put_cpu_ptr(stats->pcpu_stats);
}
void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
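
(Editorial note, not part of the patch: the rtrs-clt hunks above replace this_cpu_ptr() with the get_cpu_ptr()/put_cpu_ptr() pair so the per-CPU update runs with preemption disabled. An illustrative sketch with a hypothetical counter.)

#include <linux/percpu.h>
#include <linux/types.h>

static DEFINE_PER_CPU(u64, demo_counter);	/* hypothetical per-CPU stat */

static void demo_count_event(void)
{
	u64 *ctr;

	/* get_cpu_ptr() disables preemption until put_cpu_ptr(), so the
	 * task cannot migrate to another CPU mid-update. */
	ctr = get_cpu_ptr(&demo_counter);
	(*ctr)++;
	put_cpu_ptr(&demo_counter);
}
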
diff --git a/drivers/input/joystick/spaceball.c b/drivers/input/joystick/spaceball.c
index 429411c6c0a8..a85a4f33aea8 100644
--- a/drivers/input/joystick/spaceball.c
+++ b/drivers/input/joystick/spaceball.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/input.h>
#include <linux/serio.h>
+#include <asm/unaligned.h>
#define DRIVER_DESC "SpaceTec SpaceBall 2003/3003/4000 FLX driver"
@@ -75,9 +76,15 @@ static void spaceball_process_packet(struct spaceball* spaceball)
case 'D': /* Ball data */
if (spaceball->idx != 15) return;
- for (i = 0; i < 6; i++)
+ /*
+ * Skip the first three bytes; read six axes' worth of data.
+ * Axis values are signed 16-bit big-endian.
+ */
+ data += 3;
+ for (i = 0; i < ARRAY_SIZE(spaceball_axes); i++) {
input_report_abs(dev, spaceball_axes[i],
- (__s16)((data[2 * i + 3] << 8) | data[2 * i + 2]));
+ (__s16)get_unaligned_be16(&data[i * 2]));
+ }
break;
case 'K': /* Button data */
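
(Editorial note, not part of the patch: the spaceball hunk above decodes each axis with get_unaligned_be16() instead of open-coded shifts. A standalone sketch of that conversion, with hypothetical names.)

#include <linux/types.h>
#include <asm/unaligned.h>

/* Hypothetical helper: decode six signed 16-bit big-endian axis values. */
static void demo_decode_axes(const u8 *data, s16 axes[6])
{
	int i;

	for (i = 0; i < 6; i++)
		axes[i] = (s16)get_unaligned_be16(&data[i * 2]);
}
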
diff --git a/drivers/input/misc/iqs626a.c b/drivers/input/misc/iqs626a.c
index d57e996732cf..23b5dd9552dc 100644
--- a/drivers/input/misc/iqs626a.c
+++ b/drivers/input/misc/iqs626a.c
@@ -456,9 +456,10 @@ struct iqs626_private {
unsigned int suspend_mode;
};
-static int iqs626_parse_events(struct iqs626_private *iqs626,
- const struct fwnode_handle *ch_node,
- enum iqs626_ch_id ch_id)
+static noinline_for_stack int
+iqs626_parse_events(struct iqs626_private *iqs626,
+ const struct fwnode_handle *ch_node,
+ enum iqs626_ch_id ch_id)
{
struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
struct i2c_client *client = iqs626->client;
@@ -604,9 +605,10 @@ static int iqs626_parse_events(struct iqs626_private *iqs626,
return 0;
}
-static int iqs626_parse_ati_target(struct iqs626_private *iqs626,
- const struct fwnode_handle *ch_node,
- enum iqs626_ch_id ch_id)
+static noinline_for_stack int
+iqs626_parse_ati_target(struct iqs626_private *iqs626,
+ const struct fwnode_handle *ch_node,
+ enum iqs626_ch_id ch_id)
{
struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
struct i2c_client *client = iqs626->client;
@@ -885,9 +887,10 @@ static int iqs626_parse_trackpad(struct iqs626_private *iqs626,
return 0;
}
-static int iqs626_parse_channel(struct iqs626_private *iqs626,
- const struct fwnode_handle *ch_node,
- enum iqs626_ch_id ch_id)
+static noinline_for_stack int
+iqs626_parse_channel(struct iqs626_private *iqs626,
+ const struct fwnode_handle *ch_node,
+ enum iqs626_ch_id ch_id)
{
struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
struct i2c_client *client = iqs626->client;
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 4ff5cd2a6d8d..3d17a0b3fe51 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -542,6 +542,7 @@ static struct xenbus_driver xenkbd_driver = {
.remove = xenkbd_remove,
.resume = xenkbd_resume,
.otherend_changed = xenkbd_backend_changed,
+ .not_essential = true,
};
static int __init xenkbd_init(void)
diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c
index bfa26651c0be..627048bc6a12 100644
--- a/drivers/input/mouse/appletouch.c
+++ b/drivers/input/mouse/appletouch.c
@@ -916,6 +916,8 @@ static int atp_probe(struct usb_interface *iface,
set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
set_bit(BTN_LEFT, input_dev->keybit);
+ INIT_WORK(&dev->work, atp_reinit);
+
error = input_register_device(dev->input);
if (error)
goto err_free_buffer;
@@ -923,8 +925,6 @@ static int atp_probe(struct usb_interface *iface,
/* save our data pointer in this interface device */
usb_set_intfdata(iface, dev);
- INIT_WORK(&dev->work, atp_reinit);
-
return 0;
err_free_buffer:
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 956d9cd34796..ece97f8c6a3e 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1588,7 +1588,13 @@ static const struct dmi_system_id no_hw_res_dmi_table[] = {
*/
static int elantech_change_report_id(struct psmouse *psmouse)
{
- unsigned char param[2] = { 0x10, 0x03 };
+ /*
+ * NOTE: the code expects to receive param[] as an array of 3
+ * items (see __ps2_command()), even though in this case only 2 are
+ * actually needed. Make sure the array size is 3 to avoid potential
+ * stack out-of-bounds accesses.
+ */
+ unsigned char param[3] = { 0x10, 0x03 };
if (elantech_write_reg_params(psmouse, 0x7, param) ||
elantech_read_reg_params(psmouse, 0x7, param) ||
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index aedd05541044..148a7c5fd0e2 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -995,6 +995,24 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
{ }
};
+static const struct dmi_system_id i8042_dmi_probe_defer_table[] __initconst = {
+ {
+ /* ASUS ZenBook UX425UA */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"),
+ },
+ },
+ {
+ /* ASUS ZenBook UM325UA */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
+ },
+ },
+ { }
+};
+
#endif /* CONFIG_X86 */
#ifdef CONFIG_PNP
@@ -1315,6 +1333,9 @@ static int __init i8042_platform_init(void)
if (dmi_check_system(i8042_dmi_kbdreset_table))
i8042_kbdreset = true;
+ if (dmi_check_system(i8042_dmi_probe_defer_table))
+ i8042_probe_defer = true;
+
/*
* A20 was already enabled during early kernel init. But some buggy
* BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 0b9f1d0a8f8b..3fc0a89cc785 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -45,6 +45,10 @@ static bool i8042_unlock;
module_param_named(unlock, i8042_unlock, bool, 0);
MODULE_PARM_DESC(unlock, "Ignore keyboard lock.");
+static bool i8042_probe_defer;
+module_param_named(probe_defer, i8042_probe_defer, bool, 0);
+MODULE_PARM_DESC(probe_defer, "Allow deferred probing.");
+
enum i8042_controller_reset_mode {
I8042_RESET_NEVER,
I8042_RESET_ALWAYS,
@@ -711,7 +715,7 @@ static int i8042_set_mux_mode(bool multiplex, unsigned char *mux_version)
* LCS/Telegraphics.
*/
-static int __init i8042_check_mux(void)
+static int i8042_check_mux(void)
{
unsigned char mux_version;
@@ -740,10 +744,10 @@ static int __init i8042_check_mux(void)
/*
* The following is used to test AUX IRQ delivery.
*/
-static struct completion i8042_aux_irq_delivered __initdata;
-static bool i8042_irq_being_tested __initdata;
+static struct completion i8042_aux_irq_delivered;
+static bool i8042_irq_being_tested;
-static irqreturn_t __init i8042_aux_test_irq(int irq, void *dev_id)
+static irqreturn_t i8042_aux_test_irq(int irq, void *dev_id)
{
unsigned long flags;
unsigned char str, data;
@@ -770,7 +774,7 @@ static irqreturn_t __init i8042_aux_test_irq(int irq, void *dev_id)
 * verifies success by reading CTR. Used when testing for presence of AUX
* port.
*/
-static int __init i8042_toggle_aux(bool on)
+static int i8042_toggle_aux(bool on)
{
unsigned char param;
int i;
@@ -798,7 +802,7 @@ static int __init i8042_toggle_aux(bool on)
* the presence of an AUX interface.
*/
-static int __init i8042_check_aux(void)
+static int i8042_check_aux(void)
{
int retval = -1;
bool irq_registered = false;
@@ -1005,7 +1009,7 @@ static int i8042_controller_init(void)
if (i8042_command(&ctr[n++ % 2], I8042_CMD_CTL_RCTR)) {
pr_err("Can't read CTR while initializing i8042\n");
- return -EIO;
+ return i8042_probe_defer ? -EPROBE_DEFER : -EIO;
}
} while (n < 2 || ctr[0] != ctr[1]);
@@ -1320,7 +1324,7 @@ static void i8042_shutdown(struct platform_device *dev)
i8042_controller_reset(false);
}
-static int __init i8042_create_kbd_port(void)
+static int i8042_create_kbd_port(void)
{
struct serio *serio;
struct i8042_port *port = &i8042_ports[I8042_KBD_PORT_NO];
@@ -1349,7 +1353,7 @@ static int __init i8042_create_kbd_port(void)
return 0;
}
-static int __init i8042_create_aux_port(int idx)
+static int i8042_create_aux_port(int idx)
{
struct serio *serio;
int port_no = idx < 0 ? I8042_AUX_PORT_NO : I8042_MUX_PORT_NO + idx;
@@ -1386,13 +1390,13 @@ static int __init i8042_create_aux_port(int idx)
return 0;
}
-static void __init i8042_free_kbd_port(void)
+static void i8042_free_kbd_port(void)
{
kfree(i8042_ports[I8042_KBD_PORT_NO].serio);
i8042_ports[I8042_KBD_PORT_NO].serio = NULL;
}
-static void __init i8042_free_aux_ports(void)
+static void i8042_free_aux_ports(void)
{
int i;
@@ -1402,7 +1406,7 @@ static void __init i8042_free_aux_ports(void)
}
}
-static void __init i8042_register_ports(void)
+static void i8042_register_ports(void)
{
int i;
@@ -1443,7 +1447,7 @@ static void i8042_free_irqs(void)
i8042_aux_irq_registered = i8042_kbd_irq_registered = false;
}
-static int __init i8042_setup_aux(void)
+static int i8042_setup_aux(void)
{
int (*aux_enable)(void);
int error;
@@ -1485,7 +1489,7 @@ static int __init i8042_setup_aux(void)
return error;
}
-static int __init i8042_setup_kbd(void)
+static int i8042_setup_kbd(void)
{
int error;
@@ -1535,7 +1539,7 @@ static int i8042_kbd_bind_notifier(struct notifier_block *nb,
return 0;
}
-static int __init i8042_probe(struct platform_device *dev)
+static int i8042_probe(struct platform_device *dev)
{
int error;
@@ -1600,6 +1604,7 @@ static struct platform_driver i8042_driver = {
.pm = &i8042_pm_ops,
#endif
},
+ .probe = i8042_probe,
.remove = i8042_remove,
.shutdown = i8042_shutdown,
};
@@ -1610,7 +1615,6 @@ static struct notifier_block i8042_kbd_bind_notifier_block = {
static int __init i8042_init(void)
{
- struct platform_device *pdev;
int err;
dbg_init();
@@ -1626,17 +1630,29 @@ static int __init i8042_init(void)
/* Set this before creating the dev to allow i8042_command to work right away */
i8042_present = true;
- pdev = platform_create_bundle(&i8042_driver, i8042_probe, NULL, 0, NULL, 0);
- if (IS_ERR(pdev)) {
- err = PTR_ERR(pdev);
+ err = platform_driver_register(&i8042_driver);
+ if (err)
goto err_platform_exit;
+
+ i8042_platform_device = platform_device_alloc("i8042", -1);
+ if (!i8042_platform_device) {
+ err = -ENOMEM;
+ goto err_unregister_driver;
}
+ err = platform_device_add(i8042_platform_device);
+ if (err)
+ goto err_free_device;
+
bus_register_notifier(&serio_bus, &i8042_kbd_bind_notifier_block);
panic_blink = i8042_panic_blink;
return 0;
+err_free_device:
+ platform_device_put(i8042_platform_device);
+err_unregister_driver:
+ platform_driver_unregister(&i8042_driver);
err_platform_exit:
i8042_platform_exit();
return err;
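
(Editorial note, not part of the patch: the i8042 hunk above open-codes device creation instead of platform_create_bundle() so probing can be deferred. A minimal sketch of the platform_driver_register() / platform_device_alloc() / platform_device_add() pattern and its error unwinding; demo_driver and the "demo" name are hypothetical.)

#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_driver demo_driver = {
	.driver = { .name = "demo" },
};

static struct platform_device *demo_pdev;

static int __init demo_init(void)
{
	int err;

	err = platform_driver_register(&demo_driver);
	if (err)
		return err;

	demo_pdev = platform_device_alloc("demo", -1);
	if (!demo_pdev) {
		err = -ENOMEM;
		goto err_unregister_driver;
	}

	err = platform_device_add(demo_pdev);
	if (err)
		goto err_free_device;

	return 0;

err_free_device:
	platform_device_put(demo_pdev);	/* not _del(): it was never added */
err_unregister_driver:
	platform_driver_unregister(&demo_driver);
	return err;
}
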
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 05de92c0293b..eb66cd2689b7 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -1882,7 +1882,7 @@ static int mxt_read_info_block(struct mxt_data *data)
if (error) {
dev_err(&client->dev, "Error %d parsing object table\n", error);
mxt_free_object_table(data);
- goto err_free_mem;
+ return error;
}
data->object_table = (struct mxt_object *)(id_buf + MXT_OBJECT_START);
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 7e13a66a8a95..879a4d984c90 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -117,6 +117,19 @@
#define ELAN_POWERON_DELAY_USEC 500
#define ELAN_RESET_DELAY_MSEC 20
+/* FW boot code version */
+#define BC_VER_H_BYTE_FOR_EKTH3900x1_I2C 0x72
+#define BC_VER_H_BYTE_FOR_EKTH3900x2_I2C 0x82
+#define BC_VER_H_BYTE_FOR_EKTH3900x3_I2C 0x92
+#define BC_VER_H_BYTE_FOR_EKTH5312x1_I2C 0x6D
+#define BC_VER_H_BYTE_FOR_EKTH5312x2_I2C 0x6E
+#define BC_VER_H_BYTE_FOR_EKTH5312cx1_I2C 0x77
+#define BC_VER_H_BYTE_FOR_EKTH5312cx2_I2C 0x78
+#define BC_VER_H_BYTE_FOR_EKTH5312x1_I2C_USB 0x67
+#define BC_VER_H_BYTE_FOR_EKTH5312x2_I2C_USB 0x68
+#define BC_VER_H_BYTE_FOR_EKTH5312cx1_I2C_USB 0x74
+#define BC_VER_H_BYTE_FOR_EKTH5312cx2_I2C_USB 0x75
+
enum elants_chip_id {
EKTH3500,
EKTF3624,
@@ -736,6 +749,37 @@ static int elants_i2c_validate_remark_id(struct elants_data *ts,
return 0;
}
+static bool elants_i2c_should_check_remark_id(struct elants_data *ts)
+{
+ struct i2c_client *client = ts->client;
+ const u8 bootcode_version = ts->iap_version;
+ bool check;
+
+ /* I2C eKTH3900 and eKTH5312 do NOT support Remark ID */
+ if ((bootcode_version == BC_VER_H_BYTE_FOR_EKTH3900x1_I2C) ||
+ (bootcode_version == BC_VER_H_BYTE_FOR_EKTH3900x2_I2C) ||
+ (bootcode_version == BC_VER_H_BYTE_FOR_EKTH3900x3_I2C) ||
+ (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312x1_I2C) ||
+ (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312x2_I2C) ||
+ (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312cx1_I2C) ||
+ (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312cx2_I2C) ||
+ (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312x1_I2C_USB) ||
+ (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312x2_I2C_USB) ||
+ (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312cx1_I2C_USB) ||
+ (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312cx2_I2C_USB)) {
+ dev_dbg(&client->dev,
+ "eKTH3900/eKTH5312(0x%02x) are not support remark id\n",
+ bootcode_version);
+ check = false;
+ } else if (bootcode_version >= 0x60) {
+ check = true;
+ } else {
+ check = false;
+ }
+
+ return check;
+}
+
static int elants_i2c_do_update_firmware(struct i2c_client *client,
const struct firmware *fw,
bool force)
@@ -749,7 +793,7 @@ static int elants_i2c_do_update_firmware(struct i2c_client *client,
u16 send_id;
int page, n_fw_pages;
int error;
- bool check_remark_id = ts->iap_version >= 0x60;
+ bool check_remark_id = elants_i2c_should_check_remark_id(ts);
/* Recovery mode detection! */
if (force) {
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index b5cc91788195..aaa3c455e01e 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -102,6 +102,7 @@ static const struct goodix_chip_id goodix_chip_ids[] = {
{ .id = "911", .data = &gt911_chip_data },
{ .id = "9271", .data = &gt911_chip_data },
{ .id = "9110", .data = &gt911_chip_data },
+ { .id = "9111", .data = &gt911_chip_data },
{ .id = "927", .data = &gt911_chip_data },
{ .id = "928", .data = &gt911_chip_data },
@@ -650,10 +651,16 @@ int goodix_reset_no_int_sync(struct goodix_ts_data *ts)
usleep_range(6000, 10000); /* T4: > 5ms */
- /* end select I2C slave addr */
- error = gpiod_direction_input(ts->gpiod_rst);
- if (error)
- goto error;
+ /*
+ * Put the reset pin back into input / high-impedance mode to save
+ * power. Only do this in the non-ACPI case since some ACPI boards
+ * don't have a pull-up, so there the reset pin must stay active-high.
+ */
+ if (ts->irq_pin_access_method == IRQ_PIN_ACCESS_GPIO) {
+ error = gpiod_direction_input(ts->gpiod_rst);
+ if (error)
+ goto error;
+ }
return 0;
@@ -787,6 +794,14 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
return -EINVAL;
}
+ /*
+ * Normally we put the reset pin in input / high-impedance mode to save
+ * power. But some x86/ACPI boards don't have a pull-up, so for the ACPI
+ * case, leave the pin as is. This results in the pin not being touched
+ * at all on x86/ACPI boards, except when needed for error recovery.
+ */
+ ts->gpiod_rst_flags = GPIOD_ASIS;
+
return devm_acpi_dev_add_driver_gpios(dev, gpio_mapping);
}
#else
@@ -812,6 +827,12 @@ static int goodix_get_gpio_config(struct goodix_ts_data *ts)
return -EINVAL;
dev = &ts->client->dev;
+ /*
+ * By default we request the reset pin as input, leaving it in
+ * high-impedance when not resetting the controller to save power.
+ */
+ ts->gpiod_rst_flags = GPIOD_IN;
+
ts->avdd28 = devm_regulator_get(dev, "AVDD28");
if (IS_ERR(ts->avdd28)) {
error = PTR_ERR(ts->avdd28);
@@ -849,7 +870,7 @@ retry_get_irq_gpio:
ts->gpiod_int = gpiod;
/* Get the reset line GPIO pin number */
- gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_RST_NAME, GPIOD_IN);
+ gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_RST_NAME, ts->gpiod_rst_flags);
if (IS_ERR(gpiod)) {
error = PTR_ERR(gpiod);
if (error != -EPROBE_DEFER)
diff --git a/drivers/input/touchscreen/goodix.h b/drivers/input/touchscreen/goodix.h
index 62138f930d1a..02065d1c3263 100644
--- a/drivers/input/touchscreen/goodix.h
+++ b/drivers/input/touchscreen/goodix.h
@@ -87,6 +87,7 @@ struct goodix_ts_data {
struct gpio_desc *gpiod_rst;
int gpio_count;
int gpio_int_idx;
+ enum gpiod_flags gpiod_rst_flags;
char id[GOODIX_ID_MAX_LEN + 1];
char cfg_name[64];
u16 version;
diff --git a/drivers/input/touchscreen/goodix_fwupload.c b/drivers/input/touchscreen/goodix_fwupload.c
index c1e7a2413078..191d4f38d991 100644
--- a/drivers/input/touchscreen/goodix_fwupload.c
+++ b/drivers/input/touchscreen/goodix_fwupload.c
@@ -207,7 +207,7 @@ static int goodix_firmware_upload(struct goodix_ts_data *ts)
error = goodix_reset_no_int_sync(ts);
if (error)
- return error;
+ goto release;
error = goodix_enter_upload_mode(ts->client);
if (error)
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
index 13cbeb997cc1..58da08cc3d01 100644
--- a/drivers/iommu/amd/iommu_v2.c
+++ b/drivers/iommu/amd/iommu_v2.c
@@ -929,10 +929,8 @@ static int __init amd_iommu_v2_init(void)
{
int ret;
- pr_info("AMD IOMMUv2 driver by Joerg Roedel <jroedel@suse.de>\n");
-
if (!amd_iommu_v2_supported()) {
- pr_info("AMD IOMMUv2 functionality not available on this system\n");
+ pr_info("AMD IOMMUv2 functionality not available on this system - This is not a bug.\n");
/*
* Load anyway to provide the symbols to other modules
* which may use AMD IOMMUv2 optionally.
@@ -947,6 +945,8 @@ static int __init amd_iommu_v2_init(void)
amd_iommu_register_ppr_notifier(&ppr_nb);
+ pr_info("AMD IOMMUv2 loaded and initialized\n");
+
return 0;
out:
diff --git a/drivers/iommu/intel/cap_audit.c b/drivers/iommu/intel/cap_audit.c
index b39d223926a4..71596fc62822 100644
--- a/drivers/iommu/intel/cap_audit.c
+++ b/drivers/iommu/intel/cap_audit.c
@@ -144,6 +144,7 @@ static int cap_audit_static(struct intel_iommu *iommu, enum cap_audit_type type)
{
struct dmar_drhd_unit *d;
struct intel_iommu *i;
+ int rc = 0;
rcu_read_lock();
if (list_empty(&dmar_drhd_units))
@@ -169,11 +170,11 @@ static int cap_audit_static(struct intel_iommu *iommu, enum cap_audit_type type)
*/
if (intel_cap_smts_sanity() &&
!intel_cap_flts_sanity() && !intel_cap_slts_sanity())
- return -EOPNOTSUPP;
+ rc = -EOPNOTSUPP;
out:
rcu_read_unlock();
- return 0;
+ return rc;
}
int intel_cap_audit(enum cap_audit_type type, struct intel_iommu *iommu)
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 0bde0c8b4126..b6a8f3282411 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -1339,13 +1339,11 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
pte = &pte[pfn_level_offset(pfn, level)];
do {
- unsigned long level_pfn;
+ unsigned long level_pfn = pfn & level_mask(level);
if (!dma_pte_present(pte))
goto next;
- level_pfn = pfn & level_mask(level);
-
/* If range covers entire pagetable, free it */
if (start_pfn <= level_pfn &&
last_pfn >= level_pfn + level_size(level) - 1) {
@@ -1366,7 +1364,7 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
freelist);
}
next:
- pfn += level_size(level);
+ pfn = level_pfn + level_size(level);
} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
if (first_pte)
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 5cb260820eda..7f23ad61c094 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -200,8 +200,8 @@ static inline phys_addr_t rk_dte_pt_address(u32 dte)
#define DTE_HI_MASK2 GENMASK(7, 4)
#define DTE_HI_SHIFT1 24 /* shift bit 8 to bit 32 */
#define DTE_HI_SHIFT2 32 /* shift bit 4 to bit 36 */
-#define PAGE_DESC_HI_MASK1 GENMASK_ULL(39, 36)
-#define PAGE_DESC_HI_MASK2 GENMASK_ULL(35, 32)
+#define PAGE_DESC_HI_MASK1 GENMASK_ULL(35, 32)
+#define PAGE_DESC_HI_MASK2 GENMASK_ULL(39, 36)
static inline phys_addr_t rk_dte_pt_address_v2(u32 dte)
{
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index 3759dc36cc8f..2543ef65825b 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -707,7 +707,7 @@ static const struct irq_domain_ops aic_ipi_domain_ops = {
.free = aic_ipi_free,
};
-static int aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
+static int __init aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
{
struct irq_domain *ipi_domain;
int base_ipi;
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 80906bfec845..5b8d571c041d 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -232,16 +232,12 @@ static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
int hwirq, i;
mutex_lock(&msi_used_lock);
+ hwirq = bitmap_find_free_region(msi_used, PCI_MSI_DOORBELL_NR,
+ order_base_2(nr_irqs));
+ mutex_unlock(&msi_used_lock);
- hwirq = bitmap_find_next_zero_area(msi_used, PCI_MSI_DOORBELL_NR,
- 0, nr_irqs, 0);
- if (hwirq >= PCI_MSI_DOORBELL_NR) {
- mutex_unlock(&msi_used_lock);
+ if (hwirq < 0)
return -ENOSPC;
- }
-
- bitmap_set(msi_used, hwirq, nr_irqs);
- mutex_unlock(&msi_used_lock);
for (i = 0; i < nr_irqs; i++) {
irq_domain_set_info(domain, virq + i, hwirq + i,
@@ -250,7 +246,7 @@ static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
NULL, NULL);
}
- return hwirq;
+ return 0;
}
static void armada_370_xp_msi_free(struct irq_domain *domain,
@@ -259,7 +255,7 @@ static void armada_370_xp_msi_free(struct irq_domain *domain,
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
mutex_lock(&msi_used_lock);
- bitmap_clear(msi_used, d->hwirq, nr_irqs);
+ bitmap_release_region(msi_used, d->hwirq, order_base_2(nr_irqs));
mutex_unlock(&msi_used_lock);
}
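
(Editorial note, not part of the patch: the armada-370-xp hunks above allocate MSI vectors with bitmap_find_free_region(), which reserves a naturally aligned power-of-two block, and release them with bitmap_release_region(). A small sketch of that pairing with hypothetical names.)

#include <linux/bitmap.h>
#include <linux/log2.h>

#define DEMO_NR_VECTORS 64	/* hypothetical pool size */
static DECLARE_BITMAP(demo_used, DEMO_NR_VECTORS);

/* Returns the first bit of the reserved region, or a negative errno. */
static int demo_alloc_vectors(unsigned int nr_irqs)
{
	return bitmap_find_free_region(demo_used, DEMO_NR_VECTORS,
				       order_base_2(nr_irqs));
}

static void demo_free_vectors(int first, unsigned int nr_irqs)
{
	bitmap_release_region(demo_used, first, order_base_2(nr_irqs));
}
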
diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c
index f3c6855a4cef..18b77c3e6db4 100644
--- a/drivers/irqchip/irq-aspeed-scu-ic.c
+++ b/drivers/irqchip/irq-aspeed-scu-ic.c
@@ -76,8 +76,8 @@ static void aspeed_scu_ic_irq_handler(struct irq_desc *desc)
generic_handle_domain_irq(scu_ic->irq_domain,
bit - scu_ic->irq_shift);
- regmap_update_bits(scu_ic->scu, scu_ic->reg, mask,
- BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT));
+ regmap_write_bits(scu_ic->scu, scu_ic->reg, mask,
+ BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT));
}
chained_irq_exit(chip, desc);
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index d80e67a6aad2..bb6609cebdbc 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -238,6 +238,7 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
}
data->num_parent_irqs = platform_irq_count(pdev);
+ put_device(&pdev->dev);
if (data->num_parent_irqs <= 0) {
pr_err("invalid number of parent interrupts\n");
ret = -ENOMEM;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index eb0882d15366..0cb584d9815b 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -742,7 +742,7 @@ static struct its_collection *its_build_invall_cmd(struct its_node *its,
its_fixup_cmd(cmd);
- return NULL;
+ return desc->its_invall_cmd.col;
}
static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index d02b05a067d9..ff89b36267dd 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -9,6 +9,7 @@
#define pr_fmt(fmt) "irq-mips-gic: " fmt
+#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/cpuhotplug.h>
@@ -735,8 +736,7 @@ static int __init gic_of_init(struct device_node *node,
mips_gic_base = ioremap(gic_base, gic_len);
gicconfig = read_gic_config();
- gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
- gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS);
+ gic_shared_intrs = FIELD_GET(GIC_CONFIG_NUMINTERRUPTS, gicconfig);
gic_shared_intrs = (gic_shared_intrs + 1) * 8;
if (cpu_has_veic) {
diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c
index 63bac3f78863..ba4759b3e269 100644
--- a/drivers/irqchip/irq-nvic.c
+++ b/drivers/irqchip/irq-nvic.c
@@ -26,7 +26,7 @@
#define NVIC_ISER 0x000
#define NVIC_ICER 0x080
-#define NVIC_IPR 0x300
+#define NVIC_IPR 0x400
#define NVIC_MAX_BANKS 16
/*
diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c
index 55891e420446..a41b4b264594 100644
--- a/drivers/isdn/mISDN/core.c
+++ b/drivers/isdn/mISDN/core.c
@@ -381,7 +381,7 @@ mISDNInit(void)
err = mISDN_inittimer(&debug);
if (err)
goto error2;
- err = l1_init(&debug);
+ err = Isdnl1_Init(&debug);
if (err)
goto error3;
err = Isdnl2_Init(&debug);
@@ -395,7 +395,7 @@ mISDNInit(void)
error5:
Isdnl2_cleanup();
error4:
- l1_cleanup();
+ Isdnl1_cleanup();
error3:
mISDN_timer_cleanup();
error2:
@@ -408,7 +408,7 @@ static void mISDN_cleanup(void)
{
misdn_sock_cleanup();
Isdnl2_cleanup();
- l1_cleanup();
+ Isdnl1_cleanup();
mISDN_timer_cleanup();
class_unregister(&mISDN_class);
diff --git a/drivers/isdn/mISDN/core.h b/drivers/isdn/mISDN/core.h
index 23b44d303327..42599f49c189 100644
--- a/drivers/isdn/mISDN/core.h
+++ b/drivers/isdn/mISDN/core.h
@@ -60,8 +60,8 @@ struct Bprotocol *get_Bprotocol4id(u_int);
extern int mISDN_inittimer(u_int *);
extern void mISDN_timer_cleanup(void);
-extern int l1_init(u_int *);
-extern void l1_cleanup(void);
+extern int Isdnl1_Init(u_int *);
+extern void Isdnl1_cleanup(void);
extern int Isdnl2_Init(u_int *);
extern void Isdnl2_cleanup(void);
diff --git a/drivers/isdn/mISDN/layer1.c b/drivers/isdn/mISDN/layer1.c
index 98a3bc6c1700..7b31c25a550e 100644
--- a/drivers/isdn/mISDN/layer1.c
+++ b/drivers/isdn/mISDN/layer1.c
@@ -398,7 +398,7 @@ create_l1(struct dchannel *dch, dchannel_l1callback *dcb) {
EXPORT_SYMBOL(create_l1);
int
-l1_init(u_int *deb)
+Isdnl1_Init(u_int *deb)
{
debug = deb;
l1fsm_s.state_count = L1S_STATE_COUNT;
@@ -409,7 +409,7 @@ l1_init(u_int *deb)
}
void
-l1_cleanup(void)
+Isdnl1_cleanup(void)
{
mISDN_FsmFree(&l1fsm_s);
}
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 86b9e355c583..140f35dc0c45 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1139,6 +1139,7 @@ static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
static void cached_dev_detach_finish(struct work_struct *w)
{
struct cached_dev *dc = container_of(w, struct cached_dev, detach);
+ struct cache_set *c = dc->disk.c;
BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
BUG_ON(refcount_read(&dc->count));
@@ -1156,7 +1157,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
bcache_device_detach(&dc->disk);
list_move(&dc->list, &uncached_devices);
- calc_cached_dev_sectors(dc->disk.c);
+ calc_cached_dev_sectors(c);
clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 6319deccbe09..7af242de3202 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1963,7 +1963,7 @@ static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
n_sectors -= bv.bv_len >> SECTOR_SHIFT;
bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
retry_kmap:
- mem = bvec_kmap_local(&bv);
+ mem = kmap_local_page(bv.bv_page);
if (likely(dio->op == REQ_OP_WRITE))
flush_dcache_page(bv.bv_page);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 5111ed966947..41d6e2383517 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2189,6 +2189,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
if (!num_sectors || num_sectors > max_sectors)
num_sectors = max_sectors;
+ rdev->sb_start = sb_start;
}
sb = page_address(rdev->sb_page);
sb->data_size = cpu_to_le64(num_sectors);
@@ -6270,7 +6271,8 @@ static void __md_stop(struct mddev *mddev)
spin_lock(&mddev->lock);
mddev->pers = NULL;
spin_unlock(&mddev->lock);
- pers->free(mddev, mddev->private);
+ if (mddev->private)
+ pers->free(mddev, mddev->private);
mddev->private = NULL;
if (pers->sync_request && mddev->to_remove == NULL)
mddev->to_remove = &md_redundancy_group;
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index 70532335c7c7..cb670f16e98e 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -423,9 +423,9 @@ static int rebalance_children(struct shadow_spine *s,
memcpy(n, dm_block_data(child),
dm_bm_block_size(dm_tm_get_bm(info->tm)));
- dm_tm_unlock(info->tm, child);
dm_tm_dec(info->tm, dm_block_location(child));
+ dm_tm_unlock(info->tm, child);
return 0;
}
diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
index 79fa36de8a04..cd9cb354dc2c 100644
--- a/drivers/media/cec/core/cec-adap.c
+++ b/drivers/media/cec/core/cec-adap.c
@@ -1199,6 +1199,7 @@ void cec_received_msg_ts(struct cec_adapter *adap,
if (abort)
dst->rx_status |= CEC_RX_STATUS_FEATURE_ABORT;
msg->flags = dst->flags;
+ msg->sequence = dst->sequence;
/* Remove it from the wait_queue */
list_del_init(&data->list);
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index 1094575abf95..90acafd9a290 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -241,6 +241,7 @@ static void *vb2_dma_sg_get_userptr(struct vb2_buffer *vb, struct device *dev,
buf->offset = vaddr & ~PAGE_MASK;
buf->size = size;
buf->dma_sgt = &buf->sg_table;
+ buf->vb = vb;
vec = vb2_create_framevec(vaddr, size);
if (IS_ERR(vec))
goto userptr_fail_pfnvec;
@@ -642,6 +643,7 @@ static void *vb2_dma_sg_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
buf->dma_dir = vb->vb2_queue->dma_dir;
buf->size = size;
buf->db_attach = dba;
+ buf->vb = vb;
return buf;
}
diff --git a/drivers/media/i2c/hi846.c b/drivers/media/i2c/hi846.c
index 822ce3021fde..48909faeced4 100644
--- a/drivers/media/i2c/hi846.c
+++ b/drivers/media/i2c/hi846.c
@@ -7,9 +7,9 @@
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/of_graph.h>
#include <linux/pm_runtime.h>
#include <linux/pm.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
@@ -2176,7 +2176,7 @@ static struct i2c_driver hi846_i2c_driver = {
.driver = {
.name = "hi846",
.pm = &hi846_pm_ops,
- .of_match_table = of_match_ptr(hi846_of_match),
+ .of_match_table = hi846_of_match,
},
.probe_new = hi846_probe,
.remove = hi846_remove,
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 8176769a89fa..0f3d6b5667b0 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -751,10 +751,6 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *p64,
/*
* x86 is the only compat architecture with different struct alignment
* between 32-bit and 64-bit tasks.
- *
- * On all other architectures, v4l2_event32 and v4l2_event32_time32 are
- * the same as v4l2_event and v4l2_event_time32, so we can use the native
- * handlers, converting v4l2_event to v4l2_event_time32 if necessary.
*/
struct v4l2_event32 {
__u32 type;
@@ -772,21 +768,6 @@ struct v4l2_event32 {
__u32 reserved[8];
};
-#ifdef CONFIG_COMPAT_32BIT_TIME
-struct v4l2_event32_time32 {
- __u32 type;
- union {
- compat_s64 value64;
- __u8 data[64];
- } u;
- __u32 pending;
- __u32 sequence;
- struct old_timespec32 timestamp;
- __u32 id;
- __u32 reserved[8];
-};
-#endif
-
static int put_v4l2_event32(struct v4l2_event *p64,
struct v4l2_event32 __user *p32)
{
@@ -802,7 +783,22 @@ static int put_v4l2_event32(struct v4l2_event *p64,
return 0;
}
+#endif
+
#ifdef CONFIG_COMPAT_32BIT_TIME
+struct v4l2_event32_time32 {
+ __u32 type;
+ union {
+ compat_s64 value64;
+ __u8 data[64];
+ } u;
+ __u32 pending;
+ __u32 sequence;
+ struct old_timespec32 timestamp;
+ __u32 id;
+ __u32 reserved[8];
+};
+
static int put_v4l2_event32_time32(struct v4l2_event *p64,
struct v4l2_event32_time32 __user *p32)
{
@@ -818,7 +814,6 @@ static int put_v4l2_event32_time32(struct v4l2_event *p64,
return 0;
}
#endif
-#endif
struct v4l2_edid32 {
__u32 pad;
@@ -880,9 +875,7 @@ static int put_v4l2_edid32(struct v4l2_edid *p64,
#define VIDIOC_QUERYBUF32_TIME32 _IOWR('V', 9, struct v4l2_buffer32_time32)
#define VIDIOC_QBUF32_TIME32 _IOWR('V', 15, struct v4l2_buffer32_time32)
#define VIDIOC_DQBUF32_TIME32 _IOWR('V', 17, struct v4l2_buffer32_time32)
-#ifdef CONFIG_X86_64
#define VIDIOC_DQEVENT32_TIME32 _IOR ('V', 89, struct v4l2_event32_time32)
-#endif
#define VIDIOC_PREPARE_BUF32_TIME32 _IOWR('V', 93, struct v4l2_buffer32_time32)
#endif
@@ -936,11 +929,11 @@ unsigned int v4l2_compat_translate_cmd(unsigned int cmd)
#ifdef CONFIG_X86_64
case VIDIOC_DQEVENT32:
return VIDIOC_DQEVENT;
+#endif
#ifdef CONFIG_COMPAT_32BIT_TIME
case VIDIOC_DQEVENT32_TIME32:
return VIDIOC_DQEVENT;
#endif
-#endif
}
return cmd;
}
@@ -1032,11 +1025,11 @@ int v4l2_compat_put_user(void __user *arg, void *parg, unsigned int cmd)
#ifdef CONFIG_X86_64
case VIDIOC_DQEVENT32:
return put_v4l2_event32(parg, arg);
+#endif
#ifdef CONFIG_COMPAT_32BIT_TIME
case VIDIOC_DQEVENT32_TIME32:
return put_v4l2_event32_time32(parg, arg);
#endif
-#endif
}
return 0;
}
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index b883dcc0bbfa..e201e5976f34 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -241,7 +241,7 @@ static void mtk_smi_larb_config_port_gen2_general(struct device *dev)
{
struct mtk_smi_larb *larb = dev_get_drvdata(dev);
u32 reg, flags_general = larb->larb_gen->flags_general;
- const u8 *larbostd = larb->larb_gen->ostd[larb->larbid];
+ const u8 *larbostd = larb->larb_gen->ostd ? larb->larb_gen->ostd[larb->larbid] : NULL;
int i;
if (BIT(larb->larbid) & larb->larb_gen->larb_direct_to_common_mask)
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 8c72eb590f79..6ac509c1821c 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -1803,8 +1803,6 @@ static int rtsx_pci_runtime_suspend(struct device *device)
mutex_lock(&pcr->pcr_mutex);
rtsx_pci_power_off(pcr, HOST_ENTER_S3);
- free_irq(pcr->irq, (void *)pcr);
-
mutex_unlock(&pcr->pcr_mutex);
pcr->is_runtime_suspended = true;
@@ -1825,8 +1823,6 @@ static int rtsx_pci_runtime_resume(struct device *device)
mutex_lock(&pcr->pcr_mutex);
rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
- rtsx_pci_acquire_irq(pcr);
- synchronize_irq(pcr->irq);
if (pcr->ops->fetch_vendor_settings)
pcr->ops->fetch_vendor_settings(pcr);
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 632325474233..b38978a3b3ff 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -376,7 +376,6 @@ MODULE_DEVICE_TABLE(spi, at25_spi_ids);
static int at25_probe(struct spi_device *spi)
{
struct at25_data *at25 = NULL;
- struct spi_eeprom chip;
int err;
int sr;
u8 id[FM25_ID_LEN];
@@ -389,15 +388,18 @@ static int at25_probe(struct spi_device *spi)
if (match && !strcmp(match->compatible, "cypress,fm25"))
is_fram = 1;
+ at25 = devm_kzalloc(&spi->dev, sizeof(struct at25_data), GFP_KERNEL);
+ if (!at25)
+ return -ENOMEM;
+
/* Chip description */
- if (!spi->dev.platform_data) {
- if (!is_fram) {
- err = at25_fw_to_chip(&spi->dev, &chip);
- if (err)
- return err;
- }
- } else
- chip = *(struct spi_eeprom *)spi->dev.platform_data;
+ if (spi->dev.platform_data) {
+ memcpy(&at25->chip, spi->dev.platform_data, sizeof(at25->chip));
+ } else if (!is_fram) {
+ err = at25_fw_to_chip(&spi->dev, &at25->chip);
+ if (err)
+ return err;
+ }
/* Ping the chip ... the status register is pretty portable,
* unlike probing manufacturer IDs. We do expect that system
@@ -409,12 +411,7 @@ static int at25_probe(struct spi_device *spi)
return -ENXIO;
}
- at25 = devm_kzalloc(&spi->dev, sizeof(struct at25_data), GFP_KERNEL);
- if (!at25)
- return -ENOMEM;
-
mutex_init(&at25->lock);
- at25->chip = chip;
at25->spi = spi;
spi_set_drvdata(spi, at25);
@@ -431,7 +428,7 @@ static int at25_probe(struct spi_device *spi)
dev_err(&spi->dev, "Error: unsupported size (id %02x)\n", id[7]);
return -ENODEV;
}
- chip.byte_len = int_pow(2, id[7] - 0x21 + 4) * 1024;
+ at25->chip.byte_len = int_pow(2, id[7] - 0x21 + 4) * 1024;
if (at25->chip.byte_len > 64 * 1024)
at25->chip.flags |= EE_ADDR3;
@@ -464,7 +461,7 @@ static int at25_probe(struct spi_device *spi)
at25->nvmem_config.type = is_fram ? NVMEM_TYPE_FRAM : NVMEM_TYPE_EEPROM;
at25->nvmem_config.name = dev_name(&spi->dev);
at25->nvmem_config.dev = &spi->dev;
- at25->nvmem_config.read_only = chip.flags & EE_READONLY;
+ at25->nvmem_config.read_only = at25->chip.flags & EE_READONLY;
at25->nvmem_config.root_only = true;
at25->nvmem_config.owner = THIS_MODULE;
at25->nvmem_config.compat = true;
@@ -474,17 +471,18 @@ static int at25_probe(struct spi_device *spi)
at25->nvmem_config.priv = at25;
at25->nvmem_config.stride = 1;
at25->nvmem_config.word_size = 1;
- at25->nvmem_config.size = chip.byte_len;
+ at25->nvmem_config.size = at25->chip.byte_len;
at25->nvmem = devm_nvmem_register(&spi->dev, &at25->nvmem_config);
if (IS_ERR(at25->nvmem))
return PTR_ERR(at25->nvmem);
dev_info(&spi->dev, "%d %s %s %s%s, pagesize %u\n",
- (chip.byte_len < 1024) ? chip.byte_len : (chip.byte_len / 1024),
- (chip.byte_len < 1024) ? "Byte" : "KByte",
+ (at25->chip.byte_len < 1024) ?
+ at25->chip.byte_len : (at25->chip.byte_len / 1024),
+ (at25->chip.byte_len < 1024) ? "Byte" : "KByte",
at25->chip.name, is_fram ? "fram" : "eeprom",
- (chip.flags & EE_READONLY) ? " (readonly)" : "",
+ (at25->chip.flags & EE_READONLY) ? " (readonly)" : "",
at25->chip.page_size);
return 0;
}
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 39aca7753719..4ccbf43e6bfa 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -719,16 +719,18 @@ static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
{
u64 size = 0;
- int i;
+ int oix;
size = ALIGN(metalen, FASTRPC_ALIGN);
- for (i = 0; i < ctx->nscalars; i++) {
+ for (oix = 0; oix < ctx->nbufs; oix++) {
+ int i = ctx->olaps[oix].raix;
+
if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {
- if (ctx->olaps[i].offset == 0)
+ if (ctx->olaps[oix].offset == 0)
size = ALIGN(size, FASTRPC_ALIGN);
- size += (ctx->olaps[i].mend - ctx->olaps[i].mstart);
+ size += (ctx->olaps[oix].mend - ctx->olaps[oix].mstart);
}
}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 240c5af793dc..368f10405e13 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -2264,7 +2264,7 @@ void mmc_start_host(struct mmc_host *host)
_mmc_detect_change(host, 0, false);
}
-void mmc_stop_host(struct mmc_host *host)
+void __mmc_stop_host(struct mmc_host *host)
{
if (host->slot.cd_irq >= 0) {
mmc_gpio_set_cd_wake(host, false);
@@ -2273,6 +2273,11 @@ void mmc_stop_host(struct mmc_host *host)
host->rescan_disable = 1;
cancel_delayed_work_sync(&host->detect);
+}
+
+void mmc_stop_host(struct mmc_host *host)
+{
+ __mmc_stop_host(host);
/* clear pm flags now and let card drivers set them as needed */
host->pm_flags = 0;
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 7931a4f0137d..f5f3f623ea49 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -70,6 +70,7 @@ static inline void mmc_delay(unsigned int ms)
void mmc_rescan(struct work_struct *work);
void mmc_start_host(struct mmc_host *host);
+void __mmc_stop_host(struct mmc_host *host);
void mmc_stop_host(struct mmc_host *host);
void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index d4683b1d263f..cf140f4ec864 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -80,9 +80,18 @@ static void mmc_host_classdev_release(struct device *dev)
kfree(host);
}
+static int mmc_host_classdev_shutdown(struct device *dev)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+ __mmc_stop_host(host);
+ return 0;
+}
+
static struct class mmc_host_class = {
.name = "mmc_host",
.dev_release = mmc_host_classdev_release,
+ .shutdown_pre = mmc_host_classdev_shutdown,
.pm = MMC_HOST_CLASS_DEV_PM_OPS,
};
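
(Editorial note, not part of the patch: the mmc core hunks above quiesce the host from a class-level .shutdown_pre hook, which the driver core invokes for every device of the class before system shutdown. A bare-bones sketch of wiring such a hook into a hypothetical class.)

#include <linux/device.h>

static int demo_classdev_shutdown(struct device *dev)
{
	/* Quiesce the device before the system shutdown path continues. */
	return 0;
}

static struct class demo_class = {	/* hypothetical class */
	.name		= "demo_class",
	.shutdown_pre	= demo_classdev_shutdown,
};
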
diff --git a/drivers/mmc/host/meson-mx-sdhc-mmc.c b/drivers/mmc/host/meson-mx-sdhc-mmc.c
index 7cd9c0ec2fcf..8fdd0bbbfa21 100644
--- a/drivers/mmc/host/meson-mx-sdhc-mmc.c
+++ b/drivers/mmc/host/meson-mx-sdhc-mmc.c
@@ -135,6 +135,7 @@ static void meson_mx_sdhc_start_cmd(struct mmc_host *mmc,
struct mmc_command *cmd)
{
struct meson_mx_sdhc_host *host = mmc_priv(mmc);
+ bool manual_stop = false;
u32 ictl, send;
int pack_len;
@@ -172,12 +173,27 @@ static void meson_mx_sdhc_start_cmd(struct mmc_host *mmc,
else
/* software flush: */
ictl |= MESON_SDHC_ICTL_DATA_XFER_OK;
+
+ /*
+ * Mimic the logic from the vendor driver where (only)
+ * SD_IO_RW_EXTENDED commands with more than one block set the
+ * MESON_SDHC_MISC_MANUAL_STOP bit. This fixes the firmware
+ * download in the brcmfmac driver for a BCM43362/1 card.
+ * Without MESON_SDHC_MISC_MANUAL_STOP set, sdio_memcpy_toio()
+ * (with a size of 219557 bytes) times out.
+ */
+ manual_stop = cmd->data->blocks > 1 &&
+ cmd->opcode == SD_IO_RW_EXTENDED;
} else {
pack_len = 0;
ictl |= MESON_SDHC_ICTL_RESP_OK;
}
+ regmap_update_bits(host->regmap, MESON_SDHC_MISC,
+ MESON_SDHC_MISC_MANUAL_STOP,
+ manual_stop ? MESON_SDHC_MISC_MANUAL_STOP : 0);
+
if (cmd->opcode == MMC_STOP_TRANSMISSION)
send |= MESON_SDHC_SEND_DATA_STOP;
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index f4c8e1a61f53..b431cdd27353 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1514,6 +1514,12 @@ static int mmc_spi_remove(struct spi_device *spi)
return 0;
}
+static const struct spi_device_id mmc_spi_dev_ids[] = {
+ { "mmc-spi-slot"},
+ { },
+};
+MODULE_DEVICE_TABLE(spi, mmc_spi_dev_ids);
+
static const struct of_device_id mmc_spi_of_match_table[] = {
{ .compatible = "mmc-spi-slot", },
{},
@@ -1525,6 +1531,7 @@ static struct spi_driver mmc_spi_driver = {
.name = "mmc_spi",
.of_match_table = mmc_spi_of_match_table,
},
+ .id_table = mmc_spi_dev_ids,
.probe = mmc_spi_probe,
.remove = mmc_spi_remove,
};
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
index fdaa11f92fe6..a75d3dd34d18 100644
--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
+++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
@@ -441,6 +441,8 @@ static int sdmmc_dlyb_phase_tuning(struct mmci_host *host, u32 opcode)
return -EINVAL;
}
+ writel_relaxed(0, dlyb->base + DLYB_CR);
+
phase = end_of_len - max_len / 2;
sdmmc_dlyb_set_cfgr(dlyb, dlyb->unit, phase, false);
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 943940b44e83..632775217d35 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -2291,8 +2291,10 @@ static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card
sdr_set_field(host->base + PAD_DS_TUNE,
PAD_DS_TUNE_DLY1, i);
ret = mmc_get_ext_csd(card, &ext_csd);
- if (!ret)
+ if (!ret) {
result_dly1 |= (1 << i);
+ kfree(ext_csd);
+ }
}
host->hs400_tuning = false;
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index a4407f391f66..f5b2684ad805 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -673,7 +673,7 @@ static int renesas_sdhi_execute_tuning(struct mmc_host *mmc, u32 opcode)
/* Issue CMD19 twice for each tap */
for (i = 0; i < 2 * priv->tap_num; i++) {
- int cmd_error;
+ int cmd_error = 0;
/* Set sampling clock position */
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, i % priv->tap_num);
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index afaf33707d46..764ee1b761d9 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -310,7 +310,6 @@ static struct esdhc_soc_data usdhc_imx8qxp_data = {
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
| ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
- | ESDHC_FLAG_CQHCI
| ESDHC_FLAG_STATE_LOST_IN_LPMODE
| ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME,
};
@@ -319,7 +318,6 @@ static struct esdhc_soc_data usdhc_imx8mm_data = {
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
| ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
- | ESDHC_FLAG_CQHCI
| ESDHC_FLAG_STATE_LOST_IN_LPMODE,
};
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index a5001875876b..9762ffab2e23 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -356,23 +356,6 @@ static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
}
}
-static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
- struct mmc_ios *ios)
-{
- struct sdhci_host *host = mmc_priv(mmc);
- u32 val;
-
- val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
-
- if (ios->enhanced_strobe)
- val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
- else
- val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
-
- sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
-
-}
-
static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -793,6 +776,32 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
}
}
+static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 val;
+
+ val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
+
+ if (ios->enhanced_strobe) {
+ val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
+ /*
+ * When CMD13 is sent from mmc_select_hs400es() after
+ * switching to HS400ES mode, the bus is operating at
+ * either MMC_HIGH_26_MAX_DTR or MMC_HIGH_52_MAX_DTR.
+ * To meet the Tegra SDHCI requirement in HS400ES mode, force the
+ * SDHCI interface clock to MMC_HS200_MAX_DTR (200 MHz) so that the
+ * host controller CAR clock and the interface clock are rate matched.
+ */
+ tegra_sdhci_set_clock(host, MMC_HS200_MAX_DTR);
+ } else {
+ val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
+ }
+
+ sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
+}
+
static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 269c86569402..07c6da1f2f0f 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -771,7 +771,19 @@ static void sdhci_adma_table_pre(struct sdhci_host *host,
len -= offset;
}
- BUG_ON(len > 65536);
+ /*
+ * The block layer forces a minimum segment size of PAGE_SIZE,
+ * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write
+ * multiple descriptors, noting that the ADMA table is sized
+ * for 4KiB chunks anyway, so it will be big enough.
+ */
+ while (len > host->max_adma) {
+ int n = 32 * 1024; /* 32KiB */
+
+ __sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID);
+ addr += n;
+ len -= n;
+ }
/* tran, valid */
if (len)
@@ -3968,6 +3980,7 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
* descriptor for each segment, plus 1 for a nop end descriptor.
*/
host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
+ host->max_adma = 65536;
host->max_timeout_count = 0xE;
@@ -4633,10 +4646,12 @@ int sdhci_setup_host(struct sdhci_host *host)
* be larger than 64 KiB though.
*/
if (host->flags & SDHCI_USE_ADMA) {
- if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
+ if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) {
+ host->max_adma = 65532; /* 32-bit alignment */
mmc->max_seg_size = 65535;
- else
+ } else {
mmc->max_seg_size = 65536;
+ }
} else {
mmc->max_seg_size = mmc->max_req_size;
}
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index bb883553d3b4..d7929d725730 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -340,7 +340,8 @@ struct sdhci_adma2_64_desc {
/*
* Maximum segments assuming a 512KiB maximum requisition size and a minimum
- * 4KiB page size.
+ * 4KiB page size. Note this also leaves enough room for multiple descriptors
+ * in case of PAGE_SIZE >= 64KiB.
*/
#define SDHCI_MAX_SEGS 128
@@ -543,6 +544,7 @@ struct sdhci_host {
unsigned int blocks; /* remaining PIO blocks */
int sg_count; /* Mapped sg entries */
+ int max_adma; /* Max. length in ADMA descriptor */
void *adma_table; /* ADMA descriptor table */
void *align_buffer; /* Bounce buffer */
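To make the descriptor-splitting change above concrete, here is a standalone userspace sketch (illustrative only, not driver code) of the new loop in sdhci_adma_table_pre(): with 64KiB pages the block layer can hand over a 65536-byte segment, and with the zero-length-descriptor quirk max_adma drops to 65532, so the segment is emitted as a 32KiB descriptor followed by a final 32KiB "tran, valid" descriptor. The DMA address below is made up.

#include <stdio.h>

/* Stand-in for __sdhci_adma_write_desc(); just prints the descriptor. */
static void write_desc(unsigned long long addr, int len)
{
	printf("desc: addr=0x%llx len=%d (ADMA2_TRAN_VALID)\n", addr, len);
}

int main(void)
{
	int max_adma = 65532;		/* SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC case */
	int len = 65536;		/* one PAGE_SIZE = 64KiB segment */
	unsigned long long addr = 0x80000000ULL;	/* made-up DMA address */

	/* Same shape as the new loop: chop into 32KiB pieces until the
	 * remainder fits into a single descriptor.
	 */
	while (len > max_adma) {
		int n = 32 * 1024;	/* 32KiB */

		write_desc(addr, n);
		addr += n;
		len -= n;
	}

	/* tran, valid */
	if (len)
		write_desc(addr, len);

	return 0;
}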
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 9802e265fca8..2b317ed6c103 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -96,6 +96,13 @@ struct dataflash {
struct mtd_info mtd;
};
+static const struct spi_device_id dataflash_dev_ids[] = {
+ { "at45" },
+ { "dataflash" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, dataflash_dev_ids);
+
#ifdef CONFIG_OF
static const struct of_device_id dataflash_dt_ids[] = {
{ .compatible = "atmel,at45", },
@@ -927,6 +934,7 @@ static struct spi_driver dataflash_driver = {
.name = "mtd_dataflash",
.of_match_table = of_match_ptr(dataflash_dt_ids),
},
+ .id_table = dataflash_dev_ids,
.probe = dataflash_probe,
.remove = dataflash_remove,
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 67b7cb67c030..0a45d3c6c15b 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -26,7 +26,7 @@ config MTD_NAND_DENALI_PCI
config MTD_NAND_DENALI_DT
tristate "Denali NAND controller as a DT device"
select MTD_NAND_DENALI
- depends on HAS_DMA && HAVE_CLK && OF
+ depends on HAS_DMA && HAVE_CLK && OF && HAS_IOMEM
help
Enable the driver for NAND flash on platforms using a Denali NAND
controller as a DT device.
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index 658f0cbe7ce8..6b2bda815b88 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/completion.h>
+#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
@@ -93,6 +94,14 @@
#define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ)
+/*
+ * According to the SPEAr300 Reference Manual (RM0082):
+ * TOUTDEL = 7ns (Output delay from the flip-flops to the board)
+ * TINDEL = 5ns (Input delay from the board to the flip-flops)
+ */
+#define TOUTDEL 7000
+#define TINDEL 5000
+
struct fsmc_nand_timings {
u8 tclr;
u8 tar;
@@ -277,7 +286,7 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host,
{
unsigned long hclk = clk_get_rate(host->clk);
unsigned long hclkn = NSEC_PER_SEC / hclk;
- u32 thiz, thold, twait, tset;
+ u32 thiz, thold, twait, tset, twait_min;
if (sdrt->tRC_min < 30000)
return -EOPNOTSUPP;
@@ -309,13 +318,6 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host,
else if (tims->thold > FSMC_THOLD_MASK)
tims->thold = FSMC_THOLD_MASK;
- twait = max(sdrt->tRP_min, sdrt->tWP_min);
- tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1;
- if (tims->twait == 0)
- tims->twait = 1;
- else if (tims->twait > FSMC_TWAIT_MASK)
- tims->twait = FSMC_TWAIT_MASK;
-
tset = max(sdrt->tCS_min - sdrt->tWP_min,
sdrt->tCEA_max - sdrt->tREA_max);
tims->tset = DIV_ROUND_UP(tset / 1000, hclkn) - 1;
@@ -324,6 +326,21 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host,
else if (tims->tset > FSMC_TSET_MASK)
tims->tset = FSMC_TSET_MASK;
+ /*
+ * According to the SPEAr300 Reference Manual (RM0082), which gives more
+ * information related to FSMC timings than the SPEAr600 one (RM0305),
+ * twait >= tCEA - (tset * TCLK) + TOUTDEL + TINDEL
+ */
+ twait_min = sdrt->tCEA_max - ((tims->tset + 1) * hclkn * 1000)
+ + TOUTDEL + TINDEL;
+ twait = max3(sdrt->tRP_min, sdrt->tWP_min, twait_min);
+
+ tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1;
+ if (tims->twait == 0)
+ tims->twait = 1;
+ else if (tims->twait > FSMC_TWAIT_MASK)
+ tims->twait = FSMC_TWAIT_MASK;
+
return 0;
}
@@ -664,6 +681,9 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
instr->ctx.waitrdy.timeout_ms);
break;
}
+
+ if (instr->delay_ns)
+ ndelay(instr->delay_ns);
}
return ret;
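A quick back-of-the-envelope check of the reworked twait calculation, using assumed numbers rather than a real timing mode: with hclkn = 6 ns (about 166 MHz HCLK), tCEA_max = 25000 ps, tRP_min = tWP_min = 15000 ps, and tims->tset already programmed to 0, the erratum term gives twait_min = 25000 - 6000 + 7000 + 5000 = 31000 ps. That dominates the 15000 ps of tRP/tWP, so the twait field becomes DIV_ROUND_UP(31, 6) - 1 = 5 instead of the 2 the old max(tRP, tWP) formula would have produced. The sketch below just replays that arithmetic.

#include <stdio.h>

#define TOUTDEL 7000	/* ps, output delay per RM0082 */
#define TINDEL  5000	/* ps, input delay per RM0082 */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Assumed example values, timings in picoseconds. */
	unsigned long hclkn = 6;		/* ~166 MHz HCLK -> 6 ns period */
	unsigned int tCEA_max = 25000;
	unsigned int tRP_min = 15000, tWP_min = 15000;
	unsigned int tset = 0;			/* value already in tims->tset */
	unsigned int twait_min, twait, field;

	/* twait >= tCEA - (tset * TCLK) + TOUTDEL + TINDEL */
	twait_min = tCEA_max - ((tset + 1) * hclkn * 1000) + TOUTDEL + TINDEL;

	twait = tRP_min > tWP_min ? tRP_min : tWP_min;
	if (twait_min > twait)
		twait = twait_min;

	field = DIV_ROUND_UP(twait / 1000, hclkn) - 1;
	printf("twait_min=%u ps twait=%u ps twait field=%u\n",
	       twait_min, twait, field);	/* 31000 ps -> field 5 */
	return 0;
}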
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 3d6c6e880520..a130320de412 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -926,7 +926,7 @@ int nand_choose_best_sdr_timings(struct nand_chip *chip,
struct nand_sdr_timings *spec_timings)
{
const struct nand_controller_ops *ops = chip->controller->ops;
- int best_mode = 0, mode, ret;
+ int best_mode = 0, mode, ret = -EOPNOTSUPP;
iface->type = NAND_SDR_IFACE;
@@ -977,7 +977,7 @@ int nand_choose_best_nvddr_timings(struct nand_chip *chip,
struct nand_nvddr_timings *spec_timings)
{
const struct nand_controller_ops *ops = chip->controller->ops;
- int best_mode = 0, mode, ret;
+ int best_mode = 0, mode, ret = -EOPNOTSUPP;
iface->type = NAND_NVDDR_IFACE;
@@ -1837,7 +1837,7 @@ int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
NAND_OP_CMD(NAND_CMD_ERASE1, 0),
NAND_OP_ADDR(2, addrs, 0),
NAND_OP_CMD(NAND_CMD_ERASE2,
- NAND_COMMON_TIMING_MS(conf, tWB_max)),
+ NAND_COMMON_TIMING_NS(conf, tWB_max)),
NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max),
0),
};
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 10506a4b66ef..6cccc3dc00bc 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -567,9 +567,7 @@ config XEN_NETDEV_BACKEND
config VMXNET3
tristate "VMware VMXNET3 ethernet driver"
depends on PCI && INET
- depends on !(PAGE_SIZE_64KB || ARM64_64K_PAGES || \
- IA64_PAGE_SIZE_64KB || PARISC_PAGE_SIZE_64KB || \
- PPC_64K_PAGES)
+ depends on PAGE_SIZE_LESS_THAN_64KB
help
This driver supports VMware's vmxnet3 virtual ethernet NIC.
To compile this driver as a module, choose M here: the
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 2ec8e015c7b3..533e476988f2 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1501,14 +1501,14 @@ void bond_alb_monitor(struct work_struct *work)
struct slave *slave;
if (!bond_has_slaves(bond)) {
- bond_info->tx_rebalance_counter = 0;
+ atomic_set(&bond_info->tx_rebalance_counter, 0);
bond_info->lp_counter = 0;
goto re_arm;
}
rcu_read_lock();
- bond_info->tx_rebalance_counter++;
+ atomic_inc(&bond_info->tx_rebalance_counter);
bond_info->lp_counter++;
/* send learning packets */
@@ -1530,7 +1530,7 @@ void bond_alb_monitor(struct work_struct *work)
}
/* rebalance tx traffic */
- if (bond_info->tx_rebalance_counter >= BOND_TLB_REBALANCE_TICKS) {
+ if (atomic_read(&bond_info->tx_rebalance_counter) >= BOND_TLB_REBALANCE_TICKS) {
bond_for_each_slave_rcu(bond, slave, iter) {
tlb_clear_slave(bond, slave, 1);
if (slave == rcu_access_pointer(bond->curr_active_slave)) {
@@ -1540,7 +1540,7 @@ void bond_alb_monitor(struct work_struct *work)
bond_info->unbalanced_load = 0;
}
}
- bond_info->tx_rebalance_counter = 0;
+ atomic_set(&bond_info->tx_rebalance_counter, 0);
}
if (bond_info->rlb_enabled) {
@@ -1610,7 +1610,8 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
tlb_init_slave(slave);
/* order a rebalance ASAP */
- bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
+ atomic_set(&bond->alb_info.tx_rebalance_counter,
+ BOND_TLB_REBALANCE_TICKS);
if (bond->alb_info.rlb_enabled)
bond->alb_info.rlb_rebalance = 1;
@@ -1647,7 +1648,8 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
rlb_clear_slave(bond, slave);
} else if (link == BOND_LINK_UP) {
/* order a rebalance ASAP */
- bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
+ atomic_set(&bond_info->tx_rebalance_counter,
+ BOND_TLB_REBALANCE_TICKS);
if (bond->alb_info.rlb_enabled) {
bond->alb_info.rlb_rebalance = 1;
/* If the updelay module parameter is smaller than the
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index a8fde3bc458f..b93337b5a721 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -1526,7 +1526,7 @@ static int bond_option_ad_actor_system_set(struct bonding *bond,
mac = (u8 *)&newval->value;
}
- if (!is_valid_ether_addr(mac))
+ if (is_multicast_ether_addr(mac))
goto err;
netdev_dbg(bond->dev, "Setting ad_actor_system to %pM\n", mac);
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index 74d9899fc904..eb74cdf26b88 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -248,6 +248,9 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_SPACK_EWLR BIT(23)
#define KVASER_PCIEFD_SPACK_EPLR BIT(24)
+/* Kvaser KCAN_EPACK second word */
+#define KVASER_PCIEFD_EPACK_DIR_TX BIT(0)
+
struct kvaser_pciefd;
struct kvaser_pciefd_can {
@@ -1285,7 +1288,10 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
can->err_rep_cnt++;
can->can.can_stats.bus_error++;
- stats->rx_errors++;
+ if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX)
+ stats->tx_errors++;
+ else
+ stats->rx_errors++;
can->bec.txerr = bec.txerr;
can->bec.rxerr = bec.rxerr;
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 2470c47b2e31..c2a8421e7845 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -204,16 +204,16 @@ enum m_can_reg {
/* Interrupts for version 3.0.x */
#define IR_ERR_LEC_30X (IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE)
-#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_ELO | IR_BEU | \
- IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
- IR_RF1L | IR_RF0L)
+#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_BEU | IR_BEC | \
+ IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
+ IR_RF0L)
#define IR_ERR_ALL_30X (IR_ERR_STATE | IR_ERR_BUS_30X)
/* Interrupts for version >= 3.1.x */
#define IR_ERR_LEC_31X (IR_PED | IR_PEA)
-#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_ELO | IR_BEU | \
- IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
- IR_RF1L | IR_RF0L)
+#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_BEU | IR_BEC | \
+ IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
+ IR_RF0L)
#define IR_ERR_ALL_31X (IR_ERR_STATE | IR_ERR_BUS_31X)
/* Interrupt Line Select (ILS) */
@@ -517,7 +517,7 @@ static int m_can_read_fifo(struct net_device *dev, u32 rxfs)
err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DATA,
cf->data, DIV_ROUND_UP(cf->len, 4));
if (err)
- goto out_fail;
+ goto out_free_skb;
}
/* acknowledge rx fifo 0 */
@@ -532,6 +532,8 @@ static int m_can_read_fifo(struct net_device *dev, u32 rxfs)
return 0;
+out_free_skb:
+ kfree_skb(skb);
out_fail:
netdev_err(dev, "FIFO read returned %d\n", err);
return err;
@@ -810,8 +812,6 @@ static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
{
if (irqstatus & IR_WDI)
netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
- if (irqstatus & IR_ELO)
- netdev_err(dev, "Error Logging Overflow\n");
if (irqstatus & IR_BEU)
netdev_err(dev, "Bit Error Uncorrected\n");
if (irqstatus & IR_BEC)
@@ -1494,20 +1494,32 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
case 30:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */
can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
- cdev->can.bittiming_const = &m_can_bittiming_const_30X;
- cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X;
+ cdev->can.bittiming_const = cdev->bit_timing ?
+ cdev->bit_timing : &m_can_bittiming_const_30X;
+
+ cdev->can.data_bittiming_const = cdev->data_timing ?
+ cdev->data_timing :
+ &m_can_data_bittiming_const_30X;
break;
case 31:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
- cdev->can.bittiming_const = &m_can_bittiming_const_31X;
- cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
+ cdev->can.bittiming_const = cdev->bit_timing ?
+ cdev->bit_timing : &m_can_bittiming_const_31X;
+
+ cdev->can.data_bittiming_const = cdev->data_timing ?
+ cdev->data_timing :
+ &m_can_data_bittiming_const_31X;
break;
case 32:
case 33:
/* Support both MCAN version v3.2.x and v3.3.0 */
- cdev->can.bittiming_const = &m_can_bittiming_const_31X;
- cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
+ cdev->can.bittiming_const = cdev->bit_timing ?
+ cdev->bit_timing : &m_can_bittiming_const_31X;
+
+ cdev->can.data_bittiming_const = cdev->data_timing ?
+ cdev->data_timing :
+ &m_can_data_bittiming_const_31X;
cdev->can.ctrlmode_supported |=
(m_can_niso_supported(cdev) ?
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index d18b515e6ccc..2c5d40997168 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -85,6 +85,9 @@ struct m_can_classdev {
struct sk_buff *tx_skb;
struct phy *transceiver;
+ const struct can_bittiming_const *bit_timing;
+ const struct can_bittiming_const *data_timing;
+
struct m_can_ops *ops;
int version;
diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c
index 89cc3d41e952..b56a54d6c5a9 100644
--- a/drivers/net/can/m_can/m_can_pci.c
+++ b/drivers/net/can/m_can/m_can_pci.c
@@ -18,9 +18,14 @@
#define M_CAN_PCI_MMIO_BAR 0
-#define M_CAN_CLOCK_FREQ_EHL 100000000
#define CTL_CSR_INT_CTL_OFFSET 0x508
+struct m_can_pci_config {
+ const struct can_bittiming_const *bit_timing;
+ const struct can_bittiming_const *data_timing;
+ unsigned int clock_freq;
+};
+
struct m_can_pci_priv {
struct m_can_classdev cdev;
@@ -42,8 +47,13 @@ static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg)
static int iomap_read_fifo(struct m_can_classdev *cdev, int offset, void *val, size_t val_count)
{
struct m_can_pci_priv *priv = cdev_to_priv(cdev);
+ void __iomem *src = priv->base + offset;
- ioread32_rep(priv->base + offset, val, val_count);
+ while (val_count--) {
+ *(unsigned int *)val = ioread32(src);
+ val += 4;
+ src += 4;
+ }
return 0;
}
@@ -61,8 +71,13 @@ static int iomap_write_fifo(struct m_can_classdev *cdev, int offset,
const void *val, size_t val_count)
{
struct m_can_pci_priv *priv = cdev_to_priv(cdev);
+ void __iomem *dst = priv->base + offset;
- iowrite32_rep(priv->base + offset, val, val_count);
+ while (val_count--) {
+ iowrite32(*(unsigned int *)val, dst);
+ val += 4;
+ dst += 4;
+ }
return 0;
}
@@ -74,9 +89,40 @@ static struct m_can_ops m_can_pci_ops = {
.read_fifo = iomap_read_fifo,
};
+static const struct can_bittiming_const m_can_bittiming_const_ehl = {
+ .name = KBUILD_MODNAME,
+ .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
+ .tseg1_max = 64,
+ .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 512,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const m_can_data_bittiming_const_ehl = {
+ .name = KBUILD_MODNAME,
+ .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
+ .tseg1_max = 16,
+ .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 32,
+ .brp_inc = 1,
+};
+
+static const struct m_can_pci_config m_can_pci_ehl = {
+ .bit_timing = &m_can_bittiming_const_ehl,
+ .data_timing = &m_can_data_bittiming_const_ehl,
+ .clock_freq = 200000000,
+};
+
static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
struct device *dev = &pci->dev;
+ const struct m_can_pci_config *cfg;
struct m_can_classdev *mcan_class;
struct m_can_pci_priv *priv;
void __iomem *base;
@@ -104,6 +150,8 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
if (!mcan_class)
return -ENOMEM;
+ cfg = (const struct m_can_pci_config *)id->driver_data;
+
priv = cdev_to_priv(mcan_class);
priv->base = base;
@@ -115,7 +163,9 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
mcan_class->dev = &pci->dev;
mcan_class->net->irq = pci_irq_vector(pci, 0);
mcan_class->pm_clock_support = 1;
- mcan_class->can.clock.freq = id->driver_data;
+ mcan_class->bit_timing = cfg->bit_timing;
+ mcan_class->data_timing = cfg->data_timing;
+ mcan_class->can.clock.freq = cfg->clock_freq;
mcan_class->ops = &m_can_pci_ops;
pci_set_drvdata(pci, mcan_class);
@@ -168,8 +218,8 @@ static SIMPLE_DEV_PM_OPS(m_can_pci_pm_ops,
m_can_pci_suspend, m_can_pci_resume);
static const struct pci_device_id m_can_pci_id_table[] = {
- { PCI_VDEVICE(INTEL, 0x4bc1), M_CAN_CLOCK_FREQ_EHL, },
- { PCI_VDEVICE(INTEL, 0x4bc2), M_CAN_CLOCK_FREQ_EHL, },
+ { PCI_VDEVICE(INTEL, 0x4bc1), (kernel_ulong_t)&m_can_pci_ehl, },
+ { PCI_VDEVICE(INTEL, 0x4bc2), (kernel_ulong_t)&m_can_pci_ehl, },
{ } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, m_can_pci_id_table);
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 92a54a5fd4c5..964c8a09226a 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -692,11 +692,11 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota)
cf->data[i + 1] = data_reg >> 8;
}
- netif_receive_skb(skb);
rcv_pkts++;
stats->rx_packets++;
quota--;
stats->rx_bytes += cf->len;
+ netif_receive_skb(skb);
pch_fifo_thresh(priv, obj_num);
obj_num++;
diff --git a/drivers/net/can/sja1000/ems_pcmcia.c b/drivers/net/can/sja1000/ems_pcmcia.c
index e21b169c14c0..4642b6d4aaf7 100644
--- a/drivers/net/can/sja1000/ems_pcmcia.c
+++ b/drivers/net/can/sja1000/ems_pcmcia.c
@@ -234,7 +234,12 @@ static int ems_pcmcia_add_card(struct pcmcia_device *pdev, unsigned long base)
free_sja1000dev(dev);
}
- err = request_irq(dev->irq, &ems_pcmcia_interrupt, IRQF_SHARED,
+ if (!card->channels) {
+ err = -ENODEV;
+ goto failure_cleanup;
+ }
+
+ err = request_irq(pdev->irq, &ems_pcmcia_interrupt, IRQF_SHARED,
DRV_NAME, card);
if (!err)
return 0;
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index 59ba7c7beec0..f7af1bf5ab46 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -28,10 +28,6 @@
#include "kvaser_usb.h"
-/* Forward declaration */
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg;
-
-#define CAN_USB_CLOCK 8000000
#define MAX_USBCAN_NET_DEVICES 2
/* Command header size */
@@ -80,6 +76,12 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg;
#define CMD_LEAF_LOG_MESSAGE 106
+/* Leaf frequency options */
+#define KVASER_USB_LEAF_SWOPTION_FREQ_MASK 0x60
+#define KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK 0
+#define KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK BIT(5)
+#define KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK BIT(6)
+
/* error factors */
#define M16C_EF_ACKE BIT(0)
#define M16C_EF_CRCE BIT(1)
@@ -340,6 +342,50 @@ struct kvaser_usb_err_summary {
};
};
+static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
+ .name = "kvaser_usb",
+ .tseg1_min = KVASER_USB_TSEG1_MIN,
+ .tseg1_max = KVASER_USB_TSEG1_MAX,
+ .tseg2_min = KVASER_USB_TSEG2_MIN,
+ .tseg2_max = KVASER_USB_TSEG2_MAX,
+ .sjw_max = KVASER_USB_SJW_MAX,
+ .brp_min = KVASER_USB_BRP_MIN,
+ .brp_max = KVASER_USB_BRP_MAX,
+ .brp_inc = KVASER_USB_BRP_INC,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_8mhz = {
+ .clock = {
+ .freq = 8000000,
+ },
+ .timestamp_freq = 1,
+ .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_16mhz = {
+ .clock = {
+ .freq = 16000000,
+ },
+ .timestamp_freq = 1,
+ .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_24mhz = {
+ .clock = {
+ .freq = 24000000,
+ },
+ .timestamp_freq = 1,
+ .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_32mhz = {
+ .clock = {
+ .freq = 32000000,
+ },
+ .timestamp_freq = 1,
+ .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+};
+
static void *
kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
const struct sk_buff *skb, int *frame_len,
@@ -471,6 +517,27 @@ static int kvaser_usb_leaf_send_simple_cmd(const struct kvaser_usb *dev,
return rc;
}
+static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev,
+ const struct leaf_cmd_softinfo *softinfo)
+{
+ u32 sw_options = le32_to_cpu(softinfo->sw_options);
+
+ dev->fw_version = le32_to_cpu(softinfo->fw_version);
+ dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx);
+
+ switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
+ case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_dev_cfg_16mhz;
+ break;
+ case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_dev_cfg_24mhz;
+ break;
+ case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_dev_cfg_32mhz;
+ break;
+ }
+}
+
static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
{
struct kvaser_cmd cmd;
@@ -486,14 +553,13 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
switch (dev->card_data.leaf.family) {
case KVASER_LEAF:
- dev->fw_version = le32_to_cpu(cmd.u.leaf.softinfo.fw_version);
- dev->max_tx_urbs =
- le16_to_cpu(cmd.u.leaf.softinfo.max_outstanding_tx);
+ kvaser_usb_leaf_get_software_info_leaf(dev, &cmd.u.leaf.softinfo);
break;
case KVASER_USBCAN:
dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version);
dev->max_tx_urbs =
le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx);
+ dev->cfg = &kvaser_usb_leaf_dev_cfg_8mhz;
break;
}
@@ -1225,24 +1291,11 @@ static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev)
{
struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
- dev->cfg = &kvaser_usb_leaf_dev_cfg;
card_data->ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
return 0;
}
-static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
- .name = "kvaser_usb",
- .tseg1_min = KVASER_USB_TSEG1_MIN,
- .tseg1_max = KVASER_USB_TSEG1_MAX,
- .tseg2_min = KVASER_USB_TSEG2_MIN,
- .tseg2_max = KVASER_USB_TSEG2_MAX,
- .sjw_max = KVASER_USB_SJW_MAX,
- .brp_min = KVASER_USB_BRP_MIN,
- .brp_max = KVASER_USB_BRP_MAX,
- .brp_inc = KVASER_USB_BRP_INC,
-};
-
static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
@@ -1348,11 +1401,3 @@ const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = {
.dev_read_bulk_callback = kvaser_usb_leaf_read_bulk_callback,
.dev_frame_to_cmd = kvaser_usb_leaf_frame_to_cmd,
};
-
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg = {
- .clock = {
- .freq = CAN_USB_CLOCK,
- },
- .timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_leaf_bittiming_const,
-};
diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c
index 01e37b75471e..2b88f03e5252 100644
--- a/drivers/net/dsa/b53/b53_spi.c
+++ b/drivers/net/dsa/b53/b53_spi.c
@@ -349,6 +349,19 @@ static const struct of_device_id b53_spi_of_match[] = {
};
MODULE_DEVICE_TABLE(of, b53_spi_of_match);
+static const struct spi_device_id b53_spi_ids[] = {
+ { .name = "bcm5325" },
+ { .name = "bcm5365" },
+ { .name = "bcm5395" },
+ { .name = "bcm5397" },
+ { .name = "bcm5398" },
+ { .name = "bcm53115" },
+ { .name = "bcm53125" },
+ { .name = "bcm53128" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, b53_spi_ids);
+
static struct spi_driver b53_spi_driver = {
.driver = {
.name = "b53-switch",
@@ -357,6 +370,7 @@ static struct spi_driver b53_spi_driver = {
.probe = b53_spi_probe,
.remove = b53_spi_remove,
.shutdown = b53_spi_shutdown,
+ .id_table = b53_spi_ids,
};
module_spi_driver(b53_spi_driver);
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index 43fc3087aeb3..013e9c02be71 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -1002,57 +1002,32 @@ static void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
data &= ~PORT_VLAN_MEMBERSHIP;
data |= (member & dev->port_mask);
ksz_pwrite8(dev, port, P_MIRROR_CTRL, data);
- dev->ports[port].member = member;
}
static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
struct ksz_device *dev = ds->priv;
- int forward = dev->member;
struct ksz_port *p;
- int member = -1;
u8 data;
- p = &dev->ports[port];
-
ksz_pread8(dev, port, P_STP_CTRL, &data);
data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
switch (state) {
case BR_STATE_DISABLED:
data |= PORT_LEARN_DISABLE;
- if (port < dev->phy_port_cnt)
- member = 0;
break;
case BR_STATE_LISTENING:
data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
- if (port < dev->phy_port_cnt &&
- p->stp_state == BR_STATE_DISABLED)
- member = dev->host_mask | p->vid_member;
break;
case BR_STATE_LEARNING:
data |= PORT_RX_ENABLE;
break;
case BR_STATE_FORWARDING:
data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
-
- /* This function is also used internally. */
- if (port == dev->cpu_port)
- break;
-
- /* Port is a member of a bridge. */
- if (dev->br_member & BIT(port)) {
- dev->member |= BIT(port);
- member = dev->member;
- } else {
- member = dev->host_mask | p->vid_member;
- }
break;
case BR_STATE_BLOCKING:
data |= PORT_LEARN_DISABLE;
- if (port < dev->phy_port_cnt &&
- p->stp_state == BR_STATE_DISABLED)
- member = dev->host_mask | p->vid_member;
break;
default:
dev_err(ds->dev, "invalid STP state: %d\n", state);
@@ -1060,22 +1035,11 @@ static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
}
ksz_pwrite8(dev, port, P_STP_CTRL, data);
+
+ p = &dev->ports[port];
p->stp_state = state;
- /* Port membership may share register with STP state. */
- if (member >= 0 && member != p->member)
- ksz8_cfg_port_member(dev, port, (u8)member);
-
- /* Check if forwarding needs to be updated. */
- if (state != BR_STATE_FORWARDING) {
- if (dev->br_member & BIT(port))
- dev->member &= ~BIT(port);
- }
- /* When topology has changed the function ksz_update_port_member
- * should be called to modify port forwarding behavior.
- */
- if (forward != dev->member)
- ksz_update_port_member(dev, port);
+ ksz_update_port_member(dev, port);
}
static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
@@ -1341,7 +1305,7 @@ static void ksz8795_cpu_interface_select(struct ksz_device *dev, int port)
static void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
- struct ksz_port *p = &dev->ports[port];
+ struct dsa_switch *ds = dev->ds;
struct ksz8 *ksz8 = dev->priv;
const u32 *masks;
u8 member;
@@ -1368,10 +1332,11 @@ static void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
if (!ksz_is_ksz88x3(dev))
ksz8795_cpu_interface_select(dev, port);
- member = dev->port_mask;
+ member = dsa_user_ports(ds);
} else {
- member = dev->host_mask | p->vid_member;
+ member = BIT(dsa_upstream_port(ds, port));
}
+
ksz8_cfg_port_member(dev, port, member);
}
@@ -1392,20 +1357,13 @@ static void ksz8_config_cpu_port(struct dsa_switch *ds)
ksz_cfg(dev, regs[S_TAIL_TAG_CTRL], masks[SW_TAIL_TAG_ENABLE], true);
p = &dev->ports[dev->cpu_port];
- p->vid_member = dev->port_mask;
p->on = 1;
ksz8_port_setup(dev, dev->cpu_port, true);
- dev->member = dev->host_mask;
for (i = 0; i < dev->phy_port_cnt; i++) {
p = &dev->ports[i];
- /* Initialize to non-zero so that ksz_cfg_port_member() will
- * be called.
- */
- p->vid_member = BIT(i);
- p->member = dev->port_mask;
ksz8_port_stp_state_set(ds, i, BR_STATE_DISABLED);
/* Last port may be disabled. */
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 854e25f43fa7..353b5f981740 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -391,7 +391,6 @@ static void ksz9477_cfg_port_member(struct ksz_device *dev, int port,
u8 member)
{
ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member);
- dev->ports[port].member = member;
}
static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
@@ -400,8 +399,6 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
struct ksz_device *dev = ds->priv;
struct ksz_port *p = &dev->ports[port];
u8 data;
- int member = -1;
- int forward = dev->member;
ksz_pread8(dev, port, P_STP_CTRL, &data);
data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
@@ -409,40 +406,18 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
switch (state) {
case BR_STATE_DISABLED:
data |= PORT_LEARN_DISABLE;
- if (port != dev->cpu_port)
- member = 0;
break;
case BR_STATE_LISTENING:
data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
- if (port != dev->cpu_port &&
- p->stp_state == BR_STATE_DISABLED)
- member = dev->host_mask | p->vid_member;
break;
case BR_STATE_LEARNING:
data |= PORT_RX_ENABLE;
break;
case BR_STATE_FORWARDING:
data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
-
- /* This function is also used internally. */
- if (port == dev->cpu_port)
- break;
-
- member = dev->host_mask | p->vid_member;
- mutex_lock(&dev->dev_mutex);
-
- /* Port is a member of a bridge. */
- if (dev->br_member & (1 << port)) {
- dev->member |= (1 << port);
- member = dev->member;
- }
- mutex_unlock(&dev->dev_mutex);
break;
case BR_STATE_BLOCKING:
data |= PORT_LEARN_DISABLE;
- if (port != dev->cpu_port &&
- p->stp_state == BR_STATE_DISABLED)
- member = dev->host_mask | p->vid_member;
break;
default:
dev_err(ds->dev, "invalid STP state: %d\n", state);
@@ -451,23 +426,8 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
ksz_pwrite8(dev, port, P_STP_CTRL, data);
p->stp_state = state;
- mutex_lock(&dev->dev_mutex);
- /* Port membership may share register with STP state. */
- if (member >= 0 && member != p->member)
- ksz9477_cfg_port_member(dev, port, (u8)member);
-
- /* Check if forwarding needs to be updated. */
- if (state != BR_STATE_FORWARDING) {
- if (dev->br_member & (1 << port))
- dev->member &= ~(1 << port);
- }
- /* When topology has changed the function ksz_update_port_member
- * should be called to modify port forwarding behavior.
- */
- if (forward != dev->member)
- ksz_update_port_member(dev, port);
- mutex_unlock(&dev->dev_mutex);
+ ksz_update_port_member(dev, port);
}
static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
@@ -1168,10 +1128,10 @@ static void ksz9477_phy_errata_setup(struct ksz_device *dev, int port)
static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
- u8 data8;
- u8 member;
- u16 data16;
struct ksz_port *p = &dev->ports[port];
+ struct dsa_switch *ds = dev->ds;
+ u8 data8, member;
+ u16 data16;
/* enable tag tail for host port */
if (cpu_port)
@@ -1250,12 +1210,12 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
ksz_pwrite8(dev, port, REG_PORT_XMII_CTRL_1, data8);
p->phydev.duplex = 1;
}
- mutex_lock(&dev->dev_mutex);
+
if (cpu_port)
- member = dev->port_mask;
+ member = dsa_user_ports(ds);
else
- member = dev->host_mask | p->vid_member;
- mutex_unlock(&dev->dev_mutex);
+ member = BIT(dsa_upstream_port(ds, port));
+
ksz9477_cfg_port_member(dev, port, member);
/* clear pending interrupts */
@@ -1276,8 +1236,6 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds)
const char *prev_mode;
dev->cpu_port = i;
- dev->host_mask = (1 << dev->cpu_port);
- dev->port_mask |= dev->host_mask;
p = &dev->ports[i];
/* Read from XMII register to determine host port
@@ -1312,23 +1270,15 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds)
/* enable cpu port */
ksz9477_port_setup(dev, i, true);
- p->vid_member = dev->port_mask;
p->on = 1;
}
}
- dev->member = dev->host_mask;
-
for (i = 0; i < dev->port_cnt; i++) {
if (i == dev->cpu_port)
continue;
p = &dev->ports[i];
- /* Initialize to non-zero so that ksz_cfg_port_member() will
- * be called.
- */
- p->vid_member = (1 << i);
- p->member = dev->port_mask;
ksz9477_port_stp_state_set(ds, i, BR_STATE_DISABLED);
p->on = 1;
if (i < dev->phy_port_cnt)
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 7c2968a639eb..8a04302018dc 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -22,21 +22,40 @@
void ksz_update_port_member(struct ksz_device *dev, int port)
{
- struct ksz_port *p;
+ struct ksz_port *p = &dev->ports[port];
+ struct dsa_switch *ds = dev->ds;
+ u8 port_member = 0, cpu_port;
+ const struct dsa_port *dp;
int i;
- for (i = 0; i < dev->port_cnt; i++) {
- if (i == port || i == dev->cpu_port)
+ if (!dsa_is_user_port(ds, port))
+ return;
+
+ dp = dsa_to_port(ds, port);
+ cpu_port = BIT(dsa_upstream_port(ds, port));
+
+ for (i = 0; i < ds->num_ports; i++) {
+ const struct dsa_port *other_dp = dsa_to_port(ds, i);
+ struct ksz_port *other_p = &dev->ports[i];
+ u8 val = 0;
+
+ if (!dsa_is_user_port(ds, i))
continue;
- p = &dev->ports[i];
- if (!(dev->member & (1 << i)))
+ if (port == i)
+ continue;
+ if (!dp->bridge_dev || dp->bridge_dev != other_dp->bridge_dev)
continue;
- /* Port is a member of the bridge and is forwarding. */
- if (p->stp_state == BR_STATE_FORWARDING &&
- p->member != dev->member)
- dev->dev_ops->cfg_port_member(dev, i, dev->member);
+ if (other_p->stp_state == BR_STATE_FORWARDING &&
+ p->stp_state == BR_STATE_FORWARDING) {
+ val |= BIT(port);
+ port_member |= BIT(i);
+ }
+
+ dev->dev_ops->cfg_port_member(dev, i, val | cpu_port);
}
+
+ dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port);
}
EXPORT_SYMBOL_GPL(ksz_update_port_member);
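To see what the rewritten ksz_update_port_member() ends up programming, consider a hypothetical layout (not taken from the patch): three user ports 0-2 behind CPU/upstream port 3, with ports 0 and 1 bridged together and forwarding, and port 2 standalone. Updating port 0 writes 0x09 (port 0 + CPU) to port 1's membership register, 0x0a (port 1 + CPU) to port 0's, and leaves port 2 untouched. The standalone model below mirrors that loop.

#include <stdio.h>

#define BR_STATE_FORWARDING	3

int main(void)
{
	/* Hypothetical switch: user ports 0-2, CPU/upstream port 3. */
	int bridge[] = { 1, 1, 0 };	/* bridge id per port, 0 = none */
	int stp[] = { BR_STATE_FORWARDING, BR_STATE_FORWARDING,
		      BR_STATE_FORWARDING };
	int port = 0;			/* port whose STP state just changed */
	unsigned int cpu_bit = 1u << 3;
	unsigned int port_member = 0;
	int i;

	for (i = 0; i < 3; i++) {
		unsigned int val = 0;

		if (i == port)
			continue;
		if (!bridge[port] || bridge[port] != bridge[i])
			continue;	/* not in the same bridge: skip */
		if (stp[i] == BR_STATE_FORWARDING &&
		    stp[port] == BR_STATE_FORWARDING) {
			val |= 1u << port;
			port_member |= 1u << i;
		}
		printf("port %d membership <- 0x%02x\n", i, val | cpu_bit);
	}
	printf("port %d membership <- 0x%02x\n", port, port_member | cpu_bit);
	return 0;
}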
@@ -175,12 +194,6 @@ EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats);
int ksz_port_bridge_join(struct dsa_switch *ds, int port,
struct net_device *br)
{
- struct ksz_device *dev = ds->priv;
-
- mutex_lock(&dev->dev_mutex);
- dev->br_member |= (1 << port);
- mutex_unlock(&dev->dev_mutex);
-
/* port_stp_state_set() will be called after to put the port in
* appropriate state so there is no need to do anything.
*/
@@ -192,13 +205,6 @@ EXPORT_SYMBOL_GPL(ksz_port_bridge_join);
void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
struct net_device *br)
{
- struct ksz_device *dev = ds->priv;
-
- mutex_lock(&dev->dev_mutex);
- dev->br_member &= ~(1 << port);
- dev->member &= ~(1 << port);
- mutex_unlock(&dev->dev_mutex);
-
/* port_stp_state_set() will be called after to put the port in
* forwarding state so there is no need to do anything.
*/
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 1597c63988b4..54b456bc8972 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -25,8 +25,6 @@ struct ksz_port_mib {
};
struct ksz_port {
- u16 member;
- u16 vid_member;
bool remove_tag; /* Remove Tag flag set, for ksz8795 only */
int stp_state;
struct phy_device phydev;
@@ -83,8 +81,6 @@ struct ksz_device {
struct ksz_port *ports;
struct delayed_work mib_read;
unsigned long mib_read_interval;
- u16 br_member;
- u16 member;
u16 mirror_rx;
u16 mirror_tx;
u32 features; /* chip specific features */
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index f00cbf5753b9..cd8462d1e27c 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -471,6 +471,12 @@ static int mv88e6xxx_port_ppu_updates(struct mv88e6xxx_chip *chip, int port)
u16 reg;
int err;
+ /* The 88e6250 family does not have the PHY detect bit. Instead,
+ * report whether the port is internal.
+ */
+ if (chip->info->family == MV88E6XXX_FAMILY_6250)
+ return port < chip->info->num_internal_phys;
+
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
if (err) {
dev_err(chip->dev,
@@ -692,44 +698,48 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_port *p;
- int err;
+ int err = 0;
p = &chip->ports[port];
- /* FIXME: is this the correct test? If we're in fixed mode on an
- * internal port, why should we process this any different from
- * PHY mode? On the other hand, the port may be automedia between
- * an internal PHY and the serdes...
- */
- if ((mode == MLO_AN_PHY) && mv88e6xxx_phy_is_internal(ds, port))
- return;
-
mv88e6xxx_reg_lock(chip);
- /* In inband mode, the link may come up at any time while the link
- * is not forced down. Force the link down while we reconfigure the
- * interface mode.
- */
- if (mode == MLO_AN_INBAND && p->interface != state->interface &&
- chip->info->ops->port_set_link)
- chip->info->ops->port_set_link(chip, port, LINK_FORCED_DOWN);
- err = mv88e6xxx_port_config_interface(chip, port, state->interface);
- if (err && err != -EOPNOTSUPP)
- goto err_unlock;
-
- err = mv88e6xxx_serdes_pcs_config(chip, port, mode, state->interface,
- state->advertising);
- /* FIXME: we should restart negotiation if something changed - which
- * is something we get if we convert to using phylinks PCS operations.
- */
- if (err > 0)
- err = 0;
+ if (mode != MLO_AN_PHY || !mv88e6xxx_phy_is_internal(ds, port)) {
+ /* In inband mode, the link may come up at any time while the
+ * link is not forced down. Force the link down while we
+ * reconfigure the interface mode.
+ */
+ if (mode == MLO_AN_INBAND &&
+ p->interface != state->interface &&
+ chip->info->ops->port_set_link)
+ chip->info->ops->port_set_link(chip, port,
+ LINK_FORCED_DOWN);
+
+ err = mv88e6xxx_port_config_interface(chip, port,
+ state->interface);
+ if (err && err != -EOPNOTSUPP)
+ goto err_unlock;
+
+ err = mv88e6xxx_serdes_pcs_config(chip, port, mode,
+ state->interface,
+ state->advertising);
+ /* FIXME: we should restart negotiation if something changed -
+ * which is something we get if we convert to using phylink's
+ * PCS operations.
+ */
+ if (err > 0)
+ err = 0;
+ }
/* Undo the forced down state above after completing configuration
- * irrespective of its state on entry, which allows the link to come up.
+ * irrespective of its state on entry, which allows the link to come
+ * up in the in-band case where there is no separate SERDES. Also
+ * ensure that the link can come up if the PPU is in use and we are
+ * in PHY mode (we treat the PPU as an effective in-band mechanism).
*/
- if (mode == MLO_AN_INBAND && p->interface != state->interface &&
- chip->info->ops->port_set_link)
+ if (chip->info->ops->port_set_link &&
+ ((mode == MLO_AN_INBAND && p->interface != state->interface) ||
+ (mode == MLO_AN_PHY && mv88e6xxx_port_ppu_updates(chip, port))))
chip->info->ops->port_set_link(chip, port, LINK_UNFORCED);
p->interface = state->interface;
@@ -752,13 +762,16 @@ static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port,
ops = chip->info->ops;
mv88e6xxx_reg_lock(chip);
- /* Internal PHYs propagate their configuration directly to the MAC.
- * External PHYs depend on whether the PPU is enabled for this port.
+ /* Force the link down if we know the port may not be automatically
+ * updated by the switch or if we are using fixed-link mode.
*/
- if (((!mv88e6xxx_phy_is_internal(ds, port) &&
- !mv88e6xxx_port_ppu_updates(chip, port)) ||
+ if ((!mv88e6xxx_port_ppu_updates(chip, port) ||
mode == MLO_AN_FIXED) && ops->port_sync_link)
err = ops->port_sync_link(chip, port, mode, false);
+
+ if (!err && ops->port_set_speed_duplex)
+ err = ops->port_set_speed_duplex(chip, port, SPEED_UNFORCED,
+ DUPLEX_UNFORCED);
mv88e6xxx_reg_unlock(chip);
if (err)
@@ -779,11 +792,11 @@ static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port,
ops = chip->info->ops;
mv88e6xxx_reg_lock(chip);
- /* Internal PHYs propagate their configuration directly to the MAC.
- * External PHYs depend on whether the PPU is enabled for this port.
+ /* Configure and force the link up if we know that the port may not
+ * be automatically updated by the switch or if we are using fixed-link
+ * mode.
*/
- if ((!mv88e6xxx_phy_is_internal(ds, port) &&
- !mv88e6xxx_port_ppu_updates(chip, port)) ||
+ if (!mv88e6xxx_port_ppu_updates(chip, port) ||
mode == MLO_AN_FIXED) {
/* FIXME: for an automedia port, should we force the link
* down here - what if the link comes up due to "other" media
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index d9817b20ea64..ab41619a809b 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -283,7 +283,7 @@ static int mv88e6xxx_port_set_speed_duplex(struct mv88e6xxx_chip *chip,
if (err)
return err;
- if (speed)
+ if (speed != SPEED_UNFORCED)
dev_dbg(chip->dev, "p%d: Speed set to %d Mbps\n", port, speed);
else
dev_dbg(chip->dev, "p%d: Speed unforced\n", port);
@@ -516,7 +516,7 @@ int mv88e6393x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
if (err)
return err;
- if (speed)
+ if (speed != SPEED_UNFORCED)
dev_dbg(chip->dev, "p%d: Speed set to %d Mbps\n", port, speed);
else
dev_dbg(chip->dev, "p%d: Speed unforced\n", port);
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 6ea003678798..2b05ead515cd 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -50,11 +50,22 @@ static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip,
}
static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
- u16 status, u16 lpa,
+ u16 ctrl, u16 status, u16 lpa,
struct phylink_link_state *state)
{
+ state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
+
if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) {
- state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
+ /* The Speed and Duplex Resolved register is 1 if AN is enabled
+ * and complete, or if AN is disabled. So with disabled AN we
+ * still get here on link up. But we want to set an_complete
+ * only if AN was enabled, thus we look at BMCR_ANENABLE.
+ * (According to 802.3-2008 section 22.2.4.2.10, we should be
+ * able to get this same value from BMSR_ANEGCAPABLE, but tests
+ * show that these Marvell PHYs don't conform to this part of
+ * the specificaion - BMSR_ANEGCAPABLE is simply always 1.)
+ */
+ state->an_complete = !!(ctrl & BMCR_ANENABLE);
state->duplex = status &
MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ?
DUPLEX_FULL : DUPLEX_HALF;
@@ -81,6 +92,18 @@ static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
dev_err(chip->dev, "invalid PHY speed\n");
return -EINVAL;
}
+ } else if (state->link &&
+ state->interface != PHY_INTERFACE_MODE_SGMII) {
+ /* If Speed and Duplex Resolved register is 0 and link is up, it
+ * means that AN was enabled, but link partner had it disabled
+ * and the PHY invoked the Auto-Negotiation Bypass feature and
+ * linked anyway.
+ */
+ state->duplex = DUPLEX_FULL;
+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ state->speed = SPEED_2500;
+ else
+ state->speed = SPEED_1000;
} else {
state->link = false;
}
@@ -168,9 +191,15 @@ int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
int lane, struct phylink_link_state *state)
{
- u16 lpa, status;
+ u16 lpa, status, ctrl;
int err;
+ err = mv88e6352_serdes_read(chip, MII_BMCR, &ctrl);
+ if (err) {
+ dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
+ return err;
+ }
+
err = mv88e6352_serdes_read(chip, 0x11, &status);
if (err) {
dev_err(chip->dev, "can't read Serdes PHY status: %d\n", err);
@@ -183,7 +212,7 @@ int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
return err;
}
- return mv88e6xxx_serdes_pcs_get_state(chip, status, lpa, state);
+ return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
}
int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
@@ -801,7 +830,7 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
bool up)
{
u8 cmode = chip->ports[port].cmode;
- int err = 0;
+ int err;
switch (cmode) {
case MV88E6XXX_PORT_STS_CMODE_SGMII:
@@ -813,6 +842,9 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
case MV88E6XXX_PORT_STS_CMODE_RXAUI:
err = mv88e6390_serdes_power_10g(chip, lane, up);
break;
+ default:
+ err = -EINVAL;
+ break;
}
if (!err && up)
@@ -883,10 +915,17 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
int port, int lane, struct phylink_link_state *state)
{
- u16 lpa, status;
+ u16 lpa, status, ctrl;
int err;
err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_BMCR, &ctrl);
+ if (err) {
+ dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
+ return err;
+ }
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
MV88E6390_SGMII_PHY_STATUS, &status);
if (err) {
dev_err(chip->dev, "can't read Serdes PHY status: %d\n", err);
@@ -900,7 +939,7 @@ static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
return err;
}
- return mv88e6xxx_serdes_pcs_get_state(chip, status, lpa, state);
+ return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
}
static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip,
@@ -1271,9 +1310,31 @@ void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
}
}
-static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane)
+static int mv88e6393x_serdes_power_lane(struct mv88e6xxx_chip *chip, int lane,
+ bool on)
{
- u16 reg, pcs;
+ u16 reg;
+ int err;
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_SERDES_CTRL1, &reg);
+ if (err)
+ return err;
+
+ if (on)
+ reg &= ~(MV88E6393X_SERDES_CTRL1_TX_PDOWN |
+ MV88E6393X_SERDES_CTRL1_RX_PDOWN);
+ else
+ reg |= MV88E6393X_SERDES_CTRL1_TX_PDOWN |
+ MV88E6393X_SERDES_CTRL1_RX_PDOWN;
+
+ return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_SERDES_CTRL1, reg);
+}
+
+static int mv88e6393x_serdes_erratum_4_6(struct mv88e6xxx_chip *chip, int lane)
+{
+ u16 reg;
int err;
/* mv88e6393x family errata 4.6:
@@ -1284,26 +1345,45 @@ static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane)
* It seems that after this workaround the SERDES is automatically
* powered up (the bit is cleared), so power it down.
*/
- if (lane == MV88E6393X_PORT0_LANE || lane == MV88E6393X_PORT9_LANE ||
- lane == MV88E6393X_PORT10_LANE) {
- err = mv88e6390_serdes_read(chip, lane,
- MDIO_MMD_PHYXS,
- MV88E6393X_SERDES_POC, &reg);
- if (err)
- return err;
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_SERDES_POC, &reg);
+ if (err)
+ return err;
- reg &= ~MV88E6393X_SERDES_POC_PDOWN;
- reg |= MV88E6393X_SERDES_POC_RESET;
+ reg &= ~MV88E6393X_SERDES_POC_PDOWN;
+ reg |= MV88E6393X_SERDES_POC_RESET;
- err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6393X_SERDES_POC, reg);
- if (err)
- return err;
+ err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_SERDES_POC, reg);
+ if (err)
+ return err;
- err = mv88e6390_serdes_power_sgmii(chip, lane, false);
- if (err)
- return err;
- }
+ err = mv88e6390_serdes_power_sgmii(chip, lane, false);
+ if (err)
+ return err;
+
+ return mv88e6393x_serdes_power_lane(chip, lane, false);
+}
+
+int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ err = mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT0_LANE);
+ if (err)
+ return err;
+
+ err = mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT9_LANE);
+ if (err)
+ return err;
+
+ return mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT10_LANE);
+}
+
+static int mv88e6393x_serdes_erratum_4_8(struct mv88e6xxx_chip *chip, int lane)
+{
+ u16 reg, pcs;
+ int err;
/* mv88e6393x family errata 4.8:
* When a SERDES port is operating in 1000BASE-X or SGMII mode link may
@@ -1334,38 +1414,152 @@ static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane)
MV88E6393X_ERRATA_4_8_REG, reg);
}
-int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip)
+static int mv88e6393x_serdes_erratum_5_2(struct mv88e6xxx_chip *chip, int lane,
+ u8 cmode)
+{
+ static const struct {
+ u16 dev, reg, val, mask;
+ } fixes[] = {
+ { MDIO_MMD_VEND1, 0x8093, 0xcb5a, 0xffff },
+ { MDIO_MMD_VEND1, 0x8171, 0x7088, 0xffff },
+ { MDIO_MMD_VEND1, 0x80c9, 0x311a, 0xffff },
+ { MDIO_MMD_VEND1, 0x80a2, 0x8000, 0xff7f },
+ { MDIO_MMD_VEND1, 0x80a9, 0x0000, 0xfff0 },
+ { MDIO_MMD_VEND1, 0x80a3, 0x0000, 0xf8ff },
+ { MDIO_MMD_PHYXS, MV88E6393X_SERDES_POC,
+ MV88E6393X_SERDES_POC_RESET, MV88E6393X_SERDES_POC_RESET },
+ };
+ int err, i;
+ u16 reg;
+
+ /* mv88e6393x family errata 5.2:
+ * For optimal signal integrity the following sequence should be applied
+ * to SERDES operating in 10G mode. These registers only apply to 10G
+ * operation and have no effect on other speeds.
+ */
+ if (cmode != MV88E6393X_PORT_STS_CMODE_10GBASER)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(fixes); ++i) {
+ err = mv88e6390_serdes_read(chip, lane, fixes[i].dev,
+ fixes[i].reg, &reg);
+ if (err)
+ return err;
+
+ reg &= ~fixes[i].mask;
+ reg |= fixes[i].val;
+
+ err = mv88e6390_serdes_write(chip, lane, fixes[i].dev,
+ fixes[i].reg, reg);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mv88e6393x_serdes_fix_2500basex_an(struct mv88e6xxx_chip *chip,
+ int lane, u8 cmode, bool on)
{
+ u16 reg;
int err;
- err = mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT0_LANE);
+ if (cmode != MV88E6XXX_PORT_STS_CMODE_2500BASEX)
+ return 0;
+
+ /* Inband AN is broken on Amethyst in 2500base-x mode when set by
+ * the standard mechanism (via cmode).
+ * We can get around this by configuring the PCS mode to 1000base-x
+ * and then writing value 0x58 to register 1e.8000. (This must be done
+ * while the SerDes receiver and transmitter are disabled, which is
+ * the case when this function is called.)
+ * It seems that when we do this configuration to 2500base-x mode (by
+ * changing PCS mode to 1000base-x and frequency to 3.125 GHz from
+ * 1.25 GHz) and then configure to sgmii or 1000base-x, the device
+ * thinks that it already has SerDes at 1.25 GHz and does not change
+ * the 1e.8000 register, leaving SerDes at 3.125 GHz.
+ * To avoid this, change PCS mode back to 2500base-x when disabling
+ * SerDes from 2500base-x mode.
+ */
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_SERDES_POC, &reg);
+ if (err)
+ return err;
+
+ reg &= ~(MV88E6393X_SERDES_POC_PCS_MASK | MV88E6393X_SERDES_POC_AN);
+ if (on)
+ reg |= MV88E6393X_SERDES_POC_PCS_1000BASEX |
+ MV88E6393X_SERDES_POC_AN;
+ else
+ reg |= MV88E6393X_SERDES_POC_PCS_2500BASEX;
+ reg |= MV88E6393X_SERDES_POC_RESET;
+
+ err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_SERDES_POC, reg);
if (err)
return err;
- err = mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT9_LANE);
+ err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_VEND1, 0x8000, 0x58);
if (err)
return err;
- return mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT10_LANE);
+ return 0;
}
int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
bool on)
{
u8 cmode = chip->ports[port].cmode;
+ int err;
if (port != 0 && port != 9 && port != 10)
return -EOPNOTSUPP;
+ if (on) {
+ err = mv88e6393x_serdes_erratum_4_8(chip, lane);
+ if (err)
+ return err;
+
+ err = mv88e6393x_serdes_erratum_5_2(chip, lane, cmode);
+ if (err)
+ return err;
+
+ err = mv88e6393x_serdes_fix_2500basex_an(chip, lane, cmode,
+ true);
+ if (err)
+ return err;
+
+ err = mv88e6393x_serdes_power_lane(chip, lane, true);
+ if (err)
+ return err;
+ }
+
switch (cmode) {
case MV88E6XXX_PORT_STS_CMODE_SGMII:
case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- return mv88e6390_serdes_power_sgmii(chip, lane, on);
+ err = mv88e6390_serdes_power_sgmii(chip, lane, on);
+ break;
case MV88E6393X_PORT_STS_CMODE_5GBASER:
case MV88E6393X_PORT_STS_CMODE_10GBASER:
- return mv88e6390_serdes_power_10g(chip, lane, on);
+ err = mv88e6390_serdes_power_10g(chip, lane, on);
+ break;
+ default:
+ err = -EINVAL;
+ break;
}
- return 0;
+ if (err)
+ return err;
+
+ if (!on) {
+ err = mv88e6393x_serdes_power_lane(chip, lane, false);
+ if (err)
+ return err;
+
+ err = mv88e6393x_serdes_fix_2500basex_an(chip, lane, cmode,
+ false);
+ }
+
+ return err;
}
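
The hunks above switch the SerDes between 1000base-x (with in-band AN) and 2500base-x by read-modify-writing the Port Operational Configuration (POC) register and strobing its reset bit. A minimal standalone sketch of that read-modify-write pattern is below; the PCS encodings and the fake register backing store are placeholders, not the real serdes.h values or MDIO accessors.

#include <stdint.h>
#include <stdio.h>

/* Stand-in field values; the real encodings live in serdes.h. */
#define POC_PCS_MASK       0x0007
#define POC_PCS_1000BASEX  0x0000   /* placeholder encoding */
#define POC_PCS_2500BASEX  0x0001   /* placeholder encoding */
#define POC_AN             (1u << 3)
#define POC_RESET          (1u << 15)

/* Fake register standing in for mv88e6390_serdes_read()/write(). */
static uint16_t poc_reg;

static uint16_t poc_read(void)          { return poc_reg; }
static void     poc_write(uint16_t val) { poc_reg = val & ~POC_RESET; /* reset self-clears */ }

/* Mirror of the fix_2500basex_an() flow: pick PCS mode + AN, then strobe reset. */
static void set_pcs_mode(int enable_1000basex_an)
{
        uint16_t reg = poc_read();

        reg &= ~(POC_PCS_MASK | POC_AN);
        if (enable_1000basex_an)
                reg |= POC_PCS_1000BASEX | POC_AN;
        else
                reg |= POC_PCS_2500BASEX;
        reg |= POC_RESET;
        poc_write(reg);
}

int main(void)
{
        set_pcs_mode(1);
        printf("POC after enabling 1000base-x + AN: 0x%04x\n", poc_read());
        set_pcs_mode(0);
        printf("POC after restoring 2500base-x:     0x%04x\n", poc_read());
        return 0;
}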
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index cbb3ba30caea..8dd8ed225b45 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -93,6 +93,10 @@
#define MV88E6393X_SERDES_POC_PCS_MASK 0x0007
#define MV88E6393X_SERDES_POC_RESET BIT(15)
#define MV88E6393X_SERDES_POC_PDOWN BIT(5)
+#define MV88E6393X_SERDES_POC_AN BIT(3)
+#define MV88E6393X_SERDES_CTRL1 0xf003
+#define MV88E6393X_SERDES_CTRL1_TX_PDOWN BIT(9)
+#define MV88E6393X_SERDES_CTRL1_RX_PDOWN BIT(8)
#define MV88E6393X_ERRATA_4_8_REG 0xF074
#define MV88E6393X_ERRATA_4_8_BIT BIT(14)
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 327cc4654806..f1a05e7dc818 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -290,8 +290,11 @@ static int felix_setup_mmio_filtering(struct felix *felix)
}
}
- if (cpu < 0)
+ if (cpu < 0) {
+ kfree(tagging_rule);
+ kfree(redirect_rule);
return -EINVAL;
+ }
tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE;
*(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588);
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index a429c9750add..147ca39531a3 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -1256,8 +1256,12 @@ qca8k_setup(struct dsa_switch *ds)
/* Set initial MTU for every port.
* We have only have a general MTU setting. So track
* every port and set the max across all port.
+ * Set the per-port MTU to 1500 as the MTU change function
+ * will add the overhead and if it's set to 1518 then it
+ * will apply the overhead again and we will end up with
+ * an MTU of 1536 instead of 1518.
*/
- priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN;
+ priv->port_mtu[i] = ETH_DATA_LEN;
}
/* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */
@@ -1433,6 +1437,12 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
+ /* From original code is reported port instability as SGMII also
+ * require delay set. Apply advised values here or take them from DT.
+ */
+ if (state->interface == PHY_INTERFACE_MODE_SGMII)
+ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
+
/* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and
* falling edge is set writing in the PORT0 PAD reg
*/
@@ -1455,12 +1465,6 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
val);
- /* From original code is reported port instability as SGMII also
- * require delay set. Apply advised values here or take them from DT.
- */
- if (state->interface == PHY_INTERFACE_MODE_SGMII)
- qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
-
break;
default:
dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
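
The comment in the qca8k hunk above explains why the tracked per-port MTU must be seeded without the Ethernet overhead: the MTU change path adds the header and FCS itself, so starting from a full frame length would count the overhead twice. A small illustrative calculation, assuming the usual 14-byte header and 4-byte FCS:

#include <stdio.h>

#define ETH_HLEN     14     /* Ethernet header */
#define ETH_FCS_LEN   4     /* frame check sequence */
#define ETH_DATA_LEN 1500   /* default MTU */

/* Stand-in for the MTU-change path, which adds the overhead on top of
 * whatever value was tracked for the port. */
static int frame_len_from_tracked_mtu(int tracked_mtu)
{
        return tracked_mtu + ETH_HLEN + ETH_FCS_LEN;
}

int main(void)
{
        /* Seeding with the full frame length double-counts the overhead. */
        printf("seed 1518 -> %d (too big)\n",
               frame_len_from_tracked_mtu(ETH_DATA_LEN + ETH_HLEN + ETH_FCS_LEN));
        /* Seeding with the plain MTU gives the expected 1518-byte frame. */
        printf("seed 1500 -> %d (expected)\n",
               frame_len_from_tracked_mtu(ETH_DATA_LEN));
        return 0;
}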
diff --git a/drivers/net/dsa/rtl8365mb.c b/drivers/net/dsa/rtl8365mb.c
index baaae97283c5..078ca4cd7160 100644
--- a/drivers/net/dsa/rtl8365mb.c
+++ b/drivers/net/dsa/rtl8365mb.c
@@ -107,6 +107,7 @@
#define RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC 2112
/* Family-specific data and limits */
+#define RTL8365MB_PHYADDRMAX 7
#define RTL8365MB_NUM_PHYREGS 32
#define RTL8365MB_PHYREGMAX (RTL8365MB_NUM_PHYREGS - 1)
#define RTL8365MB_MAX_NUM_PORTS (RTL8365MB_CPU_PORT_NUM_8365MB_VC + 1)
@@ -176,7 +177,7 @@
#define RTL8365MB_INDIRECT_ACCESS_STATUS_REG 0x1F01
#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG 0x1F02
#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK GENMASK(4, 0)
-#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK GENMASK(6, 5)
+#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK GENMASK(7, 5)
#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK GENMASK(11, 8)
#define RTL8365MB_PHY_BASE 0x2000
#define RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG 0x1F03
@@ -679,6 +680,9 @@ static int rtl8365mb_phy_read(struct realtek_smi *smi, int phy, int regnum)
u16 val;
int ret;
+ if (phy > RTL8365MB_PHYADDRMAX)
+ return -EINVAL;
+
if (regnum > RTL8365MB_PHYREGMAX)
return -EINVAL;
@@ -704,6 +708,9 @@ static int rtl8365mb_phy_write(struct realtek_smi *smi, int phy, int regnum,
u32 ocp_addr;
int ret;
+ if (phy > RTL8365MB_PHYADDRMAX)
+ return -EINVAL;
+
if (regnum > RTL8365MB_PHYREGMAX)
return -EINVAL;
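
The rtl8365mb hunks above widen the PHYNUM field from bits 6:5 to bits 7:5 and bound-check the PHY address against 7, since a 3-bit field can only address PHYs 0..7. A quick standalone sketch of building and checking such a field, with a local GENMASK helper in place of the kernel macro:

#include <stdint.h>
#include <stdio.h>

/* Local equivalent of the kernel's GENMASK() for 32-bit values. */
#define GENMASK_U32(h, l) \
        ((uint32_t)(((~0u) << (l)) & (~0u >> (31 - (h)))))

#define PHYNUM_MASK GENMASK_U32(7, 5)   /* 3 bits -> PHY addresses 0..7 */
#define PHYADDRMAX  7

static int encode_phy(uint32_t *reg, unsigned int phy)
{
        if (phy > PHYADDRMAX)            /* reject addresses the field cannot hold */
                return -1;
        *reg &= ~PHYNUM_MASK;
        *reg |= (phy << 5) & PHYNUM_MASK;
        return 0;
}

int main(void)
{
        uint32_t reg = 0;

        if (encode_phy(&reg, 7) == 0)
                printf("phy 7 -> reg 0x%03x\n", reg);   /* 0x0e0 */
        if (encode_phy(&reg, 8) != 0)
                printf("phy 8 rejected\n");
        return 0;
}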
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index d75d95a97dd9..993b2fb42961 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -1430,16 +1430,19 @@ static int altera_tse_probe(struct platform_device *pdev)
priv->rxdescmem_busaddr = dma_res->start;
} else {
+ ret = -ENODEV;
goto err_free_netdev;
}
- if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
+ if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) {
dma_set_coherent_mask(priv->device,
DMA_BIT_MASK(priv->dmaops->dmamask));
- else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
+ } else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) {
dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
- else
+ } else {
+ ret = -EIO;
goto err_free_netdev;
+ }
/* MAC address space */
ret = request_and_map(pdev, "control_port", &control_port,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
index 23b2d390fcdd..ace691d7cd75 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -40,10 +40,12 @@
#define AQ_DEVICE_ID_AQC113DEV 0x00C0
#define AQ_DEVICE_ID_AQC113CS 0x94C0
+#define AQ_DEVICE_ID_AQC113CA 0x34C0
#define AQ_DEVICE_ID_AQC114CS 0x93C0
#define AQ_DEVICE_ID_AQC113 0x04C0
#define AQ_DEVICE_ID_AQC113C 0x14C0
#define AQ_DEVICE_ID_AQC115C 0x12C0
+#define AQ_DEVICE_ID_AQC116C 0x11C0
#define HW_ATL_NIC_NAME "Marvell (aQuantia) AQtion 10Gbit Network Adapter"
@@ -53,20 +55,19 @@
#define AQ_NIC_RATE_10G BIT(0)
#define AQ_NIC_RATE_5G BIT(1)
-#define AQ_NIC_RATE_5GSR BIT(2)
-#define AQ_NIC_RATE_2G5 BIT(3)
-#define AQ_NIC_RATE_1G BIT(4)
-#define AQ_NIC_RATE_100M BIT(5)
-#define AQ_NIC_RATE_10M BIT(6)
-#define AQ_NIC_RATE_1G_HALF BIT(7)
-#define AQ_NIC_RATE_100M_HALF BIT(8)
-#define AQ_NIC_RATE_10M_HALF BIT(9)
+#define AQ_NIC_RATE_2G5 BIT(2)
+#define AQ_NIC_RATE_1G BIT(3)
+#define AQ_NIC_RATE_100M BIT(4)
+#define AQ_NIC_RATE_10M BIT(5)
+#define AQ_NIC_RATE_1G_HALF BIT(6)
+#define AQ_NIC_RATE_100M_HALF BIT(7)
+#define AQ_NIC_RATE_10M_HALF BIT(8)
-#define AQ_NIC_RATE_EEE_10G BIT(10)
-#define AQ_NIC_RATE_EEE_5G BIT(11)
-#define AQ_NIC_RATE_EEE_2G5 BIT(12)
-#define AQ_NIC_RATE_EEE_1G BIT(13)
-#define AQ_NIC_RATE_EEE_100M BIT(14)
+#define AQ_NIC_RATE_EEE_10G BIT(9)
+#define AQ_NIC_RATE_EEE_5G BIT(10)
+#define AQ_NIC_RATE_EEE_2G5 BIT(11)
+#define AQ_NIC_RATE_EEE_1G BIT(12)
+#define AQ_NIC_RATE_EEE_100M BIT(13)
#define AQ_NIC_RATE_EEE_MSK (AQ_NIC_RATE_EEE_10G |\
AQ_NIC_RATE_EEE_5G |\
AQ_NIC_RATE_EEE_2G5 |\
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 062a300a566a..dbd284660135 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -80,6 +80,8 @@ struct aq_hw_link_status_s {
};
struct aq_stats_s {
+ u64 brc;
+ u64 btc;
u64 uprc;
u64 mprc;
u64 bprc;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 1acf544afeb4..33f1a1377588 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -316,18 +316,22 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
aq_macsec_init(self);
#endif
- mutex_lock(&self->fwreq_mutex);
- err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr);
- mutex_unlock(&self->fwreq_mutex);
- if (err)
- goto err_exit;
+ if (platform_get_ethdev_address(&self->pdev->dev, self->ndev) != 0) {
+ // If DT has none or an invalid one, ask device for MAC address
+ mutex_lock(&self->fwreq_mutex);
+ err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr);
+ mutex_unlock(&self->fwreq_mutex);
- eth_hw_addr_set(self->ndev, addr);
+ if (err)
+ goto err_exit;
- if (!is_valid_ether_addr(self->ndev->dev_addr) ||
- !aq_nic_is_valid_ether_addr(self->ndev->dev_addr)) {
- netdev_warn(self->ndev, "MAC is invalid, will use random.");
- eth_hw_addr_random(self->ndev);
+ if (is_valid_ether_addr(addr) &&
+ aq_nic_is_valid_ether_addr(addr)) {
+ eth_hw_addr_set(self->ndev, addr);
+ } else {
+ netdev_warn(self->ndev, "MAC is invalid, will use random.");
+ eth_hw_addr_random(self->ndev);
+ }
}
#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
@@ -905,8 +909,14 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
data[++i] = stats->mbtc;
data[++i] = stats->bbrc;
data[++i] = stats->bbtc;
- data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
- data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
+ if (stats->brc)
+ data[++i] = stats->brc;
+ else
+ data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
+ if (stats->btc)
+ data[++i] = stats->btc;
+ else
+ data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
data[++i] = stats->dma_pkt_rc;
data[++i] = stats->dma_pkt_tc;
data[++i] = stats->dma_oct_rc;
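
The aq_nic hunk above reorders MAC address discovery: a platform/device-tree address is preferred, the firmware's permanent address is the fallback, and a random locally administered address is used only if both are missing or invalid. A condensed standalone sketch of that precedence chain, with trivial stand-in providers instead of the real DT and firmware calls:

#include <stdio.h>
#include <string.h>

/* Stand-in providers: return 0 and fill addr on success, negative on failure. */
static int get_addr_from_platform(unsigned char *addr) { (void)addr; return -1; }
static int get_addr_from_firmware(unsigned char *addr)
{
        static const unsigned char fw[6] = { 0x00, 0x17, 0xb6, 0x01, 0x02, 0x03 };
        memcpy(addr, fw, 6);
        return 0;
}

static int addr_is_valid(const unsigned char *addr)
{
        static const unsigned char zero[6];
        return memcmp(addr, zero, 6) != 0 && !(addr[0] & 1); /* non-zero, unicast */
}

static void pick_mac(unsigned char *addr)
{
        if (get_addr_from_platform(addr) == 0)
                return;                          /* 1) platform/DT address */
        if (get_addr_from_firmware(addr) == 0 && addr_is_valid(addr))
                return;                          /* 2) firmware permanent address */
        memset(addr, 0, 6);                      /* 3) fall back to a made-up one */
        addr[0] = 0x02;                          /* locally administered bit */
}

int main(void)
{
        unsigned char mac[6];

        pick_mac(mac);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}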
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index d4b1976ee69b..797a95142d1f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -49,6 +49,8 @@ static const struct pci_device_id aq_pci_tbl[] = {
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113), },
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113C), },
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC115C), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113CA), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC116C), },
{}
};
@@ -85,7 +87,10 @@ static const struct aq_board_revision_s hw_atl_boards[] = {
{ AQ_DEVICE_ID_AQC113CS, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
{ AQ_DEVICE_ID_AQC114CS, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
{ AQ_DEVICE_ID_AQC113C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
- { AQ_DEVICE_ID_AQC115C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC115C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc115c, },
+ { AQ_DEVICE_ID_AQC113CA, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC116C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc116c, },
+
};
MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 24122ccda614..77e76c9efd32 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -298,13 +298,14 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
}
}
- if (unlikely(buff->is_eop)) {
+ if (unlikely(buff->is_eop && buff->skb)) {
u64_stats_update_begin(&self->stats.tx.syncp);
++self->stats.tx.packets;
self->stats.tx.bytes += buff->skb->len;
u64_stats_update_end(&self->stats.tx.syncp);
dev_kfree_skb_any(buff->skb);
+ buff->skb = NULL;
}
buff->pa = 0U;
buff->eop_index = 0xffffU;
@@ -365,6 +366,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
if (!buff->is_eop) {
buff_ = buff;
do {
+ if (buff_->next >= self->size) {
+ err = -EIO;
+ goto err_exit;
+ }
next_ = buff_->next,
buff_ = &self->buff_ring[next_];
is_rsc_completed =
@@ -388,6 +393,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
(buff->is_lro && buff->is_cso_err)) {
buff_ = buff;
do {
+ if (buff_->next >= self->size) {
+ err = -EIO;
+ goto err_exit;
+ }
next_ = buff_->next,
buff_ = &self->buff_ring[next_];
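
Both RSC loops in the aq_ring hunk above gain a bounds check on the descriptor's next index before it is used to walk the buffer ring, so a corrupted descriptor cannot send the loop outside the ring array. A standalone sketch of that defensive traversal, using a fixed-size ring and hypothetical field names:

#include <stdio.h>

#define RING_SIZE 8

struct buf {
        unsigned int next;   /* index of the next buffer in the chain */
        int is_eop;          /* end of packet */
};

/* Walk a chained packet; return 0 on success, -1 if a next index is bogus. */
static int walk_chain(const struct buf *ring, unsigned int start)
{
        unsigned int idx = start;

        while (!ring[idx].is_eop) {
                if (ring[idx].next >= RING_SIZE)   /* reject corrupted descriptors */
                        return -1;
                idx = ring[idx].next;
        }
        return 0;
}

int main(void)
{
        struct buf ring[RING_SIZE] = {
                [0] = { .next = 1 }, [1] = { .next = 2 }, [2] = { .is_eop = 1 },
                [3] = { .next = 42 },                     /* corrupted chain */
        };

        printf("good chain: %d\n", walk_chain(ring, 0));   /* 0 */
        printf("bad chain:  %d\n", walk_chain(ring, 3));   /* -1 */
        return 0;
}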
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index d281322d7dd2..f4774cf051c9 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -362,9 +362,6 @@ unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u
{
unsigned int count;
- WARN_ONCE(!aq_vec_is_valid_tc(self, tc),
- "Invalid tc %u (#rx=%u, #tx=%u)\n",
- tc, self->rx_rings, self->tx_rings);
if (!aq_vec_is_valid_tc(self, tc))
return 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 3f1704cbe1cb..7e88d7234b14 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -867,12 +867,20 @@ static int hw_atl_fw1x_deinit(struct aq_hw_s *self)
int hw_atl_utils_update_stats(struct aq_hw_s *self)
{
struct aq_stats_s *cs = &self->curr_stats;
+ struct aq_stats_s curr_stats = *cs;
struct hw_atl_utils_mbox mbox;
+ bool corrupted_stats = false;
hw_atl_utils_mpi_read_stats(self, &mbox);
-#define AQ_SDELTA(_N_) (self->curr_stats._N_ += \
- mbox.stats._N_ - self->last_stats._N_)
+#define AQ_SDELTA(_N_) \
+do { \
+ if (!corrupted_stats && \
+ ((s64)(mbox.stats._N_ - self->last_stats._N_)) >= 0) \
+ curr_stats._N_ += mbox.stats._N_ - self->last_stats._N_; \
+ else \
+ corrupted_stats = true; \
+} while (0)
if (self->aq_link_status.mbps) {
AQ_SDELTA(uprc);
@@ -892,6 +900,9 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self)
AQ_SDELTA(bbrc);
AQ_SDELTA(bbtc);
AQ_SDELTA(dpc);
+
+ if (!corrupted_stats)
+ *cs = curr_stats;
}
#undef AQ_SDELTA
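
The AQ_SDELTA rework above accumulates firmware counter deltas into a scratch copy and only commits the whole copy if every delta was non-negative; a single counter that appears to run backwards marks the snapshot as corrupted and the previous totals are kept. A small standalone model of that all-or-nothing update, with plain structs in place of the mailbox and stats structures:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

struct counters { uint64_t rx_pkts, tx_pkts; };

/* Add (snapshot - last) to *totals only if no counter moved backwards. */
static bool accumulate(struct counters *totals,
                       const struct counters *last,
                       const struct counters *snapshot)
{
        struct counters scratch = *totals;   /* work on a copy */
        bool corrupted = false;

#define SDELTA(field)                                                   \
do {                                                                    \
        if (!corrupted &&                                               \
            (int64_t)(snapshot->field - last->field) >= 0)              \
                scratch.field += snapshot->field - last->field;         \
        else                                                            \
                corrupted = true;                                       \
} while (0)

        SDELTA(rx_pkts);
        SDELTA(tx_pkts);
#undef SDELTA

        if (!corrupted)
                *totals = scratch;           /* commit only a fully consistent update */
        return !corrupted;
}

int main(void)
{
        struct counters totals = { 0, 0 }, last = { 100, 200 };
        struct counters good = { 110, 205 }, bad = { 90, 250 };

        printf("good snapshot committed: %d\n", accumulate(&totals, &last, &good));
        printf("rx=%llu tx=%llu\n",
               (unsigned long long)totals.rx_pkts,
               (unsigned long long)totals.tx_pkts);
        printf("bad snapshot committed:  %d\n", accumulate(&totals, &last, &bad));
        return 0;
}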
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index eac631c45c56..4d4cfbc91e19 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -132,9 +132,6 @@ static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed)
if (speed & AQ_NIC_RATE_5G)
rate |= FW2X_RATE_5G;
- if (speed & AQ_NIC_RATE_5GSR)
- rate |= FW2X_RATE_5G;
-
if (speed & AQ_NIC_RATE_2G5)
rate |= FW2X_RATE_2G5;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
index c98708bb044c..5dfc751572ed 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
@@ -65,11 +65,25 @@ const struct aq_hw_caps_s hw_atl2_caps_aqc113 = {
AQ_NIC_RATE_5G |
AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
- AQ_NIC_RATE_1G_HALF |
AQ_NIC_RATE_100M |
- AQ_NIC_RATE_100M_HALF |
- AQ_NIC_RATE_10M |
- AQ_NIC_RATE_10M_HALF,
+ AQ_NIC_RATE_10M,
+};
+
+const struct aq_hw_caps_s hw_atl2_caps_aqc115c = {
+ DEFAULT_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = AQ_NIC_RATE_2G5 |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M |
+ AQ_NIC_RATE_10M,
+};
+
+const struct aq_hw_caps_s hw_atl2_caps_aqc116c = {
+ DEFAULT_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M |
+ AQ_NIC_RATE_10M,
};
static u32 hw_atl2_sem_act_rslvr_get(struct aq_hw_s *self)
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h
index de8723f1c28a..346f0dc9912e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h
@@ -9,6 +9,8 @@
#include "aq_common.h"
extern const struct aq_hw_caps_s hw_atl2_caps_aqc113;
+extern const struct aq_hw_caps_s hw_atl2_caps_aqc115c;
+extern const struct aq_hw_caps_s hw_atl2_caps_aqc116c;
extern const struct aq_hw_ops hw_atl2_ops;
#endif /* HW_ATL2_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h
index b66fa346581c..6bad64c77b87 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h
@@ -239,7 +239,8 @@ struct version_s {
u8 minor;
u16 build;
} phy;
- u32 rsvd;
+ u32 drv_iface_ver:4;
+ u32 rsvd:28;
};
struct link_status_s {
@@ -424,7 +425,7 @@ struct cable_diag_status_s {
u16 rsvd2;
};
-struct statistics_s {
+struct statistics_a0_s {
struct {
u32 link_up;
u32 link_down;
@@ -457,6 +458,33 @@ struct statistics_s {
u32 reserve_fw_gap;
};
+struct __packed statistics_b0_s {
+ u64 rx_good_octets;
+ u64 rx_pause_frames;
+ u64 rx_good_frames;
+ u64 rx_errors;
+ u64 rx_unicast_frames;
+ u64 rx_multicast_frames;
+ u64 rx_broadcast_frames;
+
+ u64 tx_good_octets;
+ u64 tx_pause_frames;
+ u64 tx_good_frames;
+ u64 tx_errors;
+ u64 tx_unicast_frames;
+ u64 tx_multicast_frames;
+ u64 tx_broadcast_frames;
+
+ u32 main_loop_cycles;
+};
+
+struct __packed statistics_s {
+ union __packed {
+ struct statistics_a0_s a0;
+ struct statistics_b0_s b0;
+ };
+};
+
struct filter_caps_s {
u8 l2_filters_base_index:6;
u8 flexible_filter_mask:2;
@@ -545,7 +573,7 @@ struct management_status_s {
u32 rsvd5;
};
-struct fw_interface_out {
+struct __packed fw_interface_out {
struct transaction_counter_s transaction_id;
struct version_s version;
struct link_status_s link_status;
@@ -569,7 +597,6 @@ struct fw_interface_out {
struct core_dump_s core_dump;
u32 rsvd11;
struct statistics_s stats;
- u32 rsvd12;
struct filter_caps_s filter_caps;
struct device_caps_s device_caps;
u32 rsvd13;
@@ -592,6 +619,9 @@ struct fw_interface_out {
#define AQ_HOST_MODE_LOW_POWER 3U
#define AQ_HOST_MODE_SHUTDOWN 4U
+#define AQ_A2_FW_INTERFACE_A0 0
+#define AQ_A2_FW_INTERFACE_B0 1
+
int hw_atl2_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops);
int hw_atl2_utils_soft_reset(struct aq_hw_s *self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
index dd259c8f2f4f..58d426dda3ed 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
@@ -84,7 +84,7 @@ static int hw_atl2_shared_buffer_read_block(struct aq_hw_s *self,
if (cnt > AQ_A2_FW_READ_TRY_MAX)
return -ETIME;
if (tid1.transaction_cnt_a != tid1.transaction_cnt_b)
- udelay(1);
+ mdelay(1);
} while (tid1.transaction_cnt_a != tid1.transaction_cnt_b);
hw_atl2_mif_shared_buf_read(self, offset, (u32 *)data, dwords);
@@ -154,7 +154,7 @@ static void a2_link_speed_mask2fw(u32 speed,
{
link_options->rate_10G = !!(speed & AQ_NIC_RATE_10G);
link_options->rate_5G = !!(speed & AQ_NIC_RATE_5G);
- link_options->rate_N5G = !!(speed & AQ_NIC_RATE_5GSR);
+ link_options->rate_N5G = link_options->rate_5G;
link_options->rate_2P5G = !!(speed & AQ_NIC_RATE_2G5);
link_options->rate_N2P5G = link_options->rate_2P5G;
link_options->rate_1G = !!(speed & AQ_NIC_RATE_1G);
@@ -192,8 +192,6 @@ static u32 a2_fw_lkp_to_mask(struct lkp_link_caps_s *lkp_link_caps)
rate |= AQ_NIC_RATE_10G;
if (lkp_link_caps->rate_5G)
rate |= AQ_NIC_RATE_5G;
- if (lkp_link_caps->rate_N5G)
- rate |= AQ_NIC_RATE_5GSR;
if (lkp_link_caps->rate_2P5G)
rate |= AQ_NIC_RATE_2G5;
if (lkp_link_caps->rate_1G)
@@ -335,15 +333,22 @@ static int aq_a2_fw_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
return 0;
}
-static int aq_a2_fw_update_stats(struct aq_hw_s *self)
+static void aq_a2_fill_a0_stats(struct aq_hw_s *self,
+ struct statistics_s *stats)
{
struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
- struct statistics_s stats;
-
- hw_atl2_shared_buffer_read_safe(self, stats, &stats);
-
-#define AQ_SDELTA(_N_, _F_) (self->curr_stats._N_ += \
- stats.msm._F_ - priv->last_stats.msm._F_)
+ struct aq_stats_s *cs = &self->curr_stats;
+ struct aq_stats_s curr_stats = *cs;
+ bool corrupted_stats = false;
+
+#define AQ_SDELTA(_N, _F) \
+do { \
+ if (!corrupted_stats && \
+ ((s64)(stats->a0.msm._F - priv->last_stats.a0.msm._F)) >= 0) \
+ curr_stats._N += stats->a0.msm._F - priv->last_stats.a0.msm._F;\
+ else \
+ corrupted_stats = true; \
+} while (0)
if (self->aq_link_status.mbps) {
AQ_SDELTA(uprc, rx_unicast_frames);
@@ -362,17 +367,76 @@ static int aq_a2_fw_update_stats(struct aq_hw_s *self)
AQ_SDELTA(mbtc, tx_multicast_octets);
AQ_SDELTA(bbrc, rx_broadcast_octets);
AQ_SDELTA(bbtc, tx_broadcast_octets);
+
+ if (!corrupted_stats)
+ *cs = curr_stats;
}
#undef AQ_SDELTA
- self->curr_stats.dma_pkt_rc =
- hw_atl_stats_rx_dma_good_pkt_counter_get(self);
- self->curr_stats.dma_pkt_tc =
- hw_atl_stats_tx_dma_good_pkt_counter_get(self);
- self->curr_stats.dma_oct_rc =
- hw_atl_stats_rx_dma_good_octet_counter_get(self);
- self->curr_stats.dma_oct_tc =
- hw_atl_stats_tx_dma_good_octet_counter_get(self);
- self->curr_stats.dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
+
+}
+
+static void aq_a2_fill_b0_stats(struct aq_hw_s *self,
+ struct statistics_s *stats)
+{
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct aq_stats_s *cs = &self->curr_stats;
+ struct aq_stats_s curr_stats = *cs;
+ bool corrupted_stats = false;
+
+#define AQ_SDELTA(_N, _F) \
+do { \
+ if (!corrupted_stats && \
+ ((s64)(stats->b0._F - priv->last_stats.b0._F)) >= 0) \
+ curr_stats._N += stats->b0._F - priv->last_stats.b0._F; \
+ else \
+ corrupted_stats = true; \
+} while (0)
+
+ if (self->aq_link_status.mbps) {
+ AQ_SDELTA(uprc, rx_unicast_frames);
+ AQ_SDELTA(mprc, rx_multicast_frames);
+ AQ_SDELTA(bprc, rx_broadcast_frames);
+ AQ_SDELTA(erpr, rx_errors);
+ AQ_SDELTA(brc, rx_good_octets);
+
+ AQ_SDELTA(uptc, tx_unicast_frames);
+ AQ_SDELTA(mptc, tx_multicast_frames);
+ AQ_SDELTA(bptc, tx_broadcast_frames);
+ AQ_SDELTA(erpt, tx_errors);
+ AQ_SDELTA(btc, tx_good_octets);
+
+ if (!corrupted_stats)
+ *cs = curr_stats;
+ }
+#undef AQ_SDELTA
+}
+
+static int aq_a2_fw_update_stats(struct aq_hw_s *self)
+{
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct aq_stats_s *cs = &self->curr_stats;
+ struct statistics_s stats;
+ struct version_s version;
+ int err;
+
+ err = hw_atl2_shared_buffer_read_safe(self, version, &version);
+ if (err)
+ return err;
+
+ err = hw_atl2_shared_buffer_read_safe(self, stats, &stats);
+ if (err)
+ return err;
+
+ if (version.drv_iface_ver == AQ_A2_FW_INTERFACE_A0)
+ aq_a2_fill_a0_stats(self, &stats);
+ else
+ aq_a2_fill_b0_stats(self, &stats);
+
+ cs->dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counter_get(self);
+ cs->dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counter_get(self);
+ cs->dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counter_get(self);
+ cs->dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counter_get(self);
+ cs->dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
memcpy(&priv->last_stats, &stats, sizeof(stats));
@@ -499,9 +563,9 @@ u32 hw_atl2_utils_get_fw_version(struct aq_hw_s *self)
hw_atl2_shared_buffer_read_safe(self, version, &version);
/* A2 FW version is stored in reverse order */
- return version.mac.major << 24 |
- version.mac.minor << 16 |
- version.mac.build;
+ return version.bundle.major << 24 |
+ version.bundle.minor << 16 |
+ version.bundle.build;
}
int hw_atl2_utils_get_action_resolve_table_caps(struct aq_hw_s *self,
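
The hw_atl2 changes above make the firmware statistics block a union of two layouts and pick the right one at runtime from drv_iface_ver (A0 versus B0) read out of the shared-buffer version structure. A condensed standalone illustration of dispatching on an interface version over a union, with simplified layouts and made-up field contents:

#include <stdint.h>
#include <stdio.h>

#define IFACE_A0 0
#define IFACE_B0 1

struct stats_a0 { uint32_t rx_frames, tx_frames; };
struct stats_b0 { uint64_t rx_good_frames, tx_good_frames; };

/* One buffer, two possible layouts - like the statistics_s union above. */
union stats { struct stats_a0 a0; struct stats_b0 b0; };

static uint64_t rx_frames(unsigned int iface_ver, const union stats *s)
{
        if (iface_ver == IFACE_A0)
                return s->a0.rx_frames;        /* legacy layout */
        return s->b0.rx_good_frames;           /* current layout */
}

int main(void)
{
        union stats s;

        s.b0.rx_good_frames = 12345;
        s.b0.tx_good_frames = 6789;
        printf("rx (B0 firmware): %llu\n",
               (unsigned long long)rx_frames(IFACE_B0, &s));
        return 0;
}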
diff --git a/drivers/net/ethernet/asix/ax88796c_spi.c b/drivers/net/ethernet/asix/ax88796c_spi.c
index 94df4f96d2be..0710e716d682 100644
--- a/drivers/net/ethernet/asix/ax88796c_spi.c
+++ b/drivers/net/ethernet/asix/ax88796c_spi.c
@@ -34,7 +34,7 @@ int axspi_read_status(struct axspi_data *ax_spi, struct spi_status *status)
/* OP */
ax_spi->cmd_buf[0] = AX_SPICMD_READ_STATUS;
- ret = spi_write_then_read(ax_spi->spi, ax_spi->cmd_buf, 1, (u8 *)&status, 3);
+ ret = spi_write_then_read(ax_spi->spi, ax_spi->cmd_buf, 1, (u8 *)status, 3);
if (ret)
dev_err(&ax_spi->spi->dev, "%s() failed: ret = %d\n", __func__, ret);
else
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 88d2ab748399..4579ddf9c427 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1913,15 +1913,12 @@ static int ag71xx_probe(struct platform_device *pdev)
ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
if (IS_ERR(ag->mac_reset)) {
netif_err(ag, probe, ndev, "missing mac reset\n");
- err = PTR_ERR(ag->mac_reset);
- goto err_free;
+ return PTR_ERR(ag->mac_reset);
}
ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!ag->mac_base) {
- err = -ENOMEM;
- goto err_free;
- }
+ if (!ag->mac_base)
+ return -ENOMEM;
ndev->irq = platform_get_irq(pdev, 0);
err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
@@ -1929,7 +1926,7 @@ static int ag71xx_probe(struct platform_device *pdev)
if (err) {
netif_err(ag, probe, ndev, "unable to request IRQ %d\n",
ndev->irq);
- goto err_free;
+ return err;
}
ndev->netdev_ops = &ag71xx_netdev_ops;
@@ -1957,10 +1954,8 @@ static int ag71xx_probe(struct platform_device *pdev)
ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
sizeof(struct ag71xx_desc),
&ag->stop_desc_dma, GFP_KERNEL);
- if (!ag->stop_desc) {
- err = -ENOMEM;
- goto err_free;
- }
+ if (!ag->stop_desc)
+ return -ENOMEM;
ag->stop_desc->data = 0;
ag->stop_desc->ctrl = 0;
@@ -1975,7 +1970,7 @@ static int ag71xx_probe(struct platform_device *pdev)
err = of_get_phy_mode(np, &ag->phy_if_mode);
if (err) {
netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
- goto err_free;
+ return err;
}
netif_napi_add(ndev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);
@@ -1983,7 +1978,7 @@ static int ag71xx_probe(struct platform_device *pdev)
err = clk_prepare_enable(ag->clk_eth);
if (err) {
netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
- goto err_free;
+ return err;
}
ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);
@@ -2019,8 +2014,6 @@ err_mdio_remove:
ag71xx_mdio_remove(ag);
err_put_clk:
clk_disable_unprepare(ag->clk_eth);
-err_free:
- free_netdev(ndev);
return err;
}
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index 7cc5213c575a..b07cb9bc5f2d 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -708,7 +708,9 @@ static int bcm4908_enet_probe(struct platform_device *pdev)
enet->irq_tx = platform_get_irq_byname(pdev, "tx");
- dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
err = bcm4908_enet_dma_alloc(enet);
if (err)
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 40933bf5a710..60dde29974bf 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1309,11 +1309,11 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
struct bcm_sysport_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
struct bcm_sysport_tx_ring *ring;
+ unsigned long flags, desc_flags;
struct bcm_sysport_cb *cb;
struct netdev_queue *txq;
u32 len_status, addr_lo;
unsigned int skb_len;
- unsigned long flags;
dma_addr_t mapping;
u16 queue;
int ret;
@@ -1373,8 +1373,10 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
ring->desc_count--;
/* Ports are latched, so write upper address first */
+ spin_lock_irqsave(&priv->desc_lock, desc_flags);
tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index));
tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index));
+ spin_unlock_irqrestore(&priv->desc_lock, desc_flags);
/* Check ring space and update SW control flow */
if (ring->desc_count == 0)
@@ -2013,6 +2015,7 @@ static int bcm_sysport_open(struct net_device *dev)
}
/* Initialize both hardware and software ring */
+ spin_lock_init(&priv->desc_lock);
for (i = 0; i < dev->num_tx_queues; i++) {
ret = bcm_sysport_init_tx_ring(priv, i);
if (ret) {
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 984f76e74b43..16b73bb9acc7 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -711,6 +711,7 @@ struct bcm_sysport_priv {
int wol_irq;
/* Transmit rings */
+ spinlock_t desc_lock;
struct bcm_sysport_tx_ring *tx_rings;
/* Receive queue */
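
The SYSTEMPORT hunks above add a dedicated spinlock because the two descriptor write ports are latched: the high word must be written immediately before the low word, and a second submitter interleaving its own pair would corrupt both descriptors. A schematic standalone sketch of keeping such a two-register sequence atomic; the lock helpers are no-op stand-ins for spin_lock_irqsave()/spin_unlock_irqrestore() and the ports are plain variables rather than MMIO:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the latched TDMA write ports: the value written to the
 * HI port is only consumed when the LO port is written afterwards. */
static uint32_t write_port_hi, last_submission_hi, last_submission_lo;

/* In the driver these are spin_lock_irqsave()/spin_unlock_irqrestore()
 * on priv->desc_lock; here they only mark the critical section. */
static void desc_lock(void)   { }
static void desc_unlock(void) { }

static void submit_descriptor(uint32_t len_status, uint32_t addr_lo)
{
        desc_lock();
        write_port_hi = len_status;         /* ports are latched: HI first... */
        last_submission_hi = write_port_hi; /* ...LO write consumes current HI */
        last_submission_lo = addr_lo;
        desc_unlock();
}

int main(void)
{
        submit_descriptor(0x80000040, 0x1000);
        printf("submitted HI=0x%08x LO=0x%08x\n",
               last_submission_hi, last_submission_lo);
        return 0;
}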
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 5f259641437a..c888ddee1fc4 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -589,9 +589,9 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
* Internal or external PHY with MDIO access
*/
phydev = phy_attach(priv->dev, phy_name, pd->phy_interface);
- if (!phydev) {
+ if (IS_ERR(phydev)) {
dev_err(kdev, "failed to register PHY device\n");
- return -ENODEV;
+ return PTR_ERR(phydev);
}
} else {
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 64479c464b4e..ae9cca768d74 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -3196,6 +3196,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
}
if (adapter->registered_device_map == 0) {
dev_err(&pdev->dev, "could not register any net devices\n");
+ err = -EINVAL;
goto err_disable_interrupts;
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 6451c8383639..8e643567abce 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -4550,6 +4550,8 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
fsl_mc_portal_free(priv->mc_io);
+ destroy_workqueue(priv->dpaa2_ptp_wq);
+
dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
free_netdev(net_dev);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 2085844227fe..e54e70ebdd05 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -388,6 +388,8 @@ struct dpaa2_eth_ch_stats {
__u64 bytes_per_cdan;
};
+#define DPAA2_ETH_CH_STATS 7
+
/* Maximum number of queues associated with a DPNI */
#define DPAA2_ETH_MAX_TCS 8
#define DPAA2_ETH_MAX_RX_QUEUES_PER_TC 16
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index adb8ce5306ee..3fdbf87dccb1 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -278,7 +278,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
/* Per-channel stats */
for (k = 0; k < priv->num_channels; k++) {
ch_stats = &priv->channel[k]->stats;
- for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64) - 1; j++)
+ for (j = 0; j < DPAA2_ETH_CH_STATS; j++)
*((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
}
i += j;
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 7b4961daa254..ed7301b69169 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -377,6 +377,9 @@ struct bufdesc_ex {
#define FEC_ENET_WAKEUP ((uint)0x00020000) /* Wakeup request */
#define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2)
#define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2)
+#define FEC_ENET_RXF_GET(X) (((X) == 0) ? FEC_ENET_RXF_0 : \
+ (((X) == 1) ? FEC_ENET_RXF_1 : \
+ FEC_ENET_RXF_2))
#define FEC_ENET_TS_AVAIL ((uint)0x00010000)
#define FEC_ENET_TS_TIMER ((uint)0x00008000)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index bc418b910999..1b1f7f2a6130 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1480,7 +1480,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
break;
pkt_received++;
- writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
+ writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT);
/* Check for errors. */
status ^= BD_ENET_RX_LAST;
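
The FEC hunks above replace the blanket FEC_ENET_RXF clear with FEC_ENET_RXF_GET(queue_id), so servicing one RX queue no longer acknowledges the other queues' pending events. A standalone sketch of mapping a queue id to its event bit and clearing only that bit; the bit positions are placeholders, not the real IEVENT layout, and the real register is acknowledged by writing the bit back while the sketch simply masks a shadow word:

#include <stdint.h>
#include <stdio.h>

/* Placeholder event bits for three RX queues. */
#define RXF_0 (1u << 25)
#define RXF_1 (1u << 3)
#define RXF_2 (1u << 4)
#define RXF_ALL (RXF_0 | RXF_1 | RXF_2)

/* Mirror of FEC_ENET_RXF_GET(): pick the event bit for one queue. */
#define RXF_GET(q) ((q) == 0 ? RXF_0 : ((q) == 1 ? RXF_1 : RXF_2))

static uint32_t ievent = RXF_ALL;   /* pretend all three queues are pending */

static void ack_queue(unsigned int q)
{
        ievent &= ~RXF_GET(q);          /* clear only this queue's event */
}

int main(void)
{
        ack_queue(1);
        printf("queue 0 still pending: %d\n", !!(ievent & RXF_0));  /* 1 */
        printf("queue 1 still pending: %d\n", !!(ievent & RXF_1));  /* 0 */
        printf("queue 2 still pending: %d\n", !!(ievent & RXF_2));  /* 1 */
        return 0;
}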
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index d9baac0dbc7d..4c9d05c45c03 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1805,7 +1805,7 @@ static int fman_port_probe(struct platform_device *of_dev)
fman = dev_get_drvdata(&fm_pdev->dev);
if (!fman) {
err = -EINVAL;
- goto return_err;
+ goto put_device;
}
err = of_property_read_u32(port_node, "cell-index", &val);
@@ -1813,7 +1813,7 @@ static int fman_port_probe(struct platform_device *of_dev)
dev_err(port->dev, "%s: reading cell-index for %pOF failed\n",
__func__, port_node);
err = -EINVAL;
- goto return_err;
+ goto put_device;
}
port_id = (u8)val;
port->dts_params.id = port_id;
@@ -1847,7 +1847,7 @@ static int fman_port_probe(struct platform_device *of_dev)
} else {
dev_err(port->dev, "%s: Illegal port type\n", __func__);
err = -EINVAL;
- goto return_err;
+ goto put_device;
}
port->dts_params.type = port_type;
@@ -1861,7 +1861,7 @@ static int fman_port_probe(struct platform_device *of_dev)
dev_err(port->dev, "%s: incorrect qman-channel-id\n",
__func__);
err = -EINVAL;
- goto return_err;
+ goto put_device;
}
port->dts_params.qman_channel_id = qman_channel_id;
}
@@ -1871,7 +1871,7 @@ static int fman_port_probe(struct platform_device *of_dev)
dev_err(port->dev, "%s: of_address_to_resource() failed\n",
__func__);
err = -ENOMEM;
- goto return_err;
+ goto put_device;
}
port->dts_params.fman = fman;
@@ -1896,6 +1896,8 @@ static int fman_port_probe(struct platform_device *of_dev)
return 0;
+put_device:
+ put_device(&fm_pdev->dev);
return_err:
of_node_put(port_node);
free_port:
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 83ae56c310d3..326b56b49216 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -738,10 +738,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
* is not set to GqiRda, choose the queue format in a priority order:
* DqoRda, GqiRda, GqiQpl. Use GqiQpl as default.
*/
- if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
- dev_info(&priv->pdev->dev,
- "Driver is running with GQI RDA queue format.\n");
- } else if (dev_op_dqo_rda) {
+ if (dev_op_dqo_rda) {
priv->queue_format = GVE_DQO_RDA_FORMAT;
dev_info(&priv->pdev->dev,
"Driver is running with DQO RDA queue format.\n");
@@ -753,6 +750,9 @@ int gve_adminq_describe_device(struct gve_priv *priv)
"Driver is running with GQI RDA queue format.\n");
supported_features_mask =
be32_to_cpu(dev_op_gqi_rda->supported_features_mask);
+ } else if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
+ dev_info(&priv->pdev->dev,
+ "Driver is running with GQI RDA queue format.\n");
} else {
priv->queue_format = GVE_GQI_QPL_FORMAT;
if (dev_op_gqi_qpl)
diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c
index 88ca49cbc1e2..d57508bc4307 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.c
+++ b/drivers/net/ethernet/google/gve/gve_utils.c
@@ -68,6 +68,9 @@ struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
set_protocol = ctx->curr_frag_cnt == ctx->expected_frag_cnt - 1;
} else {
skb = napi_alloc_skb(napi, len);
+
+ if (unlikely(!skb))
+ return NULL;
set_protocol = true;
}
__skb_put(skb, len);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 3f7a9a4c59d5..63f5abcc6bf4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -839,6 +839,8 @@ struct hnae3_handle {
u8 netdev_flags;
struct dentry *hnae3_dbgfs;
+ /* serializes concurrent debugfs commands */
+ struct mutex dbgfs_lock;
/* Network interface message level enabled bits */
u32 msg_enable;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 67364ab63a1f..c381f8af67f0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -1081,7 +1081,8 @@ static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring,
u32 j = 0;
sprintf(result[j++], "%u", index);
- sprintf(result[j++], "%u", ring->page_pool->pages_state_hold_cnt);
+ sprintf(result[j++], "%u",
+ READ_ONCE(ring->page_pool->pages_state_hold_cnt));
sprintf(result[j++], "%u",
atomic_read(&ring->page_pool->pages_state_release_cnt));
sprintf(result[j++], "%u", ring->page_pool->p.pool_size);
@@ -1106,6 +1107,11 @@ hns3_dbg_page_pool_info(struct hnae3_handle *h, char *buf, int len)
return -EFAULT;
}
+ if (!priv->ring[h->kinfo.num_tqps].page_pool) {
+ dev_err(&h->pdev->dev, "page pool is not initialized\n");
+ return -EFAULT;
+ }
+
for (i = 0; i < ARRAY_SIZE(page_pool_info_items); i++)
result[i] = &data_str[i][0];
@@ -1220,6 +1226,7 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
if (ret)
return ret;
+ mutex_lock(&handle->dbgfs_lock);
save_buf = &hns3_dbg_cmd[index].buf;
if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
@@ -1232,15 +1239,15 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
read_buf = *save_buf;
} else {
read_buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL);
- if (!read_buf)
- return -ENOMEM;
+ if (!read_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
/* save the buffer addr until the last read operation */
*save_buf = read_buf;
- }
- /* get data ready for the first time to read */
- if (!*ppos) {
+ /* get data ready for the first time to read */
ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
read_buf, hns3_dbg_cmd[index].buf_len);
if (ret)
@@ -1249,8 +1256,10 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
size = simple_read_from_buffer(buffer, count, ppos, read_buf,
strlen(read_buf));
- if (size > 0)
+ if (size > 0) {
+ mutex_unlock(&handle->dbgfs_lock);
return size;
+ }
out:
/* free the buffer for the last read operation */
@@ -1259,6 +1268,7 @@ out:
*save_buf = NULL;
}
+ mutex_unlock(&handle->dbgfs_lock);
return ret;
}
@@ -1331,6 +1341,8 @@ int hns3_dbg_init(struct hnae3_handle *handle)
debugfs_create_dir(hns3_dbg_dentry[i].name,
handle->hnae3_dbgfs);
+ mutex_init(&handle->dbgfs_lock);
+
for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
if ((hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_TM_NODES &&
ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) ||
@@ -1357,6 +1369,7 @@ int hns3_dbg_init(struct hnae3_handle *handle)
return 0;
out:
+ mutex_destroy(&handle->dbgfs_lock);
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
return ret;
@@ -1372,6 +1385,7 @@ void hns3_dbg_uninit(struct hnae3_handle *handle)
hns3_dbg_cmd[i].buf = NULL;
}
+ mutex_destroy(&handle->dbgfs_lock);
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
}
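
The hns3 debugfs changes above serialize readers with a per-handle mutex and keep the lazily allocated command buffer alive until the final read; every exit path, including the early allocation failure, now funnels through a single unlock. A minimal standalone sketch of that lock/allocate/goto-out pattern, with a pthread mutex and malloc standing in for the kernel primitives:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t dbgfs_lock = PTHREAD_MUTEX_INITIALIZER;
static char *save_buf;              /* survives between reads of one command */

/* Produce (or reuse) the command output under the lock; -1 on failure. */
static int dbg_read(char *out, size_t len)
{
        int ret = 0;

        pthread_mutex_lock(&dbgfs_lock);
        if (!save_buf) {
                save_buf = malloc(64);
                if (!save_buf) {
                        ret = -1;
                        goto out;                /* single unlock path */
                }
                snprintf(save_buf, 64, "command output\n");
        }
        snprintf(out, len, "%s", save_buf);
out:
        pthread_mutex_unlock(&dbgfs_lock);
        return ret;
}

int main(void)
{
        char buf[64];

        if (dbg_read(buf, sizeof(buf)) == 0)
                fputs(buf, stdout);
        free(save_buf);
        return 0;
}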
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index c8442b86df94..c9b4568d7a8d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -987,6 +987,7 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags)
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
const struct hnae3_ae_ops *ops = h->ae_algo->ops;
const struct hns3_reset_type_map *rst_type_map;
+ enum ethtool_reset_flags rst_flags;
u32 i, size;
if (ops->ae_dev_resetting && ops->ae_dev_resetting(h))
@@ -1006,6 +1007,7 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags)
for (i = 0; i < size; i++) {
if (rst_type_map[i].rst_flags == *flags) {
rst_type = rst_type_map[i].rst_type;
+ rst_flags = rst_type_map[i].rst_flags;
break;
}
}
@@ -1021,6 +1023,8 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags)
ops->reset_event(h->pdev, h);
+ *flags &= ~rst_flags;
+
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 25c419d40066..41afaeea881b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -703,9 +703,9 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
roundup_size = ilog2(roundup_size);
for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
- tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
+ tc_valid[i] = 1;
tc_size[i] = roundup_size;
- tc_offset[i] = rss_size * i;
+ tc_offset[i] = (hdev->hw_tc_map & BIT(i)) ? rss_size * i : 0;
}
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index fdc66fae0960..c5ac6ecf36e1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -114,7 +114,8 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
memcpy(&req->msg, send_msg, sizeof(struct hclge_vf_to_pf_msg));
- trace_hclge_vf_mbx_send(hdev, req);
+ if (test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state))
+ trace_hclge_vf_mbx_send(hdev, req);
/* synchronous send */
if (need_resp) {
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
index a78c398bf5b2..01e7d3c0b68e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
@@ -8,6 +8,7 @@
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
+#include <linux/module.h>
#include "hinic_hw_dev.h"
#include "hinic_dev.h"
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 3cca51735421..0bb3911dd014 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -628,17 +628,9 @@ static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
old_buff_size = adapter->prev_rx_buf_sz;
new_buff_size = adapter->cur_rx_buf_sz;
- /* Require buff size to be exactly same for now */
- if (old_buff_size != new_buff_size)
- return false;
-
- if (old_num_pools == new_num_pools && old_pool_size == new_pool_size)
- return true;
-
- if (old_num_pools < adapter->min_rx_queues ||
- old_num_pools > adapter->max_rx_queues ||
- old_pool_size < adapter->min_rx_add_entries_per_subcrq ||
- old_pool_size > adapter->max_rx_add_entries_per_subcrq)
+ if (old_buff_size != new_buff_size ||
+ old_num_pools != new_num_pools ||
+ old_pool_size != new_pool_size)
return false;
return true;
@@ -874,17 +866,9 @@ static bool reuse_tx_pools(struct ibmvnic_adapter *adapter)
old_mtu = adapter->prev_mtu;
new_mtu = adapter->req_mtu;
- /* Require MTU to be exactly same to reuse pools for now */
- if (old_mtu != new_mtu)
- return false;
-
- if (old_num_pools == new_num_pools && old_pool_size == new_pool_size)
- return true;
-
- if (old_num_pools < adapter->min_tx_queues ||
- old_num_pools > adapter->max_tx_queues ||
- old_pool_size < adapter->min_tx_entries_per_subcrq ||
- old_pool_size > adapter->max_tx_entries_per_subcrq)
+ if (old_mtu != new_mtu ||
+ old_num_pools != new_num_pools ||
+ old_pool_size != new_pool_size)
return false;
return true;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 291e61ac3e44..2c1b1da1220e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -553,6 +553,14 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
return;
}
+ if (vsi->type != I40E_VSI_MAIN &&
+ vsi->type != I40E_VSI_FDIR &&
+ vsi->type != I40E_VSI_VMDQ2) {
+ dev_info(&pf->pdev->dev,
+ "vsi %d type %d descriptor rings not available\n",
+ vsi_seid, vsi->type);
+ return;
+ }
if (type == RING_TYPE_XDP && !i40e_enabled_xdp_vsi(vsi)) {
dev_info(&pf->pdev->dev, "XDP not enabled on VSI %d\n", vsi_seid);
return;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 80ae264c99ba..2ea4deb8fc44 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1949,6 +1949,32 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
}
/**
+ * i40e_sync_vf_state
+ * @vf: pointer to the VF info
+ * @state: VF state
+ *
+ * Called from a VF message to synchronize the service with a potential
+ * VF reset state
+ **/
+static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state)
+{
+ int i;
+
+ /* Handling some messages requires the VF state to be set.
+ * It is possible that this state bit is cleared during a VF reset,
+ * so wait until the end of the reset to handle the request
+ * message correctly.
+ */
+ for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) {
+ if (test_bit(state, &vf->vf_states))
+ return true;
+ usleep_range(10000, 20000);
+ }
+
+ return test_bit(state, &vf->vf_states);
+}
+
+/**
* i40e_vc_get_version_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2008,7 +2034,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
size_t len = 0;
int ret;
- if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -2131,7 +2157,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
bool allmulti = false;
bool alluni = false;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err_out;
}
@@ -2219,7 +2245,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
struct i40e_vsi *vsi;
u16 num_qps_all = 0;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2368,7 +2394,7 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
int i;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2540,7 +2566,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2590,7 +2616,7 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
u8 cur_pairs = vf->num_queue_pairs;
struct i40e_pf *pf = vf->pf;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE))
return -EINVAL;
if (req_pairs > I40E_MAX_VF_QUEUES) {
@@ -2635,7 +2661,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
memset(&stats, 0, sizeof(struct i40e_eth_stats));
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2752,7 +2778,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
i40e_status ret = 0;
int i;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
ret = I40E_ERR_PARAM;
goto error_param;
@@ -2824,7 +2850,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
i40e_status ret = 0;
int i;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
ret = I40E_ERR_PARAM;
goto error_param;
@@ -2968,7 +2994,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
int i;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -3088,9 +3114,9 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
struct i40e_vsi *vsi = NULL;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
- (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
+ vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3119,9 +3145,9 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
u16 i;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
- (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
+ vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3154,7 +3180,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
int len = 0;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3190,7 +3216,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
struct i40e_hw *hw = &pf->hw;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3215,7 +3241,7 @@ static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
struct i40e_vsi *vsi;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3241,7 +3267,7 @@ static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
struct i40e_vsi *vsi;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3468,7 +3494,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
int i, ret;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3599,7 +3625,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
int i, ret;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err_out;
}
@@ -3708,7 +3734,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
u64 speed = 0;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3797,11 +3823,6 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
/* set this flag only after making sure all inputs are sane */
vf->adq_enabled = true;
- /* num_req_queues is set when user changes number of queues via ethtool
- * and this causes issue for default VSI(which depends on this variable)
- * when ADq is enabled, hence reset it.
- */
- vf->num_req_queues = 0;
/* reset the VF in order to allocate resources */
i40e_vc_reset_vf(vf, true);
@@ -3824,7 +3845,7 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 091e32c1bb46..49575a640a84 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -18,6 +18,8 @@
#define I40E_MAX_VF_PROMISC_FLAGS 3
+#define I40E_VF_STATE_WAIT_COUNT 20
+
/* Various queue ctrls */
enum i40e_queue_ctrl {
I40E_QUEUE_CTRL_UNKNOWN = 0,
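
i40e_sync_vf_state() above tolerates a message arriving while the VF is mid-reset by polling the state bit up to I40E_VF_STATE_WAIT_COUNT times with a short sleep between attempts, instead of failing immediately. A standalone sketch of that bounded wait, with usleep() in place of usleep_range() and a plain flag in place of the vf_states bitmap:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define STATE_WAIT_COUNT 20

static volatile bool vf_active;   /* stands in for test_bit(state, &vf->vf_states) */

/* Return true once the state is observed, retrying across a possible reset. */
static bool sync_vf_state(void)
{
        int i;

        for (i = 0; i < STATE_WAIT_COUNT; i++) {
                if (vf_active)
                        return true;
                usleep(10000);            /* usleep_range(10000, 20000) in the driver */
        }
        return vf_active;                 /* one last look after the retries */
}

int main(void)
{
        vf_active = true;                 /* pretend the reset already finished */
        printf("state reached: %d\n", sync_vf_state());
        return 0;
}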
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 75635bd57cf6..3789269ce741 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -305,6 +305,7 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_DEL_FDIR_FILTER BIT(26)
#define IAVF_FLAG_AQ_ADD_ADV_RSS_CFG BIT(27)
#define IAVF_FLAG_AQ_DEL_ADV_RSS_CFG BIT(28)
+#define IAVF_FLAG_AQ_REQUEST_STATS BIT(29)
/* OS defined structs */
struct net_device *netdev;
@@ -444,6 +445,7 @@ int iavf_up(struct iavf_adapter *adapter);
void iavf_down(struct iavf_adapter *adapter);
int iavf_process_config(struct iavf_adapter *adapter);
void iavf_schedule_reset(struct iavf_adapter *adapter);
+void iavf_schedule_request_stats(struct iavf_adapter *adapter);
void iavf_reset(struct iavf_adapter *adapter);
void iavf_set_ethtool_ops(struct net_device *netdev);
void iavf_update_stats(struct iavf_adapter *adapter);
@@ -501,4 +503,5 @@ void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter);
void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter);
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
const u8 *macaddr);
+int iavf_lock_timeout(struct mutex *lock, unsigned int msecs);
#endif /* _IAVF_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 144a77679359..461f5237a2f8 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -354,6 +354,9 @@ static void iavf_get_ethtool_stats(struct net_device *netdev,
struct iavf_adapter *adapter = netdev_priv(netdev);
unsigned int i;
+ /* Explicitly request stats refresh */
+ iavf_schedule_request_stats(adapter);
+
iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);
rcu_read_lock();
@@ -612,23 +615,44 @@ static int iavf_set_ringparam(struct net_device *netdev,
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- new_tx_count = clamp_t(u32, ring->tx_pending,
- IAVF_MIN_TXD,
- IAVF_MAX_TXD);
- new_tx_count = ALIGN(new_tx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+ if (ring->tx_pending > IAVF_MAX_TXD ||
+ ring->tx_pending < IAVF_MIN_TXD ||
+ ring->rx_pending > IAVF_MAX_RXD ||
+ ring->rx_pending < IAVF_MIN_RXD) {
+ netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
+ ring->tx_pending, ring->rx_pending, IAVF_MIN_TXD,
+ IAVF_MAX_RXD, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+ return -EINVAL;
+ }
- new_rx_count = clamp_t(u32, ring->rx_pending,
- IAVF_MIN_RXD,
- IAVF_MAX_RXD);
- new_rx_count = ALIGN(new_rx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+ new_tx_count = ALIGN(ring->tx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+ if (new_tx_count != ring->tx_pending)
+ netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n",
+ new_tx_count);
+
+ new_rx_count = ALIGN(ring->rx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+ if (new_rx_count != ring->rx_pending)
+ netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n",
+ new_rx_count);
/* if nothing to do return success */
if ((new_tx_count == adapter->tx_desc_count) &&
- (new_rx_count == adapter->rx_desc_count))
+ (new_rx_count == adapter->rx_desc_count)) {
+ netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
return 0;
+ }
- adapter->tx_desc_count = new_tx_count;
- adapter->rx_desc_count = new_rx_count;
+ if (new_tx_count != adapter->tx_desc_count) {
+ netdev_dbg(netdev, "Changing Tx descriptor count from %d to %d\n",
+ adapter->tx_desc_count, new_tx_count);
+ adapter->tx_desc_count = new_tx_count;
+ }
+
+ if (new_rx_count != adapter->rx_desc_count) {
+ netdev_dbg(netdev, "Changing Rx descriptor count from %d to %d\n",
+ adapter->rx_desc_count, new_rx_count);
+ adapter->rx_desc_count = new_rx_count;
+ }
if (netif_running(netdev)) {
adapter->flags |= IAVF_FLAG_RESET_NEEDED;
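
The reworked iavf_set_ringparam() rejects out-of-range requests outright and only rounds in-range values up to the descriptor multiple, instead of silently clamping. A quick sketch of the "validate range, then round up" logic; the 64/4096/32 constants are assumed to match the driver headers and are not taken from this diff.

/* Sketch of the range-check-then-round flow; assumed constants. */
#include <stdint.h>
#include <stdio.h>

#define MIN_DESC 64
#define MAX_DESC 4096
#define DESC_MULTIPLE 32 /* must be a power of two for ALIGN() */

#define ALIGN(x, a) (((x) + (a) - 1) & ~((uint32_t)(a) - 1))

static int check_and_round(uint32_t requested, uint32_t *out)
{
	if (requested < MIN_DESC || requested > MAX_DESC)
		return -1; /* reject instead of silently clamping */

	*out = ALIGN(requested, DESC_MULTIPLE);
	return 0;
}

int main(void)
{
	uint32_t out;

	if (!check_and_round(500, &out))
		printf("500 -> %u\n", out); /* prints 512 */
	if (check_and_round(8192, &out))
		printf("8192 rejected\n");
	return 0;
}
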
@@ -723,12 +747,31 @@ static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
*
* Change the ITR settings for a specific queue.
**/
-static void iavf_set_itr_per_queue(struct iavf_adapter *adapter,
- struct ethtool_coalesce *ec, int queue)
+static int iavf_set_itr_per_queue(struct iavf_adapter *adapter,
+ struct ethtool_coalesce *ec, int queue)
{
struct iavf_ring *rx_ring = &adapter->rx_rings[queue];
struct iavf_ring *tx_ring = &adapter->tx_rings[queue];
struct iavf_q_vector *q_vector;
+ u16 itr_setting;
+
+ itr_setting = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
+
+ if (ec->rx_coalesce_usecs != itr_setting &&
+ ec->use_adaptive_rx_coalesce) {
+ netif_info(adapter, drv, adapter->netdev,
+ "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n");
+ return -EINVAL;
+ }
+
+ itr_setting = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
+
+ if (ec->tx_coalesce_usecs != itr_setting &&
+ ec->use_adaptive_tx_coalesce) {
+ netif_info(adapter, drv, adapter->netdev,
+ "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n");
+ return -EINVAL;
+ }
rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
@@ -751,6 +794,7 @@ static void iavf_set_itr_per_queue(struct iavf_adapter *adapter,
* the Tx and Rx ITR values based on the values we have entered
* into the q_vector, no need to write the values now.
*/
+ return 0;
}
/**
@@ -792,9 +836,11 @@ static int __iavf_set_coalesce(struct net_device *netdev,
*/
if (queue < 0) {
for (i = 0; i < adapter->num_active_queues; i++)
- iavf_set_itr_per_queue(adapter, ec, i);
+ if (iavf_set_itr_per_queue(adapter, ec, i))
+ return -EINVAL;
} else if (queue < adapter->num_active_queues) {
- iavf_set_itr_per_queue(adapter, ec, queue);
+ if (iavf_set_itr_per_queue(adapter, ec, queue))
+ return -EINVAL;
} else {
netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
adapter->num_active_queues - 1);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 336e6bf95e48..4e7c04047f91 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -147,7 +147,7 @@ enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
*
* Returns 0 on success, negative on failure
**/
-static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
+int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
{
unsigned int wait, delay = 10;
@@ -175,6 +175,19 @@ void iavf_schedule_reset(struct iavf_adapter *adapter)
}
/**
+ * iavf_schedule_request_stats - Set the flags and schedule statistics request
+ * @adapter: board private structure
+ *
+ * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly
+ * request and refresh ethtool stats
+ **/
+void iavf_schedule_request_stats(struct iavf_adapter *adapter)
+{
+ adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
+ mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+}
+
+/**
* iavf_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
* @txqueue: queue number that is timing out
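
iavf_schedule_request_stats() follows the driver's usual pattern: callers only set an aq_required bit and kick the watchdog, and the actual virtchnl request is issued later from the worker. A compact userspace sketch of that "set a request flag, let a worker consume it" pattern; the names below are illustrative, and in the driver the flag is cleared in iavf_request_stats() rather than in the worker loop shown here.

/* Flag-and-defer sketch; illustrative only, not driver API. */
#include <stdio.h>

#define AQ_REQUEST_STATS (1u << 29)

static unsigned int aq_required;

static void schedule_request_stats(void)
{
	aq_required |= AQ_REQUEST_STATS;
	/* the driver uses mod_delayed_work(..., 0) to run the watchdog now */
}

static int process_aq_command(void)
{
	if (aq_required & AQ_REQUEST_STATS) {
		aq_required &= ~AQ_REQUEST_STATS;
		printf("sending VIRTCHNL_OP_GET_STATS\n");
		return 0;
	}
	return -1; /* nothing to do */
}

int main(void)
{
	schedule_request_stats();
	while (!process_aq_command())
		; /* worker drains pending requests */
	return 0;
}
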
@@ -704,13 +717,11 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan)
**/
static void iavf_restore_filters(struct iavf_adapter *adapter)
{
- /* re-add all VLAN filters */
- if (VLAN_ALLOWED(adapter)) {
- u16 vid;
+ u16 vid;
- for_each_set_bit(vid, adapter->vsi.active_vlans, VLAN_N_VID)
- iavf_add_vlan(adapter, vid);
- }
+ /* re-add all VLAN filters */
+ for_each_set_bit(vid, adapter->vsi.active_vlans, VLAN_N_VID)
+ iavf_add_vlan(adapter, vid);
}
/**
@@ -745,9 +756,6 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- if (!VLAN_ALLOWED(adapter))
- return -EIO;
-
iavf_del_vlan(adapter, vid);
clear_bit(vid, adapter->vsi.active_vlans);
@@ -1709,6 +1717,11 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
iavf_del_adv_rss_cfg(adapter);
return 0;
}
+ if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
+ iavf_request_stats(adapter);
+ return 0;
+ }
+
return -EAGAIN;
}
@@ -2033,6 +2046,7 @@ static void iavf_watchdog_task(struct work_struct *work)
}
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ mutex_unlock(&adapter->crit_lock);
queue_delayed_work(iavf_wq,
&adapter->watchdog_task,
msecs_to_jiffies(10));
@@ -2063,16 +2077,14 @@ static void iavf_watchdog_task(struct work_struct *work)
iavf_detect_recover_hung(&adapter->vsi);
break;
case __IAVF_REMOVE:
- mutex_unlock(&adapter->crit_lock);
- return;
default:
+ mutex_unlock(&adapter->crit_lock);
return;
}
/* check for hw reset */
reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
if (!reg_val) {
- iavf_change_state(adapter, __IAVF_RESETTING);
adapter->flags |= IAVF_FLAG_RESET_PENDING;
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
@@ -2173,7 +2185,6 @@ static void iavf_reset_task(struct work_struct *work)
struct net_device *netdev = adapter->netdev;
struct iavf_hw *hw = &adapter->hw;
struct iavf_mac_filter *f, *ftmp;
- struct iavf_vlan_filter *vlf;
struct iavf_cloud_filter *cf;
u32 reg_val;
int i = 0, err;
@@ -2236,6 +2247,7 @@ static void iavf_reset_task(struct work_struct *work)
}
pci_set_master(adapter->pdev);
+ pci_restore_msi_state(adapter->pdev);
if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
@@ -2254,6 +2266,7 @@ continue_reset:
(adapter->state == __IAVF_RESETTING));
if (running) {
+ netdev->flags &= ~IFF_UP;
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
adapter->link_up = false;
@@ -2313,11 +2326,6 @@ continue_reset:
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->add = true;
}
- /* re-add all VLAN filters */
- list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
- vlf->add = true;
- }
-
spin_unlock_bh(&adapter->mac_vlan_list_lock);
/* check if TCs are running and re-add all cloud filters */
@@ -2331,7 +2339,6 @@ continue_reset:
spin_unlock_bh(&adapter->cloud_filter_list_lock);
adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
- adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
iavf_misc_irq_enable(adapter);
@@ -2365,7 +2372,7 @@ continue_reset:
* to __IAVF_RUNNING
*/
iavf_up_complete(adapter);
-
+ netdev->flags |= IFF_UP;
iavf_irq_enable(adapter, true);
} else {
iavf_change_state(adapter, __IAVF_DOWN);
@@ -2378,8 +2385,10 @@ continue_reset:
reset_err:
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
- if (running)
+ if (running) {
iavf_change_state(adapter, __IAVF_RUNNING);
+ netdev->flags |= IFF_UP;
+ }
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
iavf_close(netdev);
}
@@ -3441,11 +3450,16 @@ static int iavf_set_features(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- /* Don't allow changing VLAN_RX flag when adapter is not capable
- * of VLAN offload
+ /* Don't allow enabling VLAN features when adapter is not capable
+ * of VLAN offload/filtering
*/
if (!VLAN_ALLOWED(adapter)) {
- if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX)
+ netdev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_FILTER);
+ if (features & (NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_FILTER))
return -EINVAL;
} else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
if (features & NETIF_F_HW_VLAN_CTAG_RX)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 8c3f0f77cb57..d60bf7c21200 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -607,7 +607,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
if (f->add)
count++;
}
- if (!count) {
+ if (!count || !VLAN_ALLOWED(adapter)) {
adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
@@ -673,9 +673,19 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
spin_lock_bh(&adapter->mac_vlan_list_lock);
- list_for_each_entry(f, &adapter->vlan_filter_list, list) {
- if (f->remove)
+ list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+ /* since VLAN capabilities are not allowed, we don't want to send
+ * a VLAN delete request because it will most likely fail and
+ * create unnecessary errors/noise, so just free the VLAN
+ * filters marked for removal to enable bailing out before
+ * sending a virtchnl message
+ */
+ if (f->remove && !VLAN_ALLOWED(adapter)) {
+ list_del(&f->list);
+ kfree(f);
+ } else if (f->remove) {
count++;
+ }
}
if (!count) {
adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
@@ -784,6 +794,8 @@ void iavf_request_stats(struct iavf_adapter *adapter)
/* no error message, this isn't crucial */
return;
}
+
+ adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_STATS;
adapter->current_op = VIRTCHNL_OP_GET_STATS;
vqs.vsi_id = adapter->vsi_res->vsi_id;
/* queue maps are ignored for this message - only the vsi is used */
@@ -1722,8 +1734,37 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
}
spin_lock_bh(&adapter->mac_vlan_list_lock);
iavf_add_filter(adapter, adapter->hw.mac.addr);
+
+ if (VLAN_ALLOWED(adapter)) {
+ if (!list_empty(&adapter->vlan_filter_list)) {
+ struct iavf_vlan_filter *vlf;
+
+ /* re-add all VLAN filters over virtchnl */
+ list_for_each_entry(vlf,
+ &adapter->vlan_filter_list,
+ list)
+ vlf->add = true;
+
+ adapter->aq_required |=
+ IAVF_FLAG_AQ_ADD_VLAN_FILTER;
+ }
+ }
+
spin_unlock_bh(&adapter->mac_vlan_list_lock);
iavf_process_config(adapter);
+
+ /* unlock crit_lock before acquiring rtnl_lock as other
+ * processes holding rtnl_lock could be waiting for the same
+ * crit_lock
+ */
+ mutex_unlock(&adapter->crit_lock);
+ rtnl_lock();
+ netdev_update_features(adapter->netdev);
+ rtnl_unlock();
+ if (iavf_lock_timeout(&adapter->crit_lock, 10000))
+ dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n",
+ __func__);
+
}
break;
case VIRTCHNL_OP_ENABLE_QUEUES:
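
Dropping crit_lock before taking rtnl_lock avoids an ABBA deadlock: another path may already hold rtnl_lock while waiting for crit_lock. A tiny pthread sketch of the same ordering rule (drop the inner lock before acquiring the outer one, then reacquire it afterwards); this is a generic illustration compiled with -pthread, not the driver's locking API.

/* Generic lock-ordering sketch; illustrative only. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t crit_lock = PTHREAD_MUTEX_INITIALIZER; /* "A" */
static pthread_mutex_t rtnl_lock = PTHREAD_MUTEX_INITIALIZER; /* "B" */

static void update_features_safely(void)
{
	/* caller holds crit_lock here */
	pthread_mutex_unlock(&crit_lock);      /* drop A first */

	pthread_mutex_lock(&rtnl_lock);        /* then take B */
	printf("netdev_update_features()\n");
	pthread_mutex_unlock(&rtnl_lock);

	pthread_mutex_lock(&crit_lock);        /* reacquire A afterwards */
}

int main(void)
{
	pthread_mutex_lock(&crit_lock);
	update_features_safely();
	pthread_mutex_unlock(&crit_lock);
	return 0;
}
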
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 1efc635cc0f5..fafe020e46ee 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -6,6 +6,18 @@
#include "ice_lib.h"
#include "ice_dcb_lib.h"
+static bool ice_alloc_rx_buf_zc(struct ice_rx_ring *rx_ring)
+{
+ rx_ring->xdp_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->xdp_buf), GFP_KERNEL);
+ return !!rx_ring->xdp_buf;
+}
+
+static bool ice_alloc_rx_buf(struct ice_rx_ring *rx_ring)
+{
+ rx_ring->rx_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
+ return !!rx_ring->rx_buf;
+}
+
/**
* __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
* @qs_cfg: gathered variables needed for PF->VSI queues assignment
@@ -492,8 +504,11 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index, ring->q_vector->napi.napi_id);
+ kfree(ring->rx_buf);
ring->xsk_pool = ice_xsk_pool(ring);
if (ring->xsk_pool) {
+ if (!ice_alloc_rx_buf_zc(ring))
+ return -ENOMEM;
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
ring->rx_buf_len =
@@ -508,6 +523,8 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
} else {
+ if (!ice_alloc_rx_buf(ring))
+ return -ENOMEM;
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
/* coverity[check_return] */
xdp_rxq_info_reg(&ring->xdp_rxq,
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index 7fdeb411b6df..3eb01731e496 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -97,6 +97,9 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
new_cfg->etscfg.maxtcs = pf->hw.func_caps.common_cap.maxtc;
+ if (!bwcfg)
+ new_cfg->etscfg.tcbwtable[0] = 100;
+
if (!bwrec)
new_cfg->etsrec.tcbwtable[0] = 100;
@@ -167,15 +170,18 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
if (mode == pf->dcbx_cap)
return ICE_DCB_NO_HW_CHG;
- pf->dcbx_cap = mode;
qos_cfg = &pf->hw.port_info->qos_cfg;
- if (mode & DCB_CAP_DCBX_VER_CEE) {
- if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP)
- return ICE_DCB_NO_HW_CHG;
+
+ /* DSCP configuration is not DCBx negotiated */
+ if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP)
+ return ICE_DCB_NO_HW_CHG;
+
+ pf->dcbx_cap = mode;
+
+ if (mode & DCB_CAP_DCBX_VER_CEE)
qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE;
- } else {
+ else
qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE;
- }
dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode);
return ICE_DCB_HW_CHG_RST;
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index 38960bcc384c..b6e7f47c8c78 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -1268,7 +1268,7 @@ ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input,
bool is_tun = tun == ICE_FD_HW_SEG_TUN;
int err;
- if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num))
+ if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num, TNL_ALL))
continue;
err = ice_fdir_write_fltr(pf, input, add, is_tun);
if (err)
@@ -1652,7 +1652,7 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
}
/* return error if not an update and no available filters */
- fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port) ? 2 : 1;
+ fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port, TNL_ALL) ? 2 : 1;
if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) &&
ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) {
dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
index cbd8424631e3..4dca009bdd50 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -924,7 +924,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
memcpy(pkt, ice_fdir_pkt[idx].pkt, ice_fdir_pkt[idx].pkt_len);
loc = pkt;
} else {
- if (!ice_get_open_tunnel_port(hw, &tnl_port))
+ if (!ice_get_open_tunnel_port(hw, &tnl_port, TNL_ALL))
return ICE_ERR_DOES_NOT_EXIST;
if (!ice_fdir_pkt[idx].tun_pkt)
return ICE_ERR_PARAM;
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 23cfcceb1536..6ad1c2559724 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -1899,9 +1899,11 @@ static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
* ice_get_open_tunnel_port - retrieve an open tunnel port
* @hw: pointer to the HW structure
* @port: returns open port
+ * @type: type of tunnel, can be TNL_LAST if it doesn't matter
*/
bool
-ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port)
+ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port,
+ enum ice_tunnel_type type)
{
bool res = false;
u16 i;
@@ -1909,7 +1911,8 @@ ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port)
mutex_lock(&hw->tnl_lock);
for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
- if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port) {
+ if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port &&
+ (type == TNL_LAST || type == hw->tnl.tbl[i].type)) {
*port = hw->tnl.tbl[i].port;
res = true;
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 344c2637facd..a2863f38fd1f 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -33,7 +33,8 @@ enum ice_status
ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
unsigned long *bm, struct list_head *fv_list);
bool
-ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port);
+ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port,
+ enum ice_tunnel_type type);
int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
unsigned int idx, struct udp_tunnel_info *ti);
int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 40562600a8cf..09a3297cd63c 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -89,8 +89,13 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
if (!vsi->rx_rings)
goto err_rings;
- /* XDP will have vsi->alloc_txq Tx queues as well, so double the size */
- vsi->txq_map = devm_kcalloc(dev, (2 * vsi->alloc_txq),
+ /* txq_map needs to have enough space to track both Tx (stack) rings
+ * and XDP rings; at this point vsi->num_xdp_txq might not be set,
+ * so use num_possible_cpus() as we always want to provide an XDP ring
+ * per CPU, regardless of user queue count settings that might
+ * have come from ethtool's set_channels() callback;
+ */
+ vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()),
sizeof(*vsi->txq_map), GFP_KERNEL);
if (!vsi->txq_map)
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index f099797f35e3..73c61cdb036f 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2609,7 +2609,18 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
ice_stat_str(status));
goto clear_xdp_rings;
}
- ice_vsi_assign_bpf_prog(vsi, prog);
+
+ /* assign the prog only when it's not already present on VSI;
+ * this path is exercised by both the ethtool -L and ndo_bpf flows;
+ * a VSI rebuild that happens under ethtool -L can expose us to
+ * bpf_prog refcount issues as we would be swapping the same
+ * bpf_prog pointer from vsi->xdp_prog and calling bpf_prog_put
+ * on it as it would be treated as an 'old_prog'; for ndo_bpf
+ * this is not harmful as dev_xdp_install bumps the refcount
+ * before calling the op exposed by the driver;
+ */
+ if (!ice_is_xdp_ena_vsi(vsi))
+ ice_vsi_assign_bpf_prog(vsi, prog);
return 0;
clear_xdp_rings:
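
The comment in the hunk above describes a refcount hazard: if the same program pointer is swapped in again and the "old" pointer is then released, a reference that was never taken for this swap gets dropped. A small standalone sketch of that hazard and the guard against it; this is illustrative refcounting, not the bpf_prog API.

/* Refcount-swap sketch; illustrative only. */
#include <stdio.h>

struct prog {
	int refcnt;
};

static void prog_put(struct prog *p)
{
	if (p && --p->refcnt == 0)
		printf("prog freed\n");
}

static void assign_prog(struct prog **slot, struct prog *new)
{
	struct prog *old = *slot;

	if (old == new)
		return;        /* nothing to swap; avoid the spurious put */

	*slot = new;
	prog_put(old);
}

int main(void)
{
	struct prog p = { .refcnt = 1 };
	struct prog *slot = &p;

	assign_prog(&slot, &p);               /* same prog: refcount untouched */
	printf("refcnt = %d\n", p.refcnt);    /* still 1 */
	return 0;
}
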
@@ -2785,6 +2796,11 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
} else {
+ /* safe to call even when prog == vsi->xdp_prog as
+ * dev_xdp_install in net/core/dev.c incremented prog's
+ * refcount so corresponding bpf_prog_put won't cause
+ * underflow
+ */
ice_vsi_assign_bpf_prog(vsi, prog);
}
@@ -5865,6 +5881,9 @@ static int ice_up_complete(struct ice_vsi *vsi)
netif_carrier_on(vsi->netdev);
}
+ /* clear this now, and the first stats read will be used as baseline */
+ vsi->stat_offsets_loaded = false;
+
ice_service_task_schedule(pf);
return 0;
@@ -5911,14 +5930,15 @@ ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats st
/**
* ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
* @vsi: the VSI to be updated
+ * @vsi_stats: the stats struct to be updated
* @rings: rings to work on
* @count: number of rings
*/
static void
-ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_tx_ring **rings,
- u16 count)
+ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
+ struct rtnl_link_stats64 *vsi_stats,
+ struct ice_tx_ring **rings, u16 count)
{
- struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
u16 i;
for (i = 0; i < count; i++) {
@@ -5942,15 +5962,13 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_tx_ring **rings,
*/
static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
{
- struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
+ struct rtnl_link_stats64 *vsi_stats;
u64 pkts, bytes;
int i;
- /* reset netdev stats */
- vsi_stats->tx_packets = 0;
- vsi_stats->tx_bytes = 0;
- vsi_stats->rx_packets = 0;
- vsi_stats->rx_bytes = 0;
+ vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
+ if (!vsi_stats)
+ return;
/* reset non-netdev (extended) stats */
vsi->tx_restart = 0;
@@ -5962,7 +5980,8 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
rcu_read_lock();
/* update Tx rings counters */
- ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq);
+ ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
+ vsi->num_txq);
/* update Rx rings counters */
ice_for_each_rxq(vsi, i) {
@@ -5977,10 +5996,17 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
/* update XDP Tx rings counters */
if (ice_is_xdp_ena_vsi(vsi))
- ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings,
+ ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
vsi->num_xdp_txq);
rcu_read_unlock();
+
+ vsi->net_stats.tx_packets = vsi_stats->tx_packets;
+ vsi->net_stats.tx_bytes = vsi_stats->tx_bytes;
+ vsi->net_stats.rx_packets = vsi_stats->rx_packets;
+ vsi->net_stats.rx_bytes = vsi_stats->rx_bytes;
+
+ kfree(vsi_stats);
}
/**
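
The ice_update_vsi_ring_stats() change above sums the ring counters into a private snapshot and publishes the totals in one step, instead of zeroing the live stats and accumulating in place where concurrent readers could observe intermediate values. A minimal userspace sketch of that snapshot-then-publish approach; the structures and sizes are illustrative.

/* Snapshot-then-publish stats sketch; illustrative only. */
#include <stdint.h>
#include <stdlib.h>

struct link_stats {
	uint64_t tx_packets, tx_bytes, rx_packets, rx_bytes;
};

struct ring {
	uint64_t pkts, bytes;
};

static struct link_stats net_stats;      /* what readers see */

static void update_stats(const struct ring *tx, int ntx,
			 const struct ring *rx, int nrx)
{
	struct link_stats *snap = calloc(1, sizeof(*snap));
	int i;

	if (!snap)
		return;

	for (i = 0; i < ntx; i++) {
		snap->tx_packets += tx[i].pkts;
		snap->tx_bytes += tx[i].bytes;
	}
	for (i = 0; i < nrx; i++) {
		snap->rx_packets += rx[i].pkts;
		snap->rx_bytes += rx[i].bytes;
	}

	net_stats = *snap;   /* publish totals, never a half-zeroed struct */
	free(snap);
}

int main(void)
{
	struct ring tx[2] = { { 10, 1000 }, { 5, 500 } };
	struct ring rx[1] = { { 7, 700 } };

	update_stats(tx, 2, rx, 1);
	return 0;
}
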
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index bf7247c6f58e..442b031b0edc 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -705,7 +705,7 @@ static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
scaled_ppm = -scaled_ppm;
}
- while ((u64)scaled_ppm > div_u64(U64_MAX, incval)) {
+ while ((u64)scaled_ppm > div64_u64(U64_MAX, incval)) {
/* handle overflow by scaling down the scaled_ppm and
* the divisor, losing some precision
*/
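
The div_u64() to div64_u64() switch matters because the former takes only a 32-bit divisor, so the overflow bound was computed incorrectly when incval is wider than 32 bits. A small sketch of the overflow-safe scaling loop with a genuine 64-bit division; the numeric values are illustrative, not the PTP clock's real parameters.

/* Overflow-guarded scaling sketch; illustrative values. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t incval = 0x100000000ULL;   /* divisor wider than 32 bits */
	uint64_t scaled_ppm = 1ULL << 40;
	unsigned int divisor = 1000000;

	while (scaled_ppm > UINT64_MAX / incval) {
		/* lose a little precision instead of overflowing the product */
		scaled_ppm >>= 2;
		divisor >>= 2;
	}

	printf("adjustment = %llu\n",
	       (unsigned long long)(scaled_ppm * incval / divisor));
	return 0;
}
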
@@ -1540,19 +1540,16 @@ static void ice_ptp_tx_tstamp_work(struct kthread_work *work)
if (err)
continue;
- /* Check if the timestamp is valid */
- if (!(raw_tstamp & ICE_PTP_TS_VALID))
+ /* Check if the timestamp is invalid or stale */
+ if (!(raw_tstamp & ICE_PTP_TS_VALID) ||
+ raw_tstamp == tx->tstamps[idx].cached_tstamp)
continue;
- /* clear the timestamp register, so that it won't show valid
- * again when re-used.
- */
- ice_clear_phy_tstamp(hw, tx->quad, phy_idx);
-
/* The timestamp is valid, so we'll go ahead and clear this
* index and then send the timestamp up to the stack.
*/
spin_lock(&tx->lock);
+ tx->tstamps[idx].cached_tstamp = raw_tstamp;
clear_bit(idx, tx->in_use);
skb = tx->tstamps[idx].skb;
tx->tstamps[idx].skb = NULL;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index f71ad317d6c8..53c15fc9d996 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -55,15 +55,21 @@ struct ice_perout_channel {
* struct ice_tx_tstamp - Tracking for a single Tx timestamp
* @skb: pointer to the SKB for this timestamp request
* @start: jiffies when the timestamp was first requested
+ * @cached_tstamp: last read timestamp
*
* This structure tracks a single timestamp request. The SKB pointer is
* provided when initiating a request. The start time is used to ensure that
* we discard old requests that were not fulfilled within a 2 second time
* window.
+ * Timestamp values in the PHY are read only and do not get cleared except at
+ * hardware reset or when a new timestamp value is captured. The cached_tstamp
+ * field is used to detect the case where a new timestamp has not yet been
+ * captured, ensuring that we avoid sending stale timestamp data to the stack.
*/
struct ice_tx_tstamp {
struct sk_buff *skb;
unsigned long start;
+ u64 cached_tstamp;
};
/**
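
The cached_tstamp documentation above explains that the PHY latch is not cleared between captures, so a value equal to the one last consumed means no new timestamp has arrived yet. A short sketch of that stale-value check; the constants and structure are illustrative, not the driver's register layout.

/* Stale-timestamp detection sketch; illustrative only. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TS_VALID 0x1ULL

struct tstamp_slot {
	uint64_t cached_tstamp;
};

static bool have_new_tstamp(struct tstamp_slot *slot, uint64_t raw)
{
	if (!(raw & TS_VALID))
		return false;               /* hardware never latched one */
	if (raw == slot->cached_tstamp)
		return false;               /* same value as last time: stale */

	slot->cached_tstamp = raw;          /* remember what we consumed */
	return true;
}

int main(void)
{
	struct tstamp_slot slot = { 0 };

	printf("%d\n", have_new_tstamp(&slot, 0x1234 | TS_VALID)); /* 1 */
	printf("%d\n", have_new_tstamp(&slot, 0x1234 | TS_VALID)); /* 0, stale */
	return 0;
}
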
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 793f4a9fc2cd..183d93033890 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -3796,10 +3796,13 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
* ice_find_recp - find a recipe
* @hw: pointer to the hardware structure
* @lkup_exts: extension sequence to match
+ * @tun_type: type of recipe tunnel
*
* Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
*/
-static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
+static u16
+ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
+ enum ice_sw_tunnel_type tun_type)
{
bool refresh_required = true;
struct ice_sw_recipe *recp;
@@ -3860,8 +3863,9 @@ static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
}
/* If for "i"th recipe the found was never set to false
* then it means we found our match
+ * Also, the tunnel type of the recipe needs to be checked
*/
- if (found)
+ if (found && recp[i].tun_type == tun_type)
return i; /* Return the recipe ID */
}
}
@@ -4651,11 +4655,12 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
/* Look for a recipe which matches our requested fv / mask list */
- *rid = ice_find_recp(hw, lkup_exts);
+ *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
if (*rid < ICE_MAX_NUM_RECIPES)
/* Success if found a recipe that match the existing criteria */
goto err_unroll;
+ rm->tun_type = rinfo->tun_type;
/* Recipe we need does not exist, add a recipe */
status = ice_add_sw_recipe(hw, rm, profiles);
if (status)
@@ -4958,11 +4963,13 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
switch (tun_type) {
case ICE_SW_TUN_VXLAN:
+ if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
+ return ICE_ERR_CFG;
+ break;
case ICE_SW_TUN_GENEVE:
- if (!ice_get_open_tunnel_port(hw, &open_port))
+ if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
return ICE_ERR_CFG;
break;
-
default:
/* Nothing needs to be done for this tunnel type */
return 0;
@@ -5555,7 +5562,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
if (status)
return status;
- rid = ice_find_recp(hw, &lkup_exts);
+ rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
/* If we did not find a recipe that matches the existing criteria */
if (rid == ICE_MAX_NUM_RECIPES)
return ICE_ERR_PARAM;
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index e5d23feb6701..25cca5c4ae57 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -74,21 +74,13 @@ static enum ice_protocol_type ice_proto_type_from_ipv6(bool inner)
return inner ? ICE_IPV6_IL : ICE_IPV6_OFOS;
}
-static enum ice_protocol_type
-ice_proto_type_from_l4_port(bool inner, u16 ip_proto)
+static enum ice_protocol_type ice_proto_type_from_l4_port(u16 ip_proto)
{
- if (inner) {
- switch (ip_proto) {
- case IPPROTO_UDP:
- return ICE_UDP_ILOS;
- }
- } else {
- switch (ip_proto) {
- case IPPROTO_TCP:
- return ICE_TCP_IL;
- case IPPROTO_UDP:
- return ICE_UDP_OF;
- }
+ switch (ip_proto) {
+ case IPPROTO_TCP:
+ return ICE_TCP_IL;
+ case IPPROTO_UDP:
+ return ICE_UDP_ILOS;
}
return 0;
@@ -191,8 +183,9 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
i++;
}
- if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) {
- list[i].type = ice_proto_type_from_l4_port(false, hdr->l3_key.ip_proto);
+ if ((flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) &&
+ hdr->l3_key.ip_proto == IPPROTO_UDP) {
+ list[i].type = ICE_UDP_OF;
list[i].h_u.l4_hdr.dst_port = hdr->l4_key.dst_port;
list[i].m_u.l4_hdr.dst_port = hdr->l4_mask.dst_port;
i++;
@@ -317,7 +310,7 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
struct ice_tc_l4_hdr *l4_key, *l4_mask;
- list[i].type = ice_proto_type_from_l4_port(inner, headers->l3_key.ip_proto);
+ list[i].type = ice_proto_type_from_l4_port(headers->l3_key.ip_proto);
l4_key = &headers->l4_key;
l4_mask = &headers->l4_mask;
@@ -802,7 +795,8 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
headers->l3_mask.ttl = match.mask->ttl;
}
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) &&
+ fltr->tunnel_type != TNL_VXLAN && fltr->tunnel_type != TNL_GENEVE) {
struct flow_match_ports match;
flow_rule_match_enc_ports(rule, &match);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index bc3ba19dc88f..dccf09eefc75 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -419,7 +419,10 @@ void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
}
rx_skip_free:
- memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
+ if (rx_ring->xsk_pool)
+ memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
+ else
+ memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));
/* Zero out the descriptor ring */
size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
@@ -446,8 +449,13 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
rx_ring->xdp_prog = NULL;
- devm_kfree(rx_ring->dev, rx_ring->rx_buf);
- rx_ring->rx_buf = NULL;
+ if (rx_ring->xsk_pool) {
+ kfree(rx_ring->xdp_buf);
+ rx_ring->xdp_buf = NULL;
+ } else {
+ kfree(rx_ring->rx_buf);
+ rx_ring->rx_buf = NULL;
+ }
if (rx_ring->desc) {
size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
@@ -475,8 +483,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
/* warn if we are about to overwrite the pointer */
WARN_ON(rx_ring->rx_buf);
rx_ring->rx_buf =
- devm_kcalloc(dev, sizeof(*rx_ring->rx_buf), rx_ring->count,
- GFP_KERNEL);
+ kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
if (!rx_ring->rx_buf)
return -ENOMEM;
@@ -505,7 +512,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
return 0;
err:
- devm_kfree(dev, rx_ring->rx_buf);
+ kfree(rx_ring->rx_buf);
rx_ring->rx_buf = NULL;
return -ENOMEM;
}
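
With both buffer arrays now plain kcalloc() allocations, setup and teardown must allocate and free whichever array matches the ring's mode: zero-copy rings own xdp_buf, regular rings own rx_buf. A standalone sketch of that pattern using stdlib calloc/free; the structure and per-buffer size are illustrative, not the kernel types.

/* Mode-dependent buffer array sketch; illustrative only. */
#include <stdbool.h>
#include <stdlib.h>

struct fake_ring {
	unsigned int count;
	bool zero_copy;    /* stands in for rx_ring->xsk_pool != NULL */
	void **xdp_buf;    /* used only in zero-copy mode */
	void *rx_buf;      /* used only in regular mode */
};

static int ring_alloc_bufs(struct fake_ring *r)
{
	if (r->zero_copy)
		r->xdp_buf = calloc(r->count, sizeof(*r->xdp_buf));
	else
		r->rx_buf = calloc(r->count, 64 /* assumed per-buffer size */);
	return (r->zero_copy ? !!r->xdp_buf : !!r->rx_buf) ? 0 : -1;
}

static void ring_free_bufs(struct fake_ring *r)
{
	if (r->zero_copy) {
		free(r->xdp_buf);
		r->xdp_buf = NULL;
	} else {
		free(r->rx_buf);
		r->rx_buf = NULL;
	}
}

int main(void)
{
	struct fake_ring r = { .count = 512, .zero_copy = true };

	if (!ring_alloc_bufs(&r))
		ring_free_bufs(&r);
	return 0;
}
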
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index c56dd1749903..b7b3bd4816f0 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -24,7 +24,6 @@
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)
-#define ICE_RX_BUF_WRITE 16 /* Must be power of 2 */
#define ICE_MAX_TXQ_PER_TXQG 128
/* Attempt to maximize the headroom available for incoming frames. We use a 2K
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 217ff5e9a6f1..6427e7ec93de 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -1617,6 +1617,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
ice_vc_set_default_allowlist(vf);
ice_vf_fdir_exit(vf);
+ ice_vf_fdir_init(vf);
/* clean VF control VSI when resetting VFs since it should be
* setup only when VF creates its first FDIR rule.
*/
@@ -1747,6 +1748,7 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
}
ice_vf_fdir_exit(vf);
+ ice_vf_fdir_init(vf);
/* clean VF control VSI when resetting VF since it should be setup
* only when VF creates its first FDIR rule.
*/
@@ -2021,6 +2023,10 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
if (ret)
goto err_unroll_sriov;
+ /* rearm global interrupts */
+ if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
+ ice_irq_dynamic_ena(hw, NULL, NULL);
+
return 0;
err_unroll_sriov:
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index ff55cb415b11..c895351b25e0 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -12,6 +12,11 @@
#include "ice_txrx_lib.h"
#include "ice_lib.h"
+static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
+{
+ return &rx_ring->xdp_buf[idx];
+}
+
/**
* ice_qp_reset_stats - Resets all stats for rings of given index
* @vsi: VSI that contains rings of interest
@@ -372,7 +377,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
dma_addr_t dma;
rx_desc = ICE_RX_DESC(rx_ring, ntu);
- xdp = &rx_ring->xdp_buf[ntu];
+ xdp = ice_xdp_buf(rx_ring, ntu);
nb_buffs = min_t(u16, count, rx_ring->count - ntu);
nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
@@ -383,20 +388,16 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
while (i--) {
dma = xsk_buff_xdp_get_dma(*xdp);
rx_desc->read.pkt_addr = cpu_to_le64(dma);
+ rx_desc->wb.status_error0 = 0;
rx_desc++;
xdp++;
}
ntu += nb_buffs;
- if (ntu == rx_ring->count) {
- rx_desc = ICE_RX_DESC(rx_ring, 0);
- xdp = rx_ring->xdp_buf;
+ if (ntu == rx_ring->count)
ntu = 0;
- }
- /* clear the status bits for the next_to_use descriptor */
- rx_desc->wb.status_error0 = 0;
ice_release_rx_desc(rx_ring, ntu);
return count == nb_buffs;
@@ -418,19 +419,18 @@ static void ice_bump_ntc(struct ice_rx_ring *rx_ring)
/**
* ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
* @rx_ring: Rx ring
- * @xdp_arr: Pointer to the SW ring of xdp_buff pointers
+ * @xdp: Pointer to XDP buffer
*
* This function allocates a new skb from a zero-copy Rx buffer.
*
* Returns the skb on success, NULL on failure.
*/
static struct sk_buff *
-ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff **xdp_arr)
+ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
- struct xdp_buff *xdp = *xdp_arr;
+ unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
unsigned int metasize = xdp->data - xdp->data_meta;
unsigned int datasize = xdp->data_end - xdp->data;
- unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
struct sk_buff *skb;
skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
@@ -444,7 +444,6 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff **xdp_arr)
skb_metadata_set(skb, metasize);
xsk_buff_free(xdp);
- *xdp_arr = NULL;
return skb;
}
@@ -506,7 +505,6 @@ out_failure:
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
- u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
struct ice_tx_ring *xdp_ring;
unsigned int xdp_xmit = 0;
struct bpf_prog *xdp_prog;
@@ -521,7 +519,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
while (likely(total_rx_packets < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
unsigned int size, xdp_res = 0;
- struct xdp_buff **xdp;
+ struct xdp_buff *xdp;
struct sk_buff *skb;
u16 stat_err_bits;
u16 vlan_tag = 0;
@@ -539,31 +537,35 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
*/
dma_rmb();
+ xdp = *ice_xdp_buf(rx_ring, rx_ring->next_to_clean);
+
size = le16_to_cpu(rx_desc->wb.pkt_len) &
ICE_RX_FLX_DESC_PKT_LEN_M;
- if (!size)
- break;
+ if (!size) {
+ xdp->data = NULL;
+ xdp->data_end = NULL;
+ xdp->data_hard_start = NULL;
+ xdp->data_meta = NULL;
+ goto construct_skb;
+ }
- xdp = &rx_ring->xdp_buf[rx_ring->next_to_clean];
- xsk_buff_set_size(*xdp, size);
- xsk_buff_dma_sync_for_cpu(*xdp, rx_ring->xsk_pool);
+ xsk_buff_set_size(xdp, size);
+ xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);
- xdp_res = ice_run_xdp_zc(rx_ring, *xdp, xdp_prog, xdp_ring);
+ xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring);
if (xdp_res) {
if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
xdp_xmit |= xdp_res;
else
- xsk_buff_free(*xdp);
+ xsk_buff_free(xdp);
- *xdp = NULL;
total_rx_bytes += size;
total_rx_packets++;
- cleaned_count++;
ice_bump_ntc(rx_ring);
continue;
}
-
+construct_skb:
/* XDP_PASS path */
skb = ice_construct_skb_zc(rx_ring, xdp);
if (!skb) {
@@ -571,7 +573,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
break;
}
- cleaned_count++;
ice_bump_ntc(rx_ring);
if (eth_skb_pad(skb)) {
@@ -593,8 +594,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
ice_receive_skb(rx_ring, skb, vlan_tag);
}
- if (cleaned_count >= ICE_RX_BUF_WRITE)
- failure = !ice_alloc_rx_bufs_zc(rx_ring, cleaned_count);
+ failure = !ice_alloc_rx_bufs_zc(rx_ring, ICE_DESC_UNUSED(rx_ring));
ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
@@ -810,15 +810,14 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
*/
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
- u16 i;
-
- for (i = 0; i < rx_ring->count; i++) {
- struct xdp_buff **xdp = &rx_ring->xdp_buf[i];
+ u16 count_mask = rx_ring->count - 1;
+ u16 ntc = rx_ring->next_to_clean;
+ u16 ntu = rx_ring->next_to_use;
- if (!xdp)
- continue;
+ for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+ struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);
- *xdp = NULL;
+ xsk_buff_free(xdp);
}
}
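
The cleanup now walks only the occupied slots between next_to_clean and next_to_use, using the power-of-two ring size so "(i + 1) & (count - 1)" wraps the index for free. A tiny sketch of that walk with an assumed 8-entry ring; the slot numbers are illustrative.

/* Ring-index wrap sketch; illustrative only. */
#include <stdio.h>

#define RING_COUNT 8   /* must be a power of two */

int main(void)
{
	unsigned short count_mask = RING_COUNT - 1;
	unsigned short ntc = 6;   /* next_to_clean */
	unsigned short ntu = 2;   /* next_to_use, already wrapped past the end */

	for (; ntc != ntu; ntc = (ntc + 1) & count_mask)
		printf("free buffer at slot %u\n", ntc);   /* visits 6, 7, 0, 1 */

	return 0;
}
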
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 836be0d3b291..446894dde182 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -7648,6 +7648,20 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
struct vf_mac_filter *entry = NULL;
int ret = 0;
+ if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
+ !vf_data->trusted) {
+ dev_warn(&pdev->dev,
+ "VF %d requested MAC filter but is administratively denied\n",
+ vf);
+ return -EINVAL;
+ }
+ if (!is_valid_ether_addr(addr)) {
+ dev_warn(&pdev->dev,
+ "VF %d attempted to set invalid MAC filter\n",
+ vf);
+ return -EINVAL;
+ }
+
switch (info) {
case E1000_VF_MAC_FILTER_CLR:
/* remove all unicast MAC filters related to the current VF */
@@ -7661,20 +7675,6 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
}
break;
case E1000_VF_MAC_FILTER_ADD:
- if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
- !vf_data->trusted) {
- dev_warn(&pdev->dev,
- "VF %d requested MAC filter but is administratively denied\n",
- vf);
- return -EINVAL;
- }
- if (!is_valid_ether_addr(addr)) {
- dev_warn(&pdev->dev,
- "VF %d attempted to set invalid MAC filter\n",
- vf);
- return -EINVAL;
- }
-
/* try to find empty slot in the list */
list_for_each(pos, &adapter->vf_macs.l) {
entry = list_entry(pos, struct vf_mac_filter, l);
@@ -8026,7 +8026,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
if (likely(napi_complete_done(napi, work_done)))
igb_ring_irq_enable(q_vector);
- return min(work_done, budget - 1);
+ return work_done;
}
/**
@@ -9254,7 +9254,7 @@ static int __maybe_unused igb_suspend(struct device *dev)
return __igb_shutdown(to_pci_dev(dev), NULL, 0);
}
-static int __maybe_unused igb_resume(struct device *dev)
+static int __maybe_unused __igb_resume(struct device *dev, bool rpm)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -9297,17 +9297,24 @@ static int __maybe_unused igb_resume(struct device *dev)
wr32(E1000_WUS, ~0);
- rtnl_lock();
+ if (!rpm)
+ rtnl_lock();
if (!err && netif_running(netdev))
err = __igb_open(netdev, true);
if (!err)
netif_device_attach(netdev);
- rtnl_unlock();
+ if (!rpm)
+ rtnl_unlock();
return err;
}
+static int __maybe_unused igb_resume(struct device *dev)
+{
+ return __igb_resume(dev, false);
+}
+
static int __maybe_unused igb_runtime_idle(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
@@ -9326,7 +9333,7 @@ static int __maybe_unused igb_runtime_suspend(struct device *dev)
static int __maybe_unused igb_runtime_resume(struct device *dev)
{
- return igb_resume(dev);
+ return __igb_resume(dev, true);
}
static void igb_shutdown(struct pci_dev *pdev)
@@ -9442,7 +9449,7 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot. Implementation
- * resembles the first-half of the igb_resume routine.
+ * resembles the first-half of the __igb_resume routine.
**/
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
@@ -9482,7 +9489,7 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
*
* This callback is called when the error recovery driver tells us that
* its OK to resume normal operation. Implementation resembles the
- * second-half of the igb_resume routine.
+ * second-half of the __igb_resume routine.
*/
static void igb_io_resume(struct pci_dev *pdev)
{
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 74ccd622251a..4d988da68394 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2859,6 +2859,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_hw_init:
+ netif_napi_del(&adapter->rx_ring->napi);
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
err_sw_init:
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
index b2ef9fde97b3..b6807e16eea9 100644
--- a/drivers/net/ethernet/intel/igc/igc_i225.c
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -636,7 +636,7 @@ s32 igc_set_ltr_i225(struct igc_hw *hw, bool link)
ltrv = rd32(IGC_LTRMAXV);
if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) {
ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max |
- (scale_min << IGC_LTRMAXV_SCALE_SHIFT);
+ (scale_max << IGC_LTRMAXV_SCALE_SHIFT);
wr32(IGC_LTRMAXV, ltrv);
}
}
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 8e448288ee26..d28a80a00953 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -5467,6 +5467,9 @@ static irqreturn_t igc_intr_msi(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+ if (icr & IGC_ICR_TS)
+ igc_tsync_interrupt(adapter);
+
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
@@ -5510,6 +5513,9 @@ static irqreturn_t igc_intr(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+ if (icr & IGC_ICR_TS)
+ igc_tsync_interrupt(adapter);
+
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 30568e3544cd..4f9245aa79a1 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -768,7 +768,20 @@ int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
*/
static bool igc_is_crosststamp_supported(struct igc_adapter *adapter)
{
- return IS_ENABLED(CONFIG_X86_TSC) ? pcie_ptm_enabled(adapter->pdev) : false;
+ if (!IS_ENABLED(CONFIG_X86_TSC))
+ return false;
+
+ /* FIXME: it was noticed that enabling support for PCIe PTM in
+ * some i225-V models could cause lockups when bringing the
+ * interface up/down. There should be no downsides to
+ * disabling crosstimestamping support for i225-V, as it
+ * doesn't have any PTP support. That way we gain some time
+ * while root causing the issue.
+ */
+ if (adapter->pdev->device == IGC_DEV_ID_I225_V)
+ return false;
+
+ return pcie_ptm_enabled(adapter->pdev);
}
static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 0f9f022260d7..45e2ec4d264d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -5531,6 +5531,10 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
if (!speed && hw->mac.ops.get_link_capabilities) {
ret = hw->mac.ops.get_link_capabilities(hw, &speed,
&autoneg);
+ /* remove NBASE-T speeds from default autonegotiation
+ * to accommodate broken network switches in the field
+ * which cannot cope with advertised NBASE-T speeds
+ */
speed &= ~(IXGBE_LINK_SPEED_5GB_FULL |
IXGBE_LINK_SPEED_2_5GB_FULL);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 9724ffb16518..e4b50c7781ff 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -3405,6 +3405,9 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
/* flush pending Tx transactions */
ixgbe_clear_tx_pending(hw);
+ /* set MDIO speed before talking to the PHY in case it's the 1st time */
+ ixgbe_set_mdio_speed(hw);
+
/* PHY ops must be identified and initialized prior to reset */
status = hw->phy.ops.init(hw);
if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 0da09ea81980..80bfaf2fec92 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -71,6 +71,8 @@ struct xrx200_priv {
struct xrx200_chan chan_tx;
struct xrx200_chan chan_rx;
+ u16 rx_buf_size;
+
struct net_device *net_dev;
struct device *dev;
@@ -97,6 +99,16 @@ static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set,
xrx200_pmac_w32(priv, val, offset);
}
+static int xrx200_max_frame_len(int mtu)
+{
+ return VLAN_ETH_HLEN + mtu;
+}
+
+static int xrx200_buffer_size(int mtu)
+{
+ return round_up(xrx200_max_frame_len(mtu), 4 * XRX200_DMA_BURST_LEN);
+}
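
The two helpers above centralize the frame-length math: the maximum frame is VLAN_ETH_HLEN plus the MTU, and the DMA buffer is that length rounded up to a multiple of four DMA bursts. A worked sketch of the arithmetic; the burst length of 8 is an assumption about XRX200_DMA_BURST_LEN for illustration, giving a 32-byte granularity here.

/* Worked example of the buffer-size helpers; assumed burst length. */
#include <stdio.h>

#define VLAN_ETH_HLEN 18
#define DMA_BURST_LEN 8   /* assumed value, see lead-in */

static int max_frame_len(int mtu)
{
	return VLAN_ETH_HLEN + mtu;
}

static int round_up_to(int x, int step)
{
	return ((x + step - 1) / step) * step;
}

static int buffer_size(int mtu)
{
	return round_up_to(max_frame_len(mtu), 4 * DMA_BURST_LEN);
}

int main(void)
{
	/* a default 1500-byte MTU needs 1518 bytes, padded to 1536 */
	printf("mtu 1500 -> buf %d\n", buffer_size(1500));
	return 0;
}
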
+
/* drop all the packets from the DMA ring */
static void xrx200_flush_dma(struct xrx200_chan *ch)
{
@@ -109,8 +121,7 @@ static void xrx200_flush_dma(struct xrx200_chan *ch)
break;
desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
- (ch->priv->net_dev->mtu + VLAN_ETH_HLEN +
- ETH_FCS_LEN);
+ ch->priv->rx_buf_size;
ch->dma.desc++;
ch->dma.desc %= LTQ_DESC_NUM;
}
@@ -158,21 +169,21 @@ static int xrx200_close(struct net_device *net_dev)
static int xrx200_alloc_skb(struct xrx200_chan *ch)
{
- int len = ch->priv->net_dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
struct sk_buff *skb = ch->skb[ch->dma.desc];
+ struct xrx200_priv *priv = ch->priv;
dma_addr_t mapping;
int ret = 0;
- ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
- len);
+ ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(priv->net_dev,
+ priv->rx_buf_size);
if (!ch->skb[ch->dma.desc]) {
ret = -ENOMEM;
goto skip;
}
- mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
- len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
+ mapping = dma_map_single(priv->dev, ch->skb[ch->dma.desc]->data,
+ priv->rx_buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, mapping))) {
dev_kfree_skb_any(ch->skb[ch->dma.desc]);
ch->skb[ch->dma.desc] = skb;
ret = -ENOMEM;
@@ -184,7 +195,7 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
wmb();
skip:
ch->dma.desc_base[ch->dma.desc].ctl =
- LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | len;
+ LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | priv->rx_buf_size;
return ret;
}
@@ -213,7 +224,7 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
skb->protocol = eth_type_trans(skb, net_dev);
netif_receive_skb(skb);
net_dev->stats.rx_packets++;
- net_dev->stats.rx_bytes += len - ETH_FCS_LEN;
+ net_dev->stats.rx_bytes += len;
return 0;
}
@@ -356,6 +367,7 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
int ret = 0;
net_dev->mtu = new_mtu;
+ priv->rx_buf_size = xrx200_buffer_size(new_mtu);
if (new_mtu <= old_mtu)
return ret;
@@ -375,6 +387,7 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
ret = xrx200_alloc_skb(ch_rx);
if (ret) {
net_dev->mtu = old_mtu;
+ priv->rx_buf_size = xrx200_buffer_size(old_mtu);
break;
}
dev_kfree_skb_any(skb);
@@ -505,7 +518,8 @@ static int xrx200_probe(struct platform_device *pdev)
net_dev->netdev_ops = &xrx200_netdev_ops;
SET_NETDEV_DEV(net_dev, dev);
net_dev->min_mtu = ETH_ZLEN;
- net_dev->max_mtu = XRX200_DMA_DATA_LEN - VLAN_ETH_HLEN - ETH_FCS_LEN;
+ net_dev->max_mtu = XRX200_DMA_DATA_LEN - xrx200_max_frame_len(0);
+ priv->rx_buf_size = xrx200_buffer_size(ETH_DATA_LEN);
/* load the memory ranges */
priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 2b18d89d9756..6da8a595026b 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -2960,11 +2960,11 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
if (priv->percpu_pools) {
- err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->id, 0);
+ err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->logic_rxq, 0);
if (err < 0)
goto err_free_dma;
- err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->id, 0);
+ err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->logic_rxq, 0);
if (err < 0)
goto err_unregister_rxq_short;
@@ -5017,11 +5017,13 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu)
mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
}
+ if (port->xdp_prog && mtu > MVPP2_MAX_RX_BUF_SIZE) {
+ netdev_err(dev, "Illegal MTU value %d (> %d) for XDP mode\n",
+ mtu, (int)MVPP2_MAX_RX_BUF_SIZE);
+ return -EINVAL;
+ }
+
if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) {
- if (port->xdp_prog) {
- netdev_err(dev, "Jumbo frames are not supported with XDP\n");
- return -EINVAL;
- }
if (priv->percpu_pools) {
netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu);
mvpp2_bm_switch_buffers(priv, false);
@@ -5307,8 +5309,8 @@ static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf)
bool running = netif_running(port->dev);
bool reset = !prog != !port->xdp_prog;
- if (port->dev->mtu > ETH_DATA_LEN) {
- NL_SET_ERR_MSG_MOD(bpf->extack, "XDP is not supported with jumbo frames enabled");
+ if (port->dev->mtu > MVPP2_MAX_RX_BUF_SIZE) {
+ NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP");
return -EOPNOTSUPP;
}
@@ -7456,7 +7458,7 @@ static int mvpp2_probe(struct platform_device *pdev)
shared = num_present_cpus() - priv->nthreads;
if (shared > 0)
- bitmap_fill(&priv->lock_map,
+ bitmap_set(&priv->lock_map, 0,
min_t(int, shared, MVPP2_MAX_THREADS));
for (i = 0; i < MVPP2_MAX_THREADS; i++) {
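
The switch from bitmap_fill() to bitmap_set() matters because fill sets every bit of the underlying words (rounded up to a whole long), while set touches exactly the requested number of bits. A plain-C sketch of the exact-count behaviour; this uses simple bit operations rather than the kernel bitmap API.

/* Exact-count bit setting sketch; illustrative only. */
#include <stdio.h>

static void set_bits(unsigned long *map, unsigned int start, unsigned int n)
{
	unsigned int i;

	for (i = start; i < start + n; i++)
		map[i / (8 * sizeof(*map))] |= 1UL << (i % (8 * sizeof(*map)));
}

int main(void)
{
	unsigned long lock_map = 0;

	/* only mark the "shared" threads, not every bit in the word */
	set_bits(&lock_map, 0, 3);
	printf("lock_map = 0x%lx\n", lock_map);   /* 0x7 */
	return 0;
}
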
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index cb56e171ddd4..3ca6b942ebe2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -2341,7 +2341,7 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
goto free_regions;
break;
default:
- return err;
+ goto free_regions;
}
mw->mbox_wq = alloc_workqueue(name,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index 0ef68fdd1f26..61c20907315f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -5,6 +5,8 @@
*
*/
+#include <linux/module.h>
+
#include "otx2_common.h"
#include "otx2_ptp.h"
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 4369a3ffad45..c687dc9aa973 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -54,12 +54,14 @@ int prestera_port_pvid_set(struct prestera_port *port, u16 vid)
struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
u32 dev_id, u32 hw_id)
{
- struct prestera_port *port = NULL;
+ struct prestera_port *port = NULL, *tmp;
read_lock(&sw->port_list_lock);
- list_for_each_entry(port, &sw->port_list, list) {
- if (port->dev_id == dev_id && port->hw_id == hw_id)
+ list_for_each_entry(tmp, &sw->port_list, list) {
+ if (tmp->dev_id == dev_id && tmp->hw_id == hw_id) {
+ port = tmp;
break;
+ }
}
read_unlock(&sw->port_list_lock);
@@ -68,12 +70,14 @@ struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id)
{
- struct prestera_port *port = NULL;
+ struct prestera_port *port = NULL, *tmp;
read_lock(&sw->port_list_lock);
- list_for_each_entry(port, &sw->port_list, list) {
- if (port->id == id)
+ list_for_each_entry(tmp, &sw->port_list, list) {
+ if (tmp->id == id) {
+ port = tmp;
break;
+ }
}
read_unlock(&sw->port_list_lock);
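
The prestera lookups above were fixed because a list_for_each_entry() cursor never becomes NULL after a full walk; on a miss it points at a bogus entry computed from the list head, so "not found" must be tracked in a separate result pointer. A sketch of the same idea with a plain singly linked list instead of the kernel list API.

/* Separate-result-pointer lookup sketch; illustrative only. */
#include <stddef.h>
#include <stdio.h>

struct port {
	unsigned int id;
	struct port *next;
};

static struct port *find_port(struct port *head, unsigned int id)
{
	struct port *port = NULL, *tmp;

	for (tmp = head; tmp; tmp = tmp->next) {
		if (tmp->id == id) {
			port = tmp;   /* only assign on an actual match */
			break;
		}
	}
	return port;              /* stays NULL when nothing matched */
}

int main(void)
{
	struct port b = { 2, NULL }, a = { 1, &b };

	printf("%p\n", (void *)find_port(&a, 3));  /* null pointer: no such port */
	return 0;
}
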
@@ -764,23 +768,27 @@ static int prestera_netdev_port_event(struct net_device *lower,
struct net_device *dev,
unsigned long event, void *ptr)
{
- struct netdev_notifier_changeupper_info *info = ptr;
+ struct netdev_notifier_info *info = ptr;
+ struct netdev_notifier_changeupper_info *cu_info;
struct prestera_port *port = netdev_priv(dev);
struct netlink_ext_ack *extack;
struct net_device *upper;
- extack = netdev_notifier_info_to_extack(&info->info);
- upper = info->upper_dev;
+ extack = netdev_notifier_info_to_extack(info);
+ cu_info = container_of(info,
+ struct netdev_notifier_changeupper_info,
+ info);
switch (event) {
case NETDEV_PRECHANGEUPPER:
+ upper = cu_info->upper_dev;
if (!netif_is_bridge_master(upper) &&
!netif_is_lag_master(upper)) {
NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
return -EINVAL;
}
- if (!info->linking)
+ if (!cu_info->linking)
break;
if (netdev_has_any_upper_dev(upper)) {
@@ -789,7 +797,7 @@ static int prestera_netdev_port_event(struct net_device *lower,
}
if (netif_is_lag_master(upper) &&
- !prestera_lag_master_check(upper, info->upper_info, extack))
+ !prestera_lag_master_check(upper, cu_info->upper_info, extack))
return -EOPNOTSUPP;
if (netif_is_lag_master(upper) && vlan_uses_dev(dev)) {
NL_SET_ERR_MSG_MOD(extack,
@@ -805,14 +813,15 @@ static int prestera_netdev_port_event(struct net_device *lower,
break;
case NETDEV_CHANGEUPPER:
+ upper = cu_info->upper_dev;
if (netif_is_bridge_master(upper)) {
- if (info->linking)
+ if (cu_info->linking)
return prestera_bridge_port_join(upper, port,
extack);
else
prestera_bridge_port_leave(upper, port);
} else if (netif_is_lag_master(upper)) {
- if (info->linking)
+ if (cu_info->linking)
return prestera_lag_port_add(port, upper);
else
prestera_lag_port_del(port);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
index 3ce6ccd0f539..b4599fe4ca8d 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
@@ -497,8 +497,8 @@ int prestera_bridge_port_join(struct net_device *br_dev,
br_port = prestera_bridge_port_add(bridge, port->dev);
if (IS_ERR(br_port)) {
- err = PTR_ERR(br_port);
- goto err_brport_create;
+ prestera_bridge_put(bridge);
+ return PTR_ERR(br_port);
}
err = switchdev_bridge_port_offload(br_port->dev, port->dev, NULL,
@@ -519,8 +519,6 @@ err_port_join:
switchdev_bridge_port_unoffload(br_port->dev, NULL, NULL, NULL);
err_switchdev_offload:
prestera_bridge_port_put(br_port);
-err_brport_create:
- prestera_bridge_put(bridge);
return err;
}
@@ -1124,7 +1122,7 @@ static int prestera_switchdev_blk_event(struct notifier_block *unused,
prestera_port_obj_attr_set);
break;
default:
- err = -EOPNOTSUPP;
+ return NOTIFY_DONE;
}
return notifier_from_errno(err);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 066d79e4ecfc..10238bedd694 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -670,7 +670,7 @@ void __init mlx4_en_init_ptys2ethtool_map(void)
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_T, SPEED_1000,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_CX_SGMII, SPEED_1000,
- ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_KX, SPEED_1000,
ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_T, SPEED_10000,
@@ -682,9 +682,9 @@ void __init mlx4_en_init_ptys2ethtool_map(void)
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KR, SPEED_10000,
ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CR, SPEED_10000,
- ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+ ETHTOOL_LINK_MODE_10000baseCR_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_SR, SPEED_10000,
- ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+ ETHTOOL_LINK_MODE_10000baseSR_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_20GBASE_KR2, SPEED_20000,
ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT,
ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 3f6d5c384637..f1c10f2bda78 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2286,9 +2286,14 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
bool carry_xdp_prog)
{
struct bpf_prog *xdp_prog;
- int i, t;
+ int i, t, ret;
- mlx4_en_copy_priv(tmp, priv, prof);
+ ret = mlx4_en_copy_priv(tmp, priv, prof);
+ if (ret) {
+ en_warn(priv, "%s: mlx4_en_copy_priv() failed, return\n",
+ __func__);
+ return ret;
+ }
if (mlx4_en_alloc_resources(tmp)) {
en_warn(priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 8eaa24d865c5..a46284ca5172 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -341,6 +341,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DEALLOC_SF:
case MLX5_CMD_OP_DESTROY_UCTX:
case MLX5_CMD_OP_DESTROY_UMEM:
+ case MLX5_CMD_OP_MODIFY_RQT:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -446,7 +447,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_MODIFY_TIS:
case MLX5_CMD_OP_QUERY_TIS:
case MLX5_CMD_OP_CREATE_RQT:
- case MLX5_CMD_OP_MODIFY_RQT:
case MLX5_CMD_OP_QUERY_RQT:
case MLX5_CMD_OP_CREATE_FLOW_TABLE:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index f0ac6b0d9653..b47a0d3ef22f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -783,6 +783,8 @@ struct mlx5e_channel {
DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
int ix;
int cpu;
+ /* Sync between icosq recovery and XSK enable/disable. */
+ struct mutex icosq_recovery_lock;
};
struct mlx5e_ptp;
@@ -1014,9 +1016,6 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param);
void mlx5e_destroy_rq(struct mlx5e_rq *rq);
struct mlx5e_sq_param;
-int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
- struct mlx5e_sq_param *param, struct mlx5e_icosq *sq);
-void mlx5e_close_icosq(struct mlx5e_icosq *sq);
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
struct mlx5e_xdpsq *sq, bool is_redirect);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
index d5b7110a4265..0107e4e73bb0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
@@ -30,6 +30,8 @@ void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv);
void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq);
void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq);
void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq);
+void mlx5e_reporter_icosq_suspend_recovery(struct mlx5e_channel *c);
+void mlx5e_reporter_icosq_resume_recovery(struct mlx5e_channel *c);
#define MLX5E_REPORTER_PER_Q_MAX_LEN 256
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
index d6c7c81690eb..7c9dd3a75f8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
@@ -66,7 +66,7 @@ mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
static inline void
mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
- struct sk_buff *skb) {}
+ struct sk_buff *skb) { napi_gro_receive(rq->cq.napi, skb); }
#endif /* CONFIG_MLX5_CLS_ACT */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 74086eb556ae..2684e9da9f41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -62,6 +62,7 @@ static void mlx5e_reset_icosq_cc_pc(struct mlx5e_icosq *icosq)
static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
{
+ struct mlx5e_rq *xskrq = NULL;
struct mlx5_core_dev *mdev;
struct mlx5e_icosq *icosq;
struct net_device *dev;
@@ -70,7 +71,13 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
int err;
icosq = ctx;
+
+ mutex_lock(&icosq->channel->icosq_recovery_lock);
+
+ /* mlx5e_close_rq cancels this work before RQ and ICOSQ are killed. */
rq = &icosq->channel->rq;
+ if (test_bit(MLX5E_RQ_STATE_ENABLED, &icosq->channel->xskrq.state))
+ xskrq = &icosq->channel->xskrq;
mdev = icosq->channel->mdev;
dev = icosq->channel->netdev;
err = mlx5_core_query_sq_state(mdev, icosq->sqn, &state);
@@ -84,6 +91,9 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
goto out;
mlx5e_deactivate_rq(rq);
+ if (xskrq)
+ mlx5e_deactivate_rq(xskrq);
+
err = mlx5e_wait_for_icosq_flush(icosq);
if (err)
goto out;
@@ -97,15 +107,28 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
goto out;
mlx5e_reset_icosq_cc_pc(icosq);
+
mlx5e_free_rx_in_progress_descs(rq);
+ if (xskrq)
+ mlx5e_free_rx_in_progress_descs(xskrq);
+
clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
mlx5e_activate_icosq(icosq);
- mlx5e_activate_rq(rq);
+ mlx5e_activate_rq(rq);
rq->stats->recover++;
+
+ if (xskrq) {
+ mlx5e_activate_rq(xskrq);
+ xskrq->stats->recover++;
+ }
+
+ mutex_unlock(&icosq->channel->icosq_recovery_lock);
+
return 0;
out:
clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
+ mutex_unlock(&icosq->channel->icosq_recovery_lock);
return err;
}
@@ -706,6 +729,16 @@ void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq)
mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
}
+void mlx5e_reporter_icosq_suspend_recovery(struct mlx5e_channel *c)
+{
+ mutex_lock(&c->icosq_recovery_lock);
+}
+
+void mlx5e_reporter_icosq_resume_recovery(struct mlx5e_channel *c)
+{
+ mutex_unlock(&c->icosq_recovery_lock);
+}
+
static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = {
.name = "rx",
.recover = mlx5e_rx_reporter_recover,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 4f4bc8726ec4..614cd9477600 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -466,6 +466,14 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms
return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
+static int mlx5e_tx_reporter_timeout_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
+ void *ctx)
+{
+ struct mlx5e_tx_timeout_ctx *to_ctx = ctx;
+
+ return mlx5e_tx_reporter_dump_sq(priv, fmsg, to_ctx->sq);
+}
+
static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
struct devlink_fmsg *fmsg)
{
@@ -561,7 +569,7 @@ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
to_ctx.sq = sq;
err_ctx.ctx = &to_ctx;
err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
- err_ctx.dump = mlx5e_tx_reporter_dump_sq;
+ err_ctx.dump = mlx5e_tx_reporter_timeout_dump;
snprintf(err_str, sizeof(err_str),
"TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u",
sq->ch_ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index 142953847996..0015a81eb9a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -13,6 +13,9 @@ struct mlx5e_rx_res {
unsigned int max_nch;
u32 drop_rqn;
+ struct mlx5e_packet_merge_param pkt_merge_param;
+ struct rw_semaphore pkt_merge_param_sem;
+
struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS];
bool rss_active;
u32 rss_rqns[MLX5E_INDIR_RQT_SIZE];
@@ -392,6 +395,7 @@ static int mlx5e_rx_res_ptp_init(struct mlx5e_rx_res *res)
if (err)
goto out;
+ /* Separated from the channels RQs, does not share pkt_merge state with them */
mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
mlx5e_rqt_get_rqtn(&res->ptp.rqt),
inner_ft_support);
@@ -447,6 +451,9 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
res->max_nch = max_nch;
res->drop_rqn = drop_rqn;
+ res->pkt_merge_param = *init_pkt_merge_param;
+ init_rwsem(&res->pkt_merge_param_sem);
+
err = mlx5e_rx_res_rss_init_def(res, init_pkt_merge_param, init_nch);
if (err)
goto err_out;
@@ -513,7 +520,7 @@ u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res)
return mlx5e_tir_get_tirn(&res->ptp.tir);
}
-u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
+static u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt);
}
@@ -656,6 +663,9 @@ int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
if (!builder)
return -ENOMEM;
+ down_write(&res->pkt_merge_param_sem);
+ res->pkt_merge_param = *pkt_merge_param;
+
mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);
final_err = 0;
@@ -681,6 +691,7 @@ int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
}
}
+ up_write(&res->pkt_merge_param_sem);
mlx5e_tir_builder_free(builder);
return final_err;
}
@@ -689,3 +700,31 @@ struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *
{
return mlx5e_rss_get_hash(res->rss[0]);
}
+
+int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq,
+ struct mlx5e_tir *tir)
+{
+ bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
+ struct mlx5e_tir_builder *builder;
+ u32 rqtn;
+ int err;
+
+ builder = mlx5e_tir_builder_alloc(false);
+ if (!builder)
+ return -ENOMEM;
+
+ rqtn = mlx5e_rx_res_get_rqtn_direct(res, rxq);
+
+ mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, rqtn,
+ inner_ft_support);
+ mlx5e_tir_builder_build_direct(builder);
+ mlx5e_tir_builder_build_tls(builder);
+ down_read(&res->pkt_merge_param_sem);
+ mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
+ err = mlx5e_tir_init(tir, builder, res->mdev, false);
+ up_read(&res->pkt_merge_param_sem);
+
+ mlx5e_tir_builder_free(builder);
+
+ return err;
+}
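
The rx_res.c changes above cache the packet-merge parameters in the resource struct and guard them with a rw_semaphore: the set_param path updates the cached value under the write lock, while the new TLS TIR constructor takes the read lock so it builds against a consistent snapshot. The following is a small userspace sketch of that discipline using a pthread rwlock; the structure and field names are placeholders, not the mlx5 ones.

#include <pthread.h>
#include <stdio.h>

struct merge_param {
        int type;
        int timeout;
};

static struct merge_param current_param = { .type = 0, .timeout = 16 };
static pthread_rwlock_t param_lock = PTHREAD_RWLOCK_INITIALIZER;

static void set_param(const struct merge_param *p)
{
        pthread_rwlock_wrlock(&param_lock);
        current_param = *p;             /* writer: exclusive update */
        pthread_rwlock_unlock(&param_lock);
}

static void build_object(void)
{
        struct merge_param snap;

        pthread_rwlock_rdlock(&param_lock);
        snap = current_param;           /* reader: consistent snapshot */
        pthread_rwlock_unlock(&param_lock);

        printf("building with type=%d timeout=%d\n", snap.type, snap.timeout);
}

int main(void)
{
        struct merge_param p = { .type = 1, .timeout = 32 };

        set_param(&p);
        build_object();
        return 0;
}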
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
index d09f7d174a51..b39b20a720e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
@@ -37,9 +37,6 @@ u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types
u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res);
-/* RQTN getters for modules that create their own TIRs */
-u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix);
-
/* Activate/deactivate API */
void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs);
void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res);
@@ -69,4 +66,7 @@ struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx);
/* Workaround for hairpin */
struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res);
+/* Accel TIRs */
+int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq,
+ struct mlx5e_tir *tir);
#endif /* __MLX5_EN_RX_RES_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 538bc2419bd8..8526a5fbbf0b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -4,6 +4,7 @@
#include "setup.h"
#include "en/params.h"
#include "en/txrx.h"
+#include "en/health.h"
/* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may
* change unexpectedly, and mlx5e has a minimum valid stride size for striding
@@ -170,7 +171,13 @@ void mlx5e_close_xsk(struct mlx5e_channel *c)
void mlx5e_activate_xsk(struct mlx5e_channel *c)
{
+ /* ICOSQ recovery deactivates RQs. Suspend the recovery to avoid
+ * activating XSKRQ in the middle of recovery.
+ */
+ mlx5e_reporter_icosq_suspend_recovery(c);
set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
+ mlx5e_reporter_icosq_resume_recovery(c);
+
/* TX queue is created active. */
spin_lock_bh(&c->async_icosq_lock);
@@ -180,6 +187,13 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c)
void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
{
- mlx5e_deactivate_rq(&c->xskrq);
+ /* ICOSQ recovery may reactivate XSKRQ if clear_bit is called in the
+ * middle of recovery. Suspend the recovery to avoid it.
+ */
+ mlx5e_reporter_icosq_suspend_recovery(c);
+ clear_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
+ mlx5e_reporter_icosq_resume_recovery(c);
+ synchronize_net(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
+
/* TX queue is disabled on close. */
}
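
The setup.c hunks above wrap the XSK RQ enable/disable bit flips in the new icosq recovery mutex, so a recovery run either completes before a toggle or only starts after it. A rough userspace sketch of that interlock, with invented names, might look like this:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t recovery_lock = PTHREAD_MUTEX_INITIALIZER;
static bool xsk_enabled;

static void recover_queues(void)
{
        pthread_mutex_lock(&recovery_lock);
        if (xsk_enabled)
                printf("restarting XSK queue as part of recovery\n");
        printf("restarting regular queue\n");
        pthread_mutex_unlock(&recovery_lock);
}

static void set_xsk_enabled(bool enable)
{
        pthread_mutex_lock(&recovery_lock);     /* "suspend" recovery */
        xsk_enabled = enable;
        pthread_mutex_unlock(&recovery_lock);   /* "resume" recovery */
}

int main(void)
{
        set_xsk_enabled(true);
        recover_queues();
        set_xsk_enabled(false);
        return 0;
}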
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index fb5397324aa4..2db9573a3fe6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -191,7 +191,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
eseg->swp_inner_l4_offset =
(skb->csum_start + skb->head - skb->data) / 2;
- if (skb->protocol == htons(ETH_P_IPV6))
+ if (inner_ip_hdr(skb)->version == 6)
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
break;
default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index a2a9f68579dd..15711814d2d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -100,25 +100,6 @@ mlx5e_ktls_rx_resync_create_resp_list(void)
return resp_list;
}
-static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 rqtn)
-{
- struct mlx5e_tir_builder *builder;
- int err;
-
- builder = mlx5e_tir_builder_alloc(false);
- if (!builder)
- return -ENOMEM;
-
- mlx5e_tir_builder_build_rqt(builder, mdev->mlx5e_res.hw_objs.td.tdn, rqtn, false);
- mlx5e_tir_builder_build_direct(builder);
- mlx5e_tir_builder_build_tls(builder);
- err = mlx5e_tir_init(tir, builder, mdev, false);
-
- mlx5e_tir_builder_free(builder);
-
- return err;
-}
-
static void accel_rule_handle_work(struct work_struct *work)
{
struct mlx5e_ktls_offload_context_rx *priv_rx;
@@ -609,7 +590,6 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
struct mlx5_core_dev *mdev;
struct mlx5e_priv *priv;
int rxq, err;
- u32 rqtn;
tls_ctx = tls_get_ctx(sk);
priv = netdev_priv(netdev);
@@ -635,9 +615,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
priv_rx->sw_stats = &priv->tls->sw_stats;
mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);
- rqtn = mlx5e_rx_res_get_rqtn_direct(priv->rx_res, rxq);
-
- err = mlx5e_ktls_create_tir(mdev, &priv_rx->tir, rqtn);
+ err = mlx5e_rx_res_tls_tir_create(priv->rx_res, rxq, &priv_rx->tir);
if (err)
goto err_create_tir;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 65571593ec5c..41379844eee1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1087,8 +1087,6 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
void mlx5e_close_rq(struct mlx5e_rq *rq)
{
cancel_work_sync(&rq->dim.work);
- if (rq->icosq)
- cancel_work_sync(&rq->icosq->recover_work);
cancel_work_sync(&rq->recover_work);
mlx5e_destroy_rq(rq);
mlx5e_free_rx_descs(rq);
@@ -1216,9 +1214,20 @@ static void mlx5e_icosq_err_cqe_work(struct work_struct *recover_work)
mlx5e_reporter_icosq_cqe_err(sq);
}
+static void mlx5e_async_icosq_err_cqe_work(struct work_struct *recover_work)
+{
+ struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
+ recover_work);
+
+ /* Not implemented yet. */
+
+ netdev_warn(sq->channel->netdev, "async_icosq recovery is not implemented\n");
+}
+
static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
struct mlx5e_sq_param *param,
- struct mlx5e_icosq *sq)
+ struct mlx5e_icosq *sq,
+ work_func_t recover_work_func)
{
void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
struct mlx5_core_dev *mdev = c->mdev;
@@ -1239,7 +1248,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
if (err)
goto err_sq_wq_destroy;
- INIT_WORK(&sq->recover_work, mlx5e_icosq_err_cqe_work);
+ INIT_WORK(&sq->recover_work, recover_work_func);
return 0;
@@ -1575,13 +1584,14 @@ void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
mlx5e_reporter_tx_err_cqe(sq);
}
-int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
- struct mlx5e_sq_param *param, struct mlx5e_icosq *sq)
+static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
+ struct mlx5e_sq_param *param, struct mlx5e_icosq *sq,
+ work_func_t recover_work_func)
{
struct mlx5e_create_sq_param csp = {};
int err;
- err = mlx5e_alloc_icosq(c, param, sq);
+ err = mlx5e_alloc_icosq(c, param, sq, recover_work_func);
if (err)
return err;
@@ -1620,7 +1630,7 @@ void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
synchronize_net(); /* Sync with NAPI. */
}
-void mlx5e_close_icosq(struct mlx5e_icosq *sq)
+static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
{
struct mlx5e_channel *c = sq->channel;
@@ -2084,11 +2094,15 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
spin_lock_init(&c->async_icosq_lock);
- err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq);
+ err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq,
+ mlx5e_async_icosq_err_cqe_work);
if (err)
goto err_close_xdpsq_cq;
- err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
+ mutex_init(&c->icosq_recovery_lock);
+
+ err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq,
+ mlx5e_icosq_err_cqe_work);
if (err)
goto err_close_async_icosq;
@@ -2156,9 +2170,12 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
mlx5e_close_xdpsq(&c->xdpsq);
if (c->xdp)
mlx5e_close_xdpsq(&c->rq_xdpsq);
+ /* The same ICOSQ is used for UMRs for both RQ and XSKRQ. */
+ cancel_work_sync(&c->icosq.recover_work);
mlx5e_close_rq(&c->rq);
mlx5e_close_sqs(c);
mlx5e_close_icosq(&c->icosq);
+ mutex_destroy(&c->icosq_recovery_lock);
mlx5e_close_icosq(&c->async_icosq);
if (c->xdp)
mlx5e_close_cq(&c->rq_xdpsq.cq);
@@ -3724,12 +3741,11 @@ static int set_feature_arfs(struct net_device *netdev, bool enable)
static int mlx5e_handle_feature(struct net_device *netdev,
netdev_features_t *features,
- netdev_features_t wanted_features,
netdev_features_t feature,
mlx5e_feature_handler feature_handler)
{
- netdev_features_t changes = wanted_features ^ netdev->features;
- bool enable = !!(wanted_features & feature);
+ netdev_features_t changes = *features ^ netdev->features;
+ bool enable = !!(*features & feature);
int err;
if (!(changes & feature))
@@ -3737,22 +3753,22 @@ static int mlx5e_handle_feature(struct net_device *netdev,
err = feature_handler(netdev, enable);
if (err) {
+ MLX5E_SET_FEATURE(features, feature, !enable);
netdev_err(netdev, "%s feature %pNF failed, err %d\n",
enable ? "Enable" : "Disable", &feature, err);
return err;
}
- MLX5E_SET_FEATURE(features, feature, enable);
return 0;
}
int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
{
- netdev_features_t oper_features = netdev->features;
+ netdev_features_t oper_features = features;
int err = 0;
#define MLX5E_HANDLE_FEATURE(feature, handler) \
- mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)
+ mlx5e_handle_feature(netdev, &oper_features, feature, handler)
err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro);
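
The en_main.c change above makes the feature handler work on the requested mask directly: changed bits are found by XOR against the currently active features, and when a handler fails its bit is flipped back so the returned mask reflects what was really applied. A standalone sketch of that bookkeeping follows; the feature names and handler are invented for the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FEATURE_LRO     (1u << 0)
#define FEATURE_RXFCS   (1u << 1)

static uint32_t active_features = FEATURE_RXFCS;

static int set_lro(bool enable)
{
        return enable ? -1 : 0;         /* pretend enabling LRO fails */
}

static int handle_feature(uint32_t *features, uint32_t feature,
                          int (*handler)(bool enable))
{
        uint32_t changes = *features ^ active_features;
        bool enable = *features & feature;

        if (!(changes & feature))
                return 0;               /* nothing to do for this bit */

        if (handler(enable)) {
                /* revert the bit so *features reflects the real state */
                if (enable)
                        *features &= ~feature;
                else
                        *features |= feature;
                return -1;
        }
        return 0;
}

int main(void)
{
        uint32_t wanted = FEATURE_LRO | FEATURE_RXFCS;

        handle_feature(&wanted, FEATURE_LRO, set_lro);
        printf("applied mask: %#x\n", wanted);  /* LRO bit cleared again */
        return 0;
}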
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index e58a9ec42553..48895d79796a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1080,6 +1080,10 @@ static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
&MLX5E_STATS_GRP(pme),
&MLX5E_STATS_GRP(channels),
&MLX5E_STATS_GRP(per_port_buff_congest),
+#ifdef CONFIG_MLX5_EN_IPSEC
+ &MLX5E_STATS_GRP(ipsec_sw),
+ &MLX5E_STATS_GRP(ipsec_hw),
+#endif
};
static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 96967b0a2441..793511d5ee4c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -543,13 +543,13 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
u16 klm_entries, u16 index)
{
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- u16 entries, pi, i, header_offset, err, wqe_bbs, new_entries;
+ u16 entries, pi, header_offset, err, wqe_bbs, new_entries;
u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
struct page *page = shampo->last_page;
u64 addr = shampo->last_addr;
struct mlx5e_dma_info *dma_info;
struct mlx5e_umr_wqe *umr_wqe;
- int headroom;
+ int headroom, i;
headroom = rq->buff.headroom;
new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1));
@@ -601,9 +601,7 @@ update_klm:
err_unmap:
while (--i >= 0) {
- if (--index < 0)
- index = shampo->hd_per_wq - 1;
- dma_info = &shampo->info[index];
+ dma_info = &shampo->info[--index];
if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) {
dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE);
mlx5e_page_release(rq, dma_info, true);
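
The en_rx.c hunk above also moves the unwind counter i from u16 to int, which matters for the "while (--i >= 0)" cleanup loop: an unsigned counter wraps instead of going negative, so the condition never becomes false. A tiny demonstration, unrelated to the driver itself:

#include <stdio.h>

static void unwind(int count)
{
        int i = count;          /* must be signed for the reverse loop below */

        while (--i >= 0)
                printf("releasing entry %d\n", i);

        /* Had i been u16, --i would wrap from 0 to 65535 and the condition
         * i >= 0 would always hold, so the loop would never terminate.
         */
}

int main(void)
{
        unwind(3);
        return 0;
}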
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 3d45f4ae80c0..5e454a14428f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1196,21 +1196,16 @@ void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
goto offload_rule_0;
- if (flow_flag_test(flow, CT)) {
- mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
- return;
- }
-
- if (flow_flag_test(flow, SAMPLE)) {
- mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
- return;
- }
-
if (attr->esw_attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
+ if (flow_flag_test(flow, CT))
+ mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
+ else if (flow_flag_test(flow, SAMPLE))
+ mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
+ else
offload_rule_0:
- mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
+ mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}
struct mlx5_flow_handle *
@@ -1445,7 +1440,9 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
metadata);
if (err)
- return err;
+ goto err_out;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
}
}
@@ -1461,13 +1458,15 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
if (attr->chain) {
NL_SET_ERR_MSG_MOD(extack,
"Internal port rule is only supported on chain 0");
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto err_out;
}
if (attr->dest_chain) {
NL_SET_ERR_MSG_MOD(extack,
"Internal port rule offload doesn't support goto action");
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto err_out;
}
int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
@@ -1475,8 +1474,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
flow_flag_test(flow, EGRESS) ?
MLX5E_TC_INT_PORT_EGRESS :
MLX5E_TC_INT_PORT_INGRESS);
- if (IS_ERR(int_port))
- return PTR_ERR(int_port);
+ if (IS_ERR(int_port)) {
+ err = PTR_ERR(int_port);
+ goto err_out;
+ }
esw_attr->int_port = int_port;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index c6cc67cb4f6a..d377ddc70fc7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -130,7 +130,7 @@ static u32 esw_qos_calculate_min_rate_divider(struct mlx5_eswitch *esw,
/* If vports min rate divider is 0 but their group has bw_share configured, then
* need to set bw_share for vports to minimal value.
*/
- if (!group_level && !max_guarantee && group->bw_share)
+ if (!group_level && !max_guarantee && group && group->bw_share)
return 1;
return 0;
}
@@ -423,7 +423,7 @@ static int esw_qos_vport_update_group(struct mlx5_eswitch *esw,
return err;
/* Recalculate bw share weights of old and new groups */
- if (vport->qos.bw_share) {
+ if (vport->qos.bw_share || new_group->bw_share) {
esw_qos_normalize_vports_min_rate(esw, curr_group, extack);
esw_qos_normalize_vports_min_rate(esw, new_group, extack);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index a46455694f7a..32bc08a39925 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -329,14 +329,25 @@ static bool
esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ bool result = false;
int i;
- for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
+ /* Indirect table is supported only for flows with in_port uplink
+ * and the destination is vport on the same eswitch as the uplink,
+ * return false in case at least one of destinations doesn't meet
+ * this criteria.
+ */
+ for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
if (esw_attr->dests[i].rep &&
mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
- esw_attr->dests[i].mdev))
- return true;
- return false;
+ esw_attr->dests[i].mdev)) {
+ result = true;
+ } else {
+ result = false;
+ break;
+ }
+ }
+ return result;
}
static int
@@ -2512,6 +2523,7 @@ static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
struct mlx5_eswitch *esw = master->priv.eswitch;
struct mlx5_flow_table_attr ft_attr = {
.max_fte = 1, .prio = 0, .level = 0,
+ .flags = MLX5_FLOW_TABLE_OTHER_VPORT,
};
struct mlx5_flow_namespace *egress_ns;
struct mlx5_flow_table *acl;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 64f1abc4dc36..3ca998874c50 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -835,6 +835,9 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
health->timer.expires = jiffies + msecs_to_jiffies(poll_interval_ms);
add_timer(&health->timer);
+
+ if (mlx5_core_is_pf(dev) && MLX5_CAP_MCAM_REG(dev, mrtc))
+ queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0);
}
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
@@ -902,8 +905,6 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
INIT_WORK(&health->fatal_report_work, mlx5_fw_fatal_reporter_err_work);
INIT_WORK(&health->report_work, mlx5_fw_reporter_err_work);
INIT_DELAYED_WORK(&health->update_fw_log_ts_work, mlx5_health_log_ts_update);
- if (mlx5_core_is_pf(dev))
- queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
index ad63dd45c8fb..a6592f9c3c05 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -608,4 +608,5 @@ void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev)
if (port_sel->tunnel)
mlx5_destroy_ttc_table(port_sel->inner.ttc);
mlx5_lag_destroy_definers(ldev);
+ memset(port_sel, 0, sizeof(*port_sel));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index 97e5845b4cfd..d5e47630e284 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -121,6 +121,9 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
{
+ if (!mlx5_chains_prios_supported(chains))
+ return 1;
+
if (mlx5_chains_ignore_flow_level_supported(chains))
return UINT_MAX;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
index 0dd96a6b140d..c1df0d3595d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
@@ -31,11 +31,11 @@ static void tout_set(struct mlx5_core_dev *dev, u64 val, enum mlx5_timeouts_type
dev->timeouts->to[type] = val;
}
-static void tout_set_def_val(struct mlx5_core_dev *dev)
+void mlx5_tout_set_def_val(struct mlx5_core_dev *dev)
{
int i;
- for (i = MLX5_TO_FW_PRE_INIT_TIMEOUT_MS; i < MAX_TIMEOUT_TYPES; i++)
+ for (i = 0; i < MAX_TIMEOUT_TYPES; i++)
tout_set(dev, tout_def_sw_val[i], i);
}
@@ -45,7 +45,6 @@ int mlx5_tout_init(struct mlx5_core_dev *dev)
if (!dev->timeouts)
return -ENOMEM;
- tout_set_def_val(dev);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
index 31faa5c17aa9..1c42ead782fa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
@@ -34,6 +34,7 @@ int mlx5_tout_init(struct mlx5_core_dev *dev);
void mlx5_tout_cleanup(struct mlx5_core_dev *dev);
void mlx5_tout_query_iseg(struct mlx5_core_dev *dev);
int mlx5_tout_query_dtor(struct mlx5_core_dev *dev);
+void mlx5_tout_set_def_val(struct mlx5_core_dev *dev);
u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type);
#define mlx5_tout_ms(dev, type) _mlx5_tout_ms(dev, MLX5_TO_##type##_MS)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index a92a92a52346..65083496f913 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -992,11 +992,7 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
if (mlx5_core_is_pf(dev))
pcie_print_link_status(dev->pdev);
- err = mlx5_tout_init(dev);
- if (err) {
- mlx5_core_err(dev, "Failed initializing timeouts, aborting\n");
- return err;
- }
+ mlx5_tout_set_def_val(dev);
/* wait for firmware to accept initialization segments configurations
*/
@@ -1005,13 +1001,13 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
if (err) {
mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n",
mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
- goto err_tout_cleanup;
+ return err;
}
err = mlx5_cmd_init(dev);
if (err) {
mlx5_core_err(dev, "Failed initializing command interface, aborting\n");
- goto err_tout_cleanup;
+ return err;
}
mlx5_tout_query_iseg(dev);
@@ -1075,18 +1071,16 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
mlx5_set_driver_version(dev);
- mlx5_start_health_poll(dev);
-
err = mlx5_query_hca_caps(dev);
if (err) {
mlx5_core_err(dev, "query hca failed\n");
- goto stop_health;
+ goto reclaim_boot_pages;
}
+ mlx5_start_health_poll(dev);
+
return 0;
-stop_health:
- mlx5_stop_health_poll(dev, boot);
reclaim_boot_pages:
mlx5_reclaim_startup_pages(dev);
err_disable_hca:
@@ -1094,8 +1088,6 @@ err_disable_hca:
err_cmd_cleanup:
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_cleanup(dev);
-err_tout_cleanup:
- mlx5_tout_cleanup(dev);
return err;
}
@@ -1114,7 +1106,6 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
mlx5_core_disable_hca(dev, 0);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_cleanup(dev);
- mlx5_tout_cleanup(dev);
return 0;
}
@@ -1476,6 +1467,12 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
mlx5_debugfs_root);
INIT_LIST_HEAD(&priv->traps);
+ err = mlx5_tout_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed initializing timeouts, aborting\n");
+ goto err_timeout_init;
+ }
+
err = mlx5_health_init(dev);
if (err)
goto err_health_init;
@@ -1501,6 +1498,8 @@ err_adev_init:
err_pagealloc_init:
mlx5_health_cleanup(dev);
err_health_init:
+ mlx5_tout_cleanup(dev);
+err_timeout_init:
debugfs_remove(dev->priv.dbg_root);
mutex_destroy(&priv->pgdir_mutex);
mutex_destroy(&priv->alloc_mutex);
@@ -1518,6 +1517,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
mlx5_adev_cleanup(dev);
mlx5_pagealloc_cleanup(dev);
mlx5_health_cleanup(dev);
+ mlx5_tout_cleanup(dev);
debugfs_remove_recursive(dev->priv.dbg_root);
mutex_destroy(&priv->pgdir_mutex);
mutex_destroy(&priv->alloc_mutex);
@@ -1809,12 +1809,13 @@ void mlx5_disable_device(struct mlx5_core_dev *dev)
int mlx5_recover_device(struct mlx5_core_dev *dev)
{
- int ret = -EIO;
+ if (!mlx5_core_is_sf(dev)) {
+ mlx5_pci_disable_device(dev);
+ if (mlx5_pci_slot_reset(dev->pdev) != PCI_ERS_RESULT_RECOVERED)
+ return -EIO;
+ }
- mlx5_pci_disable_device(dev);
- if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
- ret = mlx5_load_one(dev);
- return ret;
+ return mlx5_load_one(dev);
}
static struct pci_driver mlx5_core_driver = {
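
The main.c hunks above move the timeout-table allocation into mlx5_mdev_init() and pair it with a cleanup in mlx5_mdev_uninit() and in the error unwind. The sketch below shows the general shape of that init/teardown symmetry with placeholder steps; it illustrates the pattern only and is not the mlx5 code.

#include <stdio.h>

static int timeouts_init(void)      { printf("timeouts init\n"); return 0; }
static void timeouts_cleanup(void)  { printf("timeouts cleanup\n"); }
static int health_init(void)        { printf("health init\n"); return 0; }
static void health_cleanup(void)    { printf("health cleanup\n"); }
static int pagealloc_init(void)     { printf("pagealloc init\n"); return -1; }
static void pagealloc_cleanup(void) { printf("pagealloc cleanup\n"); }

static int dev_init(void)
{
        int err;

        err = timeouts_init();
        if (err)
                goto err_timeouts;

        err = health_init();
        if (err)
                goto err_health;

        err = pagealloc_init();
        if (err)
                goto err_pagealloc;

        return 0;

err_pagealloc:
        health_cleanup();
err_health:
        timeouts_cleanup();
err_timeouts:
        return err;
}

static void dev_uninit(void)
{
        /* teardown runs in the reverse order of initialization */
        pagealloc_cleanup();
        health_cleanup();
        timeouts_cleanup();
}

int main(void)
{
        if (dev_init())
                printf("init failed, partial state already unwound\n");
        else
                dev_uninit();
        return 0;
}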
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 830444f927d4..bcee30f5de0a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -356,8 +356,8 @@ static struct mlx5_irq *irq_pool_request_affinity(struct mlx5_irq_pool *pool,
new_irq = irq_pool_create_irq(pool, affinity);
if (IS_ERR(new_irq)) {
if (!least_loaded_irq) {
- mlx5_core_err(pool->dev, "Didn't find IRQ for cpu = %u\n",
- cpumask_first(affinity));
+ mlx5_core_err(pool->dev, "Didn't find a matching IRQ. err = %ld\n",
+ PTR_ERR(new_irq));
mutex_unlock(&pool->lock);
return new_irq;
}
@@ -398,7 +398,7 @@ irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
cpumask_copy(irq->mask, affinity);
if (!irq_pool_is_sf_pool(pool) && !pool->xa_num_irqs.max &&
cpumask_empty(irq->mask))
- cpumask_set_cpu(0, irq->mask);
+ cpumask_set_cpu(cpumask_first(cpu_online_mask), irq->mask);
irq_set_affinity_hint(irq->irqn, irq->mask);
unlock:
mutex_unlock(&pool->lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index 8cbd36c82b3b..c54cc45f63dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019 Mellanox Technologies. */
#include <linux/mlx5/eswitch.h>
+#include <linux/err.h>
#include "dr_types.h"
#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \
@@ -72,9 +73,9 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
}
dmn->uar = mlx5_get_uars_page(dmn->mdev);
- if (!dmn->uar) {
+ if (IS_ERR(dmn->uar)) {
mlx5dr_err(dmn, "Couldn't allocate UAR\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(dmn->uar);
goto clean_pd;
}
@@ -163,9 +164,7 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn)
{
- return dr_domain_query_vport(dmn,
- dmn->info.caps.is_ecpf ? MLX5_VPORT_ECPF : 0,
- false,
+ return dr_domain_query_vport(dmn, 0, false,
&dmn->info.caps.vports.esw_manager_caps);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 5925db386b1b..03e5bad4e405 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -2153,7 +2153,7 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
local_port = mlxsw_reg_pude_local_port_get(pude_pl);
- if (WARN_ON_ONCE(local_port >= max_ports))
+ if (WARN_ON_ONCE(!local_port || local_port >= max_ports))
return;
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port)
@@ -3290,10 +3290,10 @@ mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core)
u8 max_rif_mac_profiles;
if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES))
- return -EIO;
-
- max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core,
- MAX_RIF_MAC_PROFILES);
+ max_rif_mac_profiles = 1;
+ else
+ max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core,
+ MAX_RIF_MAC_PROFILES);
devlink_resource_size_params_init(&size_params, max_rif_mac_profiles,
max_rif_mac_profiles, 1,
DEVLINK_RESOURCE_UNIT_ENTRY);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 217e3b351dfe..c34833ff1dde 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -8494,7 +8494,8 @@ mlxsw_sp_rif_mac_profile_replace(struct mlxsw_sp *mlxsw_sp,
u8 mac_profile;
int err;
- if (!mlxsw_sp_rif_mac_profile_is_shared(rif))
+ if (!mlxsw_sp_rif_mac_profile_is_shared(rif) &&
+ !mlxsw_sp_rif_mac_profile_find(mlxsw_sp, new_mac))
return mlxsw_sp_rif_mac_profile_edit(rif, new_mac);
err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, new_mac,
diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
index 2e25798c610e..7f49042484bd 100644
--- a/drivers/net/ethernet/micrel/ks8851_par.c
+++ b/drivers/net/ethernet/micrel/ks8851_par.c
@@ -321,6 +321,8 @@ static int ks8851_probe_par(struct platform_device *pdev)
return ret;
netdev->irq = platform_get_irq(pdev, 0);
+ if (netdev->irq < 0)
+ return netdev->irq;
return ks8851_probe_common(netdev, dev, msg_enable);
}
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 4fc97823bc84..7d7647481f70 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -914,8 +914,7 @@ static int lan743x_phy_reset(struct lan743x_adapter *adapter)
}
static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter,
- u8 duplex, u16 local_adv,
- u16 remote_adv)
+ u16 local_adv, u16 remote_adv)
{
struct lan743x_phy *phy = &adapter->phy;
u8 cap;
@@ -943,7 +942,6 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
phy_print_status(phydev);
if (phydev->state == PHY_RUNNING) {
- struct ethtool_link_ksettings ksettings;
int remote_advertisement = 0;
int local_advertisement = 0;
@@ -980,18 +978,14 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
}
lan743x_csr_write(adapter, MAC_CR, data);
- memset(&ksettings, 0, sizeof(ksettings));
- phy_ethtool_get_link_ksettings(netdev, &ksettings);
local_advertisement =
linkmode_adv_to_mii_adv_t(phydev->advertising);
remote_advertisement =
linkmode_adv_to_mii_adv_t(phydev->lp_advertising);
- lan743x_phy_update_flowcontrol(adapter,
- ksettings.base.duplex,
- local_advertisement,
+ lan743x_phy_update_flowcontrol(adapter, local_advertisement,
remote_advertisement);
- lan743x_ptp_update_latency(adapter, ksettings.base.speed);
+ lan743x_ptp_update_latency(adapter, phydev->speed);
}
}
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index 34b971ff8ef8..078d6a5a0768 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -480,16 +480,16 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc,
if (err)
goto out;
- err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
- &hwc_wq->msg_buf);
- if (err)
- goto out;
-
hwc_wq->hwc = hwc;
hwc_wq->gdma_wq = queue;
hwc_wq->queue_depth = q_depth;
hwc_wq->hwc_cq = hwc_cq;
+ err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
+ &hwc_wq->msg_buf);
+ if (err)
+ goto out;
+
*hwc_wq_ptr = hwc_wq;
return 0;
out:
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index e6c18b598d5c..1e4ad953cffb 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1278,6 +1278,225 @@ int ocelot_fdb_dump(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL(ocelot_fdb_dump);
+static void ocelot_populate_l2_ptp_trap_key(struct ocelot_vcap_filter *trap)
+{
+ trap->key_type = OCELOT_VCAP_KEY_ETYPE;
+ *(__be16 *)trap->key.etype.etype.value = htons(ETH_P_1588);
+ *(__be16 *)trap->key.etype.etype.mask = htons(0xffff);
+}
+
+static void
+ocelot_populate_ipv4_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
+{
+ trap->key_type = OCELOT_VCAP_KEY_IPV4;
+ trap->key.ipv4.dport.value = PTP_EV_PORT;
+ trap->key.ipv4.dport.mask = 0xffff;
+}
+
+static void
+ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
+{
+ trap->key_type = OCELOT_VCAP_KEY_IPV6;
+ trap->key.ipv6.dport.value = PTP_EV_PORT;
+ trap->key.ipv6.dport.mask = 0xffff;
+}
+
+static void
+ocelot_populate_ipv4_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
+{
+ trap->key_type = OCELOT_VCAP_KEY_IPV4;
+ trap->key.ipv4.dport.value = PTP_GEN_PORT;
+ trap->key.ipv4.dport.mask = 0xffff;
+}
+
+static void
+ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
+{
+ trap->key_type = OCELOT_VCAP_KEY_IPV6;
+ trap->key.ipv6.dport.value = PTP_GEN_PORT;
+ trap->key.ipv6.dport.mask = 0xffff;
+}
+
+static int ocelot_trap_add(struct ocelot *ocelot, int port,
+ unsigned long cookie,
+ void (*populate)(struct ocelot_vcap_filter *f))
+{
+ struct ocelot_vcap_block *block_vcap_is2;
+ struct ocelot_vcap_filter *trap;
+ bool new = false;
+ int err;
+
+ block_vcap_is2 = &ocelot->block[VCAP_IS2];
+
+ trap = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, cookie,
+ false);
+ if (!trap) {
+ trap = kzalloc(sizeof(*trap), GFP_KERNEL);
+ if (!trap)
+ return -ENOMEM;
+
+ populate(trap);
+ trap->prio = 1;
+ trap->id.cookie = cookie;
+ trap->id.tc_offload = false;
+ trap->block_id = VCAP_IS2;
+ trap->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ trap->lookup = 0;
+ trap->action.cpu_copy_ena = true;
+ trap->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
+ trap->action.port_mask = 0;
+ new = true;
+ }
+
+ trap->ingress_port_mask |= BIT(port);
+
+ if (new)
+ err = ocelot_vcap_filter_add(ocelot, trap, NULL);
+ else
+ err = ocelot_vcap_filter_replace(ocelot, trap);
+ if (err) {
+ trap->ingress_port_mask &= ~BIT(port);
+ if (!trap->ingress_port_mask)
+ kfree(trap);
+ return err;
+ }
+
+ return 0;
+}
+
+static int ocelot_trap_del(struct ocelot *ocelot, int port,
+ unsigned long cookie)
+{
+ struct ocelot_vcap_block *block_vcap_is2;
+ struct ocelot_vcap_filter *trap;
+
+ block_vcap_is2 = &ocelot->block[VCAP_IS2];
+
+ trap = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, cookie,
+ false);
+ if (!trap)
+ return 0;
+
+ trap->ingress_port_mask &= ~BIT(port);
+ if (!trap->ingress_port_mask)
+ return ocelot_vcap_filter_del(ocelot, trap);
+
+ return ocelot_vcap_filter_replace(ocelot, trap);
+}
+
+static int ocelot_l2_ptp_trap_add(struct ocelot *ocelot, int port)
+{
+ unsigned long l2_cookie = ocelot->num_phys_ports + 1;
+
+ return ocelot_trap_add(ocelot, port, l2_cookie,
+ ocelot_populate_l2_ptp_trap_key);
+}
+
+static int ocelot_l2_ptp_trap_del(struct ocelot *ocelot, int port)
+{
+ unsigned long l2_cookie = ocelot->num_phys_ports + 1;
+
+ return ocelot_trap_del(ocelot, port, l2_cookie);
+}
+
+static int ocelot_ipv4_ptp_trap_add(struct ocelot *ocelot, int port)
+{
+ unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2;
+ unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3;
+ int err;
+
+ err = ocelot_trap_add(ocelot, port, ipv4_ev_cookie,
+ ocelot_populate_ipv4_ptp_event_trap_key);
+ if (err)
+ return err;
+
+ err = ocelot_trap_add(ocelot, port, ipv4_gen_cookie,
+ ocelot_populate_ipv4_ptp_general_trap_key);
+ if (err)
+ ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
+
+ return err;
+}
+
+static int ocelot_ipv4_ptp_trap_del(struct ocelot *ocelot, int port)
+{
+ unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2;
+ unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3;
+ int err;
+
+ err = ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
+ err |= ocelot_trap_del(ocelot, port, ipv4_gen_cookie);
+ return err;
+}
+
+static int ocelot_ipv6_ptp_trap_add(struct ocelot *ocelot, int port)
+{
+ unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4;
+ unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5;
+ int err;
+
+ err = ocelot_trap_add(ocelot, port, ipv6_ev_cookie,
+ ocelot_populate_ipv6_ptp_event_trap_key);
+ if (err)
+ return err;
+
+ err = ocelot_trap_add(ocelot, port, ipv6_gen_cookie,
+ ocelot_populate_ipv6_ptp_general_trap_key);
+ if (err)
+ ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
+
+ return err;
+}
+
+static int ocelot_ipv6_ptp_trap_del(struct ocelot *ocelot, int port)
+{
+ unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4;
+ unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5;
+ int err;
+
+ err = ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
+ err |= ocelot_trap_del(ocelot, port, ipv6_gen_cookie);
+ return err;
+}
+
+static int ocelot_setup_ptp_traps(struct ocelot *ocelot, int port,
+ bool l2, bool l4)
+{
+ int err;
+
+ if (l2)
+ err = ocelot_l2_ptp_trap_add(ocelot, port);
+ else
+ err = ocelot_l2_ptp_trap_del(ocelot, port);
+ if (err)
+ return err;
+
+ if (l4) {
+ err = ocelot_ipv4_ptp_trap_add(ocelot, port);
+ if (err)
+ goto err_ipv4;
+
+ err = ocelot_ipv6_ptp_trap_add(ocelot, port);
+ if (err)
+ goto err_ipv6;
+ } else {
+ err = ocelot_ipv4_ptp_trap_del(ocelot, port);
+
+ err |= ocelot_ipv6_ptp_trap_del(ocelot, port);
+ }
+ if (err)
+ return err;
+
+ return 0;
+
+err_ipv6:
+ ocelot_ipv4_ptp_trap_del(ocelot, port);
+err_ipv4:
+ if (l2)
+ ocelot_l2_ptp_trap_del(ocelot, port);
+ return err;
+}
+
int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr)
{
return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config,
@@ -1288,7 +1507,9 @@ EXPORT_SYMBOL(ocelot_hwstamp_get);
int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ bool l2 = false, l4 = false;
struct hwtstamp_config cfg;
+ int err;
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
@@ -1320,28 +1541,42 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
switch (cfg.rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
- case HWTSTAMP_FILTER_ALL:
- case HWTSTAMP_FILTER_SOME:
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ l4 = true;
+ break;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ l2 = true;
+ break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ l2 = true;
+ l4 = true;
break;
default:
mutex_unlock(&ocelot->ptp_lock);
return -ERANGE;
}
+ err = ocelot_setup_ptp_traps(ocelot, port, l2, l4);
+ if (err) {
+ mutex_unlock(&ocelot->ptp_lock);
+ return err;
+ }
+
+ if (l2 && l4)
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ else if (l2)
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ else if (l4)
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ else
+ cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+
/* Commit back the result & save it */
memcpy(&ocelot->hwtstamp_config, &cfg, sizeof(cfg));
mutex_unlock(&ocelot->ptp_lock);
@@ -1444,7 +1679,10 @@ int ocelot_get_ts_info(struct ocelot *ocelot, int port,
SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
BIT(HWTSTAMP_TX_ONESTEP_SYNC);
- info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
return 0;
}
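
The ocelot.c additions above share one VCAP trap per cookie across ports and use ingress_port_mask as the reference count: the first port installs the filter, later ports only set their bit, and the filter is removed once the last bit is cleared. A stripped-down userspace sketch of that bookkeeping follows; the structure and names are illustrative only, not the driver's.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct trap_rule {
        uint32_t ingress_port_mask;
};

static struct trap_rule *ptp_trap;      /* one shared rule in this example */

static int trap_add(int port)
{
        if (!ptp_trap) {
                ptp_trap = calloc(1, sizeof(*ptp_trap));
                if (!ptp_trap)
                        return -1;
        }
        ptp_trap->ingress_port_mask |= 1u << port;
        return 0;
}

static void trap_del(int port)
{
        if (!ptp_trap)
                return;
        ptp_trap->ingress_port_mask &= ~(1u << port);
        if (!ptp_trap->ingress_port_mask) {     /* last user gone */
                free(ptp_trap);
                ptp_trap = NULL;
        }
}

int main(void)
{
        trap_add(0);
        trap_add(2);
        trap_del(0);
        printf("rule present: %s\n", ptp_trap ? "yes" : "no");
        trap_del(2);
        printf("rule present: %s\n", ptp_trap ? "yes" : "no");
        return 0;
}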
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c
index 99d7376a70a7..337cd08b1a54 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.c
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.c
@@ -1217,6 +1217,22 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot,
}
EXPORT_SYMBOL(ocelot_vcap_filter_del);
+int ocelot_vcap_filter_replace(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *filter)
+{
+ struct ocelot_vcap_block *block = &ocelot->block[filter->block_id];
+ int index;
+
+ index = ocelot_vcap_block_get_filter_index(block, filter);
+ if (index < 0)
+ return index;
+
+ vcap_entry_set(ocelot, index, filter);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_vcap_filter_replace);
+
int ocelot_vcap_filter_stats_update(struct ocelot *ocelot,
struct ocelot_vcap_filter *filter)
{
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index ca4686094701..0a02d8bd0a3e 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -120,7 +120,7 @@ static const struct net_device_ops xtsonic_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
-static int __init sonic_probe1(struct net_device *dev)
+static int sonic_probe1(struct net_device *dev)
{
unsigned int silicon_revision;
struct sonic_local *lp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index df203738511b..0b1865e9f0b5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -565,7 +565,6 @@ struct nfp_net_dp {
* @exn_name: Name for Exception interrupt
* @shared_handler: Handler for shared interrupts
* @shared_name: Name for shared interrupt
- * @me_freq_mhz: ME clock_freq (MHz)
* @reconfig_lock: Protects @reconfig_posted, @reconfig_timer_active,
* @reconfig_sync_present and HW reconfiguration request
* regs/machinery from async requests (sync must take
@@ -650,8 +649,6 @@ struct nfp_net {
irq_handler_t shared_handler;
char shared_name[IFNAMSIZ + 8];
- u32 me_freq_mhz;
-
bool link_up;
spinlock_t link_status_lock;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 1de076f55740..cf7882933993 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1344,7 +1344,7 @@ static int nfp_net_set_coalesce(struct net_device *netdev,
* ME timestamp ticks. There are 16 ME clock cycles for each timestamp
* count.
*/
- factor = nn->me_freq_mhz / 16;
+ factor = nn->tlv_caps.me_freq_mhz / 16;
/* Each pair of (usecs, max_frames) fields specifies that interrupts
* should be coalesced until
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index d7ac0307797f..34c0d2ddf9ef 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -803,8 +803,10 @@ int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size)
return -ENOMEM;
cache = kzalloc(sizeof(*cache), GFP_KERNEL);
- if (!cache)
+ if (!cache) {
+ nfp_cpp_area_free(area);
return -ENOMEM;
+ }
cache->id = 0;
cache->addr = 0;
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index cfeb7620ae20..07a00dd9cfe0 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -1209,7 +1209,7 @@ static void *nixge_get_nvmem_address(struct device *dev)
cell = nvmem_cell_get(dev, "address");
if (IS_ERR(cell))
- return NULL;
+ return cell;
mac = nvmem_cell_read(cell, &cell_size);
nvmem_cell_put(cell);
@@ -1282,7 +1282,7 @@ static int nixge_probe(struct platform_device *pdev)
ndev->max_mtu = NIXGE_JUMBO_MTU;
mac_addr = nixge_get_nvmem_address(&pdev->dev);
- if (mac_addr && is_valid_ether_addr(mac_addr)) {
+ if (!IS_ERR(mac_addr) && is_valid_ether_addr(mac_addr)) {
eth_hw_addr_set(ndev, mac_addr);
kfree(mac_addr);
} else {
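
Both the dr_domain.c and nixge.c fixes above deal with helpers that return an encoded error pointer rather than NULL, so callers must test with IS_ERR() and extract the errno with PTR_ERR(). Below is a userspace approximation of that convention, assuming the usual encoding of small negative errno values in the pointer; the helper and its caller are made up for the example.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO       4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static void *get_mac_address(int have_cell)
{
        static unsigned char mac[6] = { 0x02, 0, 0, 0, 0, 1 };

        if (!have_cell)
                return ERR_PTR(-ENOENT);        /* propagate the error pointer */
        return mac;
}

int main(void)
{
        void *mac = get_mac_address(0);

        if (IS_ERR(mac))
                printf("no MAC in nvmem: %ld\n", PTR_ERR(mac));
        else
                printf("got MAC\n");
        return 0;
}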
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 63f8a8163b5f..2ff7be17e5af 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -3135,7 +3135,7 @@ int ionic_lif_init(struct ionic_lif *lif)
return -EINVAL;
}
- lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
+ lif->dbid_inuse = bitmap_zalloc(lif->dbid_count, GFP_KERNEL);
if (!lif->dbid_inuse) {
dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index a97f691839e0..6958adeca86d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -1045,7 +1045,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
if (!parities)
continue;
- for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
+ for (j = 0, bit_idx = 0; bit_idx < 32 && j < 32; j++) {
struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
if (qed_int_is_parity_flag(p_hwfn, p_bit) &&
@@ -1083,7 +1083,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
* to current group, making them responsible for the
* previous assertion.
*/
- for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
+ for (j = 0, bit_idx = 0; bit_idx < 32 && j < 32; j++) {
long unsigned int bitmask;
u8 bit, bit_len;
@@ -1382,7 +1382,7 @@ static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
for (i = 0; i < NUM_ATTN_REGS; i++) {
/* j is array index, k is bit index */
- for (j = 0, k = 0; k < 32; j++) {
+ for (j = 0, k = 0; k < 32 && j < 32; j++) {
struct aeu_invert_reg_bit *p_aeu;
p_aeu = &aeu_descs[i].bits[j];
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 065e9004598e..999abcfe3310 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1643,6 +1643,13 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
data_split = true;
}
} else {
+ if (unlikely(skb->len > ETH_TX_MAX_NON_LSO_PKT_LEN)) {
+ DP_ERR(edev, "Unexpected non LSO skb length = 0x%x\n", skb->len);
+ qede_free_failed_tx_pkt(txq, first_bd, 0, false);
+ qede_update_tx_producer(txq);
+ return NETDEV_TX_OK;
+ }
+
val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
}
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 1e6d72adfe43..71523d747e93 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3480,20 +3480,19 @@ static int ql_adapter_up(struct ql3_adapter *qdev)
spin_lock_irqsave(&qdev->hw_lock, hw_flags);
- err = ql_wait_for_drvr_lock(qdev);
- if (err) {
- err = ql_adapter_initialize(qdev);
- if (err) {
- netdev_err(ndev, "Unable to initialize adapter\n");
- goto err_init;
- }
- netdev_err(ndev, "Releasing driver lock\n");
- ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
- } else {
+ if (!ql_wait_for_drvr_lock(qdev)) {
netdev_err(ndev, "Could not acquire driver lock\n");
+ err = -ENODEV;
goto err_lock;
}
+ err = ql_adapter_initialize(qdev);
+ if (err) {
+ netdev_err(ndev, "Unable to initialize adapter\n");
+ goto err_init;
+ }
+ ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
+
spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
set_bit(QL_ADAPTER_UP, &qdev->flags);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index d51bac7ba5af..bd0607680329 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -1077,8 +1077,14 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
context_id = recv_ctx->context_id;
num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS;
- ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
- QLCNIC_CMD_ADD_RCV_RINGS);
+ err = ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_ADD_RCV_RINGS);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to alloc mbx args %d\n", err);
+ return err;
+ }
+
cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16);
/* set up status rings, mbx 2-81 */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 7160b42f51dd..d0111cb3b40e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -201,7 +201,7 @@ int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *,
struct qlcnic_info *, u16);
int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8);
void qlcnic_sriov_free_vlans(struct qlcnic_adapter *);
-void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
+int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *);
void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *,
struct qlcnic_vf_info *, u16);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index dd03be3fc82a..42a44c97572a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -432,7 +432,7 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
- int i, num_vlans;
+ int i, num_vlans, ret;
u16 *vlans;
if (sriov->allowed_vlans)
@@ -443,7 +443,9 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
sriov->num_allowed_vlans);
- qlcnic_sriov_alloc_vlans(adapter);
+ ret = qlcnic_sriov_alloc_vlans(adapter);
+ if (ret)
+ return ret;
if (!sriov->any_vlan)
return 0;
@@ -2154,7 +2156,7 @@ static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
return err;
}
-void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
+int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
struct qlcnic_vf_info *vf;
@@ -2164,7 +2166,11 @@ void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
vf = &sriov->vf_info[i];
vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
sizeof(*vf->sriov_vlans), GFP_KERNEL);
+ if (!vf->sriov_vlans)
+ return -ENOMEM;
}
+
+ return 0;
}
void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 447720b93e5a..e90fa97c0ae6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -597,7 +597,9 @@ static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
if (err)
goto del_flr_queue;
- qlcnic_sriov_alloc_vlans(adapter);
+ err = qlcnic_sriov_alloc_vlans(adapter);
+ if (err)
+ goto del_flr_queue;
return err;
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index bbe21db20417..86c44bc5f73f 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -5217,8 +5217,8 @@ static int rtl_get_ether_clk(struct rtl8169_private *tp)
static void rtl_init_mac_address(struct rtl8169_private *tp)
{
+ u8 mac_addr[ETH_ALEN] __aligned(2) = {};
struct net_device *dev = tp->dev;
- u8 mac_addr[ETH_ALEN];
int rc;
rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr);
@@ -5233,7 +5233,8 @@ static void rtl_init_mac_address(struct rtl8169_private *tp)
if (is_valid_ether_addr(mac_addr))
goto done;
- eth_hw_addr_random(dev);
+ eth_random_addr(mac_addr);
+ dev->addr_assign_type = NET_ADDR_RANDOM;
dev_warn(tp_to_dev(tp), "can't read MAC address, setting random one\n");
done:
eth_hw_addr_set(dev, mac_addr);
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index 6aa81229b68a..e77a5cb4e40d 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -609,6 +609,9 @@ static size_t ef100_update_stats(struct efx_nic *efx,
ef100_common_stat_mask(mask);
ef100_ethtool_stat_mask(mask);
+ if (!mc_stats)
+ return 0;
+
efx_nic_copy_stats(efx, mc_stats);
efx_nic_update_stats(ef100_stat_desc, EF100_STAT_COUNT, mask,
stats, mc_stats, false);
diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c
index 966f13e7475d..11a6aee852e9 100644
--- a/drivers/net/ethernet/sfc/falcon/rx.c
+++ b/drivers/net/ethernet/sfc/falcon/rx.c
@@ -728,7 +728,10 @@ static void ef4_init_rx_recycle_ring(struct ef4_nic *efx,
efx->rx_bufs_per_page);
rx_queue->page_ring = kcalloc(page_ring_size,
sizeof(*rx_queue->page_ring), GFP_KERNEL);
- rx_queue->page_ptr_mask = page_ring_size - 1;
+ if (!rx_queue->page_ring)
+ rx_queue->page_ptr_mask = 0;
+ else
+ rx_queue->page_ptr_mask = page_ring_size - 1;
}
void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue)
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index 68fc7d317693..0983abc0cc5f 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -150,7 +150,10 @@ static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
efx->rx_bufs_per_page);
rx_queue->page_ring = kcalloc(page_ring_size,
sizeof(*rx_queue->page_ring), GFP_KERNEL);
- rx_queue->page_ptr_mask = page_ring_size - 1;
+ if (!rx_queue->page_ring)
+ rx_queue->page_ptr_mask = 0;
+ else
+ rx_queue->page_ptr_mask = page_ring_size - 1;
}
static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 89381f796985..dd6f69ced4ee 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -2072,6 +2072,11 @@ static int smc911x_drv_probe(struct platform_device *pdev)
ndev->dma = (unsigned char)-1;
ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq < 0) {
+ ret = ndev->irq;
+ goto release_both;
+ }
+
lp = netdev_priv(ndev);
lp->netdev = ndev;
#ifdef SMC_DYNAMIC_BUS_CONFIG
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 6924a6aacbd5..c469abc91fa1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -33,6 +33,7 @@ struct rk_gmac_ops {
void (*set_rgmii_speed)(struct rk_priv_data *bsp_priv, int speed);
void (*set_rmii_speed)(struct rk_priv_data *bsp_priv, int speed);
void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv);
+ bool regs_valid;
u32 regs[];
};
@@ -1092,6 +1093,7 @@ static const struct rk_gmac_ops rk3568_ops = {
.set_to_rmii = rk3568_set_to_rmii,
.set_rgmii_speed = rk3568_set_gmac_speed,
.set_rmii_speed = rk3568_set_gmac_speed,
+ .regs_valid = true,
.regs = {
0xfe2a0000, /* gmac0 */
0xfe010000, /* gmac1 */
@@ -1383,7 +1385,7 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
* to be distinguished.
*/
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res) {
+ if (res && ops->regs_valid) {
int i = 0;
while (ops->regs[i]) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
index 66fc8be34bb7..e2e0f977875d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -26,7 +26,7 @@
#define ETHER_CLK_SEL_FREQ_SEL_125M (BIT(9) | BIT(8))
#define ETHER_CLK_SEL_FREQ_SEL_50M BIT(9)
#define ETHER_CLK_SEL_FREQ_SEL_25M BIT(8)
-#define ETHER_CLK_SEL_FREQ_SEL_2P5M BIT(0)
+#define ETHER_CLK_SEL_FREQ_SEL_2P5M 0
#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_IN BIT(0)
#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC BIT(10)
#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV BIT(11)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 43eead726886..873b9e3e5da2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -172,6 +172,19 @@ struct stmmac_flow_entry {
int is_l4;
};
+/* Rx Frame Steering */
+enum stmmac_rfs_type {
+ STMMAC_RFS_T_VLAN,
+ STMMAC_RFS_T_MAX,
+};
+
+struct stmmac_rfs_entry {
+ unsigned long cookie;
+ int in_use;
+ int type;
+ int tc;
+};
+
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
u32 tx_coal_frames[MTL_MAX_TX_QUEUES];
@@ -289,6 +302,10 @@ struct stmmac_priv {
struct stmmac_tc_entry *tc_entries;
unsigned int flow_entries_max;
struct stmmac_flow_entry *flow_entries;
+ unsigned int rfs_entries_max[STMMAC_RFS_T_MAX];
+ unsigned int rfs_entries_cnt[STMMAC_RFS_T_MAX];
+ unsigned int rfs_entries_total;
+ struct stmmac_rfs_entry *rfs_entries;
/* Pulse Per Second output */
struct stmmac_pps_cfg pps[STMMAC_PPS_MAX];
@@ -314,6 +331,7 @@ int stmmac_mdio_reset(struct mii_bus *mii);
int stmmac_xpcs_setup(struct mii_bus *mii);
void stmmac_set_ethtool_ops(struct net_device *netdev);
+int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags);
void stmmac_ptp_register(struct stmmac_priv *priv);
void stmmac_ptp_unregister(struct stmmac_priv *priv);
int stmmac_open(struct net_device *dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 2eb284576336..8ded4be08b00 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -50,6 +50,13 @@
#include "dwxgmac2.h"
#include "hwif.h"
+/* As long as the interface is active, we keep the timestamping counter enabled
+ * with fine resolution and binary rollover. This avoids non-monotonic behavior
+ * (clock jumps) when changing timestamping settings at runtime.
+ */
+#define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
+ PTP_TCR_TSCTRLSSR)
+
#define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
@@ -613,8 +620,6 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
struct stmmac_priv *priv = netdev_priv(dev);
struct hwtstamp_config config;
- struct timespec64 now;
- u64 temp = 0;
u32 ptp_v2 = 0;
u32 tstamp_all = 0;
u32 ptp_over_ipv4_udp = 0;
@@ -623,11 +628,6 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
u32 snap_type_sel = 0;
u32 ts_master_en = 0;
u32 ts_event_en = 0;
- u32 sec_inc = 0;
- u32 value = 0;
- bool xmac;
-
- xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
netdev_alert(priv->dev, "No support for HW time stamping\n");
@@ -789,42 +789,17 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
- if (!priv->hwts_tx_en && !priv->hwts_rx_en)
- stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
- else {
- value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
- tstamp_all | ptp_v2 | ptp_over_ethernet |
- ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
- ts_master_en | snap_type_sel);
- stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
-
- /* program Sub Second Increment reg */
- stmmac_config_sub_second_increment(priv,
- priv->ptpaddr, priv->plat->clk_ptp_rate,
- xmac, &sec_inc);
- temp = div_u64(1000000000ULL, sec_inc);
-
- /* Store sub second increment and flags for later use */
- priv->sub_second_inc = sec_inc;
- priv->systime_flags = value;
-
- /* calculate default added value:
- * formula is :
- * addend = (2^32)/freq_div_ratio;
- * where, freq_div_ratio = 1e9ns/sec_inc
- */
- temp = (u64)(temp << 32);
- priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
- stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
-
- /* initialize system time */
- ktime_get_real_ts64(&now);
+ priv->systime_flags = STMMAC_HWTS_ACTIVE;
- /* lower 32 bits of tv_sec are safe until y2106 */
- stmmac_init_systime(priv, priv->ptpaddr,
- (u32)now.tv_sec, now.tv_nsec);
+ if (priv->hwts_tx_en || priv->hwts_rx_en) {
+ priv->systime_flags |= tstamp_all | ptp_v2 |
+ ptp_over_ethernet | ptp_over_ipv6_udp |
+ ptp_over_ipv4_udp | ts_event_en |
+ ts_master_en | snap_type_sel;
}
+ stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
+
memcpy(&priv->tstamp_config, &config, sizeof(config));
return copy_to_user(ifr->ifr_data, &config,
@@ -853,6 +828,66 @@ static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
}
/**
+ * stmmac_init_tstamp_counter - init hardware timestamping counter
+ * @priv: driver private structure
+ * @systime_flags: timestamping flags
+ * Description:
+ * Initialize hardware counter for packet timestamping.
+ * This is valid as long as the interface is open and not suspended.
+ * Will be rerun after resuming from suspend, in which case the timestamping
+ * flags updated by stmmac_hwtstamp_set() also need to be restored.
+ */
+int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
+{
+ bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+ struct timespec64 now;
+ u32 sec_inc = 0;
+ u64 temp = 0;
+ int ret;
+
+ if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
+ return -EOPNOTSUPP;
+
+ ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+ if (ret < 0) {
+ netdev_warn(priv->dev,
+ "failed to enable PTP reference clock: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
+ priv->systime_flags = systime_flags;
+
+ /* program Sub Second Increment reg */
+ stmmac_config_sub_second_increment(priv, priv->ptpaddr,
+ priv->plat->clk_ptp_rate,
+ xmac, &sec_inc);
+ temp = div_u64(1000000000ULL, sec_inc);
+
+ /* Store sub second increment for later use */
+ priv->sub_second_inc = sec_inc;
+
+ /* calculate default added value:
+ * formula is :
+ * addend = (2^32)/freq_div_ratio;
+ * where, freq_div_ratio = 1e9ns/sec_inc
+ */
+ temp = (u64)(temp << 32);
+ priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
+ stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
+
+ /* initialize system time */
+ ktime_get_real_ts64(&now);
+
+ /* lower 32 bits of tv_sec are safe until y2106 */
+ stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
+
+/**
* stmmac_init_ptp - init PTP
* @priv: driver private structure
* Description: this is to verify if the HW supports the PTPv1 or PTPv2.
@@ -862,9 +897,11 @@ static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+ int ret;
- if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
- return -EOPNOTSUPP;
+ ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
+ if (ret)
+ return ret;
priv->adv_ts = 0;
/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
@@ -1424,16 +1461,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+ if (priv->dma_cap.addr64 <= 32)
+ gfp |= GFP_DMA32;
if (!buf->page) {
- buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->page)
return -ENOMEM;
buf->page_offset = stmmac_rx_offset(priv);
}
if (priv->sph && !buf->sec_page) {
- buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->sec_page)
return -ENOMEM;
@@ -3272,10 +3313,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
stmmac_mmc_setup(priv);
if (init_ptp) {
- ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
- if (ret < 0)
- netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
-
ret = stmmac_init_ptp(priv);
if (ret == -EOPNOTSUPP)
netdev_warn(priv->dev, "PTP not supported by HW\n");
@@ -3769,6 +3806,8 @@ int stmmac_release(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
u32 chan;
+ netif_tx_disable(dev);
+
if (device_may_wakeup(priv->device))
phylink_speed_down(priv->phylink, false);
/* Stop and disconnect the PHY */
@@ -4447,6 +4486,10 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
int dirty = stmmac_rx_dirty(priv, queue);
unsigned int entry = rx_q->dirty_rx;
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+ if (priv->dma_cap.addr64 <= 32)
+ gfp |= GFP_DMA32;
while (dirty-- > 0) {
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
@@ -4459,13 +4502,13 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
p = rx_q->dma_rx + entry;
if (!buf->page) {
- buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->page)
break;
}
if (priv->sph && !buf->sec_page) {
- buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->sec_page)
break;
@@ -5161,12 +5204,13 @@ read_again:
if (likely(!(status & rx_not_ls)) &&
(likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
unlikely(status != llc_snap))) {
- if (buf2_len)
+ if (buf2_len) {
buf2_len -= ETH_FCS_LEN;
- else
+ len -= ETH_FCS_LEN;
+ } else if (buf1_len) {
buf1_len -= ETH_FCS_LEN;
-
- len -= ETH_FCS_LEN;
+ len -= ETH_FCS_LEN;
+ }
}
if (!skb) {
@@ -5504,8 +5548,6 @@ static int stmmac_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct stmmac_priv *priv = netdev_priv(netdev);
- bool sph_en;
- u32 chan;
/* Keep the COE Type in case of csum is supporting */
if (features & NETIF_F_RXCSUM)
@@ -5517,10 +5559,13 @@ static int stmmac_set_features(struct net_device *netdev,
*/
stmmac_rx_ipc(priv, priv->hw);
- sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ if (priv->sph_cap) {
+ bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ u32 chan;
- for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
- stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+ for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
+ stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+ }
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 232ac98943cd..5d29f336315b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -816,7 +816,7 @@ static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
if (ret)
return ret;
- clk_prepare_enable(priv->plat->clk_ptp_ref);
+ stmmac_init_tstamp_counter(priv, priv->systime_flags);
}
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 580cc035536b..be9b58b2abf9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -102,7 +102,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
time.tv_nsec = priv->plat->est->btr_reserve[0];
time.tv_sec = priv->plat->est->btr_reserve[1];
basetime = timespec64_to_ktime(time);
- cycle_time = priv->plat->est->ctr[1] * NSEC_PER_SEC +
+ cycle_time = (u64)priv->plat->est->ctr[1] * NSEC_PER_SEC +
priv->plat->est->ctr[0];
time = stmmac_calc_tas_basetime(basetime,
current_time_ns,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 1c4ea0b1b845..d0a2b289f460 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -232,11 +232,33 @@ static int tc_setup_cls_u32(struct stmmac_priv *priv,
}
}
+static int tc_rfs_init(struct stmmac_priv *priv)
+{
+ int i;
+
+ priv->rfs_entries_max[STMMAC_RFS_T_VLAN] = 8;
+
+ for (i = 0; i < STMMAC_RFS_T_MAX; i++)
+ priv->rfs_entries_total += priv->rfs_entries_max[i];
+
+ priv->rfs_entries = devm_kcalloc(priv->device,
+ priv->rfs_entries_total,
+ sizeof(*priv->rfs_entries),
+ GFP_KERNEL);
+ if (!priv->rfs_entries)
+ return -ENOMEM;
+
+ dev_info(priv->device, "Enabled RFS Flow TC (entries=%d)\n",
+ priv->rfs_entries_total);
+
+ return 0;
+}
+
static int tc_init(struct stmmac_priv *priv)
{
struct dma_features *dma_cap = &priv->dma_cap;
unsigned int count;
- int i;
+ int ret, i;
if (dma_cap->l3l4fnum) {
priv->flow_entries_max = dma_cap->l3l4fnum;
@@ -250,10 +272,14 @@ static int tc_init(struct stmmac_priv *priv)
for (i = 0; i < priv->flow_entries_max; i++)
priv->flow_entries[i].idx = i;
- dev_info(priv->device, "Enabled Flow TC (entries=%d)\n",
+ dev_info(priv->device, "Enabled L3L4 Flow TC (entries=%d)\n",
priv->flow_entries_max);
}
+ ret = tc_rfs_init(priv);
+ if (ret)
+ return -ENOMEM;
+
if (!priv->plat->fpe_cfg) {
priv->plat->fpe_cfg = devm_kzalloc(priv->device,
sizeof(*priv->plat->fpe_cfg),
@@ -607,16 +633,45 @@ static int tc_del_flow(struct stmmac_priv *priv,
return ret;
}
+static struct stmmac_rfs_entry *tc_find_rfs(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls,
+ bool get_free)
+{
+ int i;
+
+ for (i = 0; i < priv->rfs_entries_total; i++) {
+ struct stmmac_rfs_entry *entry = &priv->rfs_entries[i];
+
+ if (entry->cookie == cls->cookie)
+ return entry;
+ if (get_free && entry->in_use == false)
+ return entry;
+ }
+
+ return NULL;
+}
+
#define VLAN_PRIO_FULL_MASK (0x07)
static int tc_add_vlan_flow(struct stmmac_priv *priv,
struct flow_cls_offload *cls)
{
+ struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_dissector *dissector = rule->match.dissector;
int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
struct flow_match_vlan match;
+ if (!entry) {
+ entry = tc_find_rfs(priv, cls, true);
+ if (!entry)
+ return -ENOENT;
+ }
+
+ if (priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN] >=
+ priv->rfs_entries_max[STMMAC_RFS_T_VLAN])
+ return -ENOENT;
+
/* Nothing to do here */
if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN))
return -EINVAL;
@@ -638,6 +693,12 @@ static int tc_add_vlan_flow(struct stmmac_priv *priv,
prio = BIT(match.key->vlan_priority);
stmmac_rx_queue_prio(priv, priv->hw, prio, tc);
+
+ entry->in_use = true;
+ entry->cookie = cls->cookie;
+ entry->tc = tc;
+ entry->type = STMMAC_RFS_T_VLAN;
+ priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]++;
}
return 0;
@@ -646,20 +707,19 @@ static int tc_add_vlan_flow(struct stmmac_priv *priv,
static int tc_del_vlan_flow(struct stmmac_priv *priv,
struct flow_cls_offload *cls)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
- struct flow_dissector *dissector = rule->match.dissector;
- int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
+ struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
- /* Nothing to do here */
- if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN))
- return -EINVAL;
+ if (!entry || !entry->in_use || entry->type != STMMAC_RFS_T_VLAN)
+ return -ENOENT;
- if (tc < 0) {
- netdev_err(priv->dev, "Invalid traffic class\n");
- return -EINVAL;
- }
+ stmmac_rx_queue_prio(priv, priv->hw, 0, entry->tc);
+
+ entry->in_use = false;
+ entry->cookie = 0;
+ entry->tc = 0;
+ entry->type = 0;
- stmmac_rx_queue_prio(priv, priv->hw, 0, tc);
+ priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]--;
return 0;
}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index c092cb61416a..ffbbda8f4d41 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -1844,13 +1844,14 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
if (ret < 0) {
dev_err(dev, "%pOF error reading port_id %d\n",
port_np, ret);
- return ret;
+ goto of_node_put;
}
if (!port_id || port_id > common->port_num) {
dev_err(dev, "%pOF has invalid port_id %u %s\n",
port_np, port_id, port_np->name);
- return -EINVAL;
+ ret = -EINVAL;
+ goto of_node_put;
}
port = am65_common_get_port(common, port_id);
@@ -1866,8 +1867,10 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
(AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1));
port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base);
- if (IS_ERR(port->slave.mac_sl))
- return PTR_ERR(port->slave.mac_sl);
+ if (IS_ERR(port->slave.mac_sl)) {
+ ret = PTR_ERR(port->slave.mac_sl);
+ goto of_node_put;
+ }
port->disabled = !of_device_is_available(port_np);
if (port->disabled) {
@@ -1880,7 +1883,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
ret = PTR_ERR(port->slave.ifphy);
dev_err(dev, "%pOF error retrieving port phy: %d\n",
port_np, ret);
- return ret;
+ goto of_node_put;
}
port->slave.mac_only =
@@ -1889,10 +1892,12 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
/* get phy/link info */
if (of_phy_is_fixed_link(port_np)) {
ret = of_phy_register_fixed_link(port_np);
- if (ret)
- return dev_err_probe(dev, ret,
+ if (ret) {
+ ret = dev_err_probe(dev, ret,
"failed to register fixed-link phy %pOF\n",
port_np);
+ goto of_node_put;
+ }
port->slave.phy_node = of_node_get(port_np);
} else {
port->slave.phy_node =
@@ -1902,14 +1907,15 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
if (!port->slave.phy_node) {
dev_err(dev,
"slave[%d] no phy found\n", port_id);
- return -ENODEV;
+ ret = -ENODEV;
+ goto of_node_put;
}
ret = of_get_phy_mode(port_np, &port->slave.phy_if);
if (ret) {
dev_err(dev, "%pOF read phy-mode err %d\n",
port_np, ret);
- return ret;
+ goto of_node_put;
}
ret = of_get_mac_address(port_np, port->slave.mac_addr);
@@ -1932,6 +1938,11 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
}
return 0;
+
+of_node_put:
+ of_node_put(port_np);
+ of_node_put(node);
+ return ret;
}
static void am65_cpsw_pcpu_stats_free(void *data)
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index b06c17ac8d4e..ebd287039a54 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -1262,6 +1262,11 @@ static int fjes_probe(struct platform_device *plat_dev)
hw->hw_res.start = res->start;
hw->hw_res.size = resource_size(res);
hw->hw_res.irq = platform_get_irq(plat_dev, 0);
+ if (hw->hw_res.irq < 0) {
+ err = hw->hw_res.irq;
+ goto err_free_control_wq;
+ }
+
err = fjes_hw_init(&adapter->hw);
if (err)
goto err_free_control_wq;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index e2b332b54f06..edde9c3ae12b 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -31,6 +31,8 @@
#define AX_MTU 236
+/* Some architectures define END as an assembly function ending; just undef it. */
+#undef END
/* SLIP/KISS protocol characters. */
#define END 0300 /* indicates end of frame */
#define ESC 0333 /* indicates byte stuffing */
@@ -792,14 +794,14 @@ static void mkiss_close(struct tty_struct *tty)
*/
netif_stop_queue(ax->dev);
- ax->tty = NULL;
-
unregister_netdev(ax->dev);
/* Free all AX25 frame buffers after unreg. */
kfree(ax->rbuff);
kfree(ax->xbuff);
+ ax->tty = NULL;
+
free_netdev(ax->dev);
}
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index cff51731195a..d57472ea077f 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -661,22 +661,6 @@ void ipa_cmd_pipeline_clear_wait(struct ipa *ipa)
wait_for_completion(&ipa->completion);
}
-void ipa_cmd_pipeline_clear(struct ipa *ipa)
-{
- u32 count = ipa_cmd_pipeline_clear_count();
- struct gsi_trans *trans;
-
- trans = ipa_cmd_trans_alloc(ipa, count);
- if (trans) {
- ipa_cmd_pipeline_clear_add(trans);
- gsi_trans_commit_wait(trans);
- ipa_cmd_pipeline_clear_wait(ipa);
- } else {
- dev_err(&ipa->pdev->dev,
- "error allocating %u entry tag transaction\n", count);
- }
-}
-
static struct ipa_cmd_info *
ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count)
{
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
index 69cd085d427d..05ed7e42e184 100644
--- a/drivers/net/ipa/ipa_cmd.h
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -164,12 +164,6 @@ u32 ipa_cmd_pipeline_clear_count(void);
void ipa_cmd_pipeline_clear_wait(struct ipa *ipa);
/**
- * ipa_cmd_pipeline_clear() - Clear the hardware pipeline
- * @ipa: - IPA pointer
- */
-void ipa_cmd_pipeline_clear(struct ipa *ipa);
-
-/**
* ipa_cmd_trans_alloc() - Allocate a transaction for the command TX endpoint
* @ipa: IPA pointer
* @tre_count: Number of elements in the transaction
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index ef790fd0ab56..03a170993420 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -1636,8 +1636,6 @@ void ipa_endpoint_suspend(struct ipa *ipa)
if (ipa->modem_netdev)
ipa_modem_suspend(ipa->modem_netdev);
- ipa_cmd_pipeline_clear(ipa);
-
ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
}
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index cdfa98a76e1f..a448ec198bee 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -28,6 +28,7 @@
#include "ipa_reg.h"
#include "ipa_mem.h"
#include "ipa_table.h"
+#include "ipa_smp2p.h"
#include "ipa_modem.h"
#include "ipa_uc.h"
#include "ipa_interrupt.h"
@@ -801,6 +802,11 @@ static int ipa_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int ret;
+ /* Prevent the modem from triggering a call to ipa_setup(). This
+ * also ensures a modem-initiated setup that's underway completes.
+ */
+ ipa_smp2p_irq_disable_setup(ipa);
+
ret = pm_runtime_get_sync(dev);
if (WARN_ON(ret < 0))
goto out_power_put;
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
index ad116bcc0580..d0ab4d70c303 100644
--- a/drivers/net/ipa/ipa_modem.c
+++ b/drivers/net/ipa/ipa_modem.c
@@ -339,9 +339,6 @@ int ipa_modem_stop(struct ipa *ipa)
if (state != IPA_MODEM_STATE_RUNNING)
return -EBUSY;
- /* Prevent the modem from triggering a call to ipa_setup() */
- ipa_smp2p_disable(ipa);
-
/* Clean up the netdev and endpoints if it was started */
if (netdev) {
struct ipa_priv *priv = netdev_priv(netdev);
@@ -369,6 +366,9 @@ static void ipa_modem_crashed(struct ipa *ipa)
struct device *dev = &ipa->pdev->dev;
int ret;
+ /* Prevent the modem from triggering a call to ipa_setup() */
+ ipa_smp2p_irq_disable_setup(ipa);
+
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "error %d getting power to handle crash\n", ret);
diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c
index df7639c39d71..211233612039 100644
--- a/drivers/net/ipa/ipa_smp2p.c
+++ b/drivers/net/ipa/ipa_smp2p.c
@@ -53,7 +53,7 @@
* @setup_ready_irq: IPA interrupt triggered by modem to signal GSI ready
* @power_on: Whether IPA power is on
* @notified: Whether modem has been notified of power state
- * @disabled: Whether setup ready interrupt handling is disabled
+ * @setup_disabled: Whether setup ready interrupt handler is disabled
* @mutex: Mutex protecting ready-interrupt/shutdown interlock
* @panic_notifier: Panic notifier structure
*/
@@ -67,7 +67,7 @@ struct ipa_smp2p {
u32 setup_ready_irq;
bool power_on;
bool notified;
- bool disabled;
+ bool setup_disabled;
struct mutex mutex;
struct notifier_block panic_notifier;
};
@@ -155,11 +155,9 @@ static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id)
struct device *dev;
int ret;
- mutex_lock(&smp2p->mutex);
-
- if (smp2p->disabled)
- goto out_mutex_unlock;
- smp2p->disabled = true; /* If any others arrive, ignore them */
+ /* Ignore any (spurious) interrupts received after the first */
+ if (smp2p->ipa->setup_complete)
+ return IRQ_HANDLED;
/* Power needs to be active for setup */
dev = &smp2p->ipa->pdev->dev;
@@ -176,8 +174,6 @@ static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id)
out_power_put:
pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
-out_mutex_unlock:
- mutex_unlock(&smp2p->mutex);
return IRQ_HANDLED;
}
@@ -313,7 +309,7 @@ void ipa_smp2p_exit(struct ipa *ipa)
kfree(smp2p);
}
-void ipa_smp2p_disable(struct ipa *ipa)
+void ipa_smp2p_irq_disable_setup(struct ipa *ipa)
{
struct ipa_smp2p *smp2p = ipa->smp2p;
@@ -322,7 +318,10 @@ void ipa_smp2p_disable(struct ipa *ipa)
mutex_lock(&smp2p->mutex);
- smp2p->disabled = true;
+ if (!smp2p->setup_disabled) {
+ disable_irq(smp2p->setup_ready_irq);
+ smp2p->setup_disabled = true;
+ }
mutex_unlock(&smp2p->mutex);
}
diff --git a/drivers/net/ipa/ipa_smp2p.h b/drivers/net/ipa/ipa_smp2p.h
index 99a956789638..59cee31a7383 100644
--- a/drivers/net/ipa/ipa_smp2p.h
+++ b/drivers/net/ipa/ipa_smp2p.h
@@ -27,13 +27,12 @@ int ipa_smp2p_init(struct ipa *ipa, bool modem_init);
void ipa_smp2p_exit(struct ipa *ipa);
/**
- * ipa_smp2p_disable() - Prevent "ipa-setup-ready" interrupt handling
+ * ipa_smp2p_irq_disable_setup() - Disable the "setup ready" interrupt
* @ipa: IPA pointer
*
- * Prevent handling of the "setup ready" interrupt from the modem.
- * This is used before initiating shutdown of the driver.
+ * Disable the "ipa-setup-ready" interrupt from the modem.
*/
-void ipa_smp2p_disable(struct ipa *ipa);
+void ipa_smp2p_irq_disable_setup(struct ipa *ipa);
/**
* ipa_smp2p_notify_reset() - Reset modem notification state
diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c
index cad820568f75..966c3b4ad59d 100644
--- a/drivers/net/mdio/mdio-aspeed.c
+++ b/drivers/net/mdio/mdio-aspeed.c
@@ -61,6 +61,13 @@ static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum)
iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL);
+ rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl,
+ !(ctrl & ASPEED_MDIO_CTRL_FIRE),
+ ASPEED_MDIO_INTERVAL_US,
+ ASPEED_MDIO_TIMEOUT_US);
+ if (rc < 0)
+ return rc;
+
rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_DATA, data,
data & ASPEED_MDIO_DATA_IDLE,
ASPEED_MDIO_INTERVAL_US,
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index 90aafb56f140..a43820212932 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -514,6 +514,7 @@ nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap)
goto err_free;
key = nmap->entry[i].key;
*key = i;
+ memset(nmap->entry[i].value, 0, offmap->map.value_size);
}
}
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
index 0ab6a40be611..a6a713b31aad 100644
--- a/drivers/net/netdevsim/ethtool.c
+++ b/drivers/net/netdevsim/ethtool.c
@@ -77,7 +77,10 @@ static int nsim_set_ringparam(struct net_device *dev,
{
struct netdevsim *ns = netdev_priv(dev);
- memcpy(&ns->ethtool.ring, ring, sizeof(ns->ethtool.ring));
+ ns->ethtool.ring.rx_pending = ring->rx_pending;
+ ns->ethtool.ring.rx_jumbo_pending = ring->rx_jumbo_pending;
+ ns->ethtool.ring.rx_mini_pending = ring->rx_mini_pending;
+ ns->ethtool.ring.tx_pending = ring->tx_pending;
return 0;
}
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index c65fb5f5d2dc..a0c256bd5441 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -239,8 +239,8 @@ static struct phy_device *__fixed_phy_register(unsigned int irq,
/* Check if we have a GPIO associated with this fixed phy */
if (!gpiod) {
gpiod = fixed_phy_get_gpiod(np);
- if (IS_ERR(gpiod))
- return ERR_CAST(gpiod);
+ if (!gpiod)
+ return ERR_PTR(-EINVAL);
}
/* Get the next available PHY address, up to PHY_MAX_ADDR */
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index c204067f1890..c198722e4871 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -460,6 +460,9 @@ static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
if (addr == mdiodev->addr) {
device_set_node(dev, of_fwnode_handle(child));
+ /* The refcount on "child" is passed to the mdio
+ * device. Do _not_ use of_node_put(child) here.
+ */
return;
}
}
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 3ad7397b8119..ea82ea5660e7 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -710,6 +710,7 @@ static void phylink_resolve(struct work_struct *w)
struct phylink_link_state link_state;
struct net_device *ndev = pl->netdev;
bool mac_config = false;
+ bool retrigger = false;
bool cur_link_state;
mutex_lock(&pl->state_mutex);
@@ -723,6 +724,7 @@ static void phylink_resolve(struct work_struct *w)
link_state.link = false;
} else if (pl->mac_link_dropped) {
link_state.link = false;
+ retrigger = true;
} else {
switch (pl->cur_link_an_mode) {
case MLO_AN_PHY:
@@ -739,6 +741,19 @@ static void phylink_resolve(struct work_struct *w)
case MLO_AN_INBAND:
phylink_mac_pcs_get_state(pl, &link_state);
+ /* The PCS may have a latching link-fail indicator.
+ * If the link was up, bring the link down and
+ * re-trigger the resolve. Otherwise, re-read the
+ * PCS state to get the current status of the link.
+ */
+ if (!link_state.link) {
+ if (cur_link_state)
+ retrigger = true;
+ else
+ phylink_mac_pcs_get_state(pl,
+ &link_state);
+ }
+
/* If we have a phy, the "up" state is the union of
* both the PHY and the MAC
*/
@@ -747,6 +762,15 @@ static void phylink_resolve(struct work_struct *w)
/* Only update if the PHY link is up */
if (pl->phydev && pl->phy_state.link) {
+ /* If the interface has changed, force a
+ * link down event if the link isn't already
+ * down, and re-resolve.
+ */
+ if (link_state.interface !=
+ pl->phy_state.interface) {
+ retrigger = true;
+ link_state.link = false;
+ }
link_state.interface = pl->phy_state.interface;
/* If we have a PHY, we need to update with
@@ -789,7 +813,7 @@ static void phylink_resolve(struct work_struct *w)
else
phylink_link_up(pl, link_state);
}
- if (!link_state.link && pl->mac_link_dropped) {
+ if (!link_state.link && retrigger) {
pl->mac_link_dropped = false;
queue_work(system_power_efficient_wq, &pl->resolve);
}
@@ -1364,6 +1388,7 @@ EXPORT_SYMBOL_GPL(phylink_stop);
* @mac_wol: true if the MAC needs to receive packets for Wake-on-Lan
*
* Handle a network device suspend event. There are several cases:
+ *
* - If Wake-on-Lan is not active, we can bring down the link between
* the MAC and PHY by calling phylink_stop().
* - If Wake-on-Lan is active, and being handled only by the PHY, we
diff --git a/drivers/net/slip/slip.h b/drivers/net/slip/slip.h
index c420e5948522..3d7f88b330c1 100644
--- a/drivers/net/slip/slip.h
+++ b/drivers/net/slip/slip.h
@@ -40,6 +40,8 @@
insmod -oslip_maxdev=nnn */
#define SL_MTU 296 /* 296; I am used to 600- FvK */
+/* Some architectures define END as an assembly function ending; just undef it. */
+#undef END
/* SLIP protocol characters. */
#define END 0300 /* indicates end of frame */
#define ESC 0333 /* indicates byte stuffing */
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 1572878c3403..45a67e72a02c 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -209,6 +209,9 @@ struct tun_struct {
struct tun_prog __rcu *steering_prog;
struct tun_prog __rcu *filter_prog;
struct ethtool_link_ksettings link_ksettings;
+ /* init args */
+ struct file *file;
+ struct ifreq *ifr;
};
struct veth {
@@ -216,6 +219,9 @@ struct veth {
__be16 h_vlan_TCI;
};
+static void tun_flow_init(struct tun_struct *tun);
+static void tun_flow_uninit(struct tun_struct *tun);
+
static int tun_napi_receive(struct napi_struct *napi, int budget)
{
struct tun_file *tfile = container_of(napi, struct tun_file, napi);
@@ -953,6 +959,49 @@ static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
static const struct ethtool_ops tun_ethtool_ops;
+static int tun_net_init(struct net_device *dev)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+ struct ifreq *ifr = tun->ifr;
+ int err;
+
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
+ spin_lock_init(&tun->lock);
+
+ err = security_tun_dev_alloc_security(&tun->security);
+ if (err < 0) {
+ free_percpu(dev->tstats);
+ return err;
+ }
+
+ tun_flow_init(tun);
+
+ dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
+ TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
+ dev->features = dev->hw_features | NETIF_F_LLTX;
+ dev->vlan_features = dev->features &
+ ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX);
+
+ tun->flags = (tun->flags & ~TUN_FEATURES) |
+ (ifr->ifr_flags & TUN_FEATURES);
+
+ INIT_LIST_HEAD(&tun->disabled);
+ err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI,
+ ifr->ifr_flags & IFF_NAPI_FRAGS, false);
+ if (err < 0) {
+ tun_flow_uninit(tun);
+ security_tun_dev_free_security(tun->security);
+ free_percpu(dev->tstats);
+ return err;
+ }
+ return 0;
+}
+
/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
@@ -1169,6 +1218,7 @@ static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
}
static const struct net_device_ops tun_netdev_ops = {
+ .ndo_init = tun_net_init,
.ndo_uninit = tun_net_uninit,
.ndo_open = tun_net_open,
.ndo_stop = tun_net_close,
@@ -1252,6 +1302,7 @@ static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
}
static const struct net_device_ops tap_netdev_ops = {
+ .ndo_init = tun_net_init,
.ndo_uninit = tun_net_uninit,
.ndo_open = tun_net_open,
.ndo_stop = tun_net_close,
@@ -1292,7 +1343,7 @@ static void tun_flow_uninit(struct tun_struct *tun)
#define MAX_MTU 65535
/* Initialize net device. */
-static void tun_net_init(struct net_device *dev)
+static void tun_net_initialize(struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
@@ -2206,11 +2257,6 @@ static void tun_free_netdev(struct net_device *dev)
BUG_ON(!(list_empty(&tun->disabled)));
free_percpu(dev->tstats);
- /* We clear tstats so that tun_set_iff() can tell if
- * tun_free_netdev() has been called from register_netdevice().
- */
- dev->tstats = NULL;
-
tun_flow_uninit(tun);
security_tun_dev_free_security(tun->security);
__tun_set_ebpf(tun, &tun->steering_prog, NULL);
@@ -2716,41 +2762,16 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun->rx_batched = 0;
RCU_INIT_POINTER(tun->steering_prog, NULL);
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats) {
- err = -ENOMEM;
- goto err_free_dev;
- }
-
- spin_lock_init(&tun->lock);
-
- err = security_tun_dev_alloc_security(&tun->security);
- if (err < 0)
- goto err_free_stat;
-
- tun_net_init(dev);
- tun_flow_init(tun);
+ tun->ifr = ifr;
+ tun->file = file;
- dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
- TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX;
- dev->features = dev->hw_features | NETIF_F_LLTX;
- dev->vlan_features = dev->features &
- ~(NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX);
-
- tun->flags = (tun->flags & ~TUN_FEATURES) |
- (ifr->ifr_flags & TUN_FEATURES);
-
- INIT_LIST_HEAD(&tun->disabled);
- err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
- ifr->ifr_flags & IFF_NAPI_FRAGS, false);
- if (err < 0)
- goto err_free_flow;
+ tun_net_initialize(dev);
err = register_netdevice(tun->dev);
- if (err < 0)
- goto err_detach;
+ if (err < 0) {
+ free_netdev(dev);
+ return err;
+ }
/* free_netdev() won't check refcnt, to avoid race
* with dev_put() we need publish tun after registration.
*/
@@ -2767,24 +2788,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
strcpy(ifr->ifr_name, tun->dev->name);
return 0;
-
-err_detach:
- tun_detach_all(dev);
- /* We are here because register_netdevice() has failed.
- * If register_netdevice() already called tun_free_netdev()
- * while dealing with the error, dev->stats has been cleared.
- */
- if (!dev->tstats)
- goto err_free_dev;
-
-err_free_flow:
- tun_flow_uninit(tun);
- security_tun_dev_free_security(tun->security);
-err_free_stat:
- free_percpu(dev->tstats);
-err_free_dev:
- free_netdev(dev);
- return err;
}
static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 42ba4af68090..71682970be58 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -9,6 +9,8 @@
#include "asix.h"
+#define AX_HOST_EN_RETRIES 30
+
int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
u16 size, void *data, int in_pm)
{
@@ -68,7 +70,7 @@ static int asix_check_host_enable(struct usbnet *dev, int in_pm)
int i, ret;
u8 smsr;
- for (i = 0; i < 30; ++i) {
+ for (i = 0; i < AX_HOST_EN_RETRIES; ++i) {
ret = asix_set_sw_mii(dev, in_pm);
if (ret == -ENODEV || ret == -ETIMEDOUT)
break;
@@ -77,13 +79,13 @@ static int asix_check_host_enable(struct usbnet *dev, int in_pm)
0, 0, 1, &smsr, in_pm);
if (ret == -ENODEV)
break;
- else if (ret < 0)
+ else if (ret < sizeof(smsr))
continue;
else if (smsr & AX_HOST_EN)
break;
}
- return ret;
+ return i >= AX_HOST_EN_RETRIES ? -ETIMEDOUT : ret;
}
static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx)
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 24753a4da7e6..e303b522efb5 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -181,6 +181,8 @@ static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx)
min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth32);
max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
+ if (max == 0)
+ max = CDC_NCM_NTB_MAX_SIZE_TX; /* dwNtbOutMaxSize not set */
/* some devices set dwNtbOutMaxSize too low for the above default */
min = min(min, max);
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index f20376c1ef3f..075f8abde5cd 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -76,6 +76,8 @@
#define LAN7801_USB_PRODUCT_ID (0x7801)
#define LAN78XX_EEPROM_MAGIC (0x78A5)
#define LAN78XX_OTP_MAGIC (0x78F3)
+#define AT29M2AF_USB_VENDOR_ID (0x07C9)
+#define AT29M2AF_USB_PRODUCT_ID (0x0012)
#define MII_READ 1
#define MII_WRITE 0
@@ -2228,7 +2230,7 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
if (dev->domain_data.phyirq > 0)
phydev->irq = dev->domain_data.phyirq;
else
- phydev->irq = 0;
+ phydev->irq = PHY_POLL;
netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
/* set to AUTOMDIX */
@@ -4734,6 +4736,10 @@ static const struct usb_device_id products[] = {
/* LAN7801 USB Gigabit Ethernet Device */
USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
},
+ {
+ /* ATM2-AF USB Gigabit Ethernet Device */
+ USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
+ },
{},
};
MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index c4cd40b090fd..feb247e355f7 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -493,11 +493,11 @@ static void read_bulk_callback(struct urb *urb)
goto goon;
rx_status = buf[count - 2];
- if (rx_status & 0x1e) {
+ if (rx_status & 0x1c) {
netif_dbg(pegasus, rx_err, net,
"RX packet error %x\n", rx_status);
net->stats.rx_errors++;
- if (rx_status & 0x06) /* long or runt */
+ if (rx_status & 0x04) /* runt */
net->stats.rx_length_errors++;
if (rx_status & 0x08)
net->stats.rx_crc_errors++;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 86b814e99224..f510e8219470 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1358,6 +1358,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index f9877a3e83ac..3085e8118d7f 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -32,7 +32,7 @@
#define NETNEXT_VERSION "12"
/* Information for net */
-#define NET_VERSION "11"
+#define NET_VERSION "12"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -4016,6 +4016,11 @@ static void rtl_clear_bp(struct r8152 *tp, u16 type)
ocp_write_word(tp, type, PLA_BP_BA, 0);
}
+static inline void rtl_reset_ocp_base(struct r8152 *tp)
+{
+ tp->ocp_base = -1;
+}
+
static int rtl_phy_patch_request(struct r8152 *tp, bool request, bool wait)
{
u16 data, check;
@@ -4087,8 +4092,6 @@ static int rtl_post_ram_code(struct r8152 *tp, u16 key_addr, bool wait)
rtl_phy_patch_request(tp, false, wait);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, tp->ocp_base);
-
return 0;
}
@@ -4800,6 +4803,8 @@ static void rtl_ram_code_speed_up(struct r8152 *tp, struct fw_phy_speed_up *phy,
u32 len;
u8 *data;
+ rtl_reset_ocp_base(tp);
+
if (sram_read(tp, SRAM_GPHY_FW_VER) >= __le16_to_cpu(phy->version)) {
dev_dbg(&tp->intf->dev, "PHY firmware has been the newest\n");
return;
@@ -4845,7 +4850,8 @@ static void rtl_ram_code_speed_up(struct r8152 *tp, struct fw_phy_speed_up *phy,
}
}
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, tp->ocp_base);
+ rtl_reset_ocp_base(tp);
+
rtl_phy_patch_request(tp, false, wait);
if (sram_read(tp, SRAM_GPHY_FW_VER) == __le16_to_cpu(phy->version))
@@ -4861,6 +4867,8 @@ static int rtl8152_fw_phy_ver(struct r8152 *tp, struct fw_phy_ver *phy_ver)
ver_addr = __le16_to_cpu(phy_ver->ver.addr);
ver = __le16_to_cpu(phy_ver->ver.data);
+ rtl_reset_ocp_base(tp);
+
if (sram_read(tp, ver_addr) >= ver) {
dev_dbg(&tp->intf->dev, "PHY firmware has been the newest\n");
return 0;
@@ -4877,6 +4885,8 @@ static void rtl8152_fw_phy_fixup(struct r8152 *tp, struct fw_phy_fixup *fix)
{
u16 addr, data;
+ rtl_reset_ocp_base(tp);
+
addr = __le16_to_cpu(fix->setting.addr);
data = ocp_reg_read(tp, addr);
@@ -4908,6 +4918,8 @@ static void rtl8152_fw_phy_union_apply(struct r8152 *tp, struct fw_phy_union *ph
u32 length;
int i, num;
+ rtl_reset_ocp_base(tp);
+
num = phy->pre_num;
for (i = 0; i < num; i++)
sram_write(tp, __le16_to_cpu(phy->pre_set[i].addr),
@@ -4938,6 +4950,8 @@ static void rtl8152_fw_phy_nc_apply(struct r8152 *tp, struct fw_phy_nc *phy)
u32 length, i, num;
__le16 *data;
+ rtl_reset_ocp_base(tp);
+
mode_reg = __le16_to_cpu(phy->mode_reg);
sram_write(tp, mode_reg, __le16_to_cpu(phy->mode_pre));
sram_write(tp, __le16_to_cpu(phy->ba_reg),
@@ -5107,6 +5121,7 @@ post_fw:
if (rtl_fw->post_fw)
rtl_fw->post_fw(tp);
+ rtl_reset_ocp_base(tp);
strscpy(rtl_fw->version, fw_hdr->version, RTL_VER_SIZE);
dev_info(&tp->intf->dev, "load %s successfully\n", rtl_fw->version);
}
@@ -6584,6 +6599,21 @@ static bool rtl8153_in_nway(struct r8152 *tp)
return true;
}
+static void r8156_mdio_force_mode(struct r8152 *tp)
+{
+ u16 data;
+
+ /* Select force mode through 0xa5b4 bit 15
+ * 0: MDIO force mode
+ * 1: MMD force mode
+ */
+ data = ocp_reg_read(tp, 0xa5b4);
+ if (data & BIT(15)) {
+ data &= ~BIT(15);
+ ocp_reg_write(tp, 0xa5b4, data);
+ }
+}
+
static void set_carrier(struct r8152 *tp)
{
struct net_device *netdev = tp->netdev;
@@ -8016,6 +8046,7 @@ static void r8156_init(struct r8152 *tp)
ocp_data |= ACT_ODMA;
ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_CONFIG, ocp_data);
+ r8156_mdio_force_mode(tp);
rtl_tally_reset(tp);
tp->coalesce = 15000; /* 15 us */
@@ -8145,6 +8176,7 @@ static void r8156b_init(struct r8152 *tp)
ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
+ r8156_mdio_force_mode(tp);
rtl_tally_reset(tp);
tp->coalesce = 15000; /* 15 us */
@@ -8467,6 +8499,8 @@ static int rtl8152_resume(struct usb_interface *intf)
mutex_lock(&tp->control);
+ rtl_reset_ocp_base(tp);
+
if (test_bit(SELECTIVE_SUSPEND, &tp->flags))
ret = rtl8152_runtime_resume(tp);
else
@@ -8482,6 +8516,7 @@ static int rtl8152_reset_resume(struct usb_interface *intf)
struct r8152 *tp = usb_get_intfdata(intf);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ rtl_reset_ocp_base(tp);
tp->rtl_ops.init(tp);
queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
set_ethernet_addr(tp, true);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 20fe4cd8f784..abe0149ed917 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1050,6 +1050,14 @@ static const struct net_device_ops smsc95xx_netdev_ops = {
.ndo_set_features = smsc95xx_set_features,
};
+static void smsc95xx_handle_link_change(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+
+ phy_print_status(net->phydev);
+ usbnet_defer_kevent(dev, EVENT_LINK_CHANGE);
+}
+
static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
{
struct smsc95xx_priv *pdata;
@@ -1154,6 +1162,17 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->min_mtu = ETH_MIN_MTU;
dev->net->max_mtu = ETH_DATA_LEN;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
+ ret = phy_connect_direct(dev->net, pdata->phydev,
+ &smsc95xx_handle_link_change,
+ PHY_INTERFACE_MODE_MII);
+ if (ret) {
+ netdev_err(dev->net, "can't attach PHY to %s\n", pdata->mdiobus->id);
+ goto unregister_mdio;
+ }
+
+ phy_attached_info(dev->net->phydev);
+
return 0;
unregister_mdio:
@@ -1171,47 +1190,25 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct smsc95xx_priv *pdata = dev->driver_priv;
+ phy_disconnect(dev->net->phydev);
mdiobus_unregister(pdata->mdiobus);
mdiobus_free(pdata->mdiobus);
netif_dbg(dev, ifdown, dev->net, "free pdata\n");
kfree(pdata);
}
-static void smsc95xx_handle_link_change(struct net_device *net)
-{
- struct usbnet *dev = netdev_priv(net);
-
- phy_print_status(net->phydev);
- usbnet_defer_kevent(dev, EVENT_LINK_CHANGE);
-}
-
static int smsc95xx_start_phy(struct usbnet *dev)
{
- struct smsc95xx_priv *pdata = dev->driver_priv;
- struct net_device *net = dev->net;
- int ret;
+ phy_start(dev->net->phydev);
- ret = smsc95xx_reset(dev);
- if (ret < 0)
- return ret;
-
- ret = phy_connect_direct(net, pdata->phydev,
- &smsc95xx_handle_link_change,
- PHY_INTERFACE_MODE_MII);
- if (ret) {
- netdev_err(net, "can't attach PHY to %s\n", pdata->mdiobus->id);
- return ret;
- }
-
- phy_attached_info(net->phydev);
- phy_start(net->phydev);
return 0;
}
-static int smsc95xx_disconnect_phy(struct usbnet *dev)
+static int smsc95xx_stop(struct usbnet *dev)
{
- phy_stop(dev->net->phydev);
- phy_disconnect(dev->net->phydev);
+ if (dev->net->phydev)
+ phy_stop(dev->net->phydev);
+
return 0;
}
@@ -1966,7 +1963,7 @@ static const struct driver_info smsc95xx_info = {
.unbind = smsc95xx_unbind,
.link_reset = smsc95xx_link_reset,
.reset = smsc95xx_start_phy,
- .stop = smsc95xx_disconnect_phy,
+ .stop = smsc95xx_stop,
.rx_fixup = smsc95xx_rx_fixup,
.tx_fixup = smsc95xx_tx_fixup,
.status = smsc95xx_status,
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 50eb43e5bf45..2acdb8ad6c71 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -879,8 +879,12 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget,
stats->xdp_bytes += skb->len;
skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
- if (skb)
- napi_gro_receive(&rq->xdp_napi, skb);
+ if (skb) {
+ if (skb_shared(skb) || skb_unclone(skb, GFP_ATOMIC))
+ netif_receive_skb(skb);
+ else
+ napi_gro_receive(&rq->xdp_napi, skb);
+ }
}
done++;
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 1771d6e5224f..b107835242ad 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -733,7 +733,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
pr_debug("%s: rx error: len %u exceeds max size %d\n",
dev->name, len, GOOD_PACKET_LEN);
dev->stats.rx_length_errors++;
- goto err_len;
+ goto err;
}
if (likely(!vi->xdp_enabled)) {
@@ -825,10 +825,8 @@ static struct sk_buff *receive_small(struct net_device *dev,
skip_xdp:
skb = build_skb(buf, buflen);
- if (!skb) {
- put_page(page);
+ if (!skb)
goto err;
- }
skb_reserve(skb, headroom - delta);
skb_put(skb, len);
if (!xdp_prog) {
@@ -839,13 +837,12 @@ skip_xdp:
if (metasize)
skb_metadata_set(skb, metasize);
-err:
return skb;
err_xdp:
rcu_read_unlock();
stats->xdp_drops++;
-err_len:
+err:
stats->drops++;
put_page(page);
xdp_xmit:
@@ -3423,7 +3420,6 @@ static struct virtio_driver virtio_net_driver = {
.feature_table_size = ARRAY_SIZE(features),
.feature_table_legacy = features_legacy,
.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
- .suppress_used_validation = true,
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 14fae317bc70..fd407c0e2856 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -3261,7 +3261,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
#ifdef CONFIG_PCI_MSI
if (adapter->intr.type == VMXNET3_IT_MSIX) {
- int i, nvec;
+ int i, nvec, nvec_allocated;
nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
1 : adapter->num_tx_queues;
@@ -3274,14 +3274,15 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
for (i = 0; i < nvec; i++)
adapter->intr.msix_entries[i].entry = i;
- nvec = vmxnet3_acquire_msix_vectors(adapter, nvec);
- if (nvec < 0)
+ nvec_allocated = vmxnet3_acquire_msix_vectors(adapter, nvec);
+ if (nvec_allocated < 0)
goto msix_err;
/* If we cannot allocate one MSIx vector per queue
* then limit the number of rx queues to 1
*/
- if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) {
+ if (nvec_allocated == VMXNET3_LINUX_MIN_MSIX_VECT &&
+ nvec != VMXNET3_LINUX_MIN_MSIX_VECT) {
if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
|| adapter->num_rx_queues != 1) {
adapter->share_intr = VMXNET3_INTR_TXSHARE;
@@ -3291,14 +3292,14 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
}
}
- adapter->intr.num_intrs = nvec;
+ adapter->intr.num_intrs = nvec_allocated;
return;
msix_err:
/* If we cannot allocate MSIx vectors use only one rx queue */
dev_info(&adapter->pdev->dev,
"Failed to enable MSI-X, error %d. "
- "Limiting #rx queues to 1, try MSI.\n", nvec);
+ "Limiting #rx queues to 1, try MSI.\n", nvec_allocated);
adapter->intr.type = VMXNET3_IT_MSI;
}
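
The vmxnet3 change above keeps the requested MSI-X vector count separate from the count actually granted, so later comparisons against the request stay meaningful. A minimal userspace sketch of that pattern (illustrative only; request_vectors() is a stand-in, not the driver or PCI API):

#include <stdio.h>

/* Stand-in for an allocator that may grant fewer resources than asked. */
static int request_vectors(int wanted)
{
	return wanted > 8 ? 8 : wanted;	/* platform grants at most 8 */
}

int main(void)
{
	int nvec = 16;					/* what we asked for */
	int nvec_allocated = request_vectors(nvec);	/* what we were granted */

	if (nvec_allocated < nvec)
		printf("granted only %d of %d vectors, falling back\n",
		       nvec_allocated, nvec);
	return 0;
}
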
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index ccf677015d5b..b2242a082431 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -497,6 +497,7 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
/* strip the ethernet header added for pass through VRF device */
__skb_pull(skb, skb_network_offset(skb));
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
ret = vrf_ip6_local_out(net, skb->sk, skb);
if (unlikely(net_xmit_eval(ret)))
dev->stats.tx_errors++;
@@ -579,6 +580,7 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
RT_SCOPE_LINK);
}
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
if (unlikely(net_xmit_eval(ret)))
vrf_dev->stats.tx_errors++;
@@ -768,8 +770,6 @@ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
skb->dev = vrf_dev;
- vrf_nf_set_untracked(skb);
-
err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
skb, NULL, vrf_dev, vrf_ip6_out_direct_finish);
@@ -790,6 +790,8 @@ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
return skb;
+ vrf_nf_set_untracked(skb);
+
if (qdisc_tx_is_default(vrf_dev) ||
IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
return vrf_ip6_out_direct(vrf_dev, sk, skb);
@@ -998,8 +1000,6 @@ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
skb->dev = vrf_dev;
- vrf_nf_set_untracked(skb);
-
err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
skb, NULL, vrf_dev, vrf_ip_out_direct_finish);
@@ -1021,6 +1021,8 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
ipv4_is_lbcast(ip_hdr(skb)->daddr))
return skb;
+ vrf_nf_set_untracked(skb);
+
if (qdisc_tx_is_default(vrf_dev) ||
IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
return vrf_ip_out_direct(vrf_dev, sk, skb);
diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c
index b7197e80f226..9a4c8ff32d9d 100644
--- a/drivers/net/wireguard/allowedips.c
+++ b/drivers/net/wireguard/allowedips.c
@@ -163,7 +163,7 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
return exact;
}
-static inline void connect_node(struct allowedips_node **parent, u8 bit, struct allowedips_node *node)
+static inline void connect_node(struct allowedips_node __rcu **parent, u8 bit, struct allowedips_node *node)
{
node->parent_bit_packed = (unsigned long)parent | bit;
rcu_assign_pointer(*parent, node);
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index 551ddaaaf540..a46067c38bf5 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -98,6 +98,7 @@ static int wg_stop(struct net_device *dev)
{
struct wg_device *wg = netdev_priv(dev);
struct wg_peer *peer;
+ struct sk_buff *skb;
mutex_lock(&wg->device_update_lock);
list_for_each_entry(peer, &wg->peer_list, peer_list) {
@@ -108,7 +109,9 @@ static int wg_stop(struct net_device *dev)
wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
}
mutex_unlock(&wg->device_update_lock);
- skb_queue_purge(&wg->incoming_handshakes);
+ while ((skb = ptr_ring_consume(&wg->handshake_queue.ring)) != NULL)
+ kfree_skb(skb);
+ atomic_set(&wg->handshake_queue_len, 0);
wg_socket_reinit(wg, NULL, NULL);
return 0;
}
@@ -235,14 +238,13 @@ static void wg_destruct(struct net_device *dev)
destroy_workqueue(wg->handshake_receive_wq);
destroy_workqueue(wg->handshake_send_wq);
destroy_workqueue(wg->packet_crypt_wq);
- wg_packet_queue_free(&wg->decrypt_queue);
- wg_packet_queue_free(&wg->encrypt_queue);
+ wg_packet_queue_free(&wg->handshake_queue, true);
+ wg_packet_queue_free(&wg->decrypt_queue, false);
+ wg_packet_queue_free(&wg->encrypt_queue, false);
rcu_barrier(); /* Wait for all the peers to be actually freed. */
wg_ratelimiter_uninit();
memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
- skb_queue_purge(&wg->incoming_handshakes);
free_percpu(dev->tstats);
- free_percpu(wg->incoming_handshakes_worker);
kvfree(wg->index_hashtable);
kvfree(wg->peer_hashtable);
mutex_unlock(&wg->device_update_lock);
@@ -298,7 +300,6 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
init_rwsem(&wg->static_identity.lock);
mutex_init(&wg->socket_update_lock);
mutex_init(&wg->device_update_lock);
- skb_queue_head_init(&wg->incoming_handshakes);
wg_allowedips_init(&wg->peer_allowedips);
wg_cookie_checker_init(&wg->cookie_checker, wg);
INIT_LIST_HEAD(&wg->peer_list);
@@ -316,16 +317,10 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
if (!dev->tstats)
goto err_free_index_hashtable;
- wg->incoming_handshakes_worker =
- wg_packet_percpu_multicore_worker_alloc(
- wg_packet_handshake_receive_worker, wg);
- if (!wg->incoming_handshakes_worker)
- goto err_free_tstats;
-
wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s",
WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name);
if (!wg->handshake_receive_wq)
- goto err_free_incoming_handshakes;
+ goto err_free_tstats;
wg->handshake_send_wq = alloc_workqueue("wg-kex-%s",
WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name);
@@ -347,10 +342,15 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
if (ret < 0)
goto err_free_encrypt_queue;
- ret = wg_ratelimiter_init();
+ ret = wg_packet_queue_init(&wg->handshake_queue, wg_packet_handshake_receive_worker,
+ MAX_QUEUED_INCOMING_HANDSHAKES);
if (ret < 0)
goto err_free_decrypt_queue;
+ ret = wg_ratelimiter_init();
+ if (ret < 0)
+ goto err_free_handshake_queue;
+
ret = register_netdevice(dev);
if (ret < 0)
goto err_uninit_ratelimiter;
@@ -367,18 +367,18 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
err_uninit_ratelimiter:
wg_ratelimiter_uninit();
+err_free_handshake_queue:
+ wg_packet_queue_free(&wg->handshake_queue, false);
err_free_decrypt_queue:
- wg_packet_queue_free(&wg->decrypt_queue);
+ wg_packet_queue_free(&wg->decrypt_queue, false);
err_free_encrypt_queue:
- wg_packet_queue_free(&wg->encrypt_queue);
+ wg_packet_queue_free(&wg->encrypt_queue, false);
err_destroy_packet_crypt:
destroy_workqueue(wg->packet_crypt_wq);
err_destroy_handshake_send:
destroy_workqueue(wg->handshake_send_wq);
err_destroy_handshake_receive:
destroy_workqueue(wg->handshake_receive_wq);
-err_free_incoming_handshakes:
- free_percpu(wg->incoming_handshakes_worker);
err_free_tstats:
free_percpu(dev->tstats);
err_free_index_hashtable:
@@ -398,6 +398,7 @@ static struct rtnl_link_ops link_ops __read_mostly = {
static void wg_netns_pre_exit(struct net *net)
{
struct wg_device *wg;
+ struct wg_peer *peer;
rtnl_lock();
list_for_each_entry(wg, &device_list, device_list) {
@@ -407,6 +408,8 @@ static void wg_netns_pre_exit(struct net *net)
mutex_lock(&wg->device_update_lock);
rcu_assign_pointer(wg->creating_net, NULL);
wg_socket_reinit(wg, NULL, NULL);
+ list_for_each_entry(peer, &wg->peer_list, peer_list)
+ wg_socket_clear_peer_endpoint_src(peer);
mutex_unlock(&wg->device_update_lock);
}
}
diff --git a/drivers/net/wireguard/device.h b/drivers/net/wireguard/device.h
index 854bc3d97150..43c7cebbf50b 100644
--- a/drivers/net/wireguard/device.h
+++ b/drivers/net/wireguard/device.h
@@ -39,21 +39,18 @@ struct prev_queue {
struct wg_device {
struct net_device *dev;
- struct crypt_queue encrypt_queue, decrypt_queue;
+ struct crypt_queue encrypt_queue, decrypt_queue, handshake_queue;
struct sock __rcu *sock4, *sock6;
struct net __rcu *creating_net;
struct noise_static_identity static_identity;
- struct workqueue_struct *handshake_receive_wq, *handshake_send_wq;
- struct workqueue_struct *packet_crypt_wq;
- struct sk_buff_head incoming_handshakes;
- int incoming_handshake_cpu;
- struct multicore_worker __percpu *incoming_handshakes_worker;
+ struct workqueue_struct *packet_crypt_wq,*handshake_receive_wq, *handshake_send_wq;
struct cookie_checker cookie_checker;
struct pubkey_hashtable *peer_hashtable;
struct index_hashtable *index_hashtable;
struct allowedips peer_allowedips;
struct mutex device_update_lock, socket_update_lock;
struct list_head device_list, peer_list;
+ atomic_t handshake_queue_len;
unsigned int num_peers, device_update_gen;
u32 fwmark;
u16 incoming_port;
diff --git a/drivers/net/wireguard/main.c b/drivers/net/wireguard/main.c
index 75dbe77b0b4b..ee4da9ab8013 100644
--- a/drivers/net/wireguard/main.c
+++ b/drivers/net/wireguard/main.c
@@ -17,7 +17,7 @@
#include <linux/genetlink.h>
#include <net/rtnetlink.h>
-static int __init mod_init(void)
+static int __init wg_mod_init(void)
{
int ret;
@@ -60,7 +60,7 @@ err_allowedips:
return ret;
}
-static void __exit mod_exit(void)
+static void __exit wg_mod_exit(void)
{
wg_genetlink_uninit();
wg_device_uninit();
@@ -68,8 +68,8 @@ static void __exit mod_exit(void)
wg_allowedips_slab_uninit();
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(wg_mod_init);
+module_exit(wg_mod_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("WireGuard secure network tunnel");
MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
diff --git a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c
index 48e7b982a307..1de413b19e34 100644
--- a/drivers/net/wireguard/queueing.c
+++ b/drivers/net/wireguard/queueing.c
@@ -38,11 +38,11 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
return 0;
}
-void wg_packet_queue_free(struct crypt_queue *queue)
+void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
{
free_percpu(queue->worker);
- WARN_ON(!__ptr_ring_empty(&queue->ring));
- ptr_ring_cleanup(&queue->ring, NULL);
+ WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
+ ptr_ring_cleanup(&queue->ring, purge ? (void(*)(void*))kfree_skb : NULL);
}
#define NEXT(skb) ((skb)->prev)
diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
index 4ef2944a68bc..e2388107f7fd 100644
--- a/drivers/net/wireguard/queueing.h
+++ b/drivers/net/wireguard/queueing.h
@@ -23,7 +23,7 @@ struct sk_buff;
/* queueing.c APIs: */
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
unsigned int len);
-void wg_packet_queue_free(struct crypt_queue *queue);
+void wg_packet_queue_free(struct crypt_queue *queue, bool purge);
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);
diff --git a/drivers/net/wireguard/ratelimiter.c b/drivers/net/wireguard/ratelimiter.c
index 3fedd1d21f5e..dd55e5c26f46 100644
--- a/drivers/net/wireguard/ratelimiter.c
+++ b/drivers/net/wireguard/ratelimiter.c
@@ -176,12 +176,12 @@ int wg_ratelimiter_init(void)
(1U << 14) / sizeof(struct hlist_head)));
max_entries = table_size * 8;
- table_v4 = kvzalloc(table_size * sizeof(*table_v4), GFP_KERNEL);
+ table_v4 = kvcalloc(table_size, sizeof(*table_v4), GFP_KERNEL);
if (unlikely(!table_v4))
goto err_kmemcache;
#if IS_ENABLED(CONFIG_IPV6)
- table_v6 = kvzalloc(table_size * sizeof(*table_v6), GFP_KERNEL);
+ table_v6 = kvcalloc(table_size, sizeof(*table_v6), GFP_KERNEL);
if (unlikely(!table_v6)) {
kvfree(table_v4);
goto err_kmemcache;
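
The ratelimiter change above replaces kvzalloc(n * size) with kvcalloc(n, size), which checks the multiplication for overflow before allocating. As a rough userspace analogue (illustrative only, not part of the patch), calloc() performs the same overflow-checked sizing that a bare malloc(n * size) does not:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Allocate an array of 'n' elements of 'size' bytes, refusing cleanly if
 * n * size would overflow size_t — the check calloc() does internally. */
static void *alloc_array(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)
		return NULL;		/* would overflow, refuse */
	return calloc(n, size);		/* zeroed, overflow-checked */
}

int main(void)
{
	struct entry { uint64_t key; uint64_t val; };
	struct entry *table = alloc_array(1u << 14, sizeof(*table));

	if (!table)
		return 1;
	printf("allocated %zu bytes\n", (1u << 14) * sizeof(*table));
	free(table);
	return 0;
}
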
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
index 7dc84bcca261..7b8df406c773 100644
--- a/drivers/net/wireguard/receive.c
+++ b/drivers/net/wireguard/receive.c
@@ -116,8 +116,8 @@ static void wg_receive_handshake_packet(struct wg_device *wg,
return;
}
- under_load = skb_queue_len(&wg->incoming_handshakes) >=
- MAX_QUEUED_INCOMING_HANDSHAKES / 8;
+ under_load = atomic_read(&wg->handshake_queue_len) >=
+ MAX_QUEUED_INCOMING_HANDSHAKES / 8;
if (under_load) {
last_under_load = ktime_get_coarse_boottime_ns();
} else if (last_under_load) {
@@ -212,13 +212,14 @@ static void wg_receive_handshake_packet(struct wg_device *wg,
void wg_packet_handshake_receive_worker(struct work_struct *work)
{
- struct wg_device *wg = container_of(work, struct multicore_worker,
- work)->ptr;
+ struct crypt_queue *queue = container_of(work, struct multicore_worker, work)->ptr;
+ struct wg_device *wg = container_of(queue, struct wg_device, handshake_queue);
struct sk_buff *skb;
- while ((skb = skb_dequeue(&wg->incoming_handshakes)) != NULL) {
+ while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
wg_receive_handshake_packet(wg, skb);
dev_kfree_skb(skb);
+ atomic_dec(&wg->handshake_queue_len);
cond_resched();
}
}
@@ -553,22 +554,28 @@ void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb)
case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION):
case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE):
case cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE): {
- int cpu;
-
- if (skb_queue_len(&wg->incoming_handshakes) >
- MAX_QUEUED_INCOMING_HANDSHAKES ||
- unlikely(!rng_is_initialized())) {
+ int cpu, ret = -EBUSY;
+
+ if (unlikely(!rng_is_initialized()))
+ goto drop;
+ if (atomic_read(&wg->handshake_queue_len) > MAX_QUEUED_INCOMING_HANDSHAKES / 2) {
+ if (spin_trylock_bh(&wg->handshake_queue.ring.producer_lock)) {
+ ret = __ptr_ring_produce(&wg->handshake_queue.ring, skb);
+ spin_unlock_bh(&wg->handshake_queue.ring.producer_lock);
+ }
+ } else
+ ret = ptr_ring_produce_bh(&wg->handshake_queue.ring, skb);
+ if (ret) {
+ drop:
net_dbg_skb_ratelimited("%s: Dropping handshake packet from %pISpfsc\n",
wg->dev->name, skb);
goto err;
}
- skb_queue_tail(&wg->incoming_handshakes, skb);
- /* Queues up a call to packet_process_queued_handshake_
- * packets(skb):
- */
- cpu = wg_cpumask_next_online(&wg->incoming_handshake_cpu);
+ atomic_inc(&wg->handshake_queue_len);
+ cpu = wg_cpumask_next_online(&wg->handshake_queue.last_cpu);
+ /* Queues up a call to packet_process_queued_handshake_packets(skb): */
queue_work_on(cpu, wg->handshake_receive_wq,
- &per_cpu_ptr(wg->incoming_handshakes_worker, cpu)->work);
+ &per_cpu_ptr(wg->handshake_queue.worker, cpu)->work);
break;
}
case cpu_to_le32(MESSAGE_DATA):
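
The handshake enqueue logic above sheds load once the queue passes half capacity by switching to a non-blocking trylock on the producer side. A simplified userspace sketch of that shape (illustrative only; the mutex and queue_len counter here are stand-ins, not the WireGuard data structures):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define QUEUE_MAX 4096

static pthread_mutex_t producer_lock = PTHREAD_MUTEX_INITIALIZER;
static int queue_len;

/* Over half full: only enqueue if the lock is free, so a flood of work
 * gets dropped instead of stalling on the producer lock. */
static bool enqueue(void)
{
	bool queued = false;

	if (queue_len > QUEUE_MAX / 2) {
		if (pthread_mutex_trylock(&producer_lock) == 0) {
			queue_len++;
			queued = true;
			pthread_mutex_unlock(&producer_lock);
		}
	} else {
		pthread_mutex_lock(&producer_lock);
		queue_len++;
		queued = true;
		pthread_mutex_unlock(&producer_lock);
	}
	return queued;
}

int main(void)
{
	printf("queued: %d\n", enqueue());
	return 0;
}
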
diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c
index 8c496b747108..6f07b949cb81 100644
--- a/drivers/net/wireguard/socket.c
+++ b/drivers/net/wireguard/socket.c
@@ -308,7 +308,7 @@ void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer)
{
write_lock_bh(&peer->endpoint_lock);
memset(&peer->endpoint.src6, 0, sizeof(peer->endpoint.src6));
- dst_cache_reset(&peer->endpoint_cache);
+ dst_cache_reset_now(&peer->endpoint_cache);
write_unlock_bh(&peer->endpoint_lock);
}
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index 26c7ae242db6..49c0b1ad40a0 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -533,7 +533,11 @@ static int ath11k_mhi_set_state(struct ath11k_pci *ab_pci,
ret = mhi_pm_suspend(ab_pci->mhi_ctrl);
break;
case ATH11K_MHI_RESUME:
- ret = mhi_pm_resume(ab_pci->mhi_ctrl);
+ /* Do force MHI resume as some devices like QCA6390, WCN6855
+ * are not in M3 state but they are functional. So just ignore
+ * the MHI state while resuming.
+ */
+ ret = mhi_pm_resume_force(ab_pci->mhi_ctrl);
break;
case ATH11K_MHI_TRIGGER_RDDM:
ret = mhi_force_rddm_mode(ab_pci->mhi_ctrl);
diff --git a/drivers/net/wireless/broadcom/brcm80211/Kconfig b/drivers/net/wireless/broadcom/brcm80211/Kconfig
index 5bf2318763c5..3a1a35b5672f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/Kconfig
+++ b/drivers/net/wireless/broadcom/brcm80211/Kconfig
@@ -7,16 +7,20 @@ config BRCMSMAC
depends on MAC80211
depends on BCMA_POSSIBLE
select BCMA
- select NEW_LEDS if BCMA_DRIVER_GPIO
- select LEDS_CLASS if BCMA_DRIVER_GPIO
select BRCMUTIL
select FW_LOADER
select CORDIC
help
This module adds support for PCIe wireless adapters based on Broadcom
- IEEE802.11n SoftMAC chipsets. It also has WLAN led support, which will
- be available if you select BCMA_DRIVER_GPIO. If you choose to build a
- module, the driver will be called brcmsmac.ko.
+ IEEE802.11n SoftMAC chipsets. If you choose to build a module, the
+ driver will be called brcmsmac.ko.
+
+config BRCMSMAC_LEDS
+ def_bool BRCMSMAC && BCMA_DRIVER_GPIO && MAC80211_LEDS
+ help
+ The brcmsmac LED support depends on the presence of the
+ BCMA_DRIVER_GPIO driver, and it only works if LED support
+ is enabled and reachable from the driver module.
source "drivers/net/wireless/broadcom/brcm80211/brcmfmac/Kconfig"
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
index 482d7737764d..090757730ba6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
@@ -42,6 +42,6 @@ brcmsmac-y := \
brcms_trace_events.o \
debug.o
-brcmsmac-$(CONFIG_BCMA_DRIVER_GPIO) += led.o
+brcmsmac-$(CONFIG_BRCMSMAC_LEDS) += led.o
obj-$(CONFIG_BRCMSMAC) += brcmsmac.o
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.h
index d65f5c268fd7..2a5cbeb9e783 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.h
@@ -24,7 +24,7 @@ struct brcms_led {
struct gpio_desc *gpiod;
};
-#ifdef CONFIG_BCMA_DRIVER_GPIO
+#ifdef CONFIG_BRCMSMAC_LEDS
void brcms_led_unregister(struct brcms_info *wl);
int brcms_led_register(struct brcms_info *wl);
#else
diff --git a/drivers/net/wireless/intel/iwlegacy/Kconfig b/drivers/net/wireless/intel/iwlegacy/Kconfig
index 24fe3f63c321..7eacc8e58ee1 100644
--- a/drivers/net/wireless/intel/iwlegacy/Kconfig
+++ b/drivers/net/wireless/intel/iwlegacy/Kconfig
@@ -2,14 +2,13 @@
config IWLEGACY
tristate
select FW_LOADER
- select NEW_LEDS
- select LEDS_CLASS
select LEDS_TRIGGERS
select MAC80211_LEDS
config IWL4965
tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
depends on PCI && MAC80211
+ depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211
select IWLEGACY
help
This option enables support for
@@ -38,6 +37,7 @@ config IWL4965
config IWL3945
tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
depends on PCI && MAC80211
+ depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211
select IWLEGACY
help
Select to build the driver supporting the:
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index 1085afbefba8..418ae4f870ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -47,7 +47,7 @@ if IWLWIFI
config IWLWIFI_LEDS
bool
- depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI
+ depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211
depends on IWLMVM || IWLDVM
select LEDS_TRIGGERS
select MAC80211_LEDS
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index c875bf35533c..009dd4be597b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -86,6 +86,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
if (len < tlv_len) {
IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
len, tlv_len);
+ kfree(reduce_power_data);
reduce_power_data = ERR_PTR(-EINVAL);
goto out;
}
@@ -105,6 +106,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
IWL_DEBUG_FW(trans,
"Couldn't allocate (more) reduce_power_data\n");
+ kfree(reduce_power_data);
reduce_power_data = ERR_PTR(-ENOMEM);
goto out;
}
@@ -134,6 +136,10 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
done:
if (!size) {
IWL_DEBUG_FW(trans, "Empty REDUCE_POWER, skipping.\n");
+ /* Better safe than sorry, but 'reduce_power_data' should
+ * always be NULL if !size.
+ */
+ kfree(reduce_power_data);
reduce_power_data = ERR_PTR(-ENOENT);
goto out;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 36196e07b1a0..5cec467b995b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1313,23 +1313,31 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
const struct iwl_op_mode_ops *ops = op->ops;
struct dentry *dbgfs_dir = NULL;
struct iwl_op_mode *op_mode = NULL;
+ int retry, max_retry = !!iwlwifi_mod_params.fw_restart * IWL_MAX_INIT_RETRY;
+
+ for (retry = 0; retry <= max_retry; retry++) {
#ifdef CONFIG_IWLWIFI_DEBUGFS
- drv->dbgfs_op_mode = debugfs_create_dir(op->name,
- drv->dbgfs_drv);
- dbgfs_dir = drv->dbgfs_op_mode;
+ drv->dbgfs_op_mode = debugfs_create_dir(op->name,
+ drv->dbgfs_drv);
+ dbgfs_dir = drv->dbgfs_op_mode;
#endif
- op_mode = ops->start(drv->trans, drv->trans->cfg, &drv->fw, dbgfs_dir);
+ op_mode = ops->start(drv->trans, drv->trans->cfg,
+ &drv->fw, dbgfs_dir);
+
+ if (op_mode)
+ return op_mode;
+
+ IWL_ERR(drv, "retry init count %d\n", retry);
#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (!op_mode) {
debugfs_remove_recursive(drv->dbgfs_op_mode);
drv->dbgfs_op_mode = NULL;
- }
#endif
+ }
- return op_mode;
+ return NULL;
}
static void _iwl_op_mode_stop(struct iwl_drv *drv)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
index 2e2d60a58692..0fd009e6d685 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
@@ -89,4 +89,7 @@ void iwl_drv_stop(struct iwl_drv *drv);
#define IWL_EXPORT_SYMBOL(sym)
#endif
+/* max retry for init flow */
+#define IWL_MAX_INIT_RETRY 2
+
#endif /* __iwl_drv_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 9fb9c7dad314..897e3b91ddb2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -16,6 +16,7 @@
#include <net/ieee80211_radiotap.h>
#include <net/tcp.h>
+#include "iwl-drv.h"
#include "iwl-op-mode.h"
#include "iwl-io.h"
#include "mvm.h"
@@ -1117,9 +1118,30 @@ static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
+ int retry, max_retry = 0;
mutex_lock(&mvm->mutex);
- ret = __iwl_mvm_mac_start(mvm);
+
+ /* we are starting the mac not in error flow, and restart is enabled */
+ if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) &&
+ iwlwifi_mod_params.fw_restart) {
+ max_retry = IWL_MAX_INIT_RETRY;
+ /*
+ * This will prevent mac80211 recovery flows to trigger during
+ * init failures
+ */
+ set_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
+ }
+
+ for (retry = 0; retry <= max_retry; retry++) {
+ ret = __iwl_mvm_mac_start(mvm);
+ if (!ret)
+ break;
+
+ IWL_ERR(mvm, "mac start retry %d\n", retry);
+ }
+ clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
+
mutex_unlock(&mvm->mutex);
return ret;
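
The mac start path above retries initialization a bounded number of times when restart is enabled. A minimal sketch of that retry loop in plain C (illustrative only; try_start() is a hypothetical stand-in for the driver's start routine, and MAX_INIT_RETRY mirrors the bounded count used above):

#include <stdio.h>

#define MAX_INIT_RETRY 2

/* Hypothetical operation that may fail transiently. */
static int try_start(int attempt)
{
	return attempt < 2 ? -1 : 0;	/* fail twice, then succeed */
}

int main(void)
{
	int ret = -1;

	for (int retry = 0; retry <= MAX_INIT_RETRY; retry++) {
		ret = try_start(retry);
		if (!ret)
			break;
		fprintf(stderr, "start attempt %d failed, retrying\n", retry);
	}
	return ret ? 1 : 0;
}
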
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 2b1dcd60e00f..a72d85086fe3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1123,6 +1123,8 @@ struct iwl_mvm {
* @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
* @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
* @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it)
+ * @IWL_MVM_STATUS_STARTING: starting mac,
+ * used to disable restart flow while in STARTING state
*/
enum iwl_mvm_status {
IWL_MVM_STATUS_HW_RFKILL,
@@ -1134,6 +1136,7 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_FIRMWARE_RUNNING,
IWL_MVM_STATUS_NEED_FLUSH_P2P,
IWL_MVM_STATUS_IN_D3,
+ IWL_MVM_STATUS_STARTING,
};
/* Keep track of completed init configuration */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 232ad531d612..cd08e289cd9a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -686,6 +686,7 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
int ret;
rtnl_lock();
+ wiphy_lock(mvm->hw->wiphy);
mutex_lock(&mvm->mutex);
ret = iwl_run_init_mvm_ucode(mvm);
@@ -701,6 +702,7 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
iwl_mvm_stop_device(mvm);
mutex_unlock(&mvm->mutex);
+ wiphy_unlock(mvm->hw->wiphy);
rtnl_unlock();
if (ret < 0)
@@ -1600,6 +1602,9 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
*/
if (!mvm->fw_restart && fw_error) {
iwl_fw_error_collect(&mvm->fwrt, false);
+ } else if (test_bit(IWL_MVM_STATUS_STARTING,
+ &mvm->status)) {
+ IWL_ERR(mvm, "Starting mac, retry will be triggered anyway\n");
} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
struct iwl_mvm_reprobe *reprobe;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index bdd4ee432548..76e0b7b45980 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -269,17 +269,18 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
u8 rate_plcp;
u32 rate_flags = 0;
bool is_cck;
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
/* info->control is only relevant for non HW rate control */
if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) {
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
/* HT rate doesn't make sense for a non data frame */
WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS &&
!ieee80211_is_data(fc),
"Got a HT rate (flags:0x%x/mcs:%d/fc:0x%x/state:%d) for a non data frame\n",
info->control.rates[0].flags,
info->control.rates[0].idx,
- le16_to_cpu(fc), mvmsta->sta_state);
+ le16_to_cpu(fc), sta ? mvmsta->sta_state : -1);
rate_idx = info->control.rates[0].idx;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index c574f041f096..5ce07f28e7c3 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1339,9 +1339,13 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
u16 mac_type, u8 mac_step,
u16 rf_type, u8 cdb, u8 rf_id, u8 no_160, u8 cores)
{
+ int num_devices = ARRAY_SIZE(iwl_dev_info_table);
int i;
- for (i = ARRAY_SIZE(iwl_dev_info_table) - 1; i >= 0; i--) {
+ if (!num_devices)
+ return NULL;
+
+ for (i = num_devices - 1; i >= 0; i--) {
const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i];
if (dev_info->device != (u16)IWL_CFG_ANY &&
@@ -1442,8 +1446,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
if (iwl_trans->trans_cfg->rf_id &&
iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000 &&
- !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && get_crf_id(iwl_trans))
+ !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && get_crf_id(iwl_trans)) {
+ ret = -EINVAL;
goto out_free_trans;
+ }
dev_info = iwl_pci_find_dev_info(pdev->device, pdev->subsystem_device,
CSR_HW_REV_TYPE(iwl_trans->hw_rev),
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index 79ab850a45a2..c78ae4b89761 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -34,4 +34,4 @@ obj-$(CONFIG_MT76x2_COMMON) += mt76x2/
obj-$(CONFIG_MT7603E) += mt7603/
obj-$(CONFIG_MT7615_COMMON) += mt7615/
obj-$(CONFIG_MT7915E) += mt7915/
-obj-$(CONFIG_MT7921E) += mt7921/
+obj-$(CONFIG_MT7921_COMMON) += mt7921/
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
index 5ee52cd70a4b..d1806f198aed 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
@@ -143,8 +143,6 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (!wcid)
wcid = &dev->mt76.global_wcid;
- pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
-
if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && msta) {
struct mt7615_phy *phy = &dev->phy;
@@ -164,6 +162,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (id < 0)
return id;
+ pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
pid, key, false);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
index bd2939ebcbf4..5a6d7829c6e0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
@@ -43,19 +43,11 @@ EXPORT_SYMBOL_GPL(mt7663_usb_sdio_reg_map);
static void
mt7663_usb_sdio_write_txwi(struct mt7615_dev *dev, struct mt76_wcid *wcid,
enum mt76_txq_id qid, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key, int pid,
struct sk_buff *skb)
{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_key_conf *key = info->control.hw_key;
- __le32 *txwi;
- int pid;
-
- if (!wcid)
- wcid = &dev->mt76.global_wcid;
-
- pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
+ __le32 *txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE);
- txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE);
memset(txwi, 0, MT_USB_TXD_SIZE);
mt7615_mac_write_txwi(dev, txwi, skb, wcid, sta, pid, key, false);
skb_push(skb, MT_USB_TXD_SIZE);
@@ -194,10 +186,14 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
struct sk_buff *skb = tx_info->skb;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
struct mt7615_sta *msta;
- int pad;
+ int pad, err, pktid;
msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL;
+ if (!wcid)
+ wcid = &dev->mt76.global_wcid;
+
if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) &&
msta && !msta->rate_probe) {
/* request to configure sampling rate */
@@ -207,7 +203,8 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
spin_unlock_bh(&dev->mt76.lock);
}
- mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, skb);
+ pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
+ mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, key, pktid, skb);
if (mt76_is_usb(mdev)) {
u32 len = skb->len;
@@ -217,7 +214,12 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
pad = round_up(skb->len, 4) - skb->len;
}
- return mt76_skb_adjust_pad(skb, pad);
+ err = mt76_skb_adjust_pad(skb, pad);
+ if (err)
+ /* Release pktid in case of error. */
+ idr_remove(&wcid->pktid, pktid);
+
+ return err;
}
EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_prepare_skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index efd70ddc2fd1..2c6c03809b20 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -72,6 +72,7 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU;
enum mt76_qsel qsel;
u32 flags;
+ int err;
mt76_insert_hdr_pad(tx_info->skb);
@@ -106,7 +107,12 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
ewma_pktlen_add(&msta->pktlen, tx_info->skb->len);
}
- return mt76x02u_skb_dma_info(tx_info->skb, WLAN_PORT, flags);
+ err = mt76x02u_skb_dma_info(tx_info->skb, WLAN_PORT, flags);
+ if (err && wcid)
+ /* Release pktid in case of error. */
+ idr_remove(&wcid->pktid, pid);
+
+ return err;
}
EXPORT_SYMBOL_GPL(mt76x02u_tx_prepare_skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index 5fcf35f2d9fb..809dc18e5083 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -1151,8 +1151,14 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
}
}
- pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+ t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
+ t->skb = tx_info->skb;
+
+ id = mt76_token_consume(mdev, &t);
+ if (id < 0)
+ return id;
+ pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid, key,
false);
@@ -1178,13 +1184,6 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
txp->bss_idx = mvif->idx;
}
- t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
- t->skb = tx_info->skb;
-
- id = mt76_token_consume(mdev, &t);
- if (id < 0)
- return id;
-
txp->token = cpu_to_le16(id);
if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
txp->rept_wds_wcid = cpu_to_le16(wcid->idx);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 899957b9d0f1..852d5d97c70b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -176,7 +176,7 @@ mt7915_get_phy_mode(struct ieee80211_vif *vif, struct ieee80211_sta *sta)
if (ht_cap->ht_supported)
mode |= PHY_MODE_GN;
- if (he_cap->has_he)
+ if (he_cap && he_cap->has_he)
mode |= PHY_MODE_AX_24G;
} else if (band == NL80211_BAND_5GHZ) {
mode |= PHY_MODE_A;
@@ -187,7 +187,7 @@ mt7915_get_phy_mode(struct ieee80211_vif *vif, struct ieee80211_sta *sta)
if (vht_cap->vht_supported)
mode |= PHY_MODE_AC;
- if (he_cap->has_he)
+ if (he_cap && he_cap->has_he)
mode |= PHY_MODE_AX_5G;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
index 137f86a6dbf8..bdec508b6b9f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
@@ -142,15 +142,11 @@ out:
static void
mt7921s_write_txwi(struct mt7921_dev *dev, struct mt76_wcid *wcid,
enum mt76_txq_id qid, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key, int pid,
struct sk_buff *skb)
{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_key_conf *key = info->control.hw_key;
- __le32 *txwi;
- int pid;
+ __le32 *txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE);
- pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
- txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE);
memset(txwi, 0, MT_SDIO_TXD_SIZE);
mt7921_mac_write_txwi(dev, txwi, skb, wcid, key, pid, false);
skb_push(skb, MT_SDIO_TXD_SIZE);
@@ -163,8 +159,9 @@ int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
struct sk_buff *skb = tx_info->skb;
- int pad;
+ int err, pad, pktid;
if (unlikely(tx_info->skb->len <= ETH_HLEN))
return -EINVAL;
@@ -181,12 +178,18 @@ int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
}
}
- mt7921s_write_txwi(dev, wcid, qid, sta, skb);
+ pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
+ mt7921s_write_txwi(dev, wcid, qid, sta, key, pktid, skb);
mt7921_skb_add_sdio_hdr(skb, MT7921_SDIO_DATA);
pad = round_up(skb->len, 4) - skb->len;
- return mt76_skb_adjust_pad(skb, pad);
+ err = mt76_skb_adjust_pad(skb, pad);
+ if (err)
+ /* Release pktid in case of error. */
+ idr_remove(&wcid->pktid, pktid);
+
+ return err;
}
void mt7921s_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 11719ef034d8..6b8c9dc80542 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -173,7 +173,7 @@ mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
if (!(cb->flags & MT_TX_CB_DMA_DONE))
continue;
- if (!time_is_after_jiffies(cb->jiffies +
+ if (time_is_after_jiffies(cb->jiffies +
MT_TX_STATUS_SKB_TIMEOUT))
continue;
}
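
The one-character mt76 fix above corrects an inverted time comparison, so status entries are skipped while they have not yet timed out rather than the other way around. A small userspace sketch of an expiry check (illustrative only; timed_out() and the monotonic-clock arithmetic are not the kernel's jiffies helpers):

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

/* Returns true once 'timeout_ms' has elapsed since 'start'. Getting the
 * comparison direction wrong silently inverts expiry handling. */
static bool timed_out(const struct timespec *start, long timeout_ms)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	long elapsed_ms = (now.tv_sec - start->tv_sec) * 1000L +
			  (now.tv_nsec - start->tv_nsec) / 1000000L;
	return elapsed_ms >= timeout_ms;
}

int main(void)
{
	struct timespec start;

	clock_gettime(CLOCK_MONOTONIC, &start);
	printf("expired yet? %s\n", timed_out(&start, 100) ? "yes" : "no");
	return 0;
}
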
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index e4473a551241..74c3d8cb3100 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -25,6 +25,9 @@ static bool rt2x00usb_check_usb_error(struct rt2x00_dev *rt2x00dev, int status)
if (status == -ENODEV || status == -ENOENT)
return true;
+ if (!test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
+ return false;
+
if (status == -EPROTO || status == -ETIMEDOUT)
rt2x00dev->num_proto_errs++;
else
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 212aaf577d3c..65ef3dc9d061 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -91,7 +91,6 @@ static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
info->section_num = GET_FW_HDR_SEC_NUM(fw);
info->hdr_len = RTW89_FW_HDR_SIZE +
info->section_num * RTW89_FW_SECTION_HDR_SIZE;
- SET_FW_HDR_PART_SIZE(fw, FWDL_SECTION_PER_PKT_LEN);
bin = fw + info->hdr_len;
@@ -275,6 +274,7 @@ static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 l
}
skb_put_data(skb, fw, len);
+ SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN);
rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FWDL,
H2C_FUNC_MAC_FWHDR_DL, len);
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 7ee0d9323310..36e8d0da6c1e 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -282,8 +282,10 @@ struct rtw89_h2creg_sch_tx_en {
le32_get_bits(*((__le32 *)(fwhdr) + 6), GENMASK(15, 8))
#define GET_FW_HDR_CMD_VERSERION(fwhdr) \
le32_get_bits(*((__le32 *)(fwhdr) + 7), GENMASK(31, 24))
-#define SET_FW_HDR_PART_SIZE(fwhdr, val) \
- le32p_replace_bits((__le32 *)(fwhdr) + 7, val, GENMASK(15, 0))
+static inline void SET_FW_HDR_PART_SIZE(void *fwhdr, u32 val)
+{
+ le32p_replace_bits((__le32 *)fwhdr + 7, val, GENMASK(15, 0));
+}
#define SET_CTRL_INFO_MACID(table, val) \
le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0))
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c
index cff3b43ca4d7..12c03dacb5dd 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c
@@ -181,9 +181,9 @@ void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer)
bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
{
struct ipc_mem_channel *channel;
+ bool hpda_ctrl_pending = false;
struct sk_buff_head *ul_list;
bool hpda_pending = false;
- bool forced_hpdu = false;
struct ipc_pipe *pipe;
int i;
@@ -200,15 +200,19 @@ bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
ul_list = &channel->ul_list;
/* Fill the transfer descriptor with the uplink buffer info. */
- hpda_pending |= ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
+ if (!ipc_imem_check_wwan_ips(channel)) {
+ hpda_ctrl_pending |=
+ ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
pipe, ul_list);
-
- /* forced HP update needed for non data channels */
- if (hpda_pending && !ipc_imem_check_wwan_ips(channel))
- forced_hpdu = true;
+ } else {
+ hpda_pending |=
+ ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
+ pipe, ul_list);
+ }
}
- if (forced_hpdu) {
+ /* forced HP update needed for non data channels */
+ if (hpda_ctrl_pending) {
hpda_pending = false;
ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
IPC_HP_UL_WRITE_TD);
@@ -527,6 +531,9 @@ static void ipc_imem_run_state_worker(struct work_struct *instance)
return;
}
+ if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
+ ipc_devlink_deinit(ipc_imem->ipc_devlink);
+
if (!ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg))
ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);
@@ -1167,7 +1174,7 @@ void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
ipc_port_deinit(ipc_imem->ipc_port);
}
- if (ipc_imem->ipc_devlink)
+ if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
ipc_devlink_deinit(ipc_imem->ipc_devlink);
ipc_imem_device_ipc_uninit(ipc_imem);
@@ -1263,7 +1270,6 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
ipc_imem->pci_device_id = device_id;
- ipc_imem->ev_cdev_write_pending = false;
ipc_imem->cp_version = 0;
ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP;
@@ -1331,6 +1337,8 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
if (ipc_flash_link_establish(ipc_imem))
goto devlink_channel_fail;
+
+ set_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag);
}
return ipc_imem;
devlink_channel_fail:
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.h b/drivers/net/wwan/iosm/iosm_ipc_imem.h
index 6be6708b4eec..6b8a837faef2 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.h
@@ -101,6 +101,7 @@ struct ipc_chnl_cfg;
#define IOSM_CHIP_INFO_SIZE_MAX 100
#define FULLY_FUNCTIONAL 0
+#define IOSM_DEVLINK_INIT 1
/* List of the supported UL/DL pipes. */
enum ipc_mem_pipes {
@@ -335,8 +336,6 @@ enum ipc_phase {
* process the irq actions.
* @flag: Flag to monitor the state of driver
* @td_update_timer_suspended: if true then td update timer suspend
- * @ev_cdev_write_pending: 0 means inform the IPC tasklet to pass
- * the accumulated uplink buffers to CP.
* @ev_mux_net_transmit_pending:0 means inform the IPC tasklet to pass
* @reset_det_n: Reset detect flag
* @pcie_wake_n: Pcie wake flag
@@ -374,7 +373,6 @@ struct iosm_imem {
u8 ev_irq_pending[IPC_IRQ_VECTORS];
unsigned long flag;
u8 td_update_timer_suspended:1,
- ev_cdev_write_pending:1,
ev_mux_net_transmit_pending:1,
reset_det_n:1,
pcie_wake_n:1;
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
index 825e8e5ffb2a..831cdae28e8a 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
@@ -41,7 +41,6 @@ void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg,
void *msg, size_t size)
{
- ipc_imem->ev_cdev_write_pending = false;
ipc_imem_ul_send(ipc_imem);
return 0;
@@ -50,11 +49,6 @@ static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg,
/* Through tasklet to do sio write. */
static int ipc_imem_call_cdev_write(struct iosm_imem *ipc_imem)
{
- if (ipc_imem->ev_cdev_write_pending)
- return -1;
-
- ipc_imem->ev_cdev_write_pending = true;
-
return ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_cdev_write, 0,
NULL, 0, false);
}
@@ -450,6 +444,7 @@ void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink)
/* Release the pipe resources */
ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
+ ipc_imem->nr_of_channels--;
}
void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink,
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 4a16d6e33c09..d9dea4829c86 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -203,6 +203,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
unsigned int rx_queue_max;
unsigned int rx_queue_len;
unsigned long last_rx_time;
+ unsigned int rx_slots_needed;
bool stalled;
struct xenvif_copy_state rx_copy;
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
index accc991d153f..dbac4c03d21a 100644
--- a/drivers/net/xen-netback/rx.c
+++ b/drivers/net/xen-netback/rx.c
@@ -33,28 +33,36 @@
#include <xen/xen.h>
#include <xen/events.h>
-static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
+/*
+ * Update the needed ring page slots for the first SKB queued.
+ * Note that any call sequence outside the RX thread calling this function
+ * needs to wake up the RX thread via a call of xenvif_kick_thread()
+ * afterwards in order to avoid a race with putting the thread to sleep.
+ */
+static void xenvif_update_needed_slots(struct xenvif_queue *queue,
+ const struct sk_buff *skb)
{
- RING_IDX prod, cons;
- struct sk_buff *skb;
- int needed;
- unsigned long flags;
-
- spin_lock_irqsave(&queue->rx_queue.lock, flags);
+ unsigned int needed = 0;
- skb = skb_peek(&queue->rx_queue);
- if (!skb) {
- spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
- return false;
+ if (skb) {
+ needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
+ if (skb_is_gso(skb))
+ needed++;
+ if (skb->sw_hash)
+ needed++;
}
- needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
- if (skb_is_gso(skb))
- needed++;
- if (skb->sw_hash)
- needed++;
+ WRITE_ONCE(queue->rx_slots_needed, needed);
+}
- spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
+static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
+{
+ RING_IDX prod, cons;
+ unsigned int needed;
+
+ needed = READ_ONCE(queue->rx_slots_needed);
+ if (!needed)
+ return false;
do {
prod = queue->rx.sring->req_prod;
@@ -80,13 +88,19 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
spin_lock_irqsave(&queue->rx_queue.lock, flags);
- __skb_queue_tail(&queue->rx_queue, skb);
-
- queue->rx_queue_len += skb->len;
- if (queue->rx_queue_len > queue->rx_queue_max) {
+ if (queue->rx_queue_len >= queue->rx_queue_max) {
struct net_device *dev = queue->vif->dev;
netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
+ kfree_skb(skb);
+ queue->vif->dev->stats.rx_dropped++;
+ } else {
+ if (skb_queue_empty(&queue->rx_queue))
+ xenvif_update_needed_slots(queue, skb);
+
+ __skb_queue_tail(&queue->rx_queue, skb);
+
+ queue->rx_queue_len += skb->len;
}
spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
@@ -100,6 +114,8 @@ static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
skb = __skb_dequeue(&queue->rx_queue);
if (skb) {
+ xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));
+
queue->rx_queue_len -= skb->len;
if (queue->rx_queue_len < queue->rx_queue_max) {
struct netdev_queue *txq;
@@ -134,6 +150,7 @@ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
break;
xenvif_rx_dequeue(queue);
kfree_skb(skb);
+ queue->vif->dev->stats.rx_dropped++;
}
}
@@ -487,27 +504,31 @@ void xenvif_rx_action(struct xenvif_queue *queue)
xenvif_rx_copy_flush(queue);
}
-static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
+static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue)
{
RING_IDX prod, cons;
prod = queue->rx.sring->req_prod;
cons = queue->rx.req_cons;
+ return prod - cons;
+}
+
+static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue)
+{
+ unsigned int needed = READ_ONCE(queue->rx_slots_needed);
+
return !queue->stalled &&
- prod - cons < 1 &&
+ xenvif_rx_queue_slots(queue) < needed &&
time_after(jiffies,
queue->last_rx_time + queue->vif->stall_timeout);
}
static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
- RING_IDX prod, cons;
-
- prod = queue->rx.sring->req_prod;
- cons = queue->rx.req_cons;
+ unsigned int needed = READ_ONCE(queue->rx_slots_needed);
- return queue->stalled && prod - cons >= 1;
+ return queue->stalled && xenvif_rx_queue_slots(queue) >= needed;
}
bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
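
The xen-netback change above publishes rx_slots_needed with WRITE_ONCE() from the queueing side and reads it lock-free with READ_ONCE() from the ring-check side. A rough userspace analogue using C11 atomics (illustrative only, not the netback code; slots_needed and the helpers are stand-ins):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic unsigned int slots_needed;

/* Producer publishes how many slots the next queued item will need. */
static void producer_update(unsigned int needed)
{
	atomic_store_explicit(&slots_needed, needed, memory_order_release);
}

/* Consumer reads the published value without taking the queue lock. */
static bool consumer_has_room(unsigned int slots_free)
{
	unsigned int needed =
		atomic_load_explicit(&slots_needed, memory_order_acquire);

	return needed && slots_free >= needed;
}

int main(void)
{
	producer_update(3);
	printf("room with 2 free: %d\n", consumer_has_room(2));
	printf("room with 4 free: %d\n", consumer_has_room(4));
	return 0;
}
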
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 911f43986a8c..d514d96027a6 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -148,6 +148,9 @@ struct netfront_queue {
grant_ref_t gref_rx_head;
grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
+ unsigned int rx_rsp_unconsumed;
+ spinlock_t rx_cons_lock;
+
struct page_pool *page_pool;
struct xdp_rxq_info xdp_rxq;
};
@@ -376,12 +379,13 @@ static int xennet_open(struct net_device *dev)
return 0;
}
-static void xennet_tx_buf_gc(struct netfront_queue *queue)
+static bool xennet_tx_buf_gc(struct netfront_queue *queue)
{
RING_IDX cons, prod;
unsigned short id;
struct sk_buff *skb;
bool more_to_do;
+ bool work_done = false;
const struct device *dev = &queue->info->netdev->dev;
BUG_ON(!netif_carrier_ok(queue->info->netdev));
@@ -398,6 +402,8 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
struct xen_netif_tx_response txrsp;
+ work_done = true;
+
RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
if (txrsp.status == XEN_NETIF_RSP_NULL)
continue;
@@ -441,11 +447,13 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
xennet_maybe_wake_tx(queue);
- return;
+ return work_done;
err:
queue->info->broken = true;
dev_alert(dev, "Disabled for further use\n");
+
+ return work_done;
}
struct xennet_gnttab_make_txreq {
@@ -834,6 +842,16 @@ static int xennet_close(struct net_device *dev)
return 0;
}
+static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->rx_cons_lock, flags);
+ queue->rx.rsp_cons = val;
+ queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
+ spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
+}
+
static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
grant_ref_t ref)
{
@@ -885,7 +903,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
xennet_move_rx_slot(queue, skb, ref);
} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
- queue->rx.rsp_cons = cons;
+ xennet_set_rx_rsp_cons(queue, cons);
return err;
}
@@ -1039,7 +1057,7 @@ next:
}
if (unlikely(err))
- queue->rx.rsp_cons = cons + slots;
+ xennet_set_rx_rsp_cons(queue, cons + slots);
return err;
}
@@ -1093,7 +1111,8 @@ static int xennet_fill_frags(struct netfront_queue *queue,
__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
}
if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
- queue->rx.rsp_cons = ++cons + skb_queue_len(list);
+ xennet_set_rx_rsp_cons(queue,
+ ++cons + skb_queue_len(list));
kfree_skb(nskb);
return -ENOENT;
}
@@ -1106,7 +1125,7 @@ static int xennet_fill_frags(struct netfront_queue *queue,
kfree_skb(nskb);
}
- queue->rx.rsp_cons = cons;
+ xennet_set_rx_rsp_cons(queue, cons);
return 0;
}
@@ -1229,7 +1248,9 @@ err:
if (unlikely(xennet_set_skb_gso(skb, gso))) {
__skb_queue_head(&tmpq, skb);
- queue->rx.rsp_cons += skb_queue_len(&tmpq);
+ xennet_set_rx_rsp_cons(queue,
+ queue->rx.rsp_cons +
+ skb_queue_len(&tmpq));
goto err;
}
}
@@ -1253,7 +1274,8 @@ err:
__skb_queue_tail(&rxq, skb);
- i = ++queue->rx.rsp_cons;
+ i = queue->rx.rsp_cons + 1;
+ xennet_set_rx_rsp_cons(queue, i);
work_done++;
}
if (need_xdp_flush)
@@ -1417,40 +1439,79 @@ static int xennet_set_features(struct net_device *dev,
return 0;
}
-static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
+static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
{
- struct netfront_queue *queue = dev_id;
unsigned long flags;
- if (queue->info->broken)
- return IRQ_HANDLED;
+ if (unlikely(queue->info->broken))
+ return false;
spin_lock_irqsave(&queue->tx_lock, flags);
- xennet_tx_buf_gc(queue);
+ if (xennet_tx_buf_gc(queue))
+ *eoi = 0;
spin_unlock_irqrestore(&queue->tx_lock, flags);
+ return true;
+}
+
+static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
+{
+ unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
+
+ if (likely(xennet_handle_tx(dev_id, &eoiflag)))
+ xen_irq_lateeoi(irq, eoiflag);
+
return IRQ_HANDLED;
}
-static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
+static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
{
- struct netfront_queue *queue = dev_id;
- struct net_device *dev = queue->info->netdev;
+ unsigned int work_queued;
+ unsigned long flags;
- if (queue->info->broken)
- return IRQ_HANDLED;
+ if (unlikely(queue->info->broken))
+ return false;
+
+ spin_lock_irqsave(&queue->rx_cons_lock, flags);
+ work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
+ if (work_queued > queue->rx_rsp_unconsumed) {
+ queue->rx_rsp_unconsumed = work_queued;
+ *eoi = 0;
+ } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
+ const struct device *dev = &queue->info->netdev->dev;
+
+ spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
+ dev_alert(dev, "RX producer index going backwards\n");
+ dev_alert(dev, "Disabled for further use\n");
+ queue->info->broken = true;
+ return false;
+ }
+ spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
- if (likely(netif_carrier_ok(dev) &&
- RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
+ if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
napi_schedule(&queue->napi);
+ return true;
+}
+
+static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
+{
+ unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
+
+ if (likely(xennet_handle_rx(dev_id, &eoiflag)))
+ xen_irq_lateeoi(irq, eoiflag);
+
return IRQ_HANDLED;
}
static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
- xennet_tx_interrupt(irq, dev_id);
- xennet_rx_interrupt(irq, dev_id);
+ unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
+
+ if (xennet_handle_tx(dev_id, &eoiflag) &&
+ xennet_handle_rx(dev_id, &eoiflag))
+ xen_irq_lateeoi(irq, eoiflag);
+
return IRQ_HANDLED;
}
@@ -1768,9 +1829,10 @@ static int setup_netfront_single(struct netfront_queue *queue)
if (err < 0)
goto fail;
- err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
- xennet_interrupt,
- 0, queue->info->netdev->name, queue);
+ err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
+ xennet_interrupt, 0,
+ queue->info->netdev->name,
+ queue);
if (err < 0)
goto bind_fail;
queue->rx_evtchn = queue->tx_evtchn;
@@ -1798,18 +1860,18 @@ static int setup_netfront_split(struct netfront_queue *queue)
snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
"%s-tx", queue->name);
- err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
- xennet_tx_interrupt,
- 0, queue->tx_irq_name, queue);
+ err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
+ xennet_tx_interrupt, 0,
+ queue->tx_irq_name, queue);
if (err < 0)
goto bind_tx_fail;
queue->tx_irq = err;
snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
"%s-rx", queue->name);
- err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
- xennet_rx_interrupt,
- 0, queue->rx_irq_name, queue);
+ err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
+ xennet_rx_interrupt, 0,
+ queue->rx_irq_name, queue);
if (err < 0)
goto bind_rx_fail;
queue->rx_irq = err;
@@ -1911,6 +1973,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
spin_lock_init(&queue->tx_lock);
spin_lock_init(&queue->rx_lock);
+ spin_lock_init(&queue->rx_cons_lock);
timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
index f126ce96a7df..35b32fb90906 100644
--- a/drivers/nfc/st21nfca/i2c.c
+++ b/drivers/nfc/st21nfca/i2c.c
@@ -524,7 +524,8 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
phy->gpiod_ena = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(phy->gpiod_ena)) {
nfc_err(dev, "Unable to get ENABLE GPIO\n");
- return PTR_ERR(phy->gpiod_ena);
+ r = PTR_ERR(phy->gpiod_ena);
+ goto out_free;
}
phy->se_status.is_ese_present =
@@ -535,7 +536,7 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
r = st21nfca_hci_platform_init(phy);
if (r < 0) {
nfc_err(&client->dev, "Unable to reboot st21nfca\n");
- return r;
+ goto out_free;
}
r = devm_request_threaded_irq(&client->dev, client->irq, NULL,
@@ -544,15 +545,23 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
ST21NFCA_HCI_DRIVER_NAME, phy);
if (r < 0) {
nfc_err(&client->dev, "Unable to register IRQ handler\n");
- return r;
+ goto out_free;
}
- return st21nfca_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
- ST21NFCA_FRAME_HEADROOM,
- ST21NFCA_FRAME_TAILROOM,
- ST21NFCA_HCI_LLC_MAX_PAYLOAD,
- &phy->hdev,
- &phy->se_status);
+ r = st21nfca_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
+ ST21NFCA_FRAME_HEADROOM,
+ ST21NFCA_FRAME_TAILROOM,
+ ST21NFCA_HCI_LLC_MAX_PAYLOAD,
+ &phy->hdev,
+ &phy->se_status);
+ if (r)
+ goto out_free;
+
+ return 0;
+
+out_free:
+ kfree_skb(phy->pending_skb);
+ return r;
}
static int st21nfca_hci_i2c_remove(struct i2c_client *client)
@@ -563,6 +572,8 @@ static int st21nfca_hci_i2c_remove(struct i2c_client *client)
if (phy->powered)
st21nfca_hci_i2c_disable(phy);
+ if (phy->pending_skb)
+ kfree_skb(phy->pending_skb);
return 0;
}
diff --git a/drivers/nfc/virtual_ncidev.c b/drivers/nfc/virtual_ncidev.c
index 221fa3bb8705..f577449e4935 100644
--- a/drivers/nfc/virtual_ncidev.c
+++ b/drivers/nfc/virtual_ncidev.c
@@ -202,7 +202,7 @@ static int __init virtual_ncidev_init(void)
miscdev.minor = MISC_DYNAMIC_MINOR;
miscdev.name = "virtual_nci";
miscdev.fops = &virtual_ncidev_fops;
- miscdev.mode = S_IALLUGO;
+ miscdev.mode = 0600;
return misc_register(&miscdev);
}
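For reference, S_IALLUGO expands to 07777 (all permission bits plus setuid/setgid/sticky), so the old mode left the character device node accessible to every user; 0600 restricts it to owner read/write.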
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 4b5de8f5435a..1af8a4513708 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -666,6 +666,7 @@ blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
struct request *rq)
{
if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
+ ctrl->state != NVME_CTRL_DELETING &&
ctrl->state != NVME_CTRL_DEAD &&
!test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
@@ -895,10 +896,19 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
cmnd->write_zeroes.length =
cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
- if (nvme_ns_has_pi(ns))
+
+ if (nvme_ns_has_pi(ns)) {
cmnd->write_zeroes.control = cpu_to_le16(NVME_RW_PRINFO_PRACT);
- else
- cmnd->write_zeroes.control = 0;
+
+ switch (ns->pi_type) {
+ case NVME_NS_DPS_PI_TYPE1:
+ case NVME_NS_DPS_PI_TYPE2:
+ cmnd->write_zeroes.reftag =
+ cpu_to_le32(t10_pi_ref_tag(req));
+ break;
+ }
+ }
+
return BLK_STS_OK;
}
@@ -1740,9 +1750,20 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
*/
if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
return -EINVAL;
- if (ctrl->max_integrity_segments)
- ns->features |=
- (NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
+
+ ns->features |= NVME_NS_EXT_LBAS;
+
+ /*
+ * The current fabrics transport drivers support namespace
+ * metadata formats only if nvme_ns_has_pi() returns true.
+ * Suppress support for all other formats so the namespace will
+ * have a 0 capacity and not be usable through the block stack.
+ *
+ * Note, this check will need to be modified if any drivers
+ * gain the ability to use other metadata formats.
+ */
+ if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns))
+ ns->features |= NVME_NS_METADATA_SUPPORTED;
} else {
/*
* For PCIe controllers, we can't easily remap the separate
@@ -2469,6 +2490,20 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
.vid = 0x14a4,
.fr = "22301111",
.quirks = NVME_QUIRK_SIMPLE_SUSPEND,
+ },
+ {
+ /*
+ * This Kioxia CD6-V Series / HPE PE8030 device times out and
+ * aborts I/O during any load; the issue is most easily reproduced
+ * with discards (fstrim).
+ *
+ * The device is left in a state where it is also not possible
+ * to use "nvme set-feature" to disable APST, but booting with
+ * nvme_core.default_ps_max_latency=0 works.
+ */
+ .vid = 0x1e0f,
+ .mn = "KCD6XVUL6T40",
+ .quirks = NVME_QUIRK_NO_APST,
}
};
@@ -2673,8 +2708,9 @@ static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
if (tmp->cntlid == ctrl->cntlid) {
dev_err(ctrl->device,
- "Duplicate cntlid %u with %s, rejecting\n",
- ctrl->cntlid, dev_name(tmp->device));
+ "Duplicate cntlid %u with %s, subsys %s, rejecting\n",
+ ctrl->cntlid, dev_name(tmp->device),
+ subsys->subnqn);
return false;
}
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index c5a2b71c5268..282d54117e0a 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -698,6 +698,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
if (token >= 0)
pr_warn("I/O fail on reconnect controller after %d sec\n",
token);
+ else
+ token = -1;
+
opts->fast_io_fail_tmo = token;
break;
case NVMF_OPT_HOSTNQN:
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 7f2071f2460c..13e5d503ed07 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -866,7 +866,7 @@ int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
}
if (ana_log_size > ctrl->ana_log_size) {
nvme_mpath_stop(ctrl);
- kfree(ctrl->ana_log_buf);
+ nvme_mpath_uninit(ctrl);
ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL);
if (!ctrl->ana_log_buf)
return -ENOMEM;
@@ -886,4 +886,5 @@ void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
kfree(ctrl->ana_log_buf);
ctrl->ana_log_buf = NULL;
+ ctrl->ana_log_size = 0;
}
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index b334af8aa264..9b095ee01364 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -709,7 +709,7 @@ static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
return true;
if (ctrl->ops->flags & NVME_F_FABRICS &&
ctrl->state == NVME_CTRL_DELETING)
- return true;
+ return queue_live;
return __nvme_check_ready(ctrl, rq, queue_live);
}
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 33bc83d8d992..4ceb28675fdf 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -572,7 +572,7 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
return ret;
}
-static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
+static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
struct nvme_tcp_r2t_pdu *pdu)
{
struct nvme_tcp_data_pdu *data = req->pdu;
@@ -581,32 +581,11 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
u8 hdgst = nvme_tcp_hdgst_len(queue);
u8 ddgst = nvme_tcp_ddgst_len(queue);
+ req->state = NVME_TCP_SEND_H2C_PDU;
+ req->offset = 0;
req->pdu_len = le32_to_cpu(pdu->r2t_length);
req->pdu_sent = 0;
- if (unlikely(!req->pdu_len)) {
- dev_err(queue->ctrl->ctrl.device,
- "req %d r2t len is %u, probably a bug...\n",
- rq->tag, req->pdu_len);
- return -EPROTO;
- }
-
- if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
- dev_err(queue->ctrl->ctrl.device,
- "req %d r2t len %u exceeded data len %u (%zu sent)\n",
- rq->tag, req->pdu_len, req->data_len,
- req->data_sent);
- return -EPROTO;
- }
-
- if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
- dev_err(queue->ctrl->ctrl.device,
- "req %d unexpected r2t offset %u (expected %zu)\n",
- rq->tag, le32_to_cpu(pdu->r2t_offset),
- req->data_sent);
- return -EPROTO;
- }
-
memset(data, 0, sizeof(*data));
data->hdr.type = nvme_tcp_h2c_data;
data->hdr.flags = NVME_TCP_F_DATA_LAST;
@@ -622,7 +601,6 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
data->command_id = nvme_cid(rq);
data->data_offset = pdu->r2t_offset;
data->data_length = cpu_to_le32(req->pdu_len);
- return 0;
}
static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
@@ -630,7 +608,7 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
{
struct nvme_tcp_request *req;
struct request *rq;
- int ret;
+ u32 r2t_length = le32_to_cpu(pdu->r2t_length);
rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
if (!rq) {
@@ -641,13 +619,28 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
}
req = blk_mq_rq_to_pdu(rq);
- ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
- if (unlikely(ret))
- return ret;
+ if (unlikely(!r2t_length)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "req %d r2t len is %u, probably a bug...\n",
+ rq->tag, r2t_length);
+ return -EPROTO;
+ }
- req->state = NVME_TCP_SEND_H2C_PDU;
- req->offset = 0;
+ if (unlikely(req->data_sent + r2t_length > req->data_len)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "req %d r2t len %u exceeded data len %u (%zu sent)\n",
+ rq->tag, r2t_length, req->data_len, req->data_sent);
+ return -EPROTO;
+ }
+ if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "req %d unexpected r2t offset %u (expected %zu)\n",
+ rq->tag, le32_to_cpu(pdu->r2t_offset), req->data_sent);
+ return -EPROTO;
+ }
+
+ nvme_tcp_setup_h2c_data_pdu(req, pdu);
nvme_tcp_queue_request(req, false, true);
return 0;
@@ -1232,6 +1225,7 @@ static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
{
+ struct page *page;
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
@@ -1241,6 +1235,11 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
if (queue->hdr_digest || queue->data_digest)
nvme_tcp_free_crypto(queue);
+ if (queue->pf_cache.va) {
+ page = virt_to_head_page(queue->pf_cache.va);
+ __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
+ queue->pf_cache.va = NULL;
+ }
sock_release(queue->sock);
kfree(queue->pdu);
mutex_destroy(&queue->send_mutex);
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index bfc259e0d7b8..9f81beb4df4e 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -166,7 +166,10 @@ static int nvme_zone_parse_entry(struct nvme_ns *ns,
zone.len = ns->zsze;
zone.capacity = nvme_lba_to_sect(ns, le64_to_cpu(entry->zcap));
zone.start = nvme_lba_to_sect(ns, le64_to_cpu(entry->zslba));
- zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp));
+ if (zone.cond == BLK_ZONE_COND_FULL)
+ zone.wp = zone.start + zone.len;
+ else
+ zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp));
return cb(&zone, idx, data);
}
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 6aa30f30b572..6be6e59d273b 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -8,6 +8,7 @@
#include <linux/uio.h>
#include <linux/falloc.h>
#include <linux/file.h>
+#include <linux/fs.h>
#include "nvmet.h"
#define NVMET_MAX_MPOOL_BVEC 16
@@ -266,7 +267,8 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
if (req->ns->buffered_io) {
if (likely(!req->f.mpool_alloc) &&
- nvmet_file_execute_io(req, IOCB_NOWAIT))
+ (req->ns->file->f_mode & FMODE_NOWAIT) &&
+ nvmet_file_execute_io(req, IOCB_NOWAIT))
return;
nvmet_file_submit_buffered_io(req);
} else
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 84c387e4bf43..7c1c43ce466b 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -166,6 +166,8 @@ static struct workqueue_struct *nvmet_tcp_wq;
static const struct nvmet_fabrics_ops nvmet_tcp_ops;
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
+static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
+static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd);
static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
struct nvmet_tcp_cmd *cmd)
@@ -297,6 +299,16 @@ static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
return 0;
}
+static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
+{
+ WARN_ON(unlikely(cmd->nr_mapped > 0));
+
+ kfree(cmd->iov);
+ sgl_free(cmd->req.sg);
+ cmd->iov = NULL;
+ cmd->req.sg = NULL;
+}
+
static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
struct scatterlist *sg;
@@ -306,6 +318,8 @@ static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
for (i = 0; i < cmd->nr_mapped; i++)
kunmap(sg_page(&sg[i]));
+
+ cmd->nr_mapped = 0;
}
static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
@@ -387,7 +401,7 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
return 0;
err:
- sgl_free(cmd->req.sg);
+ nvmet_tcp_free_cmd_buffers(cmd);
return NVME_SC_INTERNAL;
}
@@ -632,10 +646,8 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
}
}
- if (queue->nvme_sq.sqhd_disabled) {
- kfree(cmd->iov);
- sgl_free(cmd->req.sg);
- }
+ if (queue->nvme_sq.sqhd_disabled)
+ nvmet_tcp_free_cmd_buffers(cmd);
return 1;
@@ -664,8 +676,7 @@ static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
if (left)
return -EAGAIN;
- kfree(cmd->iov);
- sgl_free(cmd->req.sg);
+ nvmet_tcp_free_cmd_buffers(cmd);
cmd->queue->snd_cmd = NULL;
nvmet_tcp_put_cmd(cmd);
return 1;
@@ -700,10 +711,11 @@ static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
struct nvmet_tcp_queue *queue = cmd->queue;
+ int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
struct kvec iov = {
.iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
- .iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
+ .iov_len = left
};
int ret;
@@ -717,6 +729,10 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
return ret;
cmd->offset += ret;
+ left -= ret;
+
+ if (left)
+ return -EAGAIN;
if (queue->nvme_sq.sqhd_disabled) {
cmd->queue->snd_cmd = NULL;
@@ -906,7 +922,14 @@ static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
int ret;
- if (!nvme_is_write(cmd->req.cmd) ||
+ /*
+ * This command has not been processed yet, hence we are trying to
+ * figure out if there is still pending data left to receive. If
+ * there is none, we can simply prepare for the next pdu and bail
+ * out; otherwise we will need to prepare a buffer and receive the
+ * stale data before continuing forward.
+ */
+ if (!nvme_is_write(cmd->req.cmd) || !data_len ||
data_len > cmd->req.port->inline_data_size) {
nvmet_prepare_receive_pdu(queue);
return;
@@ -1406,8 +1429,7 @@ static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
{
nvmet_req_uninit(&cmd->req);
nvmet_tcp_unmap_pdu_iovec(cmd);
- kfree(cmd->iov);
- sgl_free(cmd->req.sg);
+ nvmet_tcp_free_cmd_buffers(cmd);
}
static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
@@ -1417,7 +1439,10 @@ static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
for (i = 0; i < queue->nr_cmds; i++, cmd++) {
if (nvmet_tcp_need_data_in(cmd))
- nvmet_tcp_finish_cmd(cmd);
+ nvmet_req_uninit(&cmd->req);
+
+ nvmet_tcp_unmap_pdu_iovec(cmd);
+ nvmet_tcp_free_cmd_buffers(cmd);
}
if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
@@ -1437,7 +1462,9 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
mutex_unlock(&nvmet_tcp_queue_mutex);
nvmet_tcp_restore_socket_callbacks(queue);
- flush_work(&queue->io_work);
+ cancel_work_sync(&queue->io_work);
+ /* stop accepting incoming data */
+ queue->rcv_state = NVMET_TCP_RECV_ERR;
nvmet_tcp_uninit_data_in_cmds(queue);
nvmet_sq_destroy(&queue->nvme_sq);
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index b10f015b2e37..2b07677a386b 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -76,6 +76,26 @@ struct device_node *of_irq_find_parent(struct device_node *child)
}
EXPORT_SYMBOL_GPL(of_irq_find_parent);
+/*
+ * These interrupt controllers abuse interrupt-map for unspeakable
+ * reasons and rely on the core code to *ignore* it (the drivers do
+ * their own parsing of the property).
+ *
+ * If you think of adding to the list for something *new*, think
+ * again. There is a high chance that you will be sent back to the
+ * drawing board.
+ */
+static const char * const of_irq_imap_abusers[] = {
+ "CBEA,platform-spider-pic",
+ "sti,platform-spider-pic",
+ "realtek,rtl-intc",
+ "fsl,ls1021a-extirq",
+ "fsl,ls1043a-extirq",
+ "fsl,ls1088a-extirq",
+ "renesas,rza1-irqc",
+ NULL,
+};
+
/**
* of_irq_parse_raw - Low level interrupt tree parsing
* @addr: address specifier (start of "reg" property of the device) in be32 format
@@ -159,12 +179,15 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
/*
* Now check if cursor is an interrupt-controller and
* if it is then we are done, unless there is an
- * interrupt-map which takes precedence.
+ * interrupt-map which takes precedence except on one
+ * of these broken platforms that want to parse
+ * interrupt-map themselves for $reason.
*/
bool intc = of_property_read_bool(ipar, "interrupt-controller");
imap = of_get_property(ipar, "interrupt-map", &imaplen);
- if (imap == NULL && intc) {
+ if (intc &&
+ (!imap || of_device_compatible_match(ipar, of_irq_imap_abusers))) {
pr_debug(" -> got it !\n");
return 0;
}
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index b3faf89744aa..793350028906 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -540,6 +540,10 @@ static int __init of_platform_default_populate_init(void)
of_node_put(node);
}
+ node = of_get_compatible_child(of_chosen, "simple-framebuffer");
+ of_platform_device_create(node, NULL, NULL);
+ of_node_put(node);
+
/* Populate everything else. */
of_platform_default_populate(NULL, NULL, NULL);
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 93b141110537..7fc5135ffbbf 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -332,8 +332,8 @@ config PCIE_APPLE
If unsure, say Y if you have an Apple Silicon system.
config PCIE_MT7621
- tristate "MediaTek MT7621 PCIe Controller"
- depends on (RALINK && SOC_MT7621) || (MIPS && COMPILE_TEST)
+ bool "MediaTek MT7621 PCIe Controller"
+ depends on SOC_MT7621 || (MIPS && COMPILE_TEST)
select PHY_MT7621_PCI
default SOC_MT7621
help
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index c24dab383654..722dacdd5a17 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
+#include <linux/module.h>
#include "pcie-designware.h"
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index 7b17da2f9b3f..cfe66bf04c1d 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -18,6 +18,7 @@
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/reset.h>
+#include <linux/module.h>
#include "pcie-designware.h"
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index c5300d49807a..c3b725afa11f 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -32,7 +32,6 @@
#define PCIE_CORE_DEV_ID_REG 0x0
#define PCIE_CORE_CMD_STATUS_REG 0x4
#define PCIE_CORE_DEV_REV_REG 0x8
-#define PCIE_CORE_EXP_ROM_BAR_REG 0x30
#define PCIE_CORE_PCIEXP_CAP 0xc0
#define PCIE_CORE_ERR_CAPCTL_REG 0x118
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5)
@@ -774,10 +773,6 @@ advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
*value = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
return PCI_BRIDGE_EMUL_HANDLED;
- case PCI_ROM_ADDRESS1:
- *value = advk_readl(pcie, PCIE_CORE_EXP_ROM_BAR_REG);
- return PCI_BRIDGE_EMUL_HANDLED;
-
case PCI_INTERRUPT_LINE: {
/*
* From the whole 32bit register we support reading from HW only
@@ -810,10 +805,6 @@ advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
advk_writel(pcie, new, PCIE_CORE_CMD_STATUS_REG);
break;
- case PCI_ROM_ADDRESS1:
- advk_writel(pcie, new, PCIE_CORE_EXP_ROM_BAR_REG);
- break;
-
case PCI_INTERRUPT_LINE:
if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG);
diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
index 1bf4d75b61be..b090924b41fe 100644
--- a/drivers/pci/controller/pcie-apple.c
+++ b/drivers/pci/controller/pcie-apple.c
@@ -516,7 +516,7 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
int ret, i;
reset = gpiod_get_from_of_node(np, "reset-gpios", 0,
- GPIOD_OUT_LOW, "#PERST");
+ GPIOD_OUT_LOW, "PERST#");
if (IS_ERR(reset))
return PTR_ERR(reset);
@@ -539,12 +539,22 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK);
+ /* Assert PERST# before setting up the clock */
+ gpiod_set_value(reset, 1);
+
ret = apple_pcie_setup_refclk(pcie, port);
if (ret < 0)
return ret;
+ /* The minimal Tperst-clk value is 100us (PCIe CEM r5.0, 2.9.2) */
+ usleep_range(100, 200);
+
+ /* Deassert PERST# */
rmw_set(PORT_PERST_OFF, port->base + PORT_PERST);
- gpiod_set_value(reset, 1);
+ gpiod_set_value(reset, 0);
+
+ /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
+ msleep(100);
ret = readl_relaxed_poll_timeout(port->base + PORT_STATUS, stat,
stat & PORT_STATUS_READY, 100, 250000);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 48e3f4e47b29..d84cf30bb279 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -722,9 +722,6 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
goto out_disable;
}
- /* Ensure that all table entries are masked. */
- msix_mask_all(base, tsize);
-
ret = msix_setup_entries(dev, base, entries, nvec, affd);
if (ret)
goto out_disable;
@@ -751,6 +748,16 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
/* Set MSI-X enabled bits and unmask the function */
pci_intx_for_msi(dev, 0);
dev->msix_enabled = 1;
+
+ /*
+ * Ensure that all table entries are masked to prevent
+ * stale entries from firing in a crash kernel.
+ *
+ * Done late to deal with a broken Marvell NVME device
+ * which takes the MSI-X mask bits into account even
+ * when MSI-X is disabled, which prevents MSI delivery.
+ */
+ msix_mask_all(base, tsize);
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
pcibios_free_irq(dev);
@@ -777,7 +784,7 @@ out_free:
free_msi_irqs(dev);
out_disable:
- pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE, 0);
return ret;
}
diff --git a/drivers/phy/hisilicon/phy-hi3670-pcie.c b/drivers/phy/hisilicon/phy-hi3670-pcie.c
index c64c6679b1b9..0ac9634b398d 100644
--- a/drivers/phy/hisilicon/phy-hi3670-pcie.c
+++ b/drivers/phy/hisilicon/phy-hi3670-pcie.c
@@ -757,8 +757,8 @@ static int hi3670_pcie_phy_get_resources(struct hi3670_pcie_phy *phy,
return PTR_ERR(phy->sysctrl);
phy->pmctrl = syscon_regmap_lookup_by_compatible("hisilicon,hi3670-pmctrl");
- if (IS_ERR(phy->sysctrl))
- return PTR_ERR(phy->sysctrl);
+ if (IS_ERR(phy->pmctrl))
+ return PTR_ERR(phy->pmctrl);
/* clocks */
phy->phy_ref_clk = devm_clk_get(dev, "phy_ref");
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-utmi.c b/drivers/phy/marvell/phy-mvebu-cp110-utmi.c
index 08d178a4dc13..aa27c7994610 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-utmi.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-utmi.c
@@ -82,9 +82,9 @@
* struct mvebu_cp110_utmi - PHY driver data
*
* @regs: PHY registers
- * @syscom: Regmap with system controller registers
+ * @syscon: Regmap with system controller registers
* @dev: device driver handle
- * @caps: PHY capabilities
+ * @ops: phy ops
*/
struct mvebu_cp110_utmi {
void __iomem *regs;
diff --git a/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c b/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
index bfff0c8c9130..fec1da470d26 100644
--- a/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
@@ -127,12 +127,13 @@ struct phy_drvdata {
};
/**
- * Write register and read back masked value to confirm it is written
+ * usb_phy_write_readback() - Write register and read back masked value to
+ * confirm it is written
*
- * @base - QCOM DWC3 PHY base virtual address.
- * @offset - register offset.
- * @mask - register bitmask specifying what should be updated
- * @val - value to write.
+ * @phy_dwc3: QCOM DWC3 phy context
+ * @offset: register offset.
+ * @mask: register bitmask specifying what should be updated
+ * @val: value to write.
*/
static inline void usb_phy_write_readback(struct usb_phy *phy_dwc3,
u32 offset,
@@ -171,11 +172,11 @@ static int wait_for_latch(void __iomem *addr)
}
/**
- * Write SSPHY register
+ * usb_ss_write_phycreg() - Write SSPHY register
*
- * @base - QCOM DWC3 PHY base virtual address.
- * @addr - SSPHY address to write.
- * @val - value to write.
+ * @phy_dwc3: QCOM DWC3 phy context
+ * @addr: SSPHY address to write.
+ * @val: value to write.
*/
static int usb_ss_write_phycreg(struct usb_phy *phy_dwc3,
u32 addr, u32 val)
@@ -209,10 +210,11 @@ err_wait:
}
/**
- * Read SSPHY register.
+ * usb_ss_read_phycreg() - Read SSPHY register.
*
- * @base - QCOM DWC3 PHY base virtual address.
- * @addr - SSPHY address to read.
+ * @phy_dwc3: QCOM DWC3 phy context
+ * @addr: SSPHY address to read.
+ * @val: pointer in which the read value is stored.
*/
static int usb_ss_read_phycreg(struct usb_phy *phy_dwc3,
u32 addr, u32 *val)
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 456a59d8c7d0..c96639d5f581 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -2973,6 +2973,9 @@ struct qmp_phy_combo_cfg {
* @qmp: QMP phy to which this lane belongs
* @lane_rst: lane's reset controller
* @mode: current PHY mode
+ * @dp_aux_cfg: Display port aux config
+ * @dp_opts: Display port optional config
+ * @dp_clks: Display port clocks
*/
struct qmp_phy {
struct phy *phy;
diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hsic.c b/drivers/phy/qualcomm/phy-qcom-usb-hsic.c
index 04d18d52f700..716a77748ed8 100644
--- a/drivers/phy/qualcomm/phy-qcom-usb-hsic.c
+++ b/drivers/phy/qualcomm/phy-qcom-usb-hsic.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* Copyright (C) 2016 Linaro Ltd
*/
#include <linux/module.h>
diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c
index 7df6a63ad37b..e4f4a9be5132 100644
--- a/drivers/phy/st/phy-stm32-usbphyc.c
+++ b/drivers/phy/st/phy-stm32-usbphyc.c
@@ -478,7 +478,7 @@ static void stm32_usbphyc_phy_tuning(struct stm32_usbphyc *usbphyc,
if (!of_property_read_bool(np, "st,no-lsfs-fb-cap"))
usbphyc_phy->tune |= LFSCAPEN;
- if (of_property_read_bool(np, "st,slow-hs-slew-rate"))
+ if (of_property_read_bool(np, "st,decrease-hs-slew-rate"))
usbphyc_phy->tune |= HSDRVSLEW;
ret = of_property_read_u32(np, "st,tune-hs-dc-level", &val);
diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c
index 2ff56ce77b30..c1211c4f863c 100644
--- a/drivers/phy/ti/phy-am654-serdes.c
+++ b/drivers/phy/ti/phy-am654-serdes.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* PCIe SERDES driver for AM654x SoC
*
* Copyright (C) 2018 - 2019 Texas Instruments Incorporated - http://www.ti.com/
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
index 126f5b8735cc..b3384c31637a 100644
--- a/drivers/phy/ti/phy-j721e-wiz.c
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* Wrapper driver for SERDES used in J721E
*
* Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c
index ebceb1520ce8..3a505fe5715a 100644
--- a/drivers/phy/ti/phy-omap-usb2.c
+++ b/drivers/phy/ti/phy-omap-usb2.c
@@ -89,9 +89,9 @@ static inline void omap_usb_writel(void __iomem *addr, unsigned int offset,
}
/**
- * omap_usb2_set_comparator - links the comparator present in the system with
- * this phy
- * @comparator - the companion phy(comparator) for this phy
+ * omap_usb2_set_comparator() - links the comparator present in the system with this phy
+ *
+ * @comparator: the companion phy(comparator) for this phy
*
* The phy companion driver should call this API passing the phy_companion
* filled with set_vbus and start_srp to be used by usb phy.
diff --git a/drivers/phy/ti/phy-tusb1210.c b/drivers/phy/ti/phy-tusb1210.c
index a63213f5972a..15c1c79e5c29 100644
--- a/drivers/phy/ti/phy-tusb1210.c
+++ b/drivers/phy/ti/phy-tusb1210.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* tusb1210.c - TUSB1210 USB ULPI PHY driver
*
* Copyright (C) 2015 Intel Corporation
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 2abcc6ce4eba..b607d10e4cbd 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -1244,6 +1244,18 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
raw_spin_lock_init(&pc->irq_lock[i]);
}
+ pc->pctl_desc = *pdata->pctl_desc;
+ pc->pctl_dev = devm_pinctrl_register(dev, &pc->pctl_desc, pc);
+ if (IS_ERR(pc->pctl_dev)) {
+ gpiochip_remove(&pc->gpio_chip);
+ return PTR_ERR(pc->pctl_dev);
+ }
+
+ pc->gpio_range = *pdata->gpio_range;
+ pc->gpio_range.base = pc->gpio_chip.base;
+ pc->gpio_range.gc = &pc->gpio_chip;
+ pinctrl_add_gpio_range(pc->pctl_dev, &pc->gpio_range);
+
girq = &pc->gpio_chip.irq;
girq->chip = &bcm2835_gpio_irq_chip;
girq->parent_handler = bcm2835_gpio_irq_handler;
@@ -1251,8 +1263,10 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
girq->parents = devm_kcalloc(dev, BCM2835_NUM_IRQS,
sizeof(*girq->parents),
GFP_KERNEL);
- if (!girq->parents)
+ if (!girq->parents) {
+ pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range);
return -ENOMEM;
+ }
if (is_7211) {
pc->wake_irq = devm_kcalloc(dev, BCM2835_NUM_IRQS,
@@ -1307,21 +1321,10 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
err = gpiochip_add_data(&pc->gpio_chip, pc);
if (err) {
dev_err(dev, "could not add GPIO chip\n");
+ pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range);
return err;
}
- pc->pctl_desc = *pdata->pctl_desc;
- pc->pctl_dev = devm_pinctrl_register(dev, &pc->pctl_desc, pc);
- if (IS_ERR(pc->pctl_dev)) {
- gpiochip_remove(&pc->gpio_chip);
- return PTR_ERR(pc->pctl_dev);
- }
-
- pc->gpio_range = *pdata->gpio_range;
- pc->gpio_range.base = pc->gpio_chip.base;
- pc->gpio_range.gc = &pc->gpio_chip;
- pinctrl_add_gpio_range(pc->pctl_dev, &pc->gpio_range);
-
return 0;
}
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
index 91553b2fc160..53779822348d 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
@@ -285,8 +285,12 @@ static int mtk_xt_get_gpio_n(void *data, unsigned long eint_n,
desc = (const struct mtk_pin_desc *)hw->soc->pins;
*gpio_chip = &hw->chip;
- /* Be greedy to guess first gpio_n is equal to eint_n */
- if (desc[eint_n].eint.eint_n == eint_n)
+ /*
+ * Be greedy and first guess that gpio_n is equal to eint_n.
+ * Only virtual eint numbers are greater than the number of gpios.
+ */
+ if (hw->soc->npins > eint_n &&
+ desc[eint_n].eint.eint_n == eint_n)
*gpio_n = eint_n;
else
*gpio_n = mtk_xt_find_eint_num(hw, eint_n);
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 24764ebcc936..9ed764731570 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -1251,10 +1251,10 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
bank_nr = args.args[1] / STM32_GPIO_PINS_PER_BANK;
bank->gpio_chip.base = args.args[1];
- npins = args.args[2];
- while (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3,
- ++i, &args))
- npins += args.args[2];
+ /* get the last defined gpio line (offset + nb of pins) */
+ npins = args.args[0] + args.args[2];
+ while (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, ++i, &args))
+ npins = max(npins, (int)(args.args[0] + args.args[2]));
} else {
bank_nr = pctl->nbanks;
bank->gpio_chip.base = bank_nr * STM32_GPIO_PINS_PER_BANK;
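The gpio-ranges fix above stops summing the per-range pin counts and instead tracks the highest GPIO line any range defines. With hypothetical ranges (gpio offset 0, 2 pins) and (gpio offset 6, 10 pins), the old code computed npins = 2 + 10 = 12, while the new code computes npins = max(0 + 2, 6 + 10) = 16, matching the last line actually defined when the ranges leave a hole.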
diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c
index 9d1e7e03628e..4020b8354bae 100644
--- a/drivers/platform/chrome/cros_ec_ishtp.c
+++ b/drivers/platform/chrome/cros_ec_ishtp.c
@@ -41,9 +41,12 @@ enum cros_ec_ish_channel {
#define ISHTP_SEND_TIMEOUT (3 * HZ)
/* ISH Transport CrOS EC ISH client unique GUID */
-static const guid_t cros_ish_guid =
- GUID_INIT(0x7b7154d0, 0x56f4, 0x4bdc,
- 0xb0, 0xd8, 0x9e, 0x7c, 0xda, 0xe0, 0xd6, 0xa0);
+static const struct ishtp_device_id cros_ec_ishtp_id_table[] = {
+ { .guid = GUID_INIT(0x7b7154d0, 0x56f4, 0x4bdc,
+ 0xb0, 0xd8, 0x9e, 0x7c, 0xda, 0xe0, 0xd6, 0xa0), },
+ { }
+};
+MODULE_DEVICE_TABLE(ishtp, cros_ec_ishtp_id_table);
struct header {
u8 channel;
@@ -389,7 +392,7 @@ static int cros_ish_init(struct ishtp_cl *cros_ish_cl)
ishtp_set_tx_ring_size(cros_ish_cl, CROS_ISH_CL_TX_RING_SIZE);
ishtp_set_rx_ring_size(cros_ish_cl, CROS_ISH_CL_RX_RING_SIZE);
- fw_client = ishtp_fw_cl_get_client(dev, &cros_ish_guid);
+ fw_client = ishtp_fw_cl_get_client(dev, &cros_ec_ishtp_id_table[0].guid);
if (!fw_client) {
dev_err(cl_data_to_dev(client_data),
"ish client uuid not found\n");
@@ -765,7 +768,7 @@ static SIMPLE_DEV_PM_OPS(cros_ec_ishtp_pm_ops, cros_ec_ishtp_suspend,
static struct ishtp_cl_driver cros_ec_ishtp_driver = {
.name = "cros_ec_ishtp",
- .guid = &cros_ish_guid,
+ .id = cros_ec_ishtp_id_table,
.probe = cros_ec_ishtp_probe,
.remove = cros_ec_ishtp_remove,
.reset = cros_ec_ishtp_reset,
@@ -791,4 +794,3 @@ MODULE_DESCRIPTION("ChromeOS EC ISHTP Client Driver");
MODULE_AUTHOR("Rushikesh S Kadam <rushikesh.s.kadam@intel.com>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("ishtp:*");
diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
index 04bc3b50aa7a..65b4a819f1bd 100644
--- a/drivers/platform/mellanox/mlxbf-pmc.c
+++ b/drivers/platform/mellanox/mlxbf-pmc.c
@@ -1374,8 +1374,8 @@ static int mlxbf_pmc_map_counters(struct device *dev)
pmc->block[i].counters = info[2];
pmc->block[i].type = info[3];
- if (IS_ERR(pmc->block[i].mmio_base))
- return PTR_ERR(pmc->block[i].mmio_base);
+ if (!pmc->block[i].mmio_base)
+ return -ENOMEM;
ret = mlxbf_pmc_create_groups(dev, i);
if (ret)
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 219478061683..253a096b5dd8 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -68,7 +68,7 @@ obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
obj-$(CONFIG_THINKPAD_LMI) += think-lmi.o
# Intel
-obj-$(CONFIG_X86_PLATFORM_DRIVERS_INTEL) += intel/
+obj-y += intel/
# MSI
obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
index b7e50ed050a8..230593ae5d6d 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd-pmc.c
@@ -76,7 +76,7 @@
#define AMD_CPU_ID_CZN AMD_CPU_ID_RN
#define AMD_CPU_ID_YC 0x14B5
-#define PMC_MSG_DELAY_MIN_US 100
+#define PMC_MSG_DELAY_MIN_US 50
#define RESPONSE_REGISTER_LOOP_MAX 20000
#define SOC_SUBSYSTEM_IP_MAX 12
@@ -508,7 +508,8 @@ static int __maybe_unused amd_pmc_resume(struct device *dev)
}
static const struct dev_pm_ops amd_pmc_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(amd_pmc_suspend, amd_pmc_resume)
+ .suspend_noirq = amd_pmc_suspend,
+ .resume_noirq = amd_pmc_resume,
};
static const struct pci_device_id pmc_pci_ids[] = {
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index 9aae45a45200..57553f9b4d1d 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -625,7 +625,7 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
}
gmux_data->iostart = res->start;
- gmux_data->iolen = res->end - res->start;
+ gmux_data->iolen = resource_size(res);
if (gmux_data->iolen < GMUX_MIN_IO_LEN) {
pr_err("gmux I/O region too small (%lu < %u)\n",
diff --git a/drivers/platform/x86/intel/Kconfig b/drivers/platform/x86/intel/Kconfig
index 38ce3e344589..40096b25994a 100644
--- a/drivers/platform/x86/intel/Kconfig
+++ b/drivers/platform/x86/intel/Kconfig
@@ -3,19 +3,6 @@
# Intel x86 Platform Specific Drivers
#
-menuconfig X86_PLATFORM_DRIVERS_INTEL
- bool "Intel x86 Platform Specific Device Drivers"
- default y
- help
- Say Y here to get to see options for device drivers for
- various Intel x86 platforms, including vendor-specific
- drivers. This option alone does not add any kernel code.
-
- If you say N, all options in this submenu will be skipped
- and disabled.
-
-if X86_PLATFORM_DRIVERS_INTEL
-
source "drivers/platform/x86/intel/atomisp2/Kconfig"
source "drivers/platform/x86/intel/int1092/Kconfig"
source "drivers/platform/x86/intel/int33fe/Kconfig"
@@ -183,5 +170,3 @@ config INTEL_UNCORE_FREQ_CONTROL
To compile this driver as a module, choose M here: the module
will be called intel-uncore-frequency.
-
-endif # X86_PLATFORM_DRIVERS_INTEL
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index 08598942a6d7..13f8cf70b9ae 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -99,6 +99,13 @@ static const struct dmi_system_id button_array_table[] = {
DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Tablet Gen 2"),
},
},
+ {
+ .ident = "Microsoft Surface Go 3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"),
+ },
+ },
{ }
};
diff --git a/drivers/platform/x86/intel/ishtp_eclite.c b/drivers/platform/x86/intel/ishtp_eclite.c
index 12fc98a48657..93ac8b2dbf38 100644
--- a/drivers/platform/x86/intel/ishtp_eclite.c
+++ b/drivers/platform/x86/intel/ishtp_eclite.c
@@ -93,9 +93,12 @@ struct ishtp_opregion_dev {
};
/* eclite ishtp client UUID: 6a19cc4b-d760-4de3-b14d-f25ebd0fbcd9 */
-static const guid_t ecl_ishtp_guid =
- GUID_INIT(0x6a19cc4b, 0xd760, 0x4de3,
- 0xb1, 0x4d, 0xf2, 0x5e, 0xbd, 0xf, 0xbc, 0xd9);
+static const struct ishtp_device_id ecl_ishtp_id_table[] = {
+ { .guid = GUID_INIT(0x6a19cc4b, 0xd760, 0x4de3,
+ 0xb1, 0x4d, 0xf2, 0x5e, 0xbd, 0xf, 0xbc, 0xd9), },
+ { }
+};
+MODULE_DEVICE_TABLE(ishtp, ecl_ishtp_id_table);
/* ACPI DSM UUID: 91d936a7-1f01-49c6-a6b4-72f00ad8d8a5 */
static const guid_t ecl_acpi_guid =
@@ -462,7 +465,7 @@ static int ecl_ishtp_cl_init(struct ishtp_cl *ecl_ishtp_cl)
ishtp_set_tx_ring_size(ecl_ishtp_cl, ECL_CL_TX_RING_SIZE);
ishtp_set_rx_ring_size(ecl_ishtp_cl, ECL_CL_RX_RING_SIZE);
- fw_client = ishtp_fw_cl_get_client(dev, &ecl_ishtp_guid);
+ fw_client = ishtp_fw_cl_get_client(dev, &ecl_ishtp_id_table[0].guid);
if (!fw_client) {
dev_err(cl_data_to_dev(opr_dev), "fw client not found\n");
return -ENOENT;
@@ -674,7 +677,7 @@ static const struct dev_pm_ops ecl_ishtp_pm_ops = {
static struct ishtp_cl_driver ecl_ishtp_cl_driver = {
.name = "ishtp-eclite",
- .guid = &ecl_ishtp_guid,
+ .id = ecl_ishtp_id_table,
.probe = ecl_ishtp_cl_probe,
.remove = ecl_ishtp_cl_remove,
.reset = ecl_ishtp_cl_reset,
@@ -698,4 +701,3 @@ MODULE_DESCRIPTION("ISH ISHTP eclite client opregion driver");
MODULE_AUTHOR("K Naduvalath, Sumesh <sumesh.k.naduvalath@intel.com>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("ishtp:*");
diff --git a/drivers/platform/x86/intel/pmc/pltdrv.c b/drivers/platform/x86/intel/pmc/pltdrv.c
index 73797680b895..15ca8afdd973 100644
--- a/drivers/platform/x86/intel/pmc/pltdrv.c
+++ b/drivers/platform/x86/intel/pmc/pltdrv.c
@@ -65,7 +65,7 @@ static int __init pmc_core_platform_init(void)
retval = platform_device_register(pmc_core_device);
if (retval)
- kfree(pmc_core_device);
+ platform_device_put(pmc_core_device);
return retval;
}
diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
index ae9293024c77..a91847a551a7 100644
--- a/drivers/platform/x86/lg-laptop.c
+++ b/drivers/platform/x86/lg-laptop.c
@@ -657,6 +657,18 @@ static int acpi_add(struct acpi_device *device)
if (product && strlen(product) > 4)
switch (product[4]) {
case '5':
+ if (strlen(product) > 5)
+ switch (product[5]) {
+ case 'N':
+ year = 2021;
+ break;
+ case '0':
+ year = 2016;
+ break;
+ default:
+ year = 2022;
+ }
+ break;
case '6':
year = 2016;
break;
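As a worked example of the decoding above (model string purely illustrative): for a product name such as "17Z95N", product[4] is '5' and product[5] is 'N', so the driver now reports year 2021 instead of falling through to the 2016 case.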
diff --git a/drivers/platform/x86/system76_acpi.c b/drivers/platform/x86/system76_acpi.c
index 8b292ee95a14..7299ad08c838 100644
--- a/drivers/platform/x86/system76_acpi.c
+++ b/drivers/platform/x86/system76_acpi.c
@@ -35,6 +35,7 @@ struct system76_data {
union acpi_object *nfan;
union acpi_object *ntmp;
struct input_dev *input;
+ bool has_open_ec;
};
static const struct acpi_device_id device_ids[] = {
@@ -279,20 +280,12 @@ static struct acpi_battery_hook system76_battery_hook = {
static void system76_battery_init(void)
{
- acpi_handle handle;
-
- handle = ec_get_handle();
- if (handle && acpi_has_method(handle, "GBCT"))
- battery_hook_register(&system76_battery_hook);
+ battery_hook_register(&system76_battery_hook);
}
static void system76_battery_exit(void)
{
- acpi_handle handle;
-
- handle = ec_get_handle();
- if (handle && acpi_has_method(handle, "GBCT"))
- battery_hook_unregister(&system76_battery_hook);
+ battery_hook_unregister(&system76_battery_hook);
}
// Get the airplane mode LED brightness
@@ -673,6 +666,10 @@ static int system76_add(struct acpi_device *acpi_dev)
acpi_dev->driver_data = data;
data->acpi_dev = acpi_dev;
+ // Some models do not run open EC firmware. Check for an ACPI method
+ // that only exists on open EC to guard functionality specific to it.
+ data->has_open_ec = acpi_has_method(acpi_device_handle(data->acpi_dev), "NFAN");
+
err = system76_get(data, "INIT");
if (err)
return err;
@@ -718,27 +715,31 @@ static int system76_add(struct acpi_device *acpi_dev)
if (err)
goto error;
- err = system76_get_object(data, "NFAN", &data->nfan);
- if (err)
- goto error;
+ if (data->has_open_ec) {
+ err = system76_get_object(data, "NFAN", &data->nfan);
+ if (err)
+ goto error;
- err = system76_get_object(data, "NTMP", &data->ntmp);
- if (err)
- goto error;
+ err = system76_get_object(data, "NTMP", &data->ntmp);
+ if (err)
+ goto error;
- data->therm = devm_hwmon_device_register_with_info(&acpi_dev->dev,
- "system76_acpi", data, &thermal_chip_info, NULL);
- err = PTR_ERR_OR_ZERO(data->therm);
- if (err)
- goto error;
+ data->therm = devm_hwmon_device_register_with_info(&acpi_dev->dev,
+ "system76_acpi", data, &thermal_chip_info, NULL);
+ err = PTR_ERR_OR_ZERO(data->therm);
+ if (err)
+ goto error;
- system76_battery_init();
+ system76_battery_init();
+ }
return 0;
error:
- kfree(data->ntmp);
- kfree(data->nfan);
+ if (data->has_open_ec) {
+ kfree(data->ntmp);
+ kfree(data->nfan);
+ }
return err;
}
@@ -749,14 +750,15 @@ static int system76_remove(struct acpi_device *acpi_dev)
data = acpi_driver_data(acpi_dev);
- system76_battery_exit();
+ if (data->has_open_ec) {
+ system76_battery_exit();
+ kfree(data->nfan);
+ kfree(data->ntmp);
+ }
devm_led_classdev_unregister(&acpi_dev->dev, &data->ap_led);
devm_led_classdev_unregister(&acpi_dev->dev, &data->kb_led);
- kfree(data->nfan);
- kfree(data->ntmp);
-
system76_get(data, "FINI");
return 0;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e95aad96bca6..341655d711ce 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -3017,6 +3017,8 @@ static struct attribute *hotkey_attributes[] = {
&dev_attr_hotkey_all_mask.attr,
&dev_attr_hotkey_adaptive_all_mask.attr,
&dev_attr_hotkey_recommended_mask.attr,
+ &dev_attr_hotkey_tablet_mode.attr,
+ &dev_attr_hotkey_radio_sw.attr,
#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
&dev_attr_hotkey_source_mask.attr,
&dev_attr_hotkey_poll_freq.attr,
@@ -5742,11 +5744,11 @@ static const char * const tpacpi_led_names[TPACPI_LED_NUMLEDS] = {
"tpacpi::standby",
"tpacpi::dock_status1",
"tpacpi::dock_status2",
- "tpacpi::unknown_led2",
+ "tpacpi::lid_logo_dot",
"tpacpi::unknown_led3",
"tpacpi::thinkvantage",
};
-#define TPACPI_SAFE_LEDS 0x1081U
+#define TPACPI_SAFE_LEDS 0x1481U
static inline bool tpacpi_is_led_restricted(const unsigned int led)
{
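On the safe-LED mask change above: 0x1081 sets bits 0, 7 and 12, while 0x1481 additionally sets bit 10, i.e. the slot renamed to tpacpi::lid_logo_dot (assuming, as the name table suggests, that it sits at index 10), so that LED can now be driven without the unsafe-LED restriction.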
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index fa8812039b82..17dd54d4b783 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -905,6 +905,16 @@ static const struct ts_dmi_data trekstor_primetab_t13b_data = {
.properties = trekstor_primetab_t13b_props,
};
+static const struct property_entry trekstor_surftab_duo_w1_props[] = {
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+ { }
+};
+
+static const struct ts_dmi_data trekstor_surftab_duo_w1_data = {
+ .acpi_name = "GDIX1001:00",
+ .properties = trekstor_surftab_duo_w1_props,
+};
+
static const struct property_entry trekstor_surftab_twin_10_1_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 20),
PROPERTY_ENTRY_U32("touchscreen-min-y", 0),
@@ -1503,6 +1513,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* TrekStor SurfTab duo W1 10.1 ST10432-10b */
+ .driver_data = (void *)&trekstor_surftab_duo_w1_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab duo W1 10.1 (VT4)"),
+ },
+ },
+ {
/* TrekStor SurfTab twin 10.1 ST10432-8 */
.driver_data = (void *)&trekstor_surftab_twin_10_1_data,
.matches = {
diff --git a/drivers/powercap/dtpm.c b/drivers/powercap/dtpm.c
index b9fac786246a..2a5c1829aab7 100644
--- a/drivers/powercap/dtpm.c
+++ b/drivers/powercap/dtpm.c
@@ -463,17 +463,12 @@ int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent)
static int __init init_dtpm(void)
{
- struct dtpm_descr *dtpm_descr;
-
pct = powercap_register_control_type(NULL, "dtpm", NULL);
if (IS_ERR(pct)) {
pr_err("Failed to register control type\n");
return PTR_ERR(pct);
}
- for_each_dtpm_table(dtpm_descr)
- dtpm_descr->init();
-
return 0;
}
late_initcall(init_dtpm);
diff --git a/drivers/reset/tegra/reset-bpmp.c b/drivers/reset/tegra/reset-bpmp.c
index 4c5bba52b105..24d3395964cc 100644
--- a/drivers/reset/tegra/reset-bpmp.c
+++ b/drivers/reset/tegra/reset-bpmp.c
@@ -20,7 +20,6 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
struct mrq_reset_request request;
struct tegra_bpmp_message msg;
- int err;
memset(&request, 0, sizeof(request));
request.cmd = command;
@@ -31,13 +30,7 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
msg.tx.data = &request;
msg.tx.size = sizeof(request);
- err = tegra_bpmp_transfer(bpmp, &msg);
- if (err)
- return err;
- if (msg.rx.ret)
- return -EINVAL;
-
- return 0;
+ return tegra_bpmp_transfer(bpmp, &msg);
}
static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 284b939fb1ea..059dae8909ee 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -3100,6 +3100,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_session *session = conn->session;
+ char *tmp_persistent_address = conn->persistent_address;
+ char *tmp_local_ipaddr = conn->local_ipaddr;
del_timer_sync(&conn->transport_timer);
@@ -3121,8 +3123,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
spin_lock_bh(&session->frwd_lock);
free_pages((unsigned long) conn->data,
get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
- kfree(conn->persistent_address);
- kfree(conn->local_ipaddr);
/* regular RX path uses back_lock */
spin_lock_bh(&session->back_lock);
kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
@@ -3134,6 +3134,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
mutex_unlock(&session->eh_mutex);
iscsi_destroy_conn(cls_conn);
+ kfree(tmp_persistent_address);
+ kfree(tmp_local_ipaddr);
}
EXPORT_SYMBOL_GPL(iscsi_conn_teardown);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index bd6d459afce5..08b2e85dcd7d 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -2954,8 +2954,8 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
char mybuf[64];
char *pbuf;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > 63)
+ nbytes = 63;
memset(mybuf, 0, sizeof(mybuf));
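Capping the accepted length at 63 rather than 64 bytes keeps the final byte of the zero-initialised 64-byte mybuf untouched, so the buffer remains NUL-terminated once the user data is copied into it.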
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b940e0268f96..e83453bea2ae 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -5095,14 +5095,9 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* NPort Recovery mode or node is just allocated */
if (!lpfc_nlp_not_used(ndlp)) {
/* A LOGO is completing and the node is in NPR state.
- * If this a fabric node that cleared its transport
- * registration, release the rpi.
+ * Just unregister the RPI because the node is still
+ * required.
*/
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
- if (phba->sli_rev == LPFC_SLI_REV4)
- ndlp->nlp_flag |= NLP_RELEASE_RPI;
- spin_unlock_irq(&ndlp->lock);
lpfc_unreg_rpi(vport, ndlp);
} else {
/* Indicate the node has already released, should
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 27eb652b564f..81dab9b82f79 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -639,8 +639,8 @@ static void _base_sync_drv_fw_timestamp(struct MPT3SAS_ADAPTER *ioc)
mpi_request->IOCParameter = MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP;
current_time = ktime_get_real();
TimeStamp = ktime_to_ms(current_time);
- mpi_request->Reserved7 = cpu_to_le32(TimeStamp & 0xFFFFFFFF);
- mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp >> 32);
+ mpi_request->Reserved7 = cpu_to_le32(TimeStamp >> 32);
+ mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp & 0xFFFFFFFF);
init_completion(&ioc->scsih_cmds.done);
ioc->put_smid_default(ioc, smid);
dinitprintk(ioc, ioc_info(ioc,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index db6a759de1e9..a0af986633d2 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -142,6 +142,8 @@
#define MPT_MAX_CALLBACKS 32
+#define MPT_MAX_HBA_NUM_PHYS 32
+
#define INTERNAL_CMDS_COUNT 10 /* reserved cmds */
/* reserved for issuing internally framed scsi io cmds */
#define INTERNAL_SCSIIO_CMDS_COUNT 3
@@ -798,6 +800,7 @@ struct _sas_phy {
* @enclosure_handle: handle for this a member of an enclosure
* @device_info: bitwise defining capabilities of this sas_host/expander
* @responding: used in _scsih_expander_device_mark_responding
+ * @nr_phys_allocated: number of phys for which memory has been allocated
* @phy: a list of phys that make up this sas_host/expander
* @sas_port_list: list of ports attached to this sas_host/expander
* @port: hba port entry containing node's port number info
@@ -813,6 +816,7 @@ struct _sas_node {
u16 enclosure_handle;
u64 enclosure_logical_id;
u8 responding;
+ u8 nr_phys_allocated;
struct hba_port *port;
struct _sas_phy *phy;
struct list_head sas_port_list;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index cee7170beae8..00792767c620 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3869,7 +3869,7 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
shost_for_each_device(sdev, ioc->shost) {
sas_device_priv_data = sdev->hostdata;
- if (!sas_device_priv_data)
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
continue;
if (sas_device_priv_data->sas_target->sas_address
!= sas_address)
@@ -6406,11 +6406,26 @@ _scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc)
int i, j, count = 0, lcount = 0;
int ret;
u64 sas_addr;
+ u8 num_phys;
drsprintk(ioc, ioc_info(ioc,
"updating ports for sas_host(0x%016llx)\n",
(unsigned long long)ioc->sas_hba.sas_address));
+ mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
+ if (!num_phys) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ if (num_phys > ioc->sas_hba.nr_phys_allocated) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+ ioc->sas_hba.num_phys = num_phys;
+
port_table = kcalloc(ioc->sas_hba.num_phys,
sizeof(struct hba_port), GFP_KERNEL);
if (!port_table)
@@ -6611,6 +6626,30 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
ioc->sas_hba.phy[i].hba_vphy = 1;
}
+ /*
+ * Add new HBA phys to STL if these new phys got added as part
+ * of HBA Firmware upgrade/downgrade operation.
+ */
+ if (!ioc->sas_hba.phy[i].phy) {
+ if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
+ &phy_pg0, i))) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ continue;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ continue;
+ }
+ ioc->sas_hba.phy[i].phy_id = i;
+ mpt3sas_transport_add_host_phy(ioc,
+ &ioc->sas_hba.phy[i], phy_pg0,
+ ioc->sas_hba.parent_dev);
+ continue;
+ }
ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
AttachedDevHandle);
@@ -6622,6 +6661,19 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
attached_handle, i, link_rate,
ioc->sas_hba.phy[i].port);
}
+ /*
+ * Clear the phy details if this phy got disabled as part of
+ * HBA Firmware upgrade/downgrade operation.
+ */
+ for (i = ioc->sas_hba.num_phys;
+ i < ioc->sas_hba.nr_phys_allocated; i++) {
+ if (ioc->sas_hba.phy[i].phy &&
+ ioc->sas_hba.phy[i].phy->negotiated_linkrate >=
+ SAS_LINK_RATE_1_5_GBPS)
+ mpt3sas_transport_update_links(ioc,
+ ioc->sas_hba.sas_address, 0, i,
+ MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED, NULL);
+ }
out:
kfree(sas_iounit_pg0);
}
@@ -6654,7 +6706,10 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
__FILE__, __LINE__, __func__);
return;
}
- ioc->sas_hba.phy = kcalloc(num_phys,
+
+ ioc->sas_hba.nr_phys_allocated = max_t(u8,
+ MPT_MAX_HBA_NUM_PHYS, num_phys);
+ ioc->sas_hba.phy = kcalloc(ioc->sas_hba.nr_phys_allocated,
sizeof(struct _sas_phy), GFP_KERNEL);
if (!ioc->sas_hba.phy) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index bed8cc125544..fbfeb0b046dd 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -282,12 +282,12 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
if (rc) {
pm8001_dbg(pm8001_ha, FAIL,
"pm8001_setup_irq failed [ret: %d]\n", rc);
- goto err_out_shost;
+ goto err_out;
}
/* Request Interrupt */
rc = pm8001_request_irq(pm8001_ha);
if (rc)
- goto err_out_shost;
+ goto err_out;
count = pm8001_ha->max_q_num;
/* Queues are chosen based on the number of cores/msix availability */
@@ -423,8 +423,6 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
pm8001_tag_init(pm8001_ha);
return 0;
-err_out_shost:
- scsi_remove_host(pm8001_ha->shost);
err_out_nodev:
for (i = 0; i < pm8001_ha->max_memcnt; i++) {
if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index b9f6d83ff380..2101fc5761c3 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -3053,7 +3053,6 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct smp_completion_resp *psmpPayload;
struct task_status_struct *ts;
struct pm8001_device *pm8001_dev;
- char *pdma_respaddr = NULL;
psmpPayload = (struct smp_completion_resp *)(piomb + 4);
status = le32_to_cpu(psmpPayload->status);
@@ -3080,19 +3079,23 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
if (pm8001_ha->smp_exp_mode == SMP_DIRECT) {
+ struct scatterlist *sg_resp = &t->smp_task.smp_resp;
+ u8 *payload;
+ void *to;
+
pm8001_dbg(pm8001_ha, IO,
"DIRECT RESPONSE Length:%d\n",
param);
- pdma_respaddr = (char *)(phys_to_virt(cpu_to_le64
- ((u64)sg_dma_address
- (&t->smp_task.smp_resp))));
+ to = kmap_atomic(sg_page(sg_resp));
+ payload = to + sg_resp->offset;
for (i = 0; i < param; i++) {
- *(pdma_respaddr+i) = psmpPayload->_r_a[i];
+ *(payload + i) = psmpPayload->_r_a[i];
pm8001_dbg(pm8001_ha, IO,
"SMP Byte%d DMA data 0x%x psmp 0x%x\n",
- i, *(pdma_respaddr + i),
+ i, *(payload + i),
psmpPayload->_r_a[i]);
}
+ kunmap_atomic(to);
}
break;
case IO_ABORTED:
@@ -4236,14 +4239,14 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
struct sas_task *task = ccb->task;
struct domain_device *dev = task->dev;
struct pm8001_device *pm8001_dev = dev->lldd_dev;
- struct scatterlist *sg_req, *sg_resp;
+ struct scatterlist *sg_req, *sg_resp, *smp_req;
u32 req_len, resp_len;
struct smp_req smp_cmd;
u32 opc;
struct inbound_queue_table *circularQ;
- char *preq_dma_addr = NULL;
- __le64 tmp_addr;
u32 i, length;
+ u8 *payload;
+ u8 *to;
memset(&smp_cmd, 0, sizeof(smp_cmd));
/*
@@ -4280,8 +4283,9 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
pm8001_ha->smp_exp_mode = SMP_INDIRECT;
- tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
- preq_dma_addr = (char *)phys_to_virt(tmp_addr);
+ smp_req = &task->smp_task.smp_req;
+ to = kmap_atomic(sg_page(smp_req));
+ payload = to + smp_req->offset;
/* INDIRECT MODE command settings. Use DMA */
if (pm8001_ha->smp_exp_mode == SMP_INDIRECT) {
@@ -4289,7 +4293,7 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
/* for SPCv indirect mode. Place the top 4 bytes of
* SMP Request header here. */
for (i = 0; i < 4; i++)
- smp_cmd.smp_req16[i] = *(preq_dma_addr + i);
+ smp_cmd.smp_req16[i] = *(payload + i);
/* exclude top 4 bytes for SMP req header */
smp_cmd.long_smp_req.long_req_addr =
cpu_to_le64((u64)sg_dma_address
@@ -4320,20 +4324,20 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
pm8001_dbg(pm8001_ha, IO, "SMP REQUEST DIRECT MODE\n");
for (i = 0; i < length; i++)
if (i < 16) {
- smp_cmd.smp_req16[i] = *(preq_dma_addr+i);
+ smp_cmd.smp_req16[i] = *(payload + i);
pm8001_dbg(pm8001_ha, IO,
"Byte[%d]:%x (DMA data:%x)\n",
i, smp_cmd.smp_req16[i],
- *(preq_dma_addr));
+ *(payload));
} else {
- smp_cmd.smp_req[i] = *(preq_dma_addr+i);
+ smp_cmd.smp_req[i] = *(payload + i);
pm8001_dbg(pm8001_ha, IO,
"Byte[%d]:%x (DMA data:%x)\n",
i, smp_cmd.smp_req[i],
- *(preq_dma_addr));
+ *(payload));
}
}
-
+ kunmap_atomic(to);
build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag,
&smp_cmd, pm8001_ha->smp_exp_mode, length);
rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &smp_cmd,
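[Note] The pm80xx hunks above stop converting a DMA address back to a CPU pointer with phys_to_virt() and instead map the scatterlist page directly. A minimal sketch of that access pattern, assuming a kernel context with <linux/highmem.h> and <linux/scatterlist.h>; the helper name is hypothetical:

static void smp_copy_from_fw(struct scatterlist *sg, const u8 *fw_buf, u32 len)
{
	void *kaddr = kmap_atomic(sg_page(sg));	/* temporary CPU mapping of the page */
	u8 *payload = kaddr + sg->offset;	/* SMP data starts at the sg offset */
	u32 i;

	for (i = 0; i < len; i++)
		payload[i] = fw_buf[i];		/* byte-wise copy, as in the patch */

	kunmap_atomic(kaddr);			/* unmap before returning */
}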
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 84a4204a2cb4..5916ed7662d5 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -732,7 +732,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
{
struct qedi_work_map *work, *work_tmp;
u32 proto_itt = cqe->itid;
- itt_t protoitt = 0;
int found = 0;
struct qedi_cmd *qedi_cmd = NULL;
u32 iscsi_cid;
@@ -812,16 +811,12 @@ unlock:
return;
check_cleanup_reqs:
- if (qedi_conn->cmd_cleanup_req > 0) {
- QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+ if (atomic_inc_return(&qedi_conn->cmd_cleanup_cmpl) ==
+ qedi_conn->cmd_cleanup_req) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
"Freeing tid=0x%x for cid=0x%x\n",
cqe->itid, qedi_conn->iscsi_conn_id);
- qedi_conn->cmd_cleanup_cmpl++;
wake_up(&qedi_conn->wait_queue);
- } else {
- QEDI_ERR(&qedi->dbg_ctx,
- "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x\n",
- protoitt, cqe->itid, qedi_conn->iscsi_conn_id);
}
}
@@ -1163,7 +1158,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
}
qedi_conn->cmd_cleanup_req = 0;
- qedi_conn->cmd_cleanup_cmpl = 0;
+ atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0);
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
"active_cmd_count=%d, cid=0x%x, in_recovery=%d, lun_reset=%d\n",
@@ -1215,16 +1210,15 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
qedi_conn->iscsi_conn_id);
rval = wait_event_interruptible_timeout(qedi_conn->wait_queue,
- ((qedi_conn->cmd_cleanup_req ==
- qedi_conn->cmd_cleanup_cmpl) ||
- test_bit(QEDI_IN_RECOVERY,
- &qedi->flags)),
- 5 * HZ);
+ (qedi_conn->cmd_cleanup_req ==
+ atomic_read(&qedi_conn->cmd_cleanup_cmpl)) ||
+ test_bit(QEDI_IN_RECOVERY, &qedi->flags),
+ 5 * HZ);
if (rval) {
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
"i/o cmd_cleanup_req=%d, equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
qedi_conn->cmd_cleanup_req,
- qedi_conn->cmd_cleanup_cmpl,
+ atomic_read(&qedi_conn->cmd_cleanup_cmpl),
qedi_conn->iscsi_conn_id);
return 0;
@@ -1233,7 +1227,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
"i/o cmd_cleanup_req=%d, not equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
qedi_conn->cmd_cleanup_req,
- qedi_conn->cmd_cleanup_cmpl,
+ atomic_read(&qedi_conn->cmd_cleanup_cmpl),
qedi_conn->iscsi_conn_id);
iscsi_host_for_each_session(qedi->shost,
@@ -1242,11 +1236,10 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
/* Enable IOs for all other sessions except current.*/
if (!wait_event_interruptible_timeout(qedi_conn->wait_queue,
- (qedi_conn->cmd_cleanup_req ==
- qedi_conn->cmd_cleanup_cmpl) ||
- test_bit(QEDI_IN_RECOVERY,
- &qedi->flags),
- 5 * HZ)) {
+ (qedi_conn->cmd_cleanup_req ==
+ atomic_read(&qedi_conn->cmd_cleanup_cmpl)) ||
+ test_bit(QEDI_IN_RECOVERY, &qedi->flags),
+ 5 * HZ)) {
iscsi_host_for_each_session(qedi->shost,
qedi_mark_device_available);
return -1;
@@ -1266,7 +1259,7 @@ void qedi_clearsq(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
qedi_ep = qedi_conn->ep;
qedi_conn->cmd_cleanup_req = 0;
- qedi_conn->cmd_cleanup_cmpl = 0;
+ atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0);
if (!qedi_ep) {
QEDI_WARN(&qedi->dbg_ctx,
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 88aa7d8b11c9..282ecb4e39bb 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -412,7 +412,7 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
qedi_conn->iscsi_conn_id = qedi_ep->iscsi_cid;
qedi_conn->fw_cid = qedi_ep->fw_cid;
qedi_conn->cmd_cleanup_req = 0;
- qedi_conn->cmd_cleanup_cmpl = 0;
+ atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0);
if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn)) {
rc = -EINVAL;
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index a282860da0aa..9b9f2e44fdde 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -155,7 +155,7 @@ struct qedi_conn {
spinlock_t list_lock; /* internal conn lock */
u32 active_cmd_count;
u32 cmd_cleanup_req;
- u32 cmd_cleanup_cmpl;
+ atomic_t cmd_cleanup_cmpl;
u32 iscsi_conn_id;
int itt;
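[Note] The qedi hunks above turn cmd_cleanup_cmpl into an atomic_t so the completion path can count finished cleanups without a lock, while the waiter compares the count against cmd_cleanup_req. A short sketch of the same counting pattern, assuming <linux/atomic.h>; the function names are made up for illustration:

static atomic_t cleanup_cmpl;

static void cleanup_begin(void)
{
	atomic_set(&cleanup_cmpl, 0);		/* reset before issuing cleanup requests */
}

static bool cleanup_all_done(u32 cleanup_req)
{
	/* atomic_inc_return() returns the post-increment value, so the last
	 * completion to arrive sees the count reach cleanup_req exactly once. */
	return atomic_inc_return(&cleanup_cmpl) == cleanup_req;
}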
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 25549a8a2d72..7cf1f78cbaee 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -2491,6 +2491,9 @@ ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
struct va_format vaf;
char pbuf[64];
+ if (!ql_mask_match(level) && !trace_ql_dbg_log_enabled())
+ return;
+
va_start(va, fmt);
vaf.fmt = fmt;
diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
index 2e37b189cb75..53d2b8562027 100644
--- a/drivers/scsi/qla2xxx/qla_edif.c
+++ b/drivers/scsi/qla2xxx/qla_edif.c
@@ -865,7 +865,7 @@ qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
"APP request entry - portid=%06x.\n", tdid.b24);
/* Ran out of space */
- if (pcnt > app_req.num_ports)
+ if (pcnt >= app_req.num_ports)
break;
if (tdid.b24 != 0 && tdid.b24 != fcport->d_id.b24)
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 1d0278da9041..2104973a35cd 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1189,7 +1189,7 @@ static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
__func__, off_dst, scsi_bufflen(scp), act_len,
scsi_get_resid(scp));
n = scsi_bufflen(scp) - (off_dst + act_len);
- scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n));
+ scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
return 0;
}
@@ -1562,7 +1562,8 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
unsigned char pq_pdt;
unsigned char *arr;
unsigned char *cmd = scp->cmnd;
- int alloc_len, n, ret;
+ u32 alloc_len, n;
+ int ret;
bool have_wlun, is_disk, is_zbc, is_disk_zbc;
alloc_len = get_unaligned_be16(cmd + 3);
@@ -1585,7 +1586,8 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
kfree(arr);
return check_condition_result;
} else if (0x1 & cmd[1]) { /* EVPD bit set */
- int lu_id_num, port_group_id, target_dev_id, len;
+ int lu_id_num, port_group_id, target_dev_id;
+ u32 len;
char lu_id_str[6];
int host_no = devip->sdbg_host->shost->host_no;
@@ -1676,9 +1678,9 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
kfree(arr);
return check_condition_result;
}
- len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
+ len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
ret = fill_from_dev_buffer(scp, arr,
- min(len, SDEBUG_MAX_INQ_ARR_SZ));
+ min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
kfree(arr);
return ret;
}
@@ -1714,7 +1716,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
}
put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
ret = fill_from_dev_buffer(scp, arr,
- min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ));
+ min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
kfree(arr);
return ret;
}
@@ -1729,8 +1731,8 @@ static int resp_requests(struct scsi_cmnd *scp,
unsigned char *cmd = scp->cmnd;
unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */
bool dsense = !!(cmd[1] & 1);
- int alloc_len = cmd[4];
- int len = 18;
+ u32 alloc_len = cmd[4];
+ u32 len = 18;
int stopped_state = atomic_read(&devip->stopped);
memset(arr, 0, sizeof(arr));
@@ -1774,7 +1776,7 @@ static int resp_requests(struct scsi_cmnd *scp,
arr[7] = 0xa;
}
}
- return fill_from_dev_buffer(scp, arr, min_t(int, len, alloc_len));
+ return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
}
static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
@@ -2312,7 +2314,8 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
{
int pcontrol, pcode, subpcode, bd_len;
unsigned char dev_spec;
- int alloc_len, offset, len, target_dev_id;
+ u32 alloc_len, offset, len;
+ int target_dev_id;
int target = scp->device->id;
unsigned char *ap;
unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
@@ -2468,7 +2471,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
arr[0] = offset - 1;
else
put_unaligned_be16((offset - 2), arr + 0);
- return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset));
+ return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
}
#define SDEBUG_MAX_MSELECT_SZ 512
@@ -2499,11 +2502,11 @@ static int resp_mode_select(struct scsi_cmnd *scp,
__func__, param_len, res);
md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
- if (md_len > 2) {
+ off = bd_len + (mselect6 ? 4 : 8);
+ if (md_len > 2 || off >= res) {
mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
return check_condition_result;
}
- off = bd_len + (mselect6 ? 4 : 8);
mpage = arr[off] & 0x3f;
ps = !!(arr[off] & 0x80);
if (ps) {
@@ -2583,7 +2586,8 @@ static int resp_ie_l_pg(unsigned char *arr)
static int resp_log_sense(struct scsi_cmnd *scp,
struct sdebug_dev_info *devip)
{
- int ppc, sp, pcode, subpcode, alloc_len, len, n;
+ int ppc, sp, pcode, subpcode;
+ u32 alloc_len, len, n;
unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
unsigned char *cmd = scp->cmnd;
@@ -2653,9 +2657,9 @@ static int resp_log_sense(struct scsi_cmnd *scp,
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
return check_condition_result;
}
- len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len);
+ len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
return fill_from_dev_buffer(scp, arr,
- min_t(int, len, SDEBUG_MAX_INQ_ARR_SZ));
+ min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
}
static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
@@ -4338,7 +4342,7 @@ static int resp_report_zones(struct scsi_cmnd *scp,
rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
max_zones);
- arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC);
+ arr = kzalloc(alloc_len, GFP_ATOMIC);
if (!arr) {
mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
INSUFF_RES_ASCQ);
@@ -4430,7 +4434,7 @@ static int resp_report_zones(struct scsi_cmnd *scp,
put_unaligned_be64(sdebug_capacity - 1, arr + 8);
rep_len = (unsigned long)desc - (unsigned long)arr;
- ret = fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, rep_len));
+ ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
fini:
read_unlock(macc_lckp);
@@ -4653,6 +4657,7 @@ static void zbc_rwp_zone(struct sdebug_dev_info *devip,
struct sdeb_zone_state *zsp)
{
enum sdebug_z_cond zc;
+ struct sdeb_store_info *sip = devip2sip(devip, false);
if (zbc_zone_is_conv(zsp))
return;
@@ -4664,6 +4669,10 @@ static void zbc_rwp_zone(struct sdebug_dev_info *devip,
if (zsp->z_cond == ZC4_CLOSED)
devip->nr_closed--;
+ if (zsp->z_wp > zsp->z_start)
+ memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
+ (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
+
zsp->z_non_seq_resource = false;
zsp->z_wp = zsp->z_start;
zsp->z_cond = ZC1_EMPTY;
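[Note] Several scsi_debug hunks above widen allocation-length handling from int to u32 and switch min()/min_t(int, ...) to min_t(u32, ...), so a 16-bit CDB allocation length is never compared as a signed value. A small sketch of the clamping, assuming <linux/kernel.h> and <asm/unaligned.h>; the helper is hypothetical:

static u32 clamp_response_len(const u8 *cdb, u32 data_len, u32 arr_sz)
{
	u32 alloc_len = get_unaligned_be16(cdb + 3);	/* ALLOCATION LENGTH from the CDB */
	u32 len = min_t(u32, data_len, alloc_len);	/* never return more than asked for */

	return min_t(u32, len, arr_sz);			/* and never more than the buffer holds */
}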
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 7afcec250f9b..d4edce930a4a 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -812,7 +812,7 @@ store_state_field(struct device *dev, struct device_attribute *attr,
mutex_lock(&sdev->state_mutex);
if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) {
- ret = count;
+ ret = 0;
} else {
ret = scsi_device_set_state(sdev, state);
if (ret == 0 && state == SDEV_RUNNING)
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index fc5b214347b3..5393b5c9dd9c 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -1189,6 +1189,7 @@ static int ufs_mtk_probe(struct platform_device *pdev)
}
link = device_link_add(dev, &reset_pdev->dev,
DL_FLAG_AUTOPROBE_CONSUMER);
+ put_device(&reset_pdev->dev);
if (!link) {
dev_notice(dev, "add reset device_link fail\n");
goto skip_reset;
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index 51424557810d..f725248ba57f 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -421,6 +421,13 @@ static int ufs_intel_lkf_init(struct ufs_hba *hba)
return err;
}
+static int ufs_intel_adl_init(struct ufs_hba *hba)
+{
+ hba->nop_out_timeout = 200;
+ hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
+ return ufs_intel_common_init(hba);
+}
+
static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
.name = "intel-pci",
.init = ufs_intel_common_init,
@@ -449,6 +456,15 @@ static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
.device_reset = ufs_intel_device_reset,
};
+static struct ufs_hba_variant_ops ufs_intel_adl_hba_vops = {
+ .name = "intel-pci",
+ .init = ufs_intel_adl_init,
+ .exit = ufs_intel_common_exit,
+ .link_startup_notify = ufs_intel_link_startup_notify,
+ .resume = ufs_intel_resume,
+ .device_reset = ufs_intel_device_reset,
+};
+
#ifdef CONFIG_PM_SLEEP
static int ufshcd_pci_restore(struct device *dev)
{
@@ -563,6 +579,8 @@ static const struct pci_device_id ufshcd_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
{ PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
{ PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops },
+ { PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
+ { PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
{ } /* terminate list */
};
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
index 2e31e1413826..ded5ba9b1466 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -331,7 +331,7 @@ ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
cdb[0] = UFSHPB_READ;
if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ)
- ppn_tmp = swab64(ppn);
+ ppn_tmp = (__force __be64)swab64((__force u64)ppn);
/* ppn value is stored as big-endian in the host memory */
memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 19f7d7b90625..28e1d98ae102 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -977,7 +977,6 @@ static unsigned int features[] = {
static struct virtio_driver virtio_scsi_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
- .suppress_used_validation = true,
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index c2ba65224633..1f037b8ab904 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -586,9 +586,12 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
* Commands like INQUIRY may transfer less data than
* requested by the initiator via bufflen. Set residual
* count to make upper layer aware of the actual amount
- * of data returned.
+ * of data returned. There are cases when controller
+ * returns zero dataLen with non zero data - do not set
+ * residual count in that case.
*/
- scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
+ if (e->dataLen && (e->dataLen < scsi_bufflen(cmd)))
+ scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
cmd->result = (DID_OK << 16);
break;
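[Note] The vmw_pvscsi hunk above only reports a residual when the controller returned a non-zero dataLen that is shorter than the requested transfer. Roughly, the guard amounts to the following (sketch only; scsi_bufflen()/scsi_set_resid() are the existing SCSI midlayer accessors):

static void pvscsi_set_residual(struct scsi_cmnd *cmd, u32 data_len)
{
	/* dataLen == 0 with data present is a known controller quirk,
	 * so leave the residual untouched in that case. */
	if (data_len && data_len < scsi_bufflen(cmd))
		scsi_set_resid(cmd, scsi_bufflen(cmd) - data_len);
}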
diff --git a/drivers/soc/imx/imx8m-blk-ctrl.c b/drivers/soc/imx/imx8m-blk-ctrl.c
index 519b3651d1d9..c2f076b56e24 100644
--- a/drivers/soc/imx/imx8m-blk-ctrl.c
+++ b/drivers/soc/imx/imx8m-blk-ctrl.c
@@ -17,6 +17,7 @@
#define BLK_SFT_RSTN 0x0
#define BLK_CLK_EN 0x4
+#define BLK_MIPI_RESET_DIV 0x8 /* Mini/Nano DISPLAY_BLK_CTRL only */
struct imx8m_blk_ctrl_domain;
@@ -36,6 +37,15 @@ struct imx8m_blk_ctrl_domain_data {
const char *gpc_name;
u32 rst_mask;
u32 clk_mask;
+
+ /*
+ * i.MX8M Mini and Nano have a third DISPLAY_BLK_CTRL register
+ * which is used to control the reset for the MIPI Phy.
+ * Since it's only present in certain circumstances,
+ * an if-statement should be used before setting and clearing this
+ * register.
+ */
+ u32 mipi_phy_rst_mask;
};
#define DOMAIN_MAX_CLKS 3
@@ -78,6 +88,8 @@ static int imx8m_blk_ctrl_power_on(struct generic_pm_domain *genpd)
/* put devices into reset */
regmap_clear_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
+ if (data->mipi_phy_rst_mask)
+ regmap_clear_bits(bc->regmap, BLK_MIPI_RESET_DIV, data->mipi_phy_rst_mask);
/* enable upstream and blk-ctrl clocks to allow reset to propagate */
ret = clk_bulk_prepare_enable(data->num_clks, domain->clks);
@@ -99,6 +111,8 @@ static int imx8m_blk_ctrl_power_on(struct generic_pm_domain *genpd)
/* release reset */
regmap_set_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
+ if (data->mipi_phy_rst_mask)
+ regmap_set_bits(bc->regmap, BLK_MIPI_RESET_DIV, data->mipi_phy_rst_mask);
/* disable upstream clocks */
clk_bulk_disable_unprepare(data->num_clks, domain->clks);
@@ -120,6 +134,9 @@ static int imx8m_blk_ctrl_power_off(struct generic_pm_domain *genpd)
struct imx8m_blk_ctrl *bc = domain->bc;
/* put devices into reset and disable clocks */
+ if (data->mipi_phy_rst_mask)
+ regmap_clear_bits(bc->regmap, BLK_MIPI_RESET_DIV, data->mipi_phy_rst_mask);
+
regmap_clear_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
regmap_clear_bits(bc->regmap, BLK_CLK_EN, data->clk_mask);
@@ -480,6 +497,7 @@ static const struct imx8m_blk_ctrl_domain_data imx8mm_disp_blk_ctl_domain_data[]
.gpc_name = "mipi-dsi",
.rst_mask = BIT(5),
.clk_mask = BIT(8) | BIT(9),
+ .mipi_phy_rst_mask = BIT(17),
},
[IMX8MM_DISPBLK_PD_MIPI_CSI] = {
.name = "dispblk-mipi-csi",
@@ -488,6 +506,7 @@ static const struct imx8m_blk_ctrl_domain_data imx8mm_disp_blk_ctl_domain_data[]
.gpc_name = "mipi-csi",
.rst_mask = BIT(3) | BIT(4),
.clk_mask = BIT(10) | BIT(11),
+ .mipi_phy_rst_mask = BIT(16),
},
};
diff --git a/drivers/soc/imx/soc-imx.c b/drivers/soc/imx/soc-imx.c
index ac6d856ba228..77bc12039c3d 100644
--- a/drivers/soc/imx/soc-imx.c
+++ b/drivers/soc/imx/soc-imx.c
@@ -36,6 +36,10 @@ static int __init imx_soc_device_init(void)
int ret;
int i;
+ /* Return early if this is running on devices with different SoCs */
+ if (!__mxc_cpu_type)
+ return 0;
+
if (of_machine_is_compatible("fsl,ls1021a"))
return 0;
diff --git a/drivers/soc/tegra/common.c b/drivers/soc/tegra/common.c
index cd33e99249c3..35c882da55fc 100644
--- a/drivers/soc/tegra/common.c
+++ b/drivers/soc/tegra/common.c
@@ -10,6 +10,7 @@
#include <linux/export.h>
#include <linux/of.h>
#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
#include <soc/tegra/common.h>
#include <soc/tegra/fuse.h>
@@ -43,6 +44,7 @@ static int tegra_core_dev_init_opp_state(struct device *dev)
{
unsigned long rate;
struct clk *clk;
+ bool rpm_enabled;
int err;
clk = devm_clk_get(dev, NULL);
@@ -57,8 +59,31 @@ static int tegra_core_dev_init_opp_state(struct device *dev)
return -EINVAL;
}
+ /*
+ * Runtime PM of the device must be enabled in order to set up
+ * GENPD's performance properly because GENPD core checks whether
+ * device is suspended and this check doesn't work while RPM is
+ * disabled. This makes sure the OPP vote below gets cached in
+ * GENPD for the device. Instead, the vote is done the next time
+ * the device gets runtime resumed.
+ */
+ rpm_enabled = pm_runtime_enabled(dev);
+ if (!rpm_enabled)
+ pm_runtime_enable(dev);
+
+ /* should never happen in practice */
+ if (!pm_runtime_enabled(dev)) {
+ dev_WARN(dev, "failed to enable runtime PM\n");
+ pm_runtime_disable(dev);
+ return -EINVAL;
+ }
+
/* first dummy rate-setting initializes voltage vote */
err = dev_pm_opp_set_rate(dev, rate);
+
+ if (!rpm_enabled)
+ pm_runtime_disable(dev);
+
if (err) {
dev_err(dev, "failed to initialize OPP clock: %d\n", err);
return err;
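[Note] The Tegra hunk above briefly enables runtime PM around the first dev_pm_opp_set_rate() call so the GENPD core can cache the performance vote for a device whose runtime PM is not enabled yet. A condensed sketch of that bracketing, assuming <linux/pm_runtime.h> and <linux/pm_opp.h>; the wrapper name is hypothetical:

static int tegra_first_opp_vote(struct device *dev, unsigned long rate)
{
	bool rpm_enabled = pm_runtime_enabled(dev);
	int err;

	if (!rpm_enabled)
		pm_runtime_enable(dev);		/* GENPD only caches votes while RPM is enabled */

	err = dev_pm_opp_set_rate(dev, rate);	/* dummy rate-set initializes the voltage vote */

	if (!rpm_enabled)
		pm_runtime_disable(dev);	/* restore the original RPM state */

	return err;
}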
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index f2151815db58..e714ed3b61bc 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -320,7 +320,7 @@ static struct platform_driver tegra_fuse_driver = {
};
builtin_platform_driver(tegra_fuse_driver);
-bool __init tegra_fuse_read_spare(unsigned int spare)
+u32 __init tegra_fuse_read_spare(unsigned int spare)
{
unsigned int offset = fuse->soc->info->spare + spare * 4;
diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h
index de58feba0435..ecff0c08e959 100644
--- a/drivers/soc/tegra/fuse/fuse.h
+++ b/drivers/soc/tegra/fuse/fuse.h
@@ -65,7 +65,7 @@ struct tegra_fuse {
void tegra_init_revision(void);
void tegra_init_apbmisc(void);
-bool __init tegra_fuse_read_spare(unsigned int spare);
+u32 __init tegra_fuse_read_spare(unsigned int spare);
u32 __init tegra_fuse_read_early(unsigned int offset);
u8 tegra_get_major_rev(void);
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index 46feafe4e201..d8cc4b270644 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -901,7 +901,7 @@ static int a3700_spi_probe(struct platform_device *pdev)
return 0;
error_clk:
- clk_disable_unprepare(spi->clk);
+ clk_unprepare(spi->clk);
error:
spi_master_put(master);
out:
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 59af251e7576..7fec86946131 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -66,8 +66,6 @@ source "drivers/staging/gdm724x/Kconfig"
source "drivers/staging/fwserial/Kconfig"
-source "drivers/staging/netlogic/Kconfig"
-
source "drivers/staging/gs_fpgaboot/Kconfig"
source "drivers/staging/unisys/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 76f413470bc8..e66e19c45425 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_RTL8723BS) += rtl8723bs/
obj-$(CONFIG_R8712U) += rtl8712/
obj-$(CONFIG_R8188EU) += r8188eu/
obj-$(CONFIG_RTS5208) += rts5208/
-obj-$(CONFIG_NETLOGIC_XLR_NET) += netlogic/
obj-$(CONFIG_OCTEON_ETHERNET) += octeon/
obj-$(CONFIG_OCTEON_USB) += octeon-usb/
obj-$(CONFIG_VT6655) += vt6655/
diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c
index cf263a58a148..6fd549a424d5 100644
--- a/drivers/staging/fbtft/fb_ssd1351.c
+++ b/drivers/staging/fbtft/fb_ssd1351.c
@@ -187,7 +187,6 @@ static struct fbtft_display display = {
},
};
-#ifdef CONFIG_FB_BACKLIGHT
static int update_onboard_backlight(struct backlight_device *bd)
{
struct fbtft_par *par = bl_get_data(bd);
@@ -231,9 +230,6 @@ static void register_onboard_backlight(struct fbtft_par *par)
if (!par->fbtftops.unregister_backlight)
par->fbtftops.unregister_backlight = fbtft_unregister_backlight;
}
-#else
-static void register_onboard_backlight(struct fbtft_par *par) { };
-#endif
FBTFT_REGISTER_DRIVER(DRVNAME, "solomon,ssd1351", &display);
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index ecb5f75f6dd5..f2684d2d6851 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -128,7 +128,6 @@ static int fbtft_request_gpios(struct fbtft_par *par)
return 0;
}
-#ifdef CONFIG_FB_BACKLIGHT
static int fbtft_backlight_update_status(struct backlight_device *bd)
{
struct fbtft_par *par = bl_get_data(bd);
@@ -161,6 +160,7 @@ void fbtft_unregister_backlight(struct fbtft_par *par)
par->info->bl_dev = NULL;
}
}
+EXPORT_SYMBOL(fbtft_unregister_backlight);
static const struct backlight_ops fbtft_bl_ops = {
.get_brightness = fbtft_backlight_get_brightness,
@@ -198,12 +198,7 @@ void fbtft_register_backlight(struct fbtft_par *par)
if (!par->fbtftops.unregister_backlight)
par->fbtftops.unregister_backlight = fbtft_unregister_backlight;
}
-#else
-void fbtft_register_backlight(struct fbtft_par *par) { };
-void fbtft_unregister_backlight(struct fbtft_par *par) { };
-#endif
EXPORT_SYMBOL(fbtft_register_backlight);
-EXPORT_SYMBOL(fbtft_unregister_backlight);
static void fbtft_set_addr_win(struct fbtft_par *par, int xs, int ys, int xe,
int ye)
@@ -853,13 +848,11 @@ int fbtft_register_framebuffer(struct fb_info *fb_info)
fb_info->fix.smem_len >> 10, text1,
HZ / fb_info->fbdefio->delay, text2);
-#ifdef CONFIG_FB_BACKLIGHT
/* Turn on backlight if available */
if (fb_info->bl_dev) {
fb_info->bl_dev->props.power = FB_BLANK_UNBLANK;
fb_info->bl_dev->ops->update_status(fb_info->bl_dev);
}
-#endif
return 0;
diff --git a/drivers/staging/greybus/audio_helper.c b/drivers/staging/greybus/audio_helper.c
index 1ed4772d2771..843760675876 100644
--- a/drivers/staging/greybus/audio_helper.c
+++ b/drivers/staging/greybus/audio_helper.c
@@ -192,7 +192,11 @@ int gbaudio_remove_component_controls(struct snd_soc_component *component,
unsigned int num_controls)
{
struct snd_card *card = component->card->snd_card;
+ int err;
- return gbaudio_remove_controls(card, component->dev, controls,
- num_controls, component->name_prefix);
+ down_write(&card->controls_rwsem);
+ err = gbaudio_remove_controls(card, component->dev, controls,
+ num_controls, component->name_prefix);
+ up_write(&card->controls_rwsem);
+ return err;
}
diff --git a/drivers/staging/netlogic/Kconfig b/drivers/staging/netlogic/Kconfig
deleted file mode 100644
index e1712606ee3c..000000000000
--- a/drivers/staging/netlogic/Kconfig
+++ /dev/null
@@ -1,9 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config NETLOGIC_XLR_NET
- tristate "Netlogic XLR/XLS network device"
- depends on CPU_XLR
- depends on NETDEVICES
- select PHYLIB
- help
- This driver support Netlogic XLR/XLS on chip gigabit
- Ethernet.
diff --git a/drivers/staging/netlogic/Makefile b/drivers/staging/netlogic/Makefile
deleted file mode 100644
index 7e2902af26a3..000000000000
--- a/drivers/staging/netlogic/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_NETLOGIC_XLR_NET) += xlr_net.o platform_net.o
diff --git a/drivers/staging/netlogic/TODO b/drivers/staging/netlogic/TODO
deleted file mode 100644
index 20e22ecb9903..000000000000
--- a/drivers/staging/netlogic/TODO
+++ /dev/null
@@ -1,11 +0,0 @@
-* Implementing 64bit stat counter in software
-* All memory allocation should be changed to DMA allocations
-* Changing comments into linux standard format
-
-Please send patches
-To:
-Ganesan Ramalingam <ganesanr@broadcom.com>
-Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Cc:
-Jayachandran Chandrashekaran Nair <jchandra@broadcom.com>
-
diff --git a/drivers/staging/netlogic/platform_net.c b/drivers/staging/netlogic/platform_net.c
deleted file mode 100644
index 8be9d0b0c22c..000000000000
--- a/drivers/staging/netlogic/platform_net.c
+++ /dev/null
@@ -1,219 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-/*
- * Copyright (c) 2003-2012 Broadcom Corporation
- * All Rights Reserved
- */
-
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/ioport.h>
-#include <linux/resource.h>
-#include <linux/phy.h>
-
-#include <asm/netlogic/haldefs.h>
-#include <asm/netlogic/common.h>
-#include <asm/netlogic/xlr/fmn.h>
-#include <asm/netlogic/xlr/xlr.h>
-#include <asm/netlogic/psb-bootinfo.h>
-#include <asm/netlogic/xlr/pic.h>
-#include <asm/netlogic/xlr/iomap.h>
-
-#include "platform_net.h"
-
-/* Linux Net */
-#define MAX_NUM_GMAC 8
-#define MAX_NUM_XLS_GMAC 8
-#define MAX_NUM_XLR_GMAC 4
-
-static u32 xlr_gmac_offsets[] = {
- NETLOGIC_IO_GMAC_0_OFFSET, NETLOGIC_IO_GMAC_1_OFFSET,
- NETLOGIC_IO_GMAC_2_OFFSET, NETLOGIC_IO_GMAC_3_OFFSET,
- NETLOGIC_IO_GMAC_4_OFFSET, NETLOGIC_IO_GMAC_5_OFFSET,
- NETLOGIC_IO_GMAC_6_OFFSET, NETLOGIC_IO_GMAC_7_OFFSET
-};
-
-static u32 xlr_gmac_irqs[] = { PIC_GMAC_0_IRQ, PIC_GMAC_1_IRQ,
- PIC_GMAC_2_IRQ, PIC_GMAC_3_IRQ,
- PIC_GMAC_4_IRQ, PIC_GMAC_5_IRQ,
- PIC_GMAC_6_IRQ, PIC_GMAC_7_IRQ
-};
-
-static struct resource xlr_net0_res[8];
-static struct resource xlr_net1_res[8];
-static u32 __iomem *gmac4_addr;
-static u32 __iomem *gpio_addr;
-
-static void xlr_resource_init(struct resource *res, int offset, int irq)
-{
- res->name = "gmac";
-
- res->start = CPHYSADDR(nlm_mmio_base(offset));
- res->end = res->start + 0xfff;
- res->flags = IORESOURCE_MEM;
-
- res++;
- res->name = "gmac";
- res->start = irq;
- res->end = irq;
- res->flags = IORESOURCE_IRQ;
-}
-
-static struct platform_device *gmac_controller2_init(void *gmac0_addr)
-{
- int mac;
- static struct xlr_net_data ndata1 = {
- .phy_interface = PHY_INTERFACE_MODE_SGMII,
- .rfr_station = FMN_STNID_GMAC1_FR_0,
- .bucket_size = xlr_board_fmn_config.bucket_size,
- .gmac_fmn_info = &xlr_board_fmn_config.gmac[1],
- };
-
- static struct platform_device xlr_net_dev1 = {
- .name = "xlr-net",
- .id = 1,
- .dev.platform_data = &ndata1,
- };
-
- gmac4_addr =
- ioremap(CPHYSADDR(nlm_mmio_base(NETLOGIC_IO_GMAC_4_OFFSET)),
- 0xfff);
- ndata1.serdes_addr = gmac4_addr;
- ndata1.pcs_addr = gmac4_addr;
- ndata1.mii_addr = gmac0_addr;
- ndata1.gpio_addr = gpio_addr;
- ndata1.cpu_mask = nlm_current_node()->coremask;
-
- xlr_net_dev1.resource = xlr_net1_res;
-
- for (mac = 0; mac < 4; mac++) {
- ndata1.tx_stnid[mac] = FMN_STNID_GMAC1_TX0 + mac;
- ndata1.phy_addr[mac] = mac + 4 + 0x10;
-
- xlr_resource_init(&xlr_net1_res[mac * 2],
- xlr_gmac_offsets[mac + 4],
- xlr_gmac_irqs[mac + 4]);
- }
- xlr_net_dev1.num_resources = 8;
-
- return &xlr_net_dev1;
-}
-
-static void xls_gmac_init(void)
-{
- int mac;
- struct platform_device *xlr_net_dev1;
- void __iomem *gmac0_addr =
- ioremap(CPHYSADDR(nlm_mmio_base(NETLOGIC_IO_GMAC_0_OFFSET)),
- 0xfff);
-
- static struct xlr_net_data ndata0 = {
- .rfr_station = FMN_STNID_GMACRFR_0,
- .bucket_size = xlr_board_fmn_config.bucket_size,
- .gmac_fmn_info = &xlr_board_fmn_config.gmac[0],
- };
-
- static struct platform_device xlr_net_dev0 = {
- .name = "xlr-net",
- .id = 0,
- };
- xlr_net_dev0.dev.platform_data = &ndata0;
- ndata0.serdes_addr = gmac0_addr;
- ndata0.pcs_addr = gmac0_addr;
- ndata0.mii_addr = gmac0_addr;
-
- /* Passing GPIO base for serdes init. Only needed on sgmii ports */
- gpio_addr =
- ioremap(CPHYSADDR(nlm_mmio_base(NETLOGIC_IO_GPIO_OFFSET)),
- 0xfff);
- ndata0.gpio_addr = gpio_addr;
- ndata0.cpu_mask = nlm_current_node()->coremask;
-
- xlr_net_dev0.resource = xlr_net0_res;
-
- switch (nlm_prom_info.board_major_version) {
- case 12:
- /* first block RGMII or XAUI, use RGMII */
- ndata0.phy_interface = PHY_INTERFACE_MODE_RGMII;
- ndata0.tx_stnid[0] = FMN_STNID_GMAC0_TX0;
- ndata0.phy_addr[0] = 0;
-
- xlr_net_dev0.num_resources = 2;
-
- xlr_resource_init(&xlr_net0_res[0], xlr_gmac_offsets[0],
- xlr_gmac_irqs[0]);
- platform_device_register(&xlr_net_dev0);
-
- /* second block is XAUI, not supported yet */
- break;
- default:
- /* default XLS config, all ports SGMII */
- ndata0.phy_interface = PHY_INTERFACE_MODE_SGMII;
- for (mac = 0; mac < 4; mac++) {
- ndata0.tx_stnid[mac] = FMN_STNID_GMAC0_TX0 + mac;
- ndata0.phy_addr[mac] = mac + 0x10;
-
- xlr_resource_init(&xlr_net0_res[mac * 2],
- xlr_gmac_offsets[mac],
- xlr_gmac_irqs[mac]);
- }
- xlr_net_dev0.num_resources = 8;
- platform_device_register(&xlr_net_dev0);
-
- xlr_net_dev1 = gmac_controller2_init(gmac0_addr);
- platform_device_register(xlr_net_dev1);
- }
-}
-
-static void xlr_gmac_init(void)
-{
- int mac;
-
- /* assume all GMACs for now */
- static struct xlr_net_data ndata0 = {
- .phy_interface = PHY_INTERFACE_MODE_RGMII,
- .serdes_addr = NULL,
- .pcs_addr = NULL,
- .rfr_station = FMN_STNID_GMACRFR_0,
- .bucket_size = xlr_board_fmn_config.bucket_size,
- .gmac_fmn_info = &xlr_board_fmn_config.gmac[0],
- .gpio_addr = NULL,
- };
-
- static struct platform_device xlr_net_dev0 = {
- .name = "xlr-net",
- .id = 0,
- .dev.platform_data = &ndata0,
- };
- ndata0.mii_addr =
- ioremap(CPHYSADDR(nlm_mmio_base(NETLOGIC_IO_GMAC_0_OFFSET)),
- 0xfff);
-
- ndata0.cpu_mask = nlm_current_node()->coremask;
-
- for (mac = 0; mac < MAX_NUM_XLR_GMAC; mac++) {
- ndata0.tx_stnid[mac] = FMN_STNID_GMAC0_TX0 + mac;
- ndata0.phy_addr[mac] = mac;
- xlr_resource_init(&xlr_net0_res[mac * 2], xlr_gmac_offsets[mac],
- xlr_gmac_irqs[mac]);
- }
- xlr_net_dev0.num_resources = 8;
- xlr_net_dev0.resource = xlr_net0_res;
-
- platform_device_register(&xlr_net_dev0);
-}
-
-static int __init xlr_net_init(void)
-{
- if (nlm_chip_is_xls())
- xls_gmac_init();
- else
- xlr_gmac_init();
-
- return 0;
-}
-
-arch_initcall(xlr_net_init);
diff --git a/drivers/staging/netlogic/platform_net.h b/drivers/staging/netlogic/platform_net.h
deleted file mode 100644
index c8d4c13424c6..000000000000
--- a/drivers/staging/netlogic/platform_net.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
-/*
- * Copyright (c) 2003-2012 Broadcom Corporation
- * All Rights Reserved
- */
-
-#define PORTS_PER_CONTROLLER 4
-
-struct xlr_net_data {
- int cpu_mask;
- u32 __iomem *mii_addr;
- u32 __iomem *serdes_addr;
- u32 __iomem *pcs_addr;
- u32 __iomem *gpio_addr;
- int phy_interface;
- int rfr_station;
- int tx_stnid[PORTS_PER_CONTROLLER];
- int *bucket_size;
- int phy_addr[PORTS_PER_CONTROLLER];
- struct xlr_fmn_info *gmac_fmn_info;
-};
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
deleted file mode 100644
index 69ea61faf8fa..000000000000
--- a/drivers/staging/netlogic/xlr_net.c
+++ /dev/null
@@ -1,1080 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-/*
- * Copyright (c) 2003-2012 Broadcom Corporation
- * All Rights Reserved
- */
-
-#include <linux/phy.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/smp.h>
-#include <linux/ethtool.h>
-#include <linux/module.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/jiffies.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-
-#include <asm/mipsregs.h>
-/*
- * fmn.h - For FMN credit configuration and registering fmn_handler.
- * FMN is communication mechanism that allows processing agents within
- * XLR/XLS to communicate each other.
- */
-#include <asm/netlogic/xlr/fmn.h>
-
-#include "platform_net.h"
-#include "xlr_net.h"
-
-/*
- * The readl/writel implementation byteswaps on XLR/XLS, so
- * we need to use __raw_ IO to read the NAE registers
- * because they are in the big-endian MMIO area on the SoC.
- */
-static inline void xlr_nae_wreg(u32 __iomem *base, unsigned int reg, u32 val)
-{
- __raw_writel(val, base + reg);
-}
-
-static inline u32 xlr_nae_rdreg(u32 __iomem *base, unsigned int reg)
-{
- return __raw_readl(base + reg);
-}
-
-static inline void xlr_reg_update(u32 *base_addr, u32 off, u32 val, u32 mask)
-{
- u32 tmp;
-
- tmp = xlr_nae_rdreg(base_addr, off);
- xlr_nae_wreg(base_addr, off, (tmp & ~mask) | (val & mask));
-}
-
-#define MAC_SKB_BACK_PTR_SIZE SMP_CACHE_BYTES
-
-static int send_to_rfr_fifo(struct xlr_net_priv *priv, void *addr)
-{
- struct nlm_fmn_msg msg;
- int ret = 0, num_try = 0, stnid;
- unsigned long paddr, mflags;
-
- paddr = virt_to_bus(addr);
- msg.msg0 = (u64)paddr & 0xffffffffe0ULL;
- msg.msg1 = 0;
- msg.msg2 = 0;
- msg.msg3 = 0;
- stnid = priv->nd->rfr_station;
- do {
- mflags = nlm_cop2_enable_irqsave();
- ret = nlm_fmn_send(1, 0, stnid, &msg);
- nlm_cop2_disable_irqrestore(mflags);
- if (ret == 0)
- return 0;
- } while (++num_try < 10000);
-
- netdev_err(priv->ndev, "Send to RFR failed in RX path\n");
- return ret;
-}
-
-static inline unsigned char *xlr_alloc_skb(void)
-{
- struct sk_buff *skb;
- int buf_len = sizeof(struct sk_buff *);
- unsigned char *skb_data;
-
- /* skb->data is cache aligned */
- skb = alloc_skb(XLR_RX_BUF_SIZE, GFP_ATOMIC);
- if (!skb)
- return NULL;
- skb_data = skb->data;
- skb_reserve(skb, MAC_SKB_BACK_PTR_SIZE);
- memcpy(skb_data, &skb, buf_len);
-
- return skb->data;
-}
-
-static void xlr_net_fmn_handler(int bkt, int src_stnid, int size, int code,
- struct nlm_fmn_msg *msg, void *arg)
-{
- struct sk_buff *skb;
- void *skb_data = NULL;
- struct net_device *ndev;
- struct xlr_net_priv *priv;
- u32 port, length;
- unsigned char *addr;
- struct xlr_adapter *adapter = arg;
-
- length = (msg->msg0 >> 40) & 0x3fff;
- if (length == 0) {
- addr = bus_to_virt(msg->msg0 & 0xffffffffffULL);
- addr = addr - MAC_SKB_BACK_PTR_SIZE;
- skb = (struct sk_buff *)(*(unsigned long *)addr);
- dev_kfree_skb_any((struct sk_buff *)addr);
- } else {
- addr = (unsigned char *)
- bus_to_virt(msg->msg0 & 0xffffffffe0ULL);
- length = length - BYTE_OFFSET - MAC_CRC_LEN;
- port = ((int)msg->msg0) & 0x0f;
- addr = addr - MAC_SKB_BACK_PTR_SIZE;
- skb = (struct sk_buff *)(*(unsigned long *)addr);
- skb->dev = adapter->netdev[port];
- if (!skb->dev)
- return;
- ndev = skb->dev;
- priv = netdev_priv(ndev);
-
- /* 16 byte IP header align */
- skb_reserve(skb, BYTE_OFFSET);
- skb_put(skb, length);
- skb->protocol = eth_type_trans(skb, skb->dev);
- netif_rx(skb);
- /* Fill rx ring */
- skb_data = xlr_alloc_skb();
- if (skb_data)
- send_to_rfr_fifo(priv, skb_data);
- }
-}
-
-static struct phy_device *xlr_get_phydev(struct xlr_net_priv *priv)
-{
- return mdiobus_get_phy(priv->mii_bus, priv->phy_addr);
-}
-
-/*
- * Ethtool operation
- */
-static int xlr_get_link_ksettings(struct net_device *ndev,
- struct ethtool_link_ksettings *ecmd)
-{
- struct xlr_net_priv *priv = netdev_priv(ndev);
- struct phy_device *phydev = xlr_get_phydev(priv);
-
- if (!phydev)
- return -ENODEV;
-
- phy_ethtool_ksettings_get(phydev, ecmd);
-
- return 0;
-}
-
-static int xlr_set_link_ksettings(struct net_device *ndev,
- const struct ethtool_link_ksettings *ecmd)
-{
- struct xlr_net_priv *priv = netdev_priv(ndev);
- struct phy_device *phydev = xlr_get_phydev(priv);
-
- if (!phydev)
- return -ENODEV;
- return phy_ethtool_ksettings_set(phydev, ecmd);
-}
-
-static const struct ethtool_ops xlr_ethtool_ops = {
- .get_link_ksettings = xlr_get_link_ksettings,
- .set_link_ksettings = xlr_set_link_ksettings,
-};
-
-/*
- * Net operations
- */
-static int xlr_net_fill_rx_ring(struct net_device *ndev)
-{
- void *skb_data;
- struct xlr_net_priv *priv = netdev_priv(ndev);
- int i;
-
- for (i = 0; i < MAX_FRIN_SPILL / 4; i++) {
- skb_data = xlr_alloc_skb();
- if (!skb_data)
- return -ENOMEM;
- send_to_rfr_fifo(priv, skb_data);
- }
- netdev_info(ndev, "Rx ring setup done\n");
- return 0;
-}
-
-static int xlr_net_open(struct net_device *ndev)
-{
- u32 err;
- struct xlr_net_priv *priv = netdev_priv(ndev);
- struct phy_device *phydev = xlr_get_phydev(priv);
-
- /* schedule a link state check */
- phy_start(phydev);
-
- err = phy_start_aneg(phydev);
- if (err) {
- pr_err("Autoneg failed\n");
- return err;
- }
- /* Setup the speed from PHY to internal reg*/
- xlr_set_gmac_speed(priv);
-
- netif_tx_start_all_queues(ndev);
-
- return 0;
-}
-
-static int xlr_net_stop(struct net_device *ndev)
-{
- struct xlr_net_priv *priv = netdev_priv(ndev);
- struct phy_device *phydev = xlr_get_phydev(priv);
-
- phy_stop(phydev);
- netif_tx_stop_all_queues(ndev);
- return 0;
-}
-
-static void xlr_make_tx_desc(struct nlm_fmn_msg *msg, unsigned long addr,
- struct sk_buff *skb)
-{
- unsigned long physkb = virt_to_phys(skb);
- int cpu_core = nlm_core_id();
- int fr_stn_id = cpu_core * 8 + XLR_FB_STN; /* FB to 6th bucket */
-
- msg->msg0 = (((u64)1 << 63) | /* End of packet descriptor */
- ((u64)127 << 54) | /* No Free back */
- (u64)skb->len << 40 | /* Length of data */
- ((u64)addr));
- msg->msg1 = (((u64)1 << 63) |
- ((u64)fr_stn_id << 54) | /* Free back id */
- (u64)0 << 40 | /* Set len to 0 */
- ((u64)physkb & 0xffffffff)); /* 32bit address */
- msg->msg2 = 0;
- msg->msg3 = 0;
-}
-
-static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
- struct net_device *ndev)
-{
- struct nlm_fmn_msg msg;
- struct xlr_net_priv *priv = netdev_priv(ndev);
- int ret;
- u32 flags;
-
- xlr_make_tx_desc(&msg, virt_to_phys(skb->data), skb);
- flags = nlm_cop2_enable_irqsave();
- ret = nlm_fmn_send(2, 0, priv->tx_stnid, &msg);
- nlm_cop2_disable_irqrestore(flags);
- if (ret)
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
-}
-
-static void xlr_hw_set_mac_addr(struct net_device *ndev)
-{
- struct xlr_net_priv *priv = netdev_priv(ndev);
-
- /* set mac station address */
- xlr_nae_wreg(priv->base_addr, R_MAC_ADDR0,
- ((ndev->dev_addr[5] << 24) | (ndev->dev_addr[4] << 16) |
- (ndev->dev_addr[3] << 8) | (ndev->dev_addr[2])));
- xlr_nae_wreg(priv->base_addr, R_MAC_ADDR0 + 1,
- ((ndev->dev_addr[1] << 24) | (ndev->dev_addr[0] << 16)));
-
- xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK2, 0xffffffff);
- xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK2 + 1, 0xffffffff);
- xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK3, 0xffffffff);
- xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK3 + 1, 0xffffffff);
-
- xlr_nae_wreg(priv->base_addr, R_MAC_FILTER_CONFIG,
- (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
- (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
- (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID));
-
- if (priv->nd->phy_interface == PHY_INTERFACE_MODE_RGMII ||
- priv->nd->phy_interface == PHY_INTERFACE_MODE_SGMII)
- xlr_reg_update(priv->base_addr, R_IPG_IFG, MAC_B2B_IPG, 0x7f);
-}
-
-static int xlr_net_set_mac_addr(struct net_device *ndev, void *data)
-{
- int err;
-
- err = eth_mac_addr(ndev, data);
- if (err)
- return err;
- xlr_hw_set_mac_addr(ndev);
- return 0;
-}
-
-static void xlr_set_rx_mode(struct net_device *ndev)
-{
- struct xlr_net_priv *priv = netdev_priv(ndev);
- u32 regval;
-
- regval = xlr_nae_rdreg(priv->base_addr, R_MAC_FILTER_CONFIG);
-
- if (ndev->flags & IFF_PROMISC) {
- regval |= (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
- (1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
- (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
- (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN);
- } else {
- regval &= ~((1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
- (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN));
- }
-
- xlr_nae_wreg(priv->base_addr, R_MAC_FILTER_CONFIG, regval);
-}
-
-static void xlr_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
-{
- struct xlr_net_priv *priv = netdev_priv(ndev);
-
- stats->rx_packets = xlr_nae_rdreg(priv->base_addr, RX_PACKET_COUNTER);
- stats->tx_packets = xlr_nae_rdreg(priv->base_addr, TX_PACKET_COUNTER);
- stats->rx_bytes = xlr_nae_rdreg(priv->base_addr, RX_BYTE_COUNTER);
- stats->tx_bytes = xlr_nae_rdreg(priv->base_addr, TX_BYTE_COUNTER);
- stats->tx_errors = xlr_nae_rdreg(priv->base_addr, TX_FCS_ERROR_COUNTER);
- stats->rx_dropped = xlr_nae_rdreg(priv->base_addr,
- RX_DROP_PACKET_COUNTER);
- stats->tx_dropped = xlr_nae_rdreg(priv->base_addr,
- TX_DROP_FRAME_COUNTER);
-
- stats->multicast = xlr_nae_rdreg(priv->base_addr,
- RX_MULTICAST_PACKET_COUNTER);
- stats->collisions = xlr_nae_rdreg(priv->base_addr,
- TX_TOTAL_COLLISION_COUNTER);
-
- stats->rx_length_errors = xlr_nae_rdreg(priv->base_addr,
- RX_FRAME_LENGTH_ERROR_COUNTER);
- stats->rx_over_errors = xlr_nae_rdreg(priv->base_addr,
- RX_DROP_PACKET_COUNTER);
- stats->rx_crc_errors = xlr_nae_rdreg(priv->base_addr,
- RX_FCS_ERROR_COUNTER);
- stats->rx_frame_errors = xlr_nae_rdreg(priv->base_addr,
- RX_ALIGNMENT_ERROR_COUNTER);
-
- stats->rx_fifo_errors = xlr_nae_rdreg(priv->base_addr,
- RX_DROP_PACKET_COUNTER);
- stats->rx_missed_errors = xlr_nae_rdreg(priv->base_addr,
- RX_CARRIER_SENSE_ERROR_COUNTER);
-
- stats->rx_errors = (stats->rx_over_errors + stats->rx_crc_errors +
- stats->rx_frame_errors + stats->rx_fifo_errors +
- stats->rx_missed_errors);
-
- stats->tx_aborted_errors = xlr_nae_rdreg(priv->base_addr,
- TX_EXCESSIVE_COLLISION_PACKET_COUNTER);
- stats->tx_carrier_errors = xlr_nae_rdreg(priv->base_addr,
- TX_DROP_FRAME_COUNTER);
- stats->tx_fifo_errors = xlr_nae_rdreg(priv->base_addr,
- TX_DROP_FRAME_COUNTER);
-}
-
-static const struct net_device_ops xlr_netdev_ops = {
- .ndo_open = xlr_net_open,
- .ndo_stop = xlr_net_stop,
- .ndo_start_xmit = xlr_net_start_xmit,
- .ndo_select_queue = dev_pick_tx_cpu_id,
- .ndo_set_mac_address = xlr_net_set_mac_addr,
- .ndo_set_rx_mode = xlr_set_rx_mode,
- .ndo_get_stats64 = xlr_stats,
-};
-
-/*
- * Gmac init
- */
-static void *xlr_config_spill(struct xlr_net_priv *priv, int reg_start_0,
- int reg_start_1, int reg_size, int size)
-{
- void *spill;
- u32 *base;
- unsigned long phys_addr;
- u32 spill_size;
-
- base = priv->base_addr;
- spill_size = size;
- spill = kmalloc(spill_size + SMP_CACHE_BYTES, GFP_KERNEL);
- if (!spill)
- return ZERO_SIZE_PTR;
-
- spill = PTR_ALIGN(spill, SMP_CACHE_BYTES);
- phys_addr = virt_to_phys(spill);
- dev_dbg(&priv->ndev->dev, "Allocated spill %d bytes at %lx\n",
- size, phys_addr);
- xlr_nae_wreg(base, reg_start_0, (phys_addr >> 5) & 0xffffffff);
- xlr_nae_wreg(base, reg_start_1, ((u64)phys_addr >> 37) & 0x07);
- xlr_nae_wreg(base, reg_size, spill_size);
-
- return spill;
-}
-
-/*
- * Configure the 6 FIFO's that are used by the network accelarator to
- * communicate with the rest of the XLx device. 4 of the FIFO's are for
- * packets from NA --> cpu (called Class FIFO's) and 2 are for feeding
- * the NA with free descriptors.
- */
-static void xlr_config_fifo_spill_area(struct xlr_net_priv *priv)
-{
- priv->frin_spill = xlr_config_spill(priv,
- R_REG_FRIN_SPILL_MEM_START_0,
- R_REG_FRIN_SPILL_MEM_START_1,
- R_REG_FRIN_SPILL_MEM_SIZE,
- MAX_FRIN_SPILL * sizeof(u64));
- priv->frout_spill = xlr_config_spill(priv,
- R_FROUT_SPILL_MEM_START_0,
- R_FROUT_SPILL_MEM_START_1,
- R_FROUT_SPILL_MEM_SIZE,
- MAX_FROUT_SPILL * sizeof(u64));
- priv->class_0_spill = xlr_config_spill(priv,
- R_CLASS0_SPILL_MEM_START_0,
- R_CLASS0_SPILL_MEM_START_1,
- R_CLASS0_SPILL_MEM_SIZE,
- MAX_CLASS_0_SPILL * sizeof(u64));
- priv->class_1_spill = xlr_config_spill(priv,
- R_CLASS1_SPILL_MEM_START_0,
- R_CLASS1_SPILL_MEM_START_1,
- R_CLASS1_SPILL_MEM_SIZE,
- MAX_CLASS_1_SPILL * sizeof(u64));
- priv->class_2_spill = xlr_config_spill(priv,
- R_CLASS2_SPILL_MEM_START_0,
- R_CLASS2_SPILL_MEM_START_1,
- R_CLASS2_SPILL_MEM_SIZE,
- MAX_CLASS_2_SPILL * sizeof(u64));
- priv->class_3_spill = xlr_config_spill(priv,
- R_CLASS3_SPILL_MEM_START_0,
- R_CLASS3_SPILL_MEM_START_1,
- R_CLASS3_SPILL_MEM_SIZE,
- MAX_CLASS_3_SPILL * sizeof(u64));
-}
-
-/*
- * Configure PDE to Round-Robin distribution of packets to the
- * available cpu
- */
-static void xlr_config_pde(struct xlr_net_priv *priv)
-{
- int i = 0;
- u64 bkt_map = 0;
-
- /* Each core has 8 buckets(station) */
- for (i = 0; i < hweight32(priv->nd->cpu_mask); i++)
- bkt_map |= (0xff << (i * 8));
-
- xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_0, (bkt_map & 0xffffffff));
- xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_0 + 1,
- ((bkt_map >> 32) & 0xffffffff));
-
- xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_1, (bkt_map & 0xffffffff));
- xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_1 + 1,
- ((bkt_map >> 32) & 0xffffffff));
-
- xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_2, (bkt_map & 0xffffffff));
- xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_2 + 1,
- ((bkt_map >> 32) & 0xffffffff));
-
- xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_3, (bkt_map & 0xffffffff));
- xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_3 + 1,
- ((bkt_map >> 32) & 0xffffffff));
-}
-
-/*
- * Setup the Message ring credits, bucket size and other
- * common configuration
- */
-static int xlr_config_common(struct xlr_net_priv *priv)
-{
- struct xlr_fmn_info *gmac = priv->nd->gmac_fmn_info;
- int start_stn_id = gmac->start_stn_id;
- int end_stn_id = gmac->end_stn_id;
- int *bucket_size = priv->nd->bucket_size;
- int i, j, err;
-
- /* Setting non-core MsgBktSize(0x321 - 0x325) */
- for (i = start_stn_id; i <= end_stn_id; i++) {
- xlr_nae_wreg(priv->base_addr,
- R_GMAC_RFR0_BUCKET_SIZE + i - start_stn_id,
- bucket_size[i]);
- }
-
- /*
- * Setting non-core Credit counter register
- * Distributing Gmac's credit to CPU's
- */
- for (i = 0; i < 8; i++) {
- for (j = 0; j < 8; j++)
- xlr_nae_wreg(priv->base_addr,
- (R_CC_CPU0_0 + (i * 8)) + j,
- gmac->credit_config[(i * 8) + j]);
- }
-
- xlr_nae_wreg(priv->base_addr, R_MSG_TX_THRESHOLD, 3);
- xlr_nae_wreg(priv->base_addr, R_DMACR0, 0xffffffff);
- xlr_nae_wreg(priv->base_addr, R_DMACR1, 0xffffffff);
- xlr_nae_wreg(priv->base_addr, R_DMACR2, 0xffffffff);
- xlr_nae_wreg(priv->base_addr, R_DMACR3, 0xffffffff);
- xlr_nae_wreg(priv->base_addr, R_FREEQCARVE, 0);
-
- err = xlr_net_fill_rx_ring(priv->ndev);
- if (err)
- return err;
- nlm_register_fmn_handler(start_stn_id, end_stn_id, xlr_net_fmn_handler,
- priv->adapter);
- return 0;
-}
-
-static void xlr_config_translate_table(struct xlr_net_priv *priv)
-{
- u32 cpu_mask;
- u32 val;
- int bkts[32]; /* one bucket is assumed for each cpu */
- int b1, b2, c1, c2, i, j, k;
- int use_bkt;
-
- use_bkt = 0;
- cpu_mask = priv->nd->cpu_mask;
-
- pr_info("Using %s-based distribution\n",
- (use_bkt) ? "bucket" : "class");
- j = 0;
- for (i = 0; i < 32; i++) {
- if ((1 << i) & cpu_mask) {
- /* for each cpu, mark the 4+threadid bucket */
- bkts[j] = ((i / 4) * 8) + (i % 4);
- j++;
- }
- }
-
- /*configure the 128 * 9 Translation table to send to available buckets*/
- k = 0;
- c1 = 3;
- c2 = 0;
- for (i = 0; i < 64; i++) {
- /*
- * On use_bkt set the b0, b1 are used, else
- * the 4 classes are used, here implemented
- * a logic to distribute the packets to the
- * buckets equally or based on the class
- */
- c1 = (c1 + 1) & 3;
- c2 = (c1 + 1) & 3;
- b1 = bkts[k];
- k = (k + 1) % j;
- b2 = bkts[k];
- k = (k + 1) % j;
-
- val = ((c1 << 23) | (b1 << 17) | (use_bkt << 16) |
- (c2 << 7) | (b2 << 1) | (use_bkt << 0));
- dev_dbg(&priv->ndev->dev, "Table[%d] b1=%d b2=%d c1=%d c2=%d\n",
- i, b1, b2, c1, c2);
- xlr_nae_wreg(priv->base_addr, R_TRANSLATETABLE + i, val);
- c1 = c2;
- }
-}
-
-static void xlr_config_parser(struct xlr_net_priv *priv)
-{
- u32 val;
-
- /* Mark it as ETHERNET type */
- xlr_nae_wreg(priv->base_addr, R_L2TYPE_0, 0x01);
-
- /* Use 7bit CRChash for flow classification with 127 as CRC polynomial*/
- xlr_nae_wreg(priv->base_addr, R_PARSERCONFIGREG,
- ((0x7f << 8) | (1 << 1)));
-
- /* configure the parser : L2 Type is configured in the bootloader */
- /* extract IP: src, dest protocol */
- xlr_nae_wreg(priv->base_addr, R_L3CTABLE,
- (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) |
- (0x0800 << 0));
- xlr_nae_wreg(priv->base_addr, R_L3CTABLE + 1,
- (9 << 25) | (1 << 21) | (12 << 14) | (4 << 10) |
- (16 << 4) | 4);
-
- /* Configure to extract SRC port and Dest port for TCP and UDP pkts */
- xlr_nae_wreg(priv->base_addr, R_L4CTABLE, 6);
- xlr_nae_wreg(priv->base_addr, R_L4CTABLE + 2, 17);
- val = ((0 << 21) | (2 << 17) | (2 << 11) | (2 << 7));
- xlr_nae_wreg(priv->base_addr, R_L4CTABLE + 1, val);
- xlr_nae_wreg(priv->base_addr, R_L4CTABLE + 3, val);
-
- xlr_config_translate_table(priv);
-}
-
-static int xlr_phy_write(u32 *base_addr, int phy_addr, int regnum, u16 val)
-{
- unsigned long timeout, stoptime, checktime;
- int timedout;
-
- /* 100ms timeout*/
- timeout = msecs_to_jiffies(100);
- stoptime = jiffies + timeout;
- timedout = 0;
-
- xlr_nae_wreg(base_addr, R_MII_MGMT_ADDRESS, (phy_addr << 8) | regnum);
-
- /* Write the data which starts the write cycle */
- xlr_nae_wreg(base_addr, R_MII_MGMT_WRITE_DATA, (u32)val);
-
- /* poll for the read cycle to complete */
- while (!timedout) {
- checktime = jiffies;
- if (xlr_nae_rdreg(base_addr, R_MII_MGMT_INDICATORS) == 0)
- break;
- timedout = time_after(checktime, stoptime);
- }
- if (timedout) {
- pr_info("Phy device write err: device busy");
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int xlr_phy_read(u32 *base_addr, int phy_addr, int regnum)
-{
- unsigned long timeout, stoptime, checktime;
- int timedout;
-
- /* 100ms timeout*/
- timeout = msecs_to_jiffies(100);
- stoptime = jiffies + timeout;
- timedout = 0;
-
- /* setup the phy reg to be used */
- xlr_nae_wreg(base_addr, R_MII_MGMT_ADDRESS,
- (phy_addr << 8) | (regnum << 0));
-
- /* Issue the read command */
- xlr_nae_wreg(base_addr, R_MII_MGMT_COMMAND,
- (1 << O_MII_MGMT_COMMAND__rstat));
-
- /* poll for the read cycle to complete */
- while (!timedout) {
- checktime = jiffies;
- if (xlr_nae_rdreg(base_addr, R_MII_MGMT_INDICATORS) == 0)
- break;
- timedout = time_after(checktime, stoptime);
- }
- if (timedout) {
- pr_info("Phy device read err: device busy");
- return -EBUSY;
- }
-
- /* clear the read cycle */
- xlr_nae_wreg(base_addr, R_MII_MGMT_COMMAND, 0);
-
- /* Read the data */
- return xlr_nae_rdreg(base_addr, R_MII_MGMT_STATUS);
-}
-
-static int xlr_mii_write(struct mii_bus *bus, int phy_addr, int regnum, u16 val)
-{
- struct xlr_net_priv *priv = bus->priv;
- int ret;
-
- ret = xlr_phy_write(priv->mii_addr, phy_addr, regnum, val);
- dev_dbg(&priv->ndev->dev, "mii_write phy %d : %d <- %x [%x]\n",
- phy_addr, regnum, val, ret);
- return ret;
-}
-
-static int xlr_mii_read(struct mii_bus *bus, int phy_addr, int regnum)
-{
- struct xlr_net_priv *priv = bus->priv;
- int ret;
-
- ret = xlr_phy_read(priv->mii_addr, phy_addr, regnum);
- dev_dbg(&priv->ndev->dev, "mii_read phy %d : %d [%x]\n",
- phy_addr, regnum, ret);
- return ret;
-}
-
-/*
- * XLR ports are RGMII. XLS ports are SGMII mostly except the port0,
- * which can be configured either SGMII or RGMII, considered SGMII
- * by default, if board setup to RGMII the port_type need to set
- * accordingly.Serdes and PCS layer need to configured for SGMII
- */
-static void xlr_sgmii_init(struct xlr_net_priv *priv)
-{
- int phy;
-
- xlr_phy_write(priv->serdes_addr, 26, 0, 0x6DB0);
- xlr_phy_write(priv->serdes_addr, 26, 1, 0xFFFF);
- xlr_phy_write(priv->serdes_addr, 26, 2, 0xB6D0);
- xlr_phy_write(priv->serdes_addr, 26, 3, 0x00FF);
- xlr_phy_write(priv->serdes_addr, 26, 4, 0x0000);
- xlr_phy_write(priv->serdes_addr, 26, 5, 0x0000);
- xlr_phy_write(priv->serdes_addr, 26, 6, 0x0005);
- xlr_phy_write(priv->serdes_addr, 26, 7, 0x0001);
- xlr_phy_write(priv->serdes_addr, 26, 8, 0x0000);
- xlr_phy_write(priv->serdes_addr, 26, 9, 0x0000);
- xlr_phy_write(priv->serdes_addr, 26, 10, 0x0000);
-
- /* program GPIO values for serdes init parameters */
- xlr_nae_wreg(priv->gpio_addr, 0x20, 0x7e6802);
- xlr_nae_wreg(priv->gpio_addr, 0x10, 0x7104);
-
- xlr_nae_wreg(priv->gpio_addr, 0x22, 0x7e6802);
- xlr_nae_wreg(priv->gpio_addr, 0x21, 0x7104);
-
- /* enable autoneg - more magic */
- phy = priv->phy_addr % 4 + 27;
- xlr_phy_write(priv->pcs_addr, phy, 0, 0x1000);
- xlr_phy_write(priv->pcs_addr, phy, 0, 0x0200);
-}
-
-void xlr_set_gmac_speed(struct xlr_net_priv *priv)
-{
- struct phy_device *phydev = xlr_get_phydev(priv);
- int speed;
-
- if (phydev->interface == PHY_INTERFACE_MODE_SGMII)
- xlr_sgmii_init(priv);
-
- if (phydev->speed != priv->phy_speed) {
- speed = phydev->speed;
- if (speed == SPEED_1000) {
- /* Set interface to Byte mode */
- xlr_nae_wreg(priv->base_addr, R_MAC_CONFIG_2, 0x7217);
- priv->phy_speed = speed;
- } else if (speed == SPEED_100 || speed == SPEED_10) {
- /* Set interface to Nibble mode */
- xlr_nae_wreg(priv->base_addr, R_MAC_CONFIG_2, 0x7117);
- priv->phy_speed = speed;
- }
- /* Set SGMII speed in Interface control reg */
- if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
- if (speed == SPEED_10)
- xlr_nae_wreg(priv->base_addr,
- R_INTERFACE_CONTROL,
- SGMII_SPEED_10);
- if (speed == SPEED_100)
- xlr_nae_wreg(priv->base_addr,
- R_INTERFACE_CONTROL,
- SGMII_SPEED_100);
- if (speed == SPEED_1000)
- xlr_nae_wreg(priv->base_addr,
- R_INTERFACE_CONTROL,
- SGMII_SPEED_1000);
- }
- if (speed == SPEED_10)
- xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x2);
- if (speed == SPEED_100)
- xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x1);
- if (speed == SPEED_1000)
- xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x0);
- }
- pr_info("gmac%d : %dMbps\n", priv->port_id, priv->phy_speed);
-}
-
-static void xlr_gmac_link_adjust(struct net_device *ndev)
-{
- struct xlr_net_priv *priv = netdev_priv(ndev);
- struct phy_device *phydev = xlr_get_phydev(priv);
- u32 intreg;
-
- intreg = xlr_nae_rdreg(priv->base_addr, R_INTREG);
- if (phydev->link) {
- if (phydev->speed != priv->phy_speed) {
- xlr_set_gmac_speed(priv);
- pr_info("gmac%d : Link up\n", priv->port_id);
- }
- } else {
- xlr_set_gmac_speed(priv);
- pr_info("gmac%d : Link down\n", priv->port_id);
- }
-}
-
-static int xlr_mii_probe(struct xlr_net_priv *priv)
-{
- struct phy_device *phydev = xlr_get_phydev(priv);
-
- if (!phydev) {
- pr_err("no PHY found on phy_addr %d\n", priv->phy_addr);
- return -ENODEV;
- }
-
- /* Attach MAC to PHY */
- phydev = phy_connect(priv->ndev, phydev_name(phydev),
- xlr_gmac_link_adjust, priv->nd->phy_interface);
-
- if (IS_ERR(phydev)) {
- pr_err("could not attach PHY\n");
- return PTR_ERR(phydev);
- }
- phydev->supported &= (ADVERTISED_10baseT_Full
- | ADVERTISED_10baseT_Half
- | ADVERTISED_100baseT_Full
- | ADVERTISED_100baseT_Half
- | ADVERTISED_1000baseT_Full
- | ADVERTISED_Autoneg
- | ADVERTISED_MII);
-
- phydev->advertising = phydev->supported;
- phy_attached_info(phydev);
- return 0;
-}
-
-static int xlr_setup_mdio(struct xlr_net_priv *priv,
- struct platform_device *pdev)
-{
- int err;
-
- priv->mii_bus = mdiobus_alloc();
- if (!priv->mii_bus) {
- pr_err("mdiobus alloc failed\n");
- return -ENOMEM;
- }
-
- priv->mii_bus->priv = priv;
- priv->mii_bus->name = "xlr-mdio";
- snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
- priv->mii_bus->name, priv->port_id);
- priv->mii_bus->read = xlr_mii_read;
- priv->mii_bus->write = xlr_mii_write;
- priv->mii_bus->parent = &pdev->dev;
-
- /* Scan only the enabled address */
- priv->mii_bus->phy_mask = ~(1 << priv->phy_addr);
-
- /* setting clock divisor to 54 */
- xlr_nae_wreg(priv->base_addr, R_MII_MGMT_CONFIG, 0x7);
-
- err = mdiobus_register(priv->mii_bus);
- if (err) {
- mdiobus_free(priv->mii_bus);
- pr_err("mdio bus registration failed\n");
- return err;
- }
-
- pr_info("Registered mdio bus id : %s\n", priv->mii_bus->id);
- err = xlr_mii_probe(priv);
- if (err) {
- mdiobus_free(priv->mii_bus);
- return err;
- }
- return 0;
-}
-
-static void xlr_port_enable(struct xlr_net_priv *priv)
-{
- u32 prid = (read_c0_prid() & 0xf000);
-
- /* Setup MAC_CONFIG reg if (xls & rgmii) */
- if ((prid == 0x8000 || prid == 0x4000 || prid == 0xc000) &&
- priv->nd->phy_interface == PHY_INTERFACE_MODE_RGMII)
- xlr_reg_update(priv->base_addr, R_RX_CONTROL,
- (1 << O_RX_CONTROL__RGMII),
- (1 << O_RX_CONTROL__RGMII));
-
- /* Rx Tx enable */
- xlr_reg_update(priv->base_addr, R_MAC_CONFIG_1,
- ((1 << O_MAC_CONFIG_1__rxen) |
- (1 << O_MAC_CONFIG_1__txen) |
- (1 << O_MAC_CONFIG_1__rxfc) |
- (1 << O_MAC_CONFIG_1__txfc)),
- ((1 << O_MAC_CONFIG_1__rxen) |
- (1 << O_MAC_CONFIG_1__txen) |
- (1 << O_MAC_CONFIG_1__rxfc) |
- (1 << O_MAC_CONFIG_1__txfc)));
-
- /* Setup tx control reg */
- xlr_reg_update(priv->base_addr, R_TX_CONTROL,
- ((1 << O_TX_CONTROL__TXENABLE) |
- (512 << O_TX_CONTROL__TXTHRESHOLD)), 0x3fff);
-
- /* Setup rx control reg */
- xlr_reg_update(priv->base_addr, R_RX_CONTROL,
- 1 << O_RX_CONTROL__RXENABLE,
- 1 << O_RX_CONTROL__RXENABLE);
-}
-
-static void xlr_port_disable(struct xlr_net_priv *priv)
-{
- /* Setup MAC_CONFIG reg */
- /* Rx Tx disable */
- xlr_reg_update(priv->base_addr, R_MAC_CONFIG_1,
- ((1 << O_MAC_CONFIG_1__rxen) |
- (1 << O_MAC_CONFIG_1__txen) |
- (1 << O_MAC_CONFIG_1__rxfc) |
- (1 << O_MAC_CONFIG_1__txfc)), 0x0);
-
- /* Setup tx control reg */
- xlr_reg_update(priv->base_addr, R_TX_CONTROL,
- ((1 << O_TX_CONTROL__TXENABLE) |
- (512 << O_TX_CONTROL__TXTHRESHOLD)), 0);
-
- /* Setup rx control reg */
- xlr_reg_update(priv->base_addr, R_RX_CONTROL,
- 1 << O_RX_CONTROL__RXENABLE, 0);
-}
-
-/*
- * Initialization of gmac
- */
-static int xlr_gmac_init(struct xlr_net_priv *priv,
- struct platform_device *pdev)
-{
- int ret;
-
- pr_info("Initializing the gmac%d\n", priv->port_id);
-
- xlr_port_disable(priv);
-
- xlr_nae_wreg(priv->base_addr, R_DESC_PACK_CTRL,
- (1 << O_DESC_PACK_CTRL__MAXENTRY) |
- (BYTE_OFFSET << O_DESC_PACK_CTRL__BYTEOFFSET) |
- (1600 << O_DESC_PACK_CTRL__REGULARSIZE));
-
- ret = xlr_setup_mdio(priv, pdev);
- if (ret)
- return ret;
- xlr_port_enable(priv);
-
- /* Enable Full-duplex/1000Mbps/CRC */
- xlr_nae_wreg(priv->base_addr, R_MAC_CONFIG_2, 0x7217);
- /* speed 2.5 MHz */
- xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x02);
- /* Setup Interrupt mask reg */
- xlr_nae_wreg(priv->base_addr, R_INTMASK, (1 << O_INTMASK__TXILLEGAL) |
- (1 << O_INTMASK__MDINT) | (1 << O_INTMASK__TXFETCHERROR) |
- (1 << O_INTMASK__P2PSPILLECC) | (1 << O_INTMASK__TAGFULL) |
- (1 << O_INTMASK__UNDERRUN) | (1 << O_INTMASK__ABORT));
-
- /* Clear all stats */
- xlr_reg_update(priv->base_addr, R_STATCTRL, 0, 1 << O_STATCTRL__CLRCNT);
- xlr_reg_update(priv->base_addr, R_STATCTRL, 1 << 2, 1 << 2);
- return 0;
-}
-
-static int xlr_net_probe(struct platform_device *pdev)
-{
- struct xlr_net_priv *priv = NULL;
- struct net_device *ndev;
- struct resource *res;
- struct xlr_adapter *adapter;
- int err, port;
-
- pr_info("XLR/XLS Ethernet Driver controller %d\n", pdev->id);
- /*
- * Allocate our adapter data structure and attach it to the device.
- */
- adapter = devm_kzalloc(&pdev->dev, sizeof(*adapter), GFP_KERNEL);
- if (!adapter)
- return -ENOMEM;
-
- /*
- * XLR and XLS have 1 and 2 NAE controllers respectively.
- * Each controller has 4 gmac ports; each controller is mapped
- * under one parent device, so 4 gmac ports sit under one device.
- */
- for (port = 0; port < pdev->num_resources / 2; port++) {
- ndev = alloc_etherdev_mq(sizeof(struct xlr_net_priv), 32);
- if (!ndev) {
- dev_err(&pdev->dev,
- "Allocation of Ethernet device failed\n");
- return -ENOMEM;
- }
-
- priv = netdev_priv(ndev);
- priv->pdev = pdev;
- priv->ndev = ndev;
- priv->port_id = (pdev->id * 4) + port;
- priv->nd = (struct xlr_net_data *)pdev->dev.platform_data;
- priv->base_addr = devm_platform_ioremap_resource(pdev, port);
- if (IS_ERR(priv->base_addr)) {
- err = PTR_ERR(priv->base_addr);
- goto err_gmac;
- }
- priv->adapter = adapter;
- adapter->netdev[port] = ndev;
-
- res = platform_get_resource(pdev, IORESOURCE_IRQ, port);
- if (!res) {
- dev_err(&pdev->dev, "No irq resource for MAC %d\n",
- priv->port_id);
- err = -ENODEV;
- goto err_gmac;
- }
-
- ndev->irq = res->start;
-
- priv->phy_addr = priv->nd->phy_addr[port];
- priv->tx_stnid = priv->nd->tx_stnid[port];
- priv->mii_addr = priv->nd->mii_addr;
- priv->serdes_addr = priv->nd->serdes_addr;
- priv->pcs_addr = priv->nd->pcs_addr;
- priv->gpio_addr = priv->nd->gpio_addr;
-
- ndev->netdev_ops = &xlr_netdev_ops;
- ndev->watchdog_timeo = HZ;
-
- /* Setup Mac address and Rx mode */
- eth_hw_addr_random(ndev);
- xlr_hw_set_mac_addr(ndev);
- xlr_set_rx_mode(ndev);
-
- priv->num_rx_desc += MAX_NUM_DESC_SPILL;
- ndev->ethtool_ops = &xlr_ethtool_ops;
- SET_NETDEV_DEV(ndev, &pdev->dev);
-
- xlr_config_fifo_spill_area(priv);
- /* Configure PDE to Round-Robin pkt distribution */
- xlr_config_pde(priv);
- xlr_config_parser(priv);
-
- /* Call init with respect to port */
- if (strcmp(res->name, "gmac") == 0) {
- err = xlr_gmac_init(priv, pdev);
- if (err) {
- dev_err(&pdev->dev, "gmac%d init failed\n",
- priv->port_id);
- goto err_gmac;
- }
- }
-
- if (priv->port_id == 0 || priv->port_id == 4) {
- err = xlr_config_common(priv);
- if (err)
- goto err_netdev;
- }
-
- err = register_netdev(ndev);
- if (err) {
- dev_err(&pdev->dev,
- "Registering netdev failed for gmac%d\n",
- priv->port_id);
- goto err_netdev;
- }
- platform_set_drvdata(pdev, priv);
- }
-
- return 0;
-
-err_netdev:
- mdiobus_free(priv->mii_bus);
-err_gmac:
- free_netdev(ndev);
- return err;
-}
-
-static int xlr_net_remove(struct platform_device *pdev)
-{
- struct xlr_net_priv *priv = platform_get_drvdata(pdev);
-
- unregister_netdev(priv->ndev);
- mdiobus_unregister(priv->mii_bus);
- mdiobus_free(priv->mii_bus);
- free_netdev(priv->ndev);
- return 0;
-}
-
-static struct platform_driver xlr_net_driver = {
- .probe = xlr_net_probe,
- .remove = xlr_net_remove,
- .driver = {
- .name = "xlr-net",
- },
-};
-
-module_platform_driver(xlr_net_driver);
-
-MODULE_AUTHOR("Ganesan Ramalingam <ganesanr@broadcom.com>");
-MODULE_DESCRIPTION("Ethernet driver for Netlogic XLR/XLS");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_ALIAS("platform:xlr-net");
diff --git a/drivers/staging/netlogic/xlr_net.h b/drivers/staging/netlogic/xlr_net.h
deleted file mode 100644
index 8365b744f9b3..000000000000
--- a/drivers/staging/netlogic/xlr_net.h
+++ /dev/null
@@ -1,1079 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
-/*
- * Copyright (c) 2003-2012 Broadcom Corporation
- * All Rights Reserved
- */
-
-/* #define MAC_SPLIT_MODE */
-
-#define MAC_SPACING 0x400
-#define XGMAC_SPACING 0x400
-
-/* PE-MCXMAC register and bit field definitions */
-#define R_MAC_CONFIG_1 0x00
-#define O_MAC_CONFIG_1__srst 31
-#define O_MAC_CONFIG_1__simr 30
-#define O_MAC_CONFIG_1__hrrmc 18
-#define W_MAC_CONFIG_1__hrtmc 2
-#define O_MAC_CONFIG_1__hrrfn 16
-#define W_MAC_CONFIG_1__hrtfn 2
-#define O_MAC_CONFIG_1__intlb 8
-#define O_MAC_CONFIG_1__rxfc 5
-#define O_MAC_CONFIG_1__txfc 4
-#define O_MAC_CONFIG_1__srxen 3
-#define O_MAC_CONFIG_1__rxen 2
-#define O_MAC_CONFIG_1__stxen 1
-#define O_MAC_CONFIG_1__txen 0
-#define R_MAC_CONFIG_2 0x01
-#define O_MAC_CONFIG_2__prlen 12
-#define W_MAC_CONFIG_2__prlen 4
-#define O_MAC_CONFIG_2__speed 8
-#define W_MAC_CONFIG_2__speed 2
-#define O_MAC_CONFIG_2__hugen 5
-#define O_MAC_CONFIG_2__flchk 4
-#define O_MAC_CONFIG_2__crce 1
-#define O_MAC_CONFIG_2__fulld 0
-#define R_IPG_IFG 0x02
-#define O_IPG_IFG__ipgr1 24
-#define W_IPG_IFG__ipgr1 7
-#define O_IPG_IFG__ipgr2 16
-#define W_IPG_IFG__ipgr2 7
-#define O_IPG_IFG__mifg 8
-#define W_IPG_IFG__mifg 8
-#define O_IPG_IFG__ipgt 0
-#define W_IPG_IFG__ipgt 7
-#define R_HALF_DUPLEX 0x03
-#define O_HALF_DUPLEX__abebt 24
-#define W_HALF_DUPLEX__abebt 4
-#define O_HALF_DUPLEX__abebe 19
-#define O_HALF_DUPLEX__bpnb 18
-#define O_HALF_DUPLEX__nobo 17
-#define O_HALF_DUPLEX__edxsdfr 16
-#define O_HALF_DUPLEX__retry 12
-#define W_HALF_DUPLEX__retry 4
-#define O_HALF_DUPLEX__lcol 0
-#define W_HALF_DUPLEX__lcol 10
-#define R_MAXIMUM_FRAME_LENGTH 0x04
-#define O_MAXIMUM_FRAME_LENGTH__maxf 0
-#define W_MAXIMUM_FRAME_LENGTH__maxf 16
-#define R_TEST 0x07
-#define O_TEST__mbof 3
-#define O_TEST__rthdf 2
-#define O_TEST__tpause 1
-#define O_TEST__sstct 0
-#define R_MII_MGMT_CONFIG 0x08
-#define O_MII_MGMT_CONFIG__scinc 5
-#define O_MII_MGMT_CONFIG__spre 4
-#define O_MII_MGMT_CONFIG__clks 3
-#define W_MII_MGMT_CONFIG__clks 3
-#define R_MII_MGMT_COMMAND 0x09
-#define O_MII_MGMT_COMMAND__scan 1
-#define O_MII_MGMT_COMMAND__rstat 0
-#define R_MII_MGMT_ADDRESS 0x0A
-#define O_MII_MGMT_ADDRESS__fiad 8
-#define W_MII_MGMT_ADDRESS__fiad 5
-#define O_MII_MGMT_ADDRESS__fgad 5
-#define W_MII_MGMT_ADDRESS__fgad 0
-#define R_MII_MGMT_WRITE_DATA 0x0B
-#define O_MII_MGMT_WRITE_DATA__ctld 0
-#define W_MII_MGMT_WRITE_DATA__ctld 16
-#define R_MII_MGMT_STATUS 0x0C
-#define R_MII_MGMT_INDICATORS 0x0D
-#define O_MII_MGMT_INDICATORS__nvalid 2
-#define O_MII_MGMT_INDICATORS__scan 1
-#define O_MII_MGMT_INDICATORS__busy 0
-#define R_INTERFACE_CONTROL 0x0E
-#define O_INTERFACE_CONTROL__hrstint 31
-#define O_INTERFACE_CONTROL__tbimode 27
-#define O_INTERFACE_CONTROL__ghdmode 26
-#define O_INTERFACE_CONTROL__lhdmode 25
-#define O_INTERFACE_CONTROL__phymod 24
-#define O_INTERFACE_CONTROL__hrrmi 23
-#define O_INTERFACE_CONTROL__rspd 16
-#define O_INTERFACE_CONTROL__hr100 15
-#define O_INTERFACE_CONTROL__frcq 10
-#define O_INTERFACE_CONTROL__nocfr 9
-#define O_INTERFACE_CONTROL__dlfct 8
-#define O_INTERFACE_CONTROL__enjab 0
-#define R_INTERFACE_STATUS 0x0F
-#define O_INTERFACE_STATUS__xsdfr 9
-#define O_INTERFACE_STATUS__ssrr 8
-#define W_INTERFACE_STATUS__ssrr 5
-#define O_INTERFACE_STATUS__miilf 3
-#define O_INTERFACE_STATUS__locar 2
-#define O_INTERFACE_STATUS__sqerr 1
-#define O_INTERFACE_STATUS__jabber 0
-#define R_STATION_ADDRESS_LS 0x10
-#define R_STATION_ADDRESS_MS 0x11
-
-/* A-XGMAC register and bit field definitions */
-#define R_XGMAC_CONFIG_0 0x00
-#define O_XGMAC_CONFIG_0__hstmacrst 31
-#define O_XGMAC_CONFIG_0__hstrstrctl 23
-#define O_XGMAC_CONFIG_0__hstrstrfn 22
-#define O_XGMAC_CONFIG_0__hstrsttctl 18
-#define O_XGMAC_CONFIG_0__hstrsttfn 17
-#define O_XGMAC_CONFIG_0__hstrstmiim 16
-#define O_XGMAC_CONFIG_0__hstloopback 8
-#define R_XGMAC_CONFIG_1 0x01
-#define O_XGMAC_CONFIG_1__hsttctlen 31
-#define O_XGMAC_CONFIG_1__hsttfen 30
-#define O_XGMAC_CONFIG_1__hstrctlen 29
-#define O_XGMAC_CONFIG_1__hstrfen 28
-#define O_XGMAC_CONFIG_1__tfen 26
-#define O_XGMAC_CONFIG_1__rfen 24
-#define O_XGMAC_CONFIG_1__hstrctlshrtp 12
-#define O_XGMAC_CONFIG_1__hstdlyfcstx 10
-#define W_XGMAC_CONFIG_1__hstdlyfcstx 2
-#define O_XGMAC_CONFIG_1__hstdlyfcsrx 8
-#define W_XGMAC_CONFIG_1__hstdlyfcsrx 2
-#define O_XGMAC_CONFIG_1__hstppen 7
-#define O_XGMAC_CONFIG_1__hstbytswp 6
-#define O_XGMAC_CONFIG_1__hstdrplt64 5
-#define O_XGMAC_CONFIG_1__hstprmscrx 4
-#define O_XGMAC_CONFIG_1__hstlenchk 3
-#define O_XGMAC_CONFIG_1__hstgenfcs 2
-#define O_XGMAC_CONFIG_1__hstpadmode 0
-#define W_XGMAC_CONFIG_1__hstpadmode 2
-#define R_XGMAC_CONFIG_2 0x02
-#define O_XGMAC_CONFIG_2__hsttctlfrcp 31
-#define O_XGMAC_CONFIG_2__hstmlnkflth 27
-#define O_XGMAC_CONFIG_2__hstalnkflth 26
-#define O_XGMAC_CONFIG_2__rflnkflt 24
-#define W_XGMAC_CONFIG_2__rflnkflt 2
-#define O_XGMAC_CONFIG_2__hstipgextmod 16
-#define W_XGMAC_CONFIG_2__hstipgextmod 5
-#define O_XGMAC_CONFIG_2__hstrctlfrcp 15
-#define O_XGMAC_CONFIG_2__hstipgexten 5
-#define O_XGMAC_CONFIG_2__hstmipgext 0
-#define W_XGMAC_CONFIG_2__hstmipgext 5
-#define R_XGMAC_CONFIG_3 0x03
-#define O_XGMAC_CONFIG_3__hstfltrfrm 31
-#define W_XGMAC_CONFIG_3__hstfltrfrm 16
-#define O_XGMAC_CONFIG_3__hstfltrfrmdc 15
-#define W_XGMAC_CONFIG_3__hstfltrfrmdc 16
-#define R_XGMAC_STATION_ADDRESS_LS 0x04
-#define O_XGMAC_STATION_ADDRESS_LS__hstmacadr0 0
-#define W_XGMAC_STATION_ADDRESS_LS__hstmacadr0 32
-#define R_XGMAC_STATION_ADDRESS_MS 0x05
-#define R_XGMAC_MAX_FRAME_LEN 0x08
-#define O_XGMAC_MAX_FRAME_LEN__hstmxfrmwctx 16
-#define W_XGMAC_MAX_FRAME_LEN__hstmxfrmwctx 14
-#define O_XGMAC_MAX_FRAME_LEN__hstmxfrmbcrx 0
-#define W_XGMAC_MAX_FRAME_LEN__hstmxfrmbcrx 16
-#define R_XGMAC_REV_LEVEL 0x0B
-#define O_XGMAC_REV_LEVEL__revlvl 0
-#define W_XGMAC_REV_LEVEL__revlvl 15
-#define R_XGMAC_MIIM_COMMAND 0x10
-#define O_XGMAC_MIIM_COMMAND__hstldcmd 3
-#define O_XGMAC_MIIM_COMMAND__hstmiimcmd 0
-#define W_XGMAC_MIIM_COMMAND__hstmiimcmd 3
-#define R_XGMAC_MIIM_FILED 0x11
-#define O_XGMAC_MIIM_FILED__hststfield 30
-#define W_XGMAC_MIIM_FILED__hststfield 2
-#define O_XGMAC_MIIM_FILED__hstopfield 28
-#define W_XGMAC_MIIM_FILED__hstopfield 2
-#define O_XGMAC_MIIM_FILED__hstphyadx 23
-#define W_XGMAC_MIIM_FILED__hstphyadx 5
-#define O_XGMAC_MIIM_FILED__hstregadx 18
-#define W_XGMAC_MIIM_FILED__hstregadx 5
-#define O_XGMAC_MIIM_FILED__hsttafield 16
-#define W_XGMAC_MIIM_FILED__hsttafield 2
-#define O_XGMAC_MIIM_FILED__miimrddat 0
-#define W_XGMAC_MIIM_FILED__miimrddat 16
-#define R_XGMAC_MIIM_CONFIG 0x12
-#define O_XGMAC_MIIM_CONFIG__hstnopram 7
-#define O_XGMAC_MIIM_CONFIG__hstclkdiv 0
-#define W_XGMAC_MIIM_CONFIG__hstclkdiv 7
-#define R_XGMAC_MIIM_LINK_FAIL_VECTOR 0x13
-#define O_XGMAC_MIIM_LINK_FAIL_VECTOR__miimlfvec 0
-#define W_XGMAC_MIIM_LINK_FAIL_VECTOR__miimlfvec 32
-#define R_XGMAC_MIIM_INDICATOR 0x14
-#define O_XGMAC_MIIM_INDICATOR__miimphylf 4
-#define O_XGMAC_MIIM_INDICATOR__miimmoncplt 3
-#define O_XGMAC_MIIM_INDICATOR__miimmonvld 2
-#define O_XGMAC_MIIM_INDICATOR__miimmon 1
-#define O_XGMAC_MIIM_INDICATOR__miimbusy 0
-
-/* GMAC stats registers */
-#define R_RBYT 0x27
-#define R_RPKT 0x28
-#define R_RFCS 0x29
-#define R_RMCA 0x2A
-#define R_RBCA 0x2B
-#define R_RXCF 0x2C
-#define R_RXPF 0x2D
-#define R_RXUO 0x2E
-#define R_RALN 0x2F
-#define R_RFLR 0x30
-#define R_RCDE 0x31
-#define R_RCSE 0x32
-#define R_RUND 0x33
-#define R_ROVR 0x34
-#define R_TBYT 0x38
-#define R_TPKT 0x39
-#define R_TMCA 0x3A
-#define R_TBCA 0x3B
-#define R_TXPF 0x3C
-#define R_TDFR 0x3D
-#define R_TEDF 0x3E
-#define R_TSCL 0x3F
-#define R_TMCL 0x40
-#define R_TLCL 0x41
-#define R_TXCL 0x42
-#define R_TNCL 0x43
-#define R_TJBR 0x46
-#define R_TFCS 0x47
-#define R_TXCF 0x48
-#define R_TOVR 0x49
-#define R_TUND 0x4A
-#define R_TFRG 0x4B
-
-/* Glue logic register and bit field definitions */
-#define R_MAC_ADDR0 0x50
-#define R_MAC_ADDR1 0x52
-#define R_MAC_ADDR2 0x54
-#define R_MAC_ADDR3 0x56
-#define R_MAC_ADDR_MASK2 0x58
-#define R_MAC_ADDR_MASK3 0x5A
-#define R_MAC_FILTER_CONFIG 0x5C
-#define O_MAC_FILTER_CONFIG__BROADCAST_EN 10
-#define O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN 9
-#define O_MAC_FILTER_CONFIG__ALL_MCAST_EN 8
-#define O_MAC_FILTER_CONFIG__ALL_UCAST_EN 7
-#define O_MAC_FILTER_CONFIG__HASH_MCAST_EN 6
-#define O_MAC_FILTER_CONFIG__HASH_UCAST_EN 5
-#define O_MAC_FILTER_CONFIG__ADDR_MATCH_DISC 4
-#define O_MAC_FILTER_CONFIG__MAC_ADDR3_VALID 3
-#define O_MAC_FILTER_CONFIG__MAC_ADDR2_VALID 2
-#define O_MAC_FILTER_CONFIG__MAC_ADDR1_VALID 1
-#define O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID 0
-#define R_HASH_TABLE_VECTOR 0x30
-#define R_TX_CONTROL 0x0A0
-#define O_TX_CONTROL__TX15HALT 31
-#define O_TX_CONTROL__TX14HALT 30
-#define O_TX_CONTROL__TX13HALT 29
-#define O_TX_CONTROL__TX12HALT 28
-#define O_TX_CONTROL__TX11HALT 27
-#define O_TX_CONTROL__TX10HALT 26
-#define O_TX_CONTROL__TX9HALT 25
-#define O_TX_CONTROL__TX8HALT 24
-#define O_TX_CONTROL__TX7HALT 23
-#define O_TX_CONTROL__TX6HALT 22
-#define O_TX_CONTROL__TX5HALT 21
-#define O_TX_CONTROL__TX4HALT 20
-#define O_TX_CONTROL__TX3HALT 19
-#define O_TX_CONTROL__TX2HALT 18
-#define O_TX_CONTROL__TX1HALT 17
-#define O_TX_CONTROL__TX0HALT 16
-#define O_TX_CONTROL__TXIDLE 15
-#define O_TX_CONTROL__TXENABLE 14
-#define O_TX_CONTROL__TXTHRESHOLD 0
-#define W_TX_CONTROL__TXTHRESHOLD 14
-#define R_RX_CONTROL 0x0A1
-#define O_RX_CONTROL__RGMII 10
-#define O_RX_CONTROL__SOFTRESET 2
-#define O_RX_CONTROL__RXHALT 1
-#define O_RX_CONTROL__RXENABLE 0
-#define R_DESC_PACK_CTRL 0x0A2
-#define O_DESC_PACK_CTRL__BYTEOFFSET 17
-#define W_DESC_PACK_CTRL__BYTEOFFSET 3
-#define O_DESC_PACK_CTRL__PREPADENABLE 16
-#define O_DESC_PACK_CTRL__MAXENTRY 14
-#define W_DESC_PACK_CTRL__MAXENTRY 2
-#define O_DESC_PACK_CTRL__REGULARSIZE 0
-#define W_DESC_PACK_CTRL__REGULARSIZE 14
-#define R_STATCTRL 0x0A3
-#define O_STATCTRL__OVERFLOWEN 4
-#define O_STATCTRL__GIG 3
-#define O_STATCTRL__STEN 2
-#define O_STATCTRL__CLRCNT 1
-#define O_STATCTRL__AUTOZ 0
-#define R_L2ALLOCCTRL 0x0A4
-#define O_L2ALLOCCTRL__TXL2ALLOCATE 9
-#define W_L2ALLOCCTRL__TXL2ALLOCATE 9
-#define O_L2ALLOCCTRL__RXL2ALLOCATE 0
-#define W_L2ALLOCCTRL__RXL2ALLOCATE 9
-#define R_INTMASK 0x0A5
-#define O_INTMASK__SPI4TXERROR 28
-#define O_INTMASK__SPI4RXERROR 27
-#define O_INTMASK__RGMIIHALFDUPCOLLISION 27
-#define O_INTMASK__ABORT 26
-#define O_INTMASK__UNDERRUN 25
-#define O_INTMASK__DISCARDPACKET 24
-#define O_INTMASK__ASYNCFIFOFULL 23
-#define O_INTMASK__TAGFULL 22
-#define O_INTMASK__CLASS3FULL 21
-#define O_INTMASK__C3EARLYFULL 20
-#define O_INTMASK__CLASS2FULL 19
-#define O_INTMASK__C2EARLYFULL 18
-#define O_INTMASK__CLASS1FULL 17
-#define O_INTMASK__C1EARLYFULL 16
-#define O_INTMASK__CLASS0FULL 15
-#define O_INTMASK__C0EARLYFULL 14
-#define O_INTMASK__RXDATAFULL 13
-#define O_INTMASK__RXEARLYFULL 12
-#define O_INTMASK__RFREEEMPTY 9
-#define O_INTMASK__RFEARLYEMPTY 8
-#define O_INTMASK__P2PSPILLECC 7
-#define O_INTMASK__FREEDESCFULL 5
-#define O_INTMASK__FREEEARLYFULL 4
-#define O_INTMASK__TXFETCHERROR 3
-#define O_INTMASK__STATCARRY 2
-#define O_INTMASK__MDINT 1
-#define O_INTMASK__TXILLEGAL 0
-#define R_INTREG 0x0A6
-#define O_INTREG__SPI4TXERROR 28
-#define O_INTREG__SPI4RXERROR 27
-#define O_INTREG__RGMIIHALFDUPCOLLISION 27
-#define O_INTREG__ABORT 26
-#define O_INTREG__UNDERRUN 25
-#define O_INTREG__DISCARDPACKET 24
-#define O_INTREG__ASYNCFIFOFULL 23
-#define O_INTREG__TAGFULL 22
-#define O_INTREG__CLASS3FULL 21
-#define O_INTREG__C3EARLYFULL 20
-#define O_INTREG__CLASS2FULL 19
-#define O_INTREG__C2EARLYFULL 18
-#define O_INTREG__CLASS1FULL 17
-#define O_INTREG__C1EARLYFULL 16
-#define O_INTREG__CLASS0FULL 15
-#define O_INTREG__C0EARLYFULL 14
-#define O_INTREG__RXDATAFULL 13
-#define O_INTREG__RXEARLYFULL 12
-#define O_INTREG__RFREEEMPTY 9
-#define O_INTREG__RFEARLYEMPTY 8
-#define O_INTREG__P2PSPILLECC 7
-#define O_INTREG__FREEDESCFULL 5
-#define O_INTREG__FREEEARLYFULL 4
-#define O_INTREG__TXFETCHERROR 3
-#define O_INTREG__STATCARRY 2
-#define O_INTREG__MDINT 1
-#define O_INTREG__TXILLEGAL 0
-#define R_TXRETRY 0x0A7
-#define O_TXRETRY__COLLISIONRETRY 6
-#define O_TXRETRY__BUSERRORRETRY 5
-#define O_TXRETRY__UNDERRUNRETRY 4
-#define O_TXRETRY__RETRIES 0
-#define W_TXRETRY__RETRIES 4
-#define R_CORECONTROL 0x0A8
-#define O_CORECONTROL__ERRORTHREAD 4
-#define W_CORECONTROL__ERRORTHREAD 7
-#define O_CORECONTROL__SHUTDOWN 2
-#define O_CORECONTROL__SPEED 0
-#define W_CORECONTROL__SPEED 2
-#define R_BYTEOFFSET0 0x0A9
-#define R_BYTEOFFSET1 0x0AA
-#define R_L2TYPE_0 0x0F0
-#define O_L2TYPE__EXTRAHDRPROTOSIZE 26
-#define W_L2TYPE__EXTRAHDRPROTOSIZE 5
-#define O_L2TYPE__EXTRAHDRPROTOOFFSET 20
-#define W_L2TYPE__EXTRAHDRPROTOOFFSET 6
-#define O_L2TYPE__EXTRAHEADERSIZE 14
-#define W_L2TYPE__EXTRAHEADERSIZE 6
-#define O_L2TYPE__PROTOOFFSET 8
-#define W_L2TYPE__PROTOOFFSET 6
-#define O_L2TYPE__L2HDROFFSET 2
-#define W_L2TYPE__L2HDROFFSET 6
-#define O_L2TYPE__L2PROTO 0
-#define W_L2TYPE__L2PROTO 2
-#define R_L2TYPE_1 0xF0
-#define R_L2TYPE_2 0xF0
-#define R_L2TYPE_3 0xF0
-#define R_PARSERCONFIGREG 0x100
-#define O_PARSERCONFIGREG__CRCHASHPOLY 8
-#define W_PARSERCONFIGREG__CRCHASHPOLY 7
-#define O_PARSERCONFIGREG__PREPADOFFSET 4
-#define W_PARSERCONFIGREG__PREPADOFFSET 4
-#define O_PARSERCONFIGREG__USECAM 2
-#define O_PARSERCONFIGREG__USEHASH 1
-#define O_PARSERCONFIGREG__USEPROTO 0
-#define R_L3CTABLE 0x140
-#define O_L3CTABLE__OFFSET0 25
-#define W_L3CTABLE__OFFSET0 7
-#define O_L3CTABLE__LEN0 21
-#define W_L3CTABLE__LEN0 4
-#define O_L3CTABLE__OFFSET1 14
-#define W_L3CTABLE__OFFSET1 7
-#define O_L3CTABLE__LEN1 10
-#define W_L3CTABLE__LEN1 4
-#define O_L3CTABLE__OFFSET2 4
-#define W_L3CTABLE__OFFSET2 6
-#define O_L3CTABLE__LEN2 0
-#define W_L3CTABLE__LEN2 4
-#define O_L3CTABLE__L3HDROFFSET 26
-#define W_L3CTABLE__L3HDROFFSET 6
-#define O_L3CTABLE__L4PROTOOFFSET 20
-#define W_L3CTABLE__L4PROTOOFFSET 6
-#define O_L3CTABLE__IPCHKSUMCOMPUTE 19
-#define O_L3CTABLE__L4CLASSIFY 18
-#define O_L3CTABLE__L2PROTO 16
-#define W_L3CTABLE__L2PROTO 2
-#define O_L3CTABLE__L3PROTOKEY 0
-#define W_L3CTABLE__L3PROTOKEY 16
-#define R_L4CTABLE 0x160
-#define O_L4CTABLE__OFFSET0 21
-#define W_L4CTABLE__OFFSET0 6
-#define O_L4CTABLE__LEN0 17
-#define W_L4CTABLE__LEN0 4
-#define O_L4CTABLE__OFFSET1 11
-#define W_L4CTABLE__OFFSET1 6
-#define O_L4CTABLE__LEN1 7
-#define W_L4CTABLE__LEN1 4
-#define O_L4CTABLE__TCPCHKSUMENABLE 0
-#define R_CAM4X128TABLE 0x172
-#define O_CAM4X128TABLE__CLASSID 7
-#define W_CAM4X128TABLE__CLASSID 2
-#define O_CAM4X128TABLE__BUCKETID 1
-#define W_CAM4X128TABLE__BUCKETID 6
-#define O_CAM4X128TABLE__USEBUCKET 0
-#define R_CAM4X128KEY 0x180
-#define R_TRANSLATETABLE 0x1A0
-#define R_DMACR0 0x200
-#define O_DMACR0__DATA0WRMAXCR 27
-#define W_DMACR0__DATA0WRMAXCR 3
-#define O_DMACR0__DATA0RDMAXCR 24
-#define W_DMACR0__DATA0RDMAXCR 3
-#define O_DMACR0__DATA1WRMAXCR 21
-#define W_DMACR0__DATA1WRMAXCR 3
-#define O_DMACR0__DATA1RDMAXCR 18
-#define W_DMACR0__DATA1RDMAXCR 3
-#define O_DMACR0__DATA2WRMAXCR 15
-#define W_DMACR0__DATA2WRMAXCR 3
-#define O_DMACR0__DATA2RDMAXCR 12
-#define W_DMACR0__DATA2RDMAXCR 3
-#define O_DMACR0__DATA3WRMAXCR 9
-#define W_DMACR0__DATA3WRMAXCR 3
-#define O_DMACR0__DATA3RDMAXCR 6
-#define W_DMACR0__DATA3RDMAXCR 3
-#define O_DMACR0__DATA4WRMAXCR 3
-#define W_DMACR0__DATA4WRMAXCR 3
-#define O_DMACR0__DATA4RDMAXCR 0
-#define W_DMACR0__DATA4RDMAXCR 3
-#define R_DMACR1 0x201
-#define O_DMACR1__DATA5WRMAXCR 27
-#define W_DMACR1__DATA5WRMAXCR 3
-#define O_DMACR1__DATA5RDMAXCR 24
-#define W_DMACR1__DATA5RDMAXCR 3
-#define O_DMACR1__DATA6WRMAXCR 21
-#define W_DMACR1__DATA6WRMAXCR 3
-#define O_DMACR1__DATA6RDMAXCR 18
-#define W_DMACR1__DATA6RDMAXCR 3
-#define O_DMACR1__DATA7WRMAXCR 15
-#define W_DMACR1__DATA7WRMAXCR 3
-#define O_DMACR1__DATA7RDMAXCR 12
-#define W_DMACR1__DATA7RDMAXCR 3
-#define O_DMACR1__DATA8WRMAXCR 9
-#define W_DMACR1__DATA8WRMAXCR 3
-#define O_DMACR1__DATA8RDMAXCR 6
-#define W_DMACR1__DATA8RDMAXCR 3
-#define O_DMACR1__DATA9WRMAXCR 3
-#define W_DMACR1__DATA9WRMAXCR 3
-#define O_DMACR1__DATA9RDMAXCR 0
-#define W_DMACR1__DATA9RDMAXCR 3
-#define R_DMACR2 0x202
-#define O_DMACR2__DATA10WRMAXCR 27
-#define W_DMACR2__DATA10WRMAXCR 3
-#define O_DMACR2__DATA10RDMAXCR 24
-#define W_DMACR2__DATA10RDMAXCR 3
-#define O_DMACR2__DATA11WRMAXCR 21
-#define W_DMACR2__DATA11WRMAXCR 3
-#define O_DMACR2__DATA11RDMAXCR 18
-#define W_DMACR2__DATA11RDMAXCR 3
-#define O_DMACR2__DATA12WRMAXCR 15
-#define W_DMACR2__DATA12WRMAXCR 3
-#define O_DMACR2__DATA12RDMAXCR 12
-#define W_DMACR2__DATA12RDMAXCR 3
-#define O_DMACR2__DATA13WRMAXCR 9
-#define W_DMACR2__DATA13WRMAXCR 3
-#define O_DMACR2__DATA13RDMAXCR 6
-#define W_DMACR2__DATA13RDMAXCR 3
-#define O_DMACR2__DATA14WRMAXCR 3
-#define W_DMACR2__DATA14WRMAXCR 3
-#define O_DMACR2__DATA14RDMAXCR 0
-#define W_DMACR2__DATA14RDMAXCR 3
-#define R_DMACR3 0x203
-#define O_DMACR3__DATA15WRMAXCR 27
-#define W_DMACR3__DATA15WRMAXCR 3
-#define O_DMACR3__DATA15RDMAXCR 24
-#define W_DMACR3__DATA15RDMAXCR 3
-#define O_DMACR3__SPCLASSWRMAXCR 21
-#define W_DMACR3__SPCLASSWRMAXCR 3
-#define O_DMACR3__SPCLASSRDMAXCR 18
-#define W_DMACR3__SPCLASSRDMAXCR 3
-#define O_DMACR3__JUMFRINWRMAXCR 15
-#define W_DMACR3__JUMFRINWRMAXCR 3
-#define O_DMACR3__JUMFRINRDMAXCR 12
-#define W_DMACR3__JUMFRINRDMAXCR 3
-#define O_DMACR3__REGFRINWRMAXCR 9
-#define W_DMACR3__REGFRINWRMAXCR 3
-#define O_DMACR3__REGFRINRDMAXCR 6
-#define W_DMACR3__REGFRINRDMAXCR 3
-#define O_DMACR3__FROUTWRMAXCR 3
-#define W_DMACR3__FROUTWRMAXCR 3
-#define O_DMACR3__FROUTRDMAXCR 0
-#define W_DMACR3__FROUTRDMAXCR 3
-#define R_REG_FRIN_SPILL_MEM_START_0 0x204
-#define O_REG_FRIN_SPILL_MEM_START_0__REGFRINSPILLMEMSTART0 0
-#define W_REG_FRIN_SPILL_MEM_START_0__REGFRINSPILLMEMSTART0 32
-#define R_REG_FRIN_SPILL_MEM_START_1 0x205
-#define O_REG_FRIN_SPILL_MEM_START_1__REGFRINSPILLMEMSTART1 0
-#define W_REG_FRIN_SPILL_MEM_START_1__REGFRINSPILLMEMSTART1 3
-#define R_REG_FRIN_SPILL_MEM_SIZE 0x206
-#define O_REG_FRIN_SPILL_MEM_SIZE__REGFRINSPILLMEMSIZE 0
-#define W_REG_FRIN_SPILL_MEM_SIZE__REGFRINSPILLMEMSIZE 32
-#define R_FROUT_SPILL_MEM_START_0 0x207
-#define O_FROUT_SPILL_MEM_START_0__FROUTSPILLMEMSTART0 0
-#define W_FROUT_SPILL_MEM_START_0__FROUTSPILLMEMSTART0 32
-#define R_FROUT_SPILL_MEM_START_1 0x208
-#define O_FROUT_SPILL_MEM_START_1__FROUTSPILLMEMSTART1 0
-#define W_FROUT_SPILL_MEM_START_1__FROUTSPILLMEMSTART1 3
-#define R_FROUT_SPILL_MEM_SIZE 0x209
-#define O_FROUT_SPILL_MEM_SIZE__FROUTSPILLMEMSIZE 0
-#define W_FROUT_SPILL_MEM_SIZE__FROUTSPILLMEMSIZE 32
-#define R_CLASS0_SPILL_MEM_START_0 0x20A
-#define O_CLASS0_SPILL_MEM_START_0__CLASS0SPILLMEMSTART0 0
-#define W_CLASS0_SPILL_MEM_START_0__CLASS0SPILLMEMSTART0 32
-#define R_CLASS0_SPILL_MEM_START_1 0x20B
-#define O_CLASS0_SPILL_MEM_START_1__CLASS0SPILLMEMSTART1 0
-#define W_CLASS0_SPILL_MEM_START_1__CLASS0SPILLMEMSTART1 3
-#define R_CLASS0_SPILL_MEM_SIZE 0x20C
-#define O_CLASS0_SPILL_MEM_SIZE__CLASS0SPILLMEMSIZE 0
-#define W_CLASS0_SPILL_MEM_SIZE__CLASS0SPILLMEMSIZE 32
-#define R_JUMFRIN_SPILL_MEM_START_0 0x20D
-#define O_JUMFRIN_SPILL_MEM_START_0__JUMFRINSPILLMEMSTART0 0
-#define W_JUMFRIN_SPILL_MEM_START_0__JUMFRINSPILLMEMSTART0 32
-#define R_JUMFRIN_SPILL_MEM_START_1 0x20E
-#define O_JUMFRIN_SPILL_MEM_START_1__JUMFRINSPILLMEMSTART1 0
-#define W_JUMFRIN_SPILL_MEM_START_1__JUMFRINSPILLMEMSTART1 3
-#define R_JUMFRIN_SPILL_MEM_SIZE 0x20F
-#define O_JUMFRIN_SPILL_MEM_SIZE__JUMFRINSPILLMEMSIZE 0
-#define W_JUMFRIN_SPILL_MEM_SIZE__JUMFRINSPILLMEMSIZE 32
-#define R_CLASS1_SPILL_MEM_START_0 0x210
-#define O_CLASS1_SPILL_MEM_START_0__CLASS1SPILLMEMSTART0 0
-#define W_CLASS1_SPILL_MEM_START_0__CLASS1SPILLMEMSTART0 32
-#define R_CLASS1_SPILL_MEM_START_1 0x211
-#define O_CLASS1_SPILL_MEM_START_1__CLASS1SPILLMEMSTART1 0
-#define W_CLASS1_SPILL_MEM_START_1__CLASS1SPILLMEMSTART1 3
-#define R_CLASS1_SPILL_MEM_SIZE 0x212
-#define O_CLASS1_SPILL_MEM_SIZE__CLASS1SPILLMEMSIZE 0
-#define W_CLASS1_SPILL_MEM_SIZE__CLASS1SPILLMEMSIZE 32
-#define R_CLASS2_SPILL_MEM_START_0 0x213
-#define O_CLASS2_SPILL_MEM_START_0__CLASS2SPILLMEMSTART0 0
-#define W_CLASS2_SPILL_MEM_START_0__CLASS2SPILLMEMSTART0 32
-#define R_CLASS2_SPILL_MEM_START_1 0x214
-#define O_CLASS2_SPILL_MEM_START_1__CLASS2SPILLMEMSTART1 0
-#define W_CLASS2_SPILL_MEM_START_1__CLASS2SPILLMEMSTART1 3
-#define R_CLASS2_SPILL_MEM_SIZE 0x215
-#define O_CLASS2_SPILL_MEM_SIZE__CLASS2SPILLMEMSIZE 0
-#define W_CLASS2_SPILL_MEM_SIZE__CLASS2SPILLMEMSIZE 32
-#define R_CLASS3_SPILL_MEM_START_0 0x216
-#define O_CLASS3_SPILL_MEM_START_0__CLASS3SPILLMEMSTART0 0
-#define W_CLASS3_SPILL_MEM_START_0__CLASS3SPILLMEMSTART0 32
-#define R_CLASS3_SPILL_MEM_START_1 0x217
-#define O_CLASS3_SPILL_MEM_START_1__CLASS3SPILLMEMSTART1 0
-#define W_CLASS3_SPILL_MEM_START_1__CLASS3SPILLMEMSTART1 3
-#define R_CLASS3_SPILL_MEM_SIZE 0x218
-#define O_CLASS3_SPILL_MEM_SIZE__CLASS3SPILLMEMSIZE 0
-#define W_CLASS3_SPILL_MEM_SIZE__CLASS3SPILLMEMSIZE 32
-#define R_REG_FRIN1_SPILL_MEM_START_0 0x219
-#define R_REG_FRIN1_SPILL_MEM_START_1 0x21a
-#define R_REG_FRIN1_SPILL_MEM_SIZE 0x21b
-#define R_SPIHNGY0 0x219
-#define O_SPIHNGY0__EG_HNGY_THRESH_0 24
-#define W_SPIHNGY0__EG_HNGY_THRESH_0 7
-#define O_SPIHNGY0__EG_HNGY_THRESH_1 16
-#define W_SPIHNGY0__EG_HNGY_THRESH_1 7
-#define O_SPIHNGY0__EG_HNGY_THRESH_2 8
-#define W_SPIHNGY0__EG_HNGY_THRESH_2 7
-#define O_SPIHNGY0__EG_HNGY_THRESH_3 0
-#define W_SPIHNGY0__EG_HNGY_THRESH_3 7
-#define R_SPIHNGY1 0x21A
-#define O_SPIHNGY1__EG_HNGY_THRESH_4 24
-#define W_SPIHNGY1__EG_HNGY_THRESH_4 7
-#define O_SPIHNGY1__EG_HNGY_THRESH_5 16
-#define W_SPIHNGY1__EG_HNGY_THRESH_5 7
-#define O_SPIHNGY1__EG_HNGY_THRESH_6 8
-#define W_SPIHNGY1__EG_HNGY_THRESH_6 7
-#define O_SPIHNGY1__EG_HNGY_THRESH_7 0
-#define W_SPIHNGY1__EG_HNGY_THRESH_7 7
-#define R_SPIHNGY2 0x21B
-#define O_SPIHNGY2__EG_HNGY_THRESH_8 24
-#define W_SPIHNGY2__EG_HNGY_THRESH_8 7
-#define O_SPIHNGY2__EG_HNGY_THRESH_9 16
-#define W_SPIHNGY2__EG_HNGY_THRESH_9 7
-#define O_SPIHNGY2__EG_HNGY_THRESH_10 8
-#define W_SPIHNGY2__EG_HNGY_THRESH_10 7
-#define O_SPIHNGY2__EG_HNGY_THRESH_11 0
-#define W_SPIHNGY2__EG_HNGY_THRESH_11 7
-#define R_SPIHNGY3 0x21C
-#define O_SPIHNGY3__EG_HNGY_THRESH_12 24
-#define W_SPIHNGY3__EG_HNGY_THRESH_12 7
-#define O_SPIHNGY3__EG_HNGY_THRESH_13 16
-#define W_SPIHNGY3__EG_HNGY_THRESH_13 7
-#define O_SPIHNGY3__EG_HNGY_THRESH_14 8
-#define W_SPIHNGY3__EG_HNGY_THRESH_14 7
-#define O_SPIHNGY3__EG_HNGY_THRESH_15 0
-#define W_SPIHNGY3__EG_HNGY_THRESH_15 7
-#define R_SPISTRV0 0x21D
-#define O_SPISTRV0__EG_STRV_THRESH_0 24
-#define W_SPISTRV0__EG_STRV_THRESH_0 7
-#define O_SPISTRV0__EG_STRV_THRESH_1 16
-#define W_SPISTRV0__EG_STRV_THRESH_1 7
-#define O_SPISTRV0__EG_STRV_THRESH_2 8
-#define W_SPISTRV0__EG_STRV_THRESH_2 7
-#define O_SPISTRV0__EG_STRV_THRESH_3 0
-#define W_SPISTRV0__EG_STRV_THRESH_3 7
-#define R_SPISTRV1 0x21E
-#define O_SPISTRV1__EG_STRV_THRESH_4 24
-#define W_SPISTRV1__EG_STRV_THRESH_4 7
-#define O_SPISTRV1__EG_STRV_THRESH_5 16
-#define W_SPISTRV1__EG_STRV_THRESH_5 7
-#define O_SPISTRV1__EG_STRV_THRESH_6 8
-#define W_SPISTRV1__EG_STRV_THRESH_6 7
-#define O_SPISTRV1__EG_STRV_THRESH_7 0
-#define W_SPISTRV1__EG_STRV_THRESH_7 7
-#define R_SPISTRV2 0x21F
-#define O_SPISTRV2__EG_STRV_THRESH_8 24
-#define W_SPISTRV2__EG_STRV_THRESH_8 7
-#define O_SPISTRV2__EG_STRV_THRESH_9 16
-#define W_SPISTRV2__EG_STRV_THRESH_9 7
-#define O_SPISTRV2__EG_STRV_THRESH_10 8
-#define W_SPISTRV2__EG_STRV_THRESH_10 7
-#define O_SPISTRV2__EG_STRV_THRESH_11 0
-#define W_SPISTRV2__EG_STRV_THRESH_11 7
-#define R_SPISTRV3 0x220
-#define O_SPISTRV3__EG_STRV_THRESH_12 24
-#define W_SPISTRV3__EG_STRV_THRESH_12 7
-#define O_SPISTRV3__EG_STRV_THRESH_13 16
-#define W_SPISTRV3__EG_STRV_THRESH_13 7
-#define O_SPISTRV3__EG_STRV_THRESH_14 8
-#define W_SPISTRV3__EG_STRV_THRESH_14 7
-#define O_SPISTRV3__EG_STRV_THRESH_15 0
-#define W_SPISTRV3__EG_STRV_THRESH_15 7
-#define R_TXDATAFIFO0 0x221
-#define O_TXDATAFIFO0__TX0DATAFIFOSTART 24
-#define W_TXDATAFIFO0__TX0DATAFIFOSTART 7
-#define O_TXDATAFIFO0__TX0DATAFIFOSIZE 16
-#define W_TXDATAFIFO0__TX0DATAFIFOSIZE 7
-#define O_TXDATAFIFO0__TX1DATAFIFOSTART 8
-#define W_TXDATAFIFO0__TX1DATAFIFOSTART 7
-#define O_TXDATAFIFO0__TX1DATAFIFOSIZE 0
-#define W_TXDATAFIFO0__TX1DATAFIFOSIZE 7
-#define R_TXDATAFIFO1 0x222
-#define O_TXDATAFIFO1__TX2DATAFIFOSTART 24
-#define W_TXDATAFIFO1__TX2DATAFIFOSTART 7
-#define O_TXDATAFIFO1__TX2DATAFIFOSIZE 16
-#define W_TXDATAFIFO1__TX2DATAFIFOSIZE 7
-#define O_TXDATAFIFO1__TX3DATAFIFOSTART 8
-#define W_TXDATAFIFO1__TX3DATAFIFOSTART 7
-#define O_TXDATAFIFO1__TX3DATAFIFOSIZE 0
-#define W_TXDATAFIFO1__TX3DATAFIFOSIZE 7
-#define R_TXDATAFIFO2 0x223
-#define O_TXDATAFIFO2__TX4DATAFIFOSTART 24
-#define W_TXDATAFIFO2__TX4DATAFIFOSTART 7
-#define O_TXDATAFIFO2__TX4DATAFIFOSIZE 16
-#define W_TXDATAFIFO2__TX4DATAFIFOSIZE 7
-#define O_TXDATAFIFO2__TX5DATAFIFOSTART 8
-#define W_TXDATAFIFO2__TX5DATAFIFOSTART 7
-#define O_TXDATAFIFO2__TX5DATAFIFOSIZE 0
-#define W_TXDATAFIFO2__TX5DATAFIFOSIZE 7
-#define R_TXDATAFIFO3 0x224
-#define O_TXDATAFIFO3__TX6DATAFIFOSTART 24
-#define W_TXDATAFIFO3__TX6DATAFIFOSTART 7
-#define O_TXDATAFIFO3__TX6DATAFIFOSIZE 16
-#define W_TXDATAFIFO3__TX6DATAFIFOSIZE 7
-#define O_TXDATAFIFO3__TX7DATAFIFOSTART 8
-#define W_TXDATAFIFO3__TX7DATAFIFOSTART 7
-#define O_TXDATAFIFO3__TX7DATAFIFOSIZE 0
-#define W_TXDATAFIFO3__TX7DATAFIFOSIZE 7
-#define R_TXDATAFIFO4 0x225
-#define O_TXDATAFIFO4__TX8DATAFIFOSTART 24
-#define W_TXDATAFIFO4__TX8DATAFIFOSTART 7
-#define O_TXDATAFIFO4__TX8DATAFIFOSIZE 16
-#define W_TXDATAFIFO4__TX8DATAFIFOSIZE 7
-#define O_TXDATAFIFO4__TX9DATAFIFOSTART 8
-#define W_TXDATAFIFO4__TX9DATAFIFOSTART 7
-#define O_TXDATAFIFO4__TX9DATAFIFOSIZE 0
-#define W_TXDATAFIFO4__TX9DATAFIFOSIZE 7
-#define R_TXDATAFIFO5 0x226
-#define O_TXDATAFIFO5__TX10DATAFIFOSTART 24
-#define W_TXDATAFIFO5__TX10DATAFIFOSTART 7
-#define O_TXDATAFIFO5__TX10DATAFIFOSIZE 16
-#define W_TXDATAFIFO5__TX10DATAFIFOSIZE 7
-#define O_TXDATAFIFO5__TX11DATAFIFOSTART 8
-#define W_TXDATAFIFO5__TX11DATAFIFOSTART 7
-#define O_TXDATAFIFO5__TX11DATAFIFOSIZE 0
-#define W_TXDATAFIFO5__TX11DATAFIFOSIZE 7
-#define R_TXDATAFIFO6 0x227
-#define O_TXDATAFIFO6__TX12DATAFIFOSTART 24
-#define W_TXDATAFIFO6__TX12DATAFIFOSTART 7
-#define O_TXDATAFIFO6__TX12DATAFIFOSIZE 16
-#define W_TXDATAFIFO6__TX12DATAFIFOSIZE 7
-#define O_TXDATAFIFO6__TX13DATAFIFOSTART 8
-#define W_TXDATAFIFO6__TX13DATAFIFOSTART 7
-#define O_TXDATAFIFO6__TX13DATAFIFOSIZE 0
-#define W_TXDATAFIFO6__TX13DATAFIFOSIZE 7
-#define R_TXDATAFIFO7 0x228
-#define O_TXDATAFIFO7__TX14DATAFIFOSTART 24
-#define W_TXDATAFIFO7__TX14DATAFIFOSTART 7
-#define O_TXDATAFIFO7__TX14DATAFIFOSIZE 16
-#define W_TXDATAFIFO7__TX14DATAFIFOSIZE 7
-#define O_TXDATAFIFO7__TX15DATAFIFOSTART 8
-#define W_TXDATAFIFO7__TX15DATAFIFOSTART 7
-#define O_TXDATAFIFO7__TX15DATAFIFOSIZE 0
-#define W_TXDATAFIFO7__TX15DATAFIFOSIZE 7
-#define R_RXDATAFIFO0 0x229
-#define O_RXDATAFIFO0__RX0DATAFIFOSTART 24
-#define W_RXDATAFIFO0__RX0DATAFIFOSTART 7
-#define O_RXDATAFIFO0__RX0DATAFIFOSIZE 16
-#define W_RXDATAFIFO0__RX0DATAFIFOSIZE 7
-#define O_RXDATAFIFO0__RX1DATAFIFOSTART 8
-#define W_RXDATAFIFO0__RX1DATAFIFOSTART 7
-#define O_RXDATAFIFO0__RX1DATAFIFOSIZE 0
-#define W_RXDATAFIFO0__RX1DATAFIFOSIZE 7
-#define R_RXDATAFIFO1 0x22A
-#define O_RXDATAFIFO1__RX2DATAFIFOSTART 24
-#define W_RXDATAFIFO1__RX2DATAFIFOSTART 7
-#define O_RXDATAFIFO1__RX2DATAFIFOSIZE 16
-#define W_RXDATAFIFO1__RX2DATAFIFOSIZE 7
-#define O_RXDATAFIFO1__RX3DATAFIFOSTART 8
-#define W_RXDATAFIFO1__RX3DATAFIFOSTART 7
-#define O_RXDATAFIFO1__RX3DATAFIFOSIZE 0
-#define W_RXDATAFIFO1__RX3DATAFIFOSIZE 7
-#define R_RXDATAFIFO2 0x22B
-#define O_RXDATAFIFO2__RX4DATAFIFOSTART 24
-#define W_RXDATAFIFO2__RX4DATAFIFOSTART 7
-#define O_RXDATAFIFO2__RX4DATAFIFOSIZE 16
-#define W_RXDATAFIFO2__RX4DATAFIFOSIZE 7
-#define O_RXDATAFIFO2__RX5DATAFIFOSTART 8
-#define W_RXDATAFIFO2__RX5DATAFIFOSTART 7
-#define O_RXDATAFIFO2__RX5DATAFIFOSIZE 0
-#define W_RXDATAFIFO2__RX5DATAFIFOSIZE 7
-#define R_RXDATAFIFO3 0x22C
-#define O_RXDATAFIFO3__RX6DATAFIFOSTART 24
-#define W_RXDATAFIFO3__RX6DATAFIFOSTART 7
-#define O_RXDATAFIFO3__RX6DATAFIFOSIZE 16
-#define W_RXDATAFIFO3__RX6DATAFIFOSIZE 7
-#define O_RXDATAFIFO3__RX7DATAFIFOSTART 8
-#define W_RXDATAFIFO3__RX7DATAFIFOSTART 7
-#define O_RXDATAFIFO3__RX7DATAFIFOSIZE 0
-#define W_RXDATAFIFO3__RX7DATAFIFOSIZE 7
-#define R_RXDATAFIFO4 0x22D
-#define O_RXDATAFIFO4__RX8DATAFIFOSTART 24
-#define W_RXDATAFIFO4__RX8DATAFIFOSTART 7
-#define O_RXDATAFIFO4__RX8DATAFIFOSIZE 16
-#define W_RXDATAFIFO4__RX8DATAFIFOSIZE 7
-#define O_RXDATAFIFO4__RX9DATAFIFOSTART 8
-#define W_RXDATAFIFO4__RX9DATAFIFOSTART 7
-#define O_RXDATAFIFO4__RX9DATAFIFOSIZE 0
-#define W_RXDATAFIFO4__RX9DATAFIFOSIZE 7
-#define R_RXDATAFIFO5 0x22E
-#define O_RXDATAFIFO5__RX10DATAFIFOSTART 24
-#define W_RXDATAFIFO5__RX10DATAFIFOSTART 7
-#define O_RXDATAFIFO5__RX10DATAFIFOSIZE 16
-#define W_RXDATAFIFO5__RX10DATAFIFOSIZE 7
-#define O_RXDATAFIFO5__RX11DATAFIFOSTART 8
-#define W_RXDATAFIFO5__RX11DATAFIFOSTART 7
-#define O_RXDATAFIFO5__RX11DATAFIFOSIZE 0
-#define W_RXDATAFIFO5__RX11DATAFIFOSIZE 7
-#define R_RXDATAFIFO6 0x22F
-#define O_RXDATAFIFO6__RX12DATAFIFOSTART 24
-#define W_RXDATAFIFO6__RX12DATAFIFOSTART 7
-#define O_RXDATAFIFO6__RX12DATAFIFOSIZE 16
-#define W_RXDATAFIFO6__RX12DATAFIFOSIZE 7
-#define O_RXDATAFIFO6__RX13DATAFIFOSTART 8
-#define W_RXDATAFIFO6__RX13DATAFIFOSTART 7
-#define O_RXDATAFIFO6__RX13DATAFIFOSIZE 0
-#define W_RXDATAFIFO6__RX13DATAFIFOSIZE 7
-#define R_RXDATAFIFO7 0x230
-#define O_RXDATAFIFO7__RX14DATAFIFOSTART 24
-#define W_RXDATAFIFO7__RX14DATAFIFOSTART 7
-#define O_RXDATAFIFO7__RX14DATAFIFOSIZE 16
-#define W_RXDATAFIFO7__RX14DATAFIFOSIZE 7
-#define O_RXDATAFIFO7__RX15DATAFIFOSTART 8
-#define W_RXDATAFIFO7__RX15DATAFIFOSTART 7
-#define O_RXDATAFIFO7__RX15DATAFIFOSIZE 0
-#define W_RXDATAFIFO7__RX15DATAFIFOSIZE 7
-#define R_XGMACPADCALIBRATION 0x231
-#define R_FREEQCARVE 0x233
-#define R_SPI4STATICDELAY0 0x240
-#define O_SPI4STATICDELAY0__DATALINE7 28
-#define W_SPI4STATICDELAY0__DATALINE7 4
-#define O_SPI4STATICDELAY0__DATALINE6 24
-#define W_SPI4STATICDELAY0__DATALINE6 4
-#define O_SPI4STATICDELAY0__DATALINE5 20
-#define W_SPI4STATICDELAY0__DATALINE5 4
-#define O_SPI4STATICDELAY0__DATALINE4 16
-#define W_SPI4STATICDELAY0__DATALINE4 4
-#define O_SPI4STATICDELAY0__DATALINE3 12
-#define W_SPI4STATICDELAY0__DATALINE3 4
-#define O_SPI4STATICDELAY0__DATALINE2 8
-#define W_SPI4STATICDELAY0__DATALINE2 4
-#define O_SPI4STATICDELAY0__DATALINE1 4
-#define W_SPI4STATICDELAY0__DATALINE1 4
-#define O_SPI4STATICDELAY0__DATALINE0 0
-#define W_SPI4STATICDELAY0__DATALINE0 4
-#define R_SPI4STATICDELAY1 0x241
-#define O_SPI4STATICDELAY1__DATALINE15 28
-#define W_SPI4STATICDELAY1__DATALINE15 4
-#define O_SPI4STATICDELAY1__DATALINE14 24
-#define W_SPI4STATICDELAY1__DATALINE14 4
-#define O_SPI4STATICDELAY1__DATALINE13 20
-#define W_SPI4STATICDELAY1__DATALINE13 4
-#define O_SPI4STATICDELAY1__DATALINE12 16
-#define W_SPI4STATICDELAY1__DATALINE12 4
-#define O_SPI4STATICDELAY1__DATALINE11 12
-#define W_SPI4STATICDELAY1__DATALINE11 4
-#define O_SPI4STATICDELAY1__DATALINE10 8
-#define W_SPI4STATICDELAY1__DATALINE10 4
-#define O_SPI4STATICDELAY1__DATALINE9 4
-#define W_SPI4STATICDELAY1__DATALINE9 4
-#define O_SPI4STATICDELAY1__DATALINE8 0
-#define W_SPI4STATICDELAY1__DATALINE8 4
-#define R_SPI4STATICDELAY2 0x242
-#define O_SPI4STATICDELAY0__TXSTAT1 8
-#define W_SPI4STATICDELAY0__TXSTAT1 4
-#define O_SPI4STATICDELAY0__TXSTAT0 4
-#define W_SPI4STATICDELAY0__TXSTAT0 4
-#define O_SPI4STATICDELAY0__RXCONTROL 0
-#define W_SPI4STATICDELAY0__RXCONTROL 4
-#define R_SPI4CONTROL 0x243
-#define O_SPI4CONTROL__STATICDELAY 2
-#define O_SPI4CONTROL__LVDS_LVTTL 1
-#define O_SPI4CONTROL__SPI4ENABLE 0
-#define R_CLASSWATERMARKS 0x244
-#define O_CLASSWATERMARKS__CLASS0WATERMARK 24
-#define W_CLASSWATERMARKS__CLASS0WATERMARK 5
-#define O_CLASSWATERMARKS__CLASS1WATERMARK 16
-#define W_CLASSWATERMARKS__CLASS1WATERMARK 5
-#define O_CLASSWATERMARKS__CLASS3WATERMARK 0
-#define W_CLASSWATERMARKS__CLASS3WATERMARK 5
-#define R_RXWATERMARKS1 0x245
-#define O_RXWATERMARKS__RX0DATAWATERMARK 24
-#define W_RXWATERMARKS__RX0DATAWATERMARK 7
-#define O_RXWATERMARKS__RX1DATAWATERMARK 16
-#define W_RXWATERMARKS__RX1DATAWATERMARK 7
-#define O_RXWATERMARKS__RX3DATAWATERMARK 0
-#define W_RXWATERMARKS__RX3DATAWATERMARK 7
-#define R_RXWATERMARKS2 0x246
-#define O_RXWATERMARKS__RX4DATAWATERMARK 24
-#define W_RXWATERMARKS__RX4DATAWATERMARK 7
-#define O_RXWATERMARKS__RX5DATAWATERMARK 16
-#define W_RXWATERMARKS__RX5DATAWATERMARK 7
-#define O_RXWATERMARKS__RX6DATAWATERMARK 8
-#define W_RXWATERMARKS__RX6DATAWATERMARK 7
-#define O_RXWATERMARKS__RX7DATAWATERMARK 0
-#define W_RXWATERMARKS__RX7DATAWATERMARK 7
-#define R_RXWATERMARKS3 0x247
-#define O_RXWATERMARKS__RX8DATAWATERMARK 24
-#define W_RXWATERMARKS__RX8DATAWATERMARK 7
-#define O_RXWATERMARKS__RX9DATAWATERMARK 16
-#define W_RXWATERMARKS__RX9DATAWATERMARK 7
-#define O_RXWATERMARKS__RX10DATAWATERMARK 8
-#define W_RXWATERMARKS__RX10DATAWATERMARK 7
-#define O_RXWATERMARKS__RX11DATAWATERMARK 0
-#define W_RXWATERMARKS__RX11DATAWATERMARK 7
-#define R_RXWATERMARKS4 0x248
-#define O_RXWATERMARKS__RX12DATAWATERMARK 24
-#define W_RXWATERMARKS__RX12DATAWATERMARK 7
-#define O_RXWATERMARKS__RX13DATAWATERMARK 16
-#define W_RXWATERMARKS__RX13DATAWATERMARK 7
-#define O_RXWATERMARKS__RX14DATAWATERMARK 8
-#define W_RXWATERMARKS__RX14DATAWATERMARK 7
-#define O_RXWATERMARKS__RX15DATAWATERMARK 0
-#define W_RXWATERMARKS__RX15DATAWATERMARK 7
-#define R_FREEWATERMARKS 0x249
-#define O_FREEWATERMARKS__FREEOUTWATERMARK 16
-#define W_FREEWATERMARKS__FREEOUTWATERMARK 16
-#define O_FREEWATERMARKS__JUMFRWATERMARK 8
-#define W_FREEWATERMARKS__JUMFRWATERMARK 7
-#define O_FREEWATERMARKS__REGFRWATERMARK 0
-#define W_FREEWATERMARKS__REGFRWATERMARK 7
-#define R_EGRESSFIFOCARVINGSLOTS 0x24a
-
-#define CTRL_RES0 0
-#define CTRL_RES1 1
-#define CTRL_REG_FREE 2
-#define CTRL_JUMBO_FREE 3
-#define CTRL_CONT 4
-#define CTRL_EOP 5
-#define CTRL_START 6
-#define CTRL_SNGL 7
-
-#define CTRL_B0_NOT_EOP 0
-#define CTRL_B0_EOP 1
-
-#define R_ROUND_ROBIN_TABLE 0
-#define R_PDE_CLASS_0 0x300
-#define R_PDE_CLASS_1 0x302
-#define R_PDE_CLASS_2 0x304
-#define R_PDE_CLASS_3 0x306
-
-#define R_MSG_TX_THRESHOLD 0x308
-
-#define R_GMAC_JFR0_BUCKET_SIZE 0x320
-#define R_GMAC_RFR0_BUCKET_SIZE 0x321
-#define R_GMAC_TX0_BUCKET_SIZE 0x322
-#define R_GMAC_TX1_BUCKET_SIZE 0x323
-#define R_GMAC_TX2_BUCKET_SIZE 0x324
-#define R_GMAC_TX3_BUCKET_SIZE 0x325
-#define R_GMAC_JFR1_BUCKET_SIZE 0x326
-#define R_GMAC_RFR1_BUCKET_SIZE 0x327
-
-#define R_XGS_TX0_BUCKET_SIZE 0x320
-#define R_XGS_TX1_BUCKET_SIZE 0x321
-#define R_XGS_TX2_BUCKET_SIZE 0x322
-#define R_XGS_TX3_BUCKET_SIZE 0x323
-#define R_XGS_TX4_BUCKET_SIZE 0x324
-#define R_XGS_TX5_BUCKET_SIZE 0x325
-#define R_XGS_TX6_BUCKET_SIZE 0x326
-#define R_XGS_TX7_BUCKET_SIZE 0x327
-#define R_XGS_TX8_BUCKET_SIZE 0x328
-#define R_XGS_TX9_BUCKET_SIZE 0x329
-#define R_XGS_TX10_BUCKET_SIZE 0x32A
-#define R_XGS_TX11_BUCKET_SIZE 0x32B
-#define R_XGS_TX12_BUCKET_SIZE 0x32C
-#define R_XGS_TX13_BUCKET_SIZE 0x32D
-#define R_XGS_TX14_BUCKET_SIZE 0x32E
-#define R_XGS_TX15_BUCKET_SIZE 0x32F
-#define R_XGS_JFR_BUCKET_SIZE 0x330
-#define R_XGS_RFR_BUCKET_SIZE 0x331
-
-#define R_CC_CPU0_0 0x380
-#define R_CC_CPU1_0 0x388
-#define R_CC_CPU2_0 0x390
-#define R_CC_CPU3_0 0x398
-#define R_CC_CPU4_0 0x3a0
-#define R_CC_CPU5_0 0x3a8
-#define R_CC_CPU6_0 0x3b0
-#define R_CC_CPU7_0 0x3b8
-
-#define XLR_GMAC_BLK_SZ (XLR_IO_GMAC_1_OFFSET - \
- XLR_IO_GMAC_0_OFFSET)
-
-/* Constants used for configuring the devices */
-
-#define XLR_FB_STN 6 /* Bucket used for Tx freeback */
-
-#define MAC_B2B_IPG 88
-
-#define XLR_NET_PREPAD_LEN 32
-
-/* frame sizes need to be cacheline aligned */
-#define MAX_FRAME_SIZE (1536 + XLR_NET_PREPAD_LEN)
-#define MAX_FRAME_SIZE_JUMBO 9216
-
-#define MAC_SKB_BACK_PTR_SIZE SMP_CACHE_BYTES
-#define MAC_PREPAD 0
-#define BYTE_OFFSET 2
-#define XLR_RX_BUF_SIZE (MAX_FRAME_SIZE + BYTE_OFFSET + \
- MAC_PREPAD + MAC_SKB_BACK_PTR_SIZE + SMP_CACHE_BYTES)
-#define MAC_CRC_LEN 4
-#define MAX_NUM_MSGRNG_STN_CC 128
-#define MAX_MSG_SND_ATTEMPTS 100 /* 13 stns x 4 entry msg/stn +
- * headroom
- */
-
-#define MAC_FRIN_TO_BE_SENT_THRESHOLD 16
-
-#define MAX_NUM_DESC_SPILL 1024
-#define MAX_FRIN_SPILL (MAX_NUM_DESC_SPILL << 2)
-#define MAX_FROUT_SPILL (MAX_NUM_DESC_SPILL << 2)
-#define MAX_CLASS_0_SPILL (MAX_NUM_DESC_SPILL << 2)
-#define MAX_CLASS_1_SPILL (MAX_NUM_DESC_SPILL << 2)
-#define MAX_CLASS_2_SPILL (MAX_NUM_DESC_SPILL << 2)
-#define MAX_CLASS_3_SPILL (MAX_NUM_DESC_SPILL << 2)
-
-enum {
- SGMII_SPEED_10 = 0x00000000,
- SGMII_SPEED_100 = 0x02000000,
- SGMII_SPEED_1000 = 0x04000000,
-};
-
-enum tsv_rsv_reg {
- TX_RX_64_BYTE_FRAME = 0x20,
- TX_RX_64_127_BYTE_FRAME,
- TX_RX_128_255_BYTE_FRAME,
- TX_RX_256_511_BYTE_FRAME,
- TX_RX_512_1023_BYTE_FRAME,
- TX_RX_1024_1518_BYTE_FRAME,
- TX_RX_1519_1522_VLAN_BYTE_FRAME,
-
- RX_BYTE_COUNTER = 0x27,
- RX_PACKET_COUNTER,
- RX_FCS_ERROR_COUNTER,
- RX_MULTICAST_PACKET_COUNTER,
- RX_BROADCAST_PACKET_COUNTER,
- RX_CONTROL_FRAME_PACKET_COUNTER,
- RX_PAUSE_FRAME_PACKET_COUNTER,
- RX_UNKNOWN_OP_CODE_COUNTER,
- RX_ALIGNMENT_ERROR_COUNTER,
- RX_FRAME_LENGTH_ERROR_COUNTER,
- RX_CODE_ERROR_COUNTER,
- RX_CARRIER_SENSE_ERROR_COUNTER,
- RX_UNDERSIZE_PACKET_COUNTER,
- RX_OVERSIZE_PACKET_COUNTER,
- RX_FRAGMENTS_COUNTER,
- RX_JABBER_COUNTER,
- RX_DROP_PACKET_COUNTER,
-
- TX_BYTE_COUNTER = 0x38,
- TX_PACKET_COUNTER,
- TX_MULTICAST_PACKET_COUNTER,
- TX_BROADCAST_PACKET_COUNTER,
- TX_PAUSE_CONTROL_FRAME_COUNTER,
- TX_DEFERRAL_PACKET_COUNTER,
- TX_EXCESSIVE_DEFERRAL_PACKET_COUNTER,
- TX_SINGLE_COLLISION_PACKET_COUNTER,
- TX_MULTI_COLLISION_PACKET_COUNTER,
- TX_LATE_COLLISION_PACKET_COUNTER,
- TX_EXCESSIVE_COLLISION_PACKET_COUNTER,
- TX_TOTAL_COLLISION_COUNTER,
- TX_PAUSE_FRAME_HONERED_COUNTER,
- TX_DROP_FRAME_COUNTER,
- TX_JABBER_FRAME_COUNTER,
- TX_FCS_ERROR_COUNTER,
- TX_CONTROL_FRAME_COUNTER,
- TX_OVERSIZE_FRAME_COUNTER,
- TX_UNDERSIZE_FRAME_COUNTER,
- TX_FRAGMENT_FRAME_COUNTER,
-
- CARRY_REG_1 = 0x4c,
- CARRY_REG_2 = 0x4d,
-};
-
-struct xlr_adapter {
- struct net_device *netdev[4];
-};
-
-struct xlr_net_priv {
- u32 __iomem *base_addr;
- struct net_device *ndev;
- struct xlr_adapter *adapter;
- struct mii_bus *mii_bus;
- int num_rx_desc;
- int phy_addr; /* PHY addr on MDIO bus */
- int pcs_id; /* PCS id on MDIO bus */
- int port_id; /* Port(gmac/xgmac) number, i.e 0-7 */
- int tx_stnid;
- u32 __iomem *mii_addr;
- u32 __iomem *serdes_addr;
- u32 __iomem *pcs_addr;
- u32 __iomem *gpio_addr;
- int phy_speed;
- int port_type;
- struct timer_list queue_timer;
- int wakeup_q;
- struct platform_device *pdev;
- struct xlr_net_data *nd;
-
- u64 *frin_spill;
- u64 *frout_spill;
- u64 *class_0_spill;
- u64 *class_1_spill;
- u64 *class_2_spill;
- u64 *class_3_spill;
-};
-
-void xlr_set_gmac_speed(struct xlr_net_priv *priv);
diff --git a/drivers/staging/r8188eu/core/rtw_mlme_ext.c b/drivers/staging/r8188eu/core/rtw_mlme_ext.c
index 55c3d4a6faeb..b4820ad2cee7 100644
--- a/drivers/staging/r8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/r8188eu/core/rtw_mlme_ext.c
@@ -107,6 +107,7 @@ static struct rt_channel_plan_map RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = {
{0x01}, /* 0x10, RT_CHANNEL_DOMAIN_JAPAN */
{0x02}, /* 0x11, RT_CHANNEL_DOMAIN_FCC_NO_DFS */
{0x01}, /* 0x12, RT_CHANNEL_DOMAIN_JAPAN_NO_DFS */
+ {0x00}, /* 0x13 */
{0x02}, /* 0x14, RT_CHANNEL_DOMAIN_TAIWAN_NO_DFS */
{0x00}, /* 0x15, RT_CHANNEL_DOMAIN_ETSI_NO_DFS */
{0x00}, /* 0x16, RT_CHANNEL_DOMAIN_KOREA_NO_DFS */
@@ -118,6 +119,7 @@ static struct rt_channel_plan_map RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = {
{0x00}, /* 0x1C, */
{0x00}, /* 0x1D, */
{0x00}, /* 0x1E, */
+ {0x00}, /* 0x1F, */
/* 0x20 ~ 0x7F , New Define ===== */
{0x00}, /* 0x20, RT_CHANNEL_DOMAIN_WORLD_NULL */
{0x01}, /* 0x21, RT_CHANNEL_DOMAIN_ETSI1_NULL */
@@ -6845,12 +6847,12 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ pcmd_obj = kzalloc(sizeof(*pcmd_obj), GFP_ATOMIC);
if (!pcmd_obj)
return;
cmdsz = (sizeof(struct stadel_event) + sizeof(struct C2HEvent_Header));
- pevtcmd = kzalloc(cmdsz, GFP_KERNEL);
+ pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
if (!pevtcmd) {
kfree(pcmd_obj);
return;
diff --git a/drivers/staging/r8188eu/os_dep/ioctl_linux.c b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
index 52d42e576443..9404355726d0 100644
--- a/drivers/staging/r8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
@@ -1980,6 +1980,7 @@ static int rtw_wx_read32(struct net_device *dev,
u32 data32;
u32 bytes;
u8 *ptmp;
+ int ret;
padapter = (struct adapter *)rtw_netdev_priv(dev);
p = &wrqu->data;
@@ -2007,12 +2008,17 @@ static int rtw_wx_read32(struct net_device *dev,
break;
default:
DBG_88E(KERN_INFO "%s: usage> read [bytes],[address(hex)]\n", __func__);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_free_ptmp;
}
DBG_88E(KERN_INFO "%s: addr = 0x%08X data =%s\n", __func__, addr, extra);
kfree(ptmp);
return 0;
+
+err_free_ptmp:
+ kfree(ptmp);
+ return ret;
}
static int rtw_wx_write32(struct net_device *dev,
diff --git a/drivers/staging/r8188eu/os_dep/mlme_linux.c b/drivers/staging/r8188eu/os_dep/mlme_linux.c
index a9b6ffdbf31a..f7ce724ebf87 100644
--- a/drivers/staging/r8188eu/os_dep/mlme_linux.c
+++ b/drivers/staging/r8188eu/os_dep/mlme_linux.c
@@ -112,7 +112,7 @@ void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
buff = NULL;
if (authmode == _WPA_IE_ID_) {
- buff = kzalloc(IW_CUSTOM_MAX, GFP_KERNEL);
+ buff = kzalloc(IW_CUSTOM_MAX, GFP_ATOMIC);
if (!buff)
return;
p = buff;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index d2e9df60e9ba..b9ce71848023 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -2549,13 +2549,14 @@ static void _rtl92e_pci_disconnect(struct pci_dev *pdev)
free_irq(dev->irq, dev);
priv->irq = 0;
}
- free_rtllib(dev);
if (dev->mem_start != 0) {
iounmap((void __iomem *)dev->mem_start);
release_mem_region(pci_resource_start(pdev, 1),
pci_resource_len(pdev, 1));
}
+
+ free_rtllib(dev);
}
pci_disable_device(pdev);
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 0b65de9f2df1..95a88f6224cd 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -520,7 +520,7 @@ static ssize_t target_fabric_port_alua_tg_pt_gp_show(struct config_item *item,
{
struct se_lun *lun = item_to_lun(item);
- if (!lun || !lun->lun_se_dev)
+ if (!lun->lun_se_dev)
return -ENODEV;
return core_alua_show_tg_pt_gp_info(lun, page);
@@ -531,7 +531,7 @@ static ssize_t target_fabric_port_alua_tg_pt_gp_store(struct config_item *item,
{
struct se_lun *lun = item_to_lun(item);
- if (!lun || !lun->lun_se_dev)
+ if (!lun->lun_se_dev)
return -ENODEV;
return core_alua_store_tg_pt_gp_info(lun, page, count);
@@ -542,7 +542,7 @@ static ssize_t target_fabric_port_alua_tg_pt_offline_show(
{
struct se_lun *lun = item_to_lun(item);
- if (!lun || !lun->lun_se_dev)
+ if (!lun->lun_se_dev)
return -ENODEV;
return core_alua_show_offline_bit(lun, page);
@@ -553,7 +553,7 @@ static ssize_t target_fabric_port_alua_tg_pt_offline_store(
{
struct se_lun *lun = item_to_lun(item);
- if (!lun || !lun->lun_se_dev)
+ if (!lun->lun_se_dev)
return -ENODEV;
return core_alua_store_offline_bit(lun, page, count);
@@ -564,7 +564,7 @@ static ssize_t target_fabric_port_alua_tg_pt_status_show(
{
struct se_lun *lun = item_to_lun(item);
- if (!lun || !lun->lun_se_dev)
+ if (!lun->lun_se_dev)
return -ENODEV;
return core_alua_show_secondary_status(lun, page);
@@ -575,7 +575,7 @@ static ssize_t target_fabric_port_alua_tg_pt_status_store(
{
struct se_lun *lun = item_to_lun(item);
- if (!lun || !lun->lun_se_dev)
+ if (!lun->lun_se_dev)
return -ENODEV;
return core_alua_store_secondary_status(lun, page, count);
@@ -586,7 +586,7 @@ static ssize_t target_fabric_port_alua_tg_pt_write_md_show(
{
struct se_lun *lun = item_to_lun(item);
- if (!lun || !lun->lun_se_dev)
+ if (!lun->lun_se_dev)
return -ENODEV;
return core_alua_show_secondary_write_metadata(lun, page);
@@ -597,7 +597,7 @@ static ssize_t target_fabric_port_alua_tg_pt_write_md_store(
{
struct se_lun *lun = item_to_lun(item);
- if (!lun || !lun->lun_se_dev)
+ if (!lun->lun_se_dev)
return -ENODEV;
return core_alua_store_secondary_write_metadata(lun, page, count);
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 22703a0dbd07..4c76498d3fb0 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -40,11 +40,11 @@ static void spc_fill_alua_data(struct se_lun *lun, unsigned char *buf)
*
* See spc4r17 section 6.4.2 Table 135
*/
- spin_lock(&lun->lun_tg_pt_gp_lock);
- tg_pt_gp = lun->lun_tg_pt_gp;
+ rcu_read_lock();
+ tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
if (tg_pt_gp)
buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
- spin_unlock(&lun->lun_tg_pt_gp_lock);
+ rcu_read_unlock();
}
static u16
@@ -325,14 +325,14 @@ check_t10_vend_desc:
* Get the PROTOCOL IDENTIFIER as defined by spc4r17
* section 7.5.1 Table 362
*/
- spin_lock(&lun->lun_tg_pt_gp_lock);
- tg_pt_gp = lun->lun_tg_pt_gp;
+ rcu_read_lock();
+ tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
if (!tg_pt_gp) {
- spin_unlock(&lun->lun_tg_pt_gp_lock);
+ rcu_read_unlock();
goto check_lu_gp;
}
tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
- spin_unlock(&lun->lun_tg_pt_gp_lock);
+ rcu_read_unlock();
buf[off] = tpg->proto_id << 4;
buf[off++] |= 0x1; /* CODE SET == Binary */
diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
index da6b88e80dc0..297dc62bca29 100644
--- a/drivers/tee/amdtee/core.c
+++ b/drivers/tee/amdtee/core.c
@@ -203,9 +203,8 @@ static int copy_ta_binary(struct tee_context *ctx, void *ptr, void **ta,
*ta_size = roundup(fw->size, PAGE_SIZE);
*ta = (void *)__get_free_pages(GFP_KERNEL, get_order(*ta_size));
- if (IS_ERR(*ta)) {
- pr_err("%s: get_free_pages failed 0x%llx\n", __func__,
- (u64)*ta);
+ if (!*ta) {
+ pr_err("%s: get_free_pages failed\n", __func__);
rc = -ENOMEM;
goto rel_fw;
}
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index ab2edfcc6c70..2a66a5203d2f 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -48,10 +48,8 @@ int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm,
goto err;
}
- for (i = 0; i < nr_pages; i++) {
- pages[i] = page;
- page++;
- }
+ for (i = 0; i < nr_pages; i++)
+ pages[i] = page + i;
shm->flags |= TEE_SHM_REGISTER;
rc = shm_register(shm->ctx, shm, pages, nr_pages,
diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
index 45424824e0f9..d8c8683863aa 100644
--- a/drivers/tee/optee/ffa_abi.c
+++ b/drivers/tee/optee/ffa_abi.c
@@ -810,10 +810,9 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
return -EINVAL;
optee = kzalloc(sizeof(*optee), GFP_KERNEL);
- if (!optee) {
- rc = -ENOMEM;
- goto err;
- }
+ if (!optee)
+ return -ENOMEM;
+
optee->pool = optee_ffa_config_dyn_shm();
if (IS_ERR(optee->pool)) {
rc = PTR_ERR(optee->pool);
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
index 6196d7c3888f..cf2e3293567d 100644
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -23,6 +23,7 @@
#include "optee_private.h"
#include "optee_smc.h"
#include "optee_rpc_cmd.h"
+#include <linux/kmemleak.h>
#define CREATE_TRACE_POINTS
#include "optee_trace.h"
@@ -783,6 +784,7 @@ static void optee_handle_rpc(struct tee_context *ctx,
param->a4 = 0;
param->a5 = 0;
}
+ kmemleak_not_leak(shm);
break;
case OPTEE_SMC_RPC_FUNC_FREE:
shm = reg_pair_to_ptr(param->a1, param->a2);
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 8a8deb95e918..499fccba3d74 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -1,20 +1,17 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2015-2016, Linaro Limited
+ * Copyright (c) 2015-2017, 2019-2021 Linaro Limited
*/
+#include <linux/anon_inodes.h>
#include <linux/device.h>
-#include <linux/dma-buf.h>
-#include <linux/fdtable.h>
#include <linux/idr.h>
+#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uio.h>
-#include <linux/module.h>
#include "tee_private.h"
-MODULE_IMPORT_NS(DMA_BUF);
-
static void release_registered_pages(struct tee_shm *shm)
{
if (shm->pages) {
@@ -31,16 +28,8 @@ static void release_registered_pages(struct tee_shm *shm)
}
}
-static void tee_shm_release(struct tee_shm *shm)
+static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
{
- struct tee_device *teedev = shm->ctx->teedev;
-
- if (shm->flags & TEE_SHM_DMA_BUF) {
- mutex_lock(&teedev->mutex);
- idr_remove(&teedev->idr, shm->id);
- mutex_unlock(&teedev->mutex);
- }
-
if (shm->flags & TEE_SHM_POOL) {
struct tee_shm_pool_mgr *poolm;
@@ -67,45 +56,6 @@ static void tee_shm_release(struct tee_shm *shm)
tee_device_put(teedev);
}
-static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment
- *attach, enum dma_data_direction dir)
-{
- return NULL;
-}
-
-static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach,
- struct sg_table *table,
- enum dma_data_direction dir)
-{
-}
-
-static void tee_shm_op_release(struct dma_buf *dmabuf)
-{
- struct tee_shm *shm = dmabuf->priv;
-
- tee_shm_release(shm);
-}
-
-static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
-{
- struct tee_shm *shm = dmabuf->priv;
- size_t size = vma->vm_end - vma->vm_start;
-
- /* Refuse sharing shared memory provided by application */
- if (shm->flags & TEE_SHM_USER_MAPPED)
- return -EINVAL;
-
- return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
- size, vma->vm_page_prot);
-}
-
-static const struct dma_buf_ops tee_shm_dma_buf_ops = {
- .map_dma_buf = tee_shm_op_map_dma_buf,
- .unmap_dma_buf = tee_shm_op_unmap_dma_buf,
- .release = tee_shm_op_release,
- .mmap = tee_shm_op_mmap,
-};
-
struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
{
struct tee_device *teedev = ctx->teedev;
@@ -140,6 +90,7 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
goto err_dev_put;
}
+ refcount_set(&shm->refcount, 1);
shm->flags = flags | TEE_SHM_POOL;
shm->ctx = ctx;
if (flags & TEE_SHM_DMA_BUF)
@@ -153,10 +104,7 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
goto err_kfree;
}
-
if (flags & TEE_SHM_DMA_BUF) {
- DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
mutex_lock(&teedev->mutex);
shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
mutex_unlock(&teedev->mutex);
@@ -164,28 +112,11 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
ret = ERR_PTR(shm->id);
goto err_pool_free;
}
-
- exp_info.ops = &tee_shm_dma_buf_ops;
- exp_info.size = shm->size;
- exp_info.flags = O_RDWR;
- exp_info.priv = shm;
-
- shm->dmabuf = dma_buf_export(&exp_info);
- if (IS_ERR(shm->dmabuf)) {
- ret = ERR_CAST(shm->dmabuf);
- goto err_rem;
- }
}
teedev_ctx_get(ctx);
return shm;
-err_rem:
- if (flags & TEE_SHM_DMA_BUF) {
- mutex_lock(&teedev->mutex);
- idr_remove(&teedev->idr, shm->id);
- mutex_unlock(&teedev->mutex);
- }
err_pool_free:
poolm->ops->free(poolm, shm);
err_kfree:
@@ -246,6 +177,7 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
goto err;
}
+ refcount_set(&shm->refcount, 1);
shm->flags = flags | TEE_SHM_REGISTER;
shm->ctx = ctx;
shm->id = -1;
@@ -306,22 +238,6 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
goto err;
}
- if (flags & TEE_SHM_DMA_BUF) {
- DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
- exp_info.ops = &tee_shm_dma_buf_ops;
- exp_info.size = shm->size;
- exp_info.flags = O_RDWR;
- exp_info.priv = shm;
-
- shm->dmabuf = dma_buf_export(&exp_info);
- if (IS_ERR(shm->dmabuf)) {
- ret = ERR_CAST(shm->dmabuf);
- teedev->desc->ops->shm_unregister(ctx, shm);
- goto err;
- }
- }
-
return shm;
err:
if (shm) {
@@ -339,6 +255,35 @@ err:
}
EXPORT_SYMBOL_GPL(tee_shm_register);
+static int tee_shm_fop_release(struct inode *inode, struct file *filp)
+{
+ tee_shm_put(filp->private_data);
+ return 0;
+}
+
+static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct tee_shm *shm = filp->private_data;
+ size_t size = vma->vm_end - vma->vm_start;
+
+ /* Refuse sharing shared memory provided by application */
+ if (shm->flags & TEE_SHM_USER_MAPPED)
+ return -EINVAL;
+
+ /* check for overflowing the buffer's size */
+ if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
+ return -EINVAL;
+
+ return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+}
+
+static const struct file_operations tee_shm_fops = {
+ .owner = THIS_MODULE,
+ .release = tee_shm_fop_release,
+ .mmap = tee_shm_fop_mmap,
+};
+
/**
* tee_shm_get_fd() - Increase reference count and return file descriptor
* @shm: Shared memory handle
@@ -351,10 +296,11 @@ int tee_shm_get_fd(struct tee_shm *shm)
if (!(shm->flags & TEE_SHM_DMA_BUF))
return -EINVAL;
- get_dma_buf(shm->dmabuf);
- fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
+ /* matched by tee_shm_put() in tee_shm_fop_release() */
+ refcount_inc(&shm->refcount);
+ fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR);
if (fd < 0)
- dma_buf_put(shm->dmabuf);
+ tee_shm_put(shm);
return fd;
}
@@ -364,17 +310,7 @@ int tee_shm_get_fd(struct tee_shm *shm)
*/
void tee_shm_free(struct tee_shm *shm)
{
- /*
- * dma_buf_put() decreases the dmabuf reference counter and will
- * call tee_shm_release() when the last reference is gone.
- *
- * In the case of driver private memory we call tee_shm_release
- * directly instead as it doesn't have a reference counter.
- */
- if (shm->flags & TEE_SHM_DMA_BUF)
- dma_buf_put(shm->dmabuf);
- else
- tee_shm_release(shm);
+ tee_shm_put(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);
@@ -481,10 +417,15 @@ struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
teedev = ctx->teedev;
mutex_lock(&teedev->mutex);
shm = idr_find(&teedev->idr, id);
+ /*
+ * If the tee_shm was found in the IDR it must have a refcount
+ * larger than 0 due to the guarantee in tee_shm_put() below. So
+ * it's safe to use refcount_inc().
+ */
if (!shm || shm->ctx != ctx)
shm = ERR_PTR(-EINVAL);
- else if (shm->flags & TEE_SHM_DMA_BUF)
- get_dma_buf(shm->dmabuf);
+ else
+ refcount_inc(&shm->refcount);
mutex_unlock(&teedev->mutex);
return shm;
}
@@ -496,7 +437,24 @@ EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
*/
void tee_shm_put(struct tee_shm *shm)
{
- if (shm->flags & TEE_SHM_DMA_BUF)
- dma_buf_put(shm->dmabuf);
+ struct tee_device *teedev = shm->ctx->teedev;
+ bool do_release = false;
+
+ mutex_lock(&teedev->mutex);
+ if (refcount_dec_and_test(&shm->refcount)) {
+ /*
+ * refcount has reached 0, we must now remove it from the
+ * IDR before releasing the mutex. This will guarantee that
+ * the refcount_inc() in tee_shm_get_from_id() never starts
+ * from 0.
+ */
+ if (shm->flags & TEE_SHM_DMA_BUF)
+ idr_remove(&teedev->idr, shm->id);
+ do_release = true;
+ }
+ mutex_unlock(&teedev->mutex);
+
+ if (do_release)
+ tee_shm_release(teedev, shm);
}
EXPORT_SYMBOL_GPL(tee_shm_put);
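For readers following the new lifetime rules, the change above reduces to one pattern: lookups and the final put share the same mutex, and the object is removed from the IDR before that mutex is dropped, so a lookup can never call refcount_inc() on an object whose refcount already reached zero. The following is a minimal sketch of that pattern with hypothetical obj_* names, not kernel API:

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/slab.h>

/* Hypothetical object using the same lookup/put discipline as tee_shm. */
struct obj {
	refcount_t refcount;
	int id;
};

static struct obj *obj_get_from_id(struct idr *idr, struct mutex *lock, int id)
{
	struct obj *o;

	mutex_lock(lock);
	o = idr_find(idr, id);
	if (o)
		refcount_inc(&o->refcount); /* never starts from 0, see obj_put() */
	mutex_unlock(lock);

	return o;
}

static void obj_put(struct obj *o, struct idr *idr, struct mutex *lock)
{
	bool do_release = false;

	mutex_lock(lock);
	if (refcount_dec_and_test(&o->refcount)) {
		/* unpublish before dropping the lock so lookups cannot revive it */
		idr_remove(idr, o->id);
		do_release = true;
	}
	mutex_unlock(lock);

	if (do_release)
		kfree(o);
}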
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
index b25b54d4bac1..e693ec8234fb 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
@@ -29,7 +29,7 @@ static const char * const fivr_strings[] = {
};
static const struct mmio_reg tgl_fivr_mmio_regs[] = {
- { 0, 0x5A18, 3, 0x7, 12}, /* vco_ref_code_lo */
+ { 0, 0x5A18, 3, 0x7, 11}, /* vco_ref_code_lo */
{ 0, 0x5A18, 8, 0xFF, 16}, /* vco_ref_code_hi */
{ 0, 0x5A08, 8, 0xFF, 0}, /* spread_spectrum_pct */
{ 0, 0x5A08, 1, 0x1, 8}, /* spread_spectrum_clk_enable */
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index f0bf01ea069a..ebaf7500f48f 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -37,6 +37,8 @@ struct xencons_info {
struct xenbus_device *xbdev;
struct xencons_interface *intf;
unsigned int evtchn;
+ XENCONS_RING_IDX out_cons;
+ unsigned int out_cons_same;
struct hvc_struct *hvc;
int irq;
int vtermno;
@@ -138,6 +140,8 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
XENCONS_RING_IDX cons, prod;
int recv = 0;
struct xencons_info *xencons = vtermno_to_xencons(vtermno);
+ unsigned int eoiflag = 0;
+
if (xencons == NULL)
return -EINVAL;
intf = xencons->intf;
@@ -157,7 +161,27 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
mb(); /* read ring before consuming */
intf->in_cons = cons;
- notify_daemon(xencons);
+ /*
+ * When to mark the interrupt as having been spurious:
+ * - there was no new data to be read, and
+ * - the backend did not consume any output bytes, and
+ * - the previous round with no read data didn't see consumed bytes
+ * (we might have a race with an interrupt being in flight while
+ * updating xencons->out_cons, so account for that by allowing one
+ * round without any visible reason)
+ */
+ if (intf->out_cons != xencons->out_cons) {
+ xencons->out_cons = intf->out_cons;
+ xencons->out_cons_same = 0;
+ }
+ if (recv) {
+ notify_daemon(xencons);
+ } else if (xencons->out_cons_same++ > 1) {
+ eoiflag = XEN_EOI_FLAG_SPURIOUS;
+ }
+
+ xen_irq_lateeoi(xencons->irq, eoiflag);
+
return recv;
}
@@ -386,7 +410,7 @@ static int xencons_connect_backend(struct xenbus_device *dev,
if (ret)
return ret;
info->evtchn = evtchn;
- irq = bind_evtchn_to_irq(evtchn);
+ irq = bind_interdomain_evtchn_to_irq_lateeoi(dev, evtchn);
if (irq < 0)
return irq;
info->irq = irq;
@@ -522,6 +546,7 @@ static struct xenbus_driver xencons_driver = {
.remove = xencons_remove,
.resume = xencons_resume,
.otherend_changed = xencons_backend_changed,
+ .not_essential = true,
};
#endif /* CONFIG_HVC_XEN_FRONTEND */
@@ -550,7 +575,7 @@ static int __init xen_hvc_init(void)
return r;
info = vtermno_to_xencons(HVC_COOKIE);
- info->irq = bind_evtchn_to_irq(info->evtchn);
+ info->irq = bind_evtchn_to_irq_lateeoi(info->evtchn);
}
if (info->irq < 0)
info->irq = 0; /* NO_IRQ */
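The lateeoi conversion above only reports an interrupt as spurious after repeated rounds in which nothing happened: no input was read and the backend consumed no output, with one grace round to absorb a racing in-flight interrupt. A distilled sketch of that decision (not the driver code itself):

/* Distilled eoiflag decision from domU_read_console() above. */
static unsigned int xencons_eoi_flag(struct xencons_info *xencons,
				     XENCONS_RING_IDX backend_out_cons,
				     int bytes_read)
{
	if (backend_out_cons != xencons->out_cons) {
		/* backend made progress on output: clearly not spurious */
		xencons->out_cons = backend_out_cons;
		xencons->out_cons_same = 0;
	}

	if (bytes_read)
		return 0;

	/* allow one idle round for an interrupt that was already in flight */
	if (xencons->out_cons_same++ > 1)
		return XEN_EOI_FLAG_SPURIOUS;

	return 0;
}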
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 7e0884ecc74f..23ba1fc99df8 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -140,6 +140,8 @@ struct n_hdlc {
struct n_hdlc_buf_list rx_buf_list;
struct n_hdlc_buf_list tx_free_buf_list;
struct n_hdlc_buf_list rx_free_buf_list;
+ struct work_struct write_work;
+ struct tty_struct *tty_for_write_work;
};
/*
@@ -154,6 +156,7 @@ static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list);
/* Local functions */
static struct n_hdlc *n_hdlc_alloc(void);
+static void n_hdlc_tty_write_work(struct work_struct *work);
/* max frame size for memory allocations */
static int maxframe = 4096;
@@ -210,6 +213,8 @@ static void n_hdlc_tty_close(struct tty_struct *tty)
wake_up_interruptible(&tty->read_wait);
wake_up_interruptible(&tty->write_wait);
+ cancel_work_sync(&n_hdlc->write_work);
+
n_hdlc_free_buf_list(&n_hdlc->rx_free_buf_list);
n_hdlc_free_buf_list(&n_hdlc->tx_free_buf_list);
n_hdlc_free_buf_list(&n_hdlc->rx_buf_list);
@@ -241,6 +246,8 @@ static int n_hdlc_tty_open(struct tty_struct *tty)
return -ENFILE;
}
+ INIT_WORK(&n_hdlc->write_work, n_hdlc_tty_write_work);
+ n_hdlc->tty_for_write_work = tty;
tty->disc_data = n_hdlc;
tty->receive_room = 65536;
@@ -335,6 +342,20 @@ check_again:
} /* end of n_hdlc_send_frames() */
/**
+ * n_hdlc_tty_write_work - Asynchronous callback for transmit wakeup
+ * @work: pointer to work_struct
+ *
+ * Called when low level device driver can accept more send data.
+ */
+static void n_hdlc_tty_write_work(struct work_struct *work)
+{
+ struct n_hdlc *n_hdlc = container_of(work, struct n_hdlc, write_work);
+ struct tty_struct *tty = n_hdlc->tty_for_write_work;
+
+ n_hdlc_send_frames(n_hdlc, tty);
+} /* end of n_hdlc_tty_write_work() */
+
+/**
* n_hdlc_tty_wakeup - Callback for transmit wakeup
* @tty: pointer to associated tty instance data
*
@@ -344,7 +365,7 @@ static void n_hdlc_tty_wakeup(struct tty_struct *tty)
{
struct n_hdlc *n_hdlc = tty->disc_data;
- n_hdlc_send_frames(n_hdlc, tty);
+ schedule_work(&n_hdlc->write_work);
} /* end of n_hdlc_tty_wakeup() */
/**
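The wakeup change above is the usual recipe for moving transmit work out of an atomic callback: set up the work item at open, schedule it from the wakeup hook, and cancel it synchronously at close so it cannot run against freed state. A generic sketch with hypothetical foo_* names:

#include <linux/workqueue.h>

/* Generic deferred-wakeup lifecycle mirroring the n_hdlc change above. */
struct foo {
	struct work_struct write_work;
};

static void foo_write_work(struct work_struct *work)
{
	struct foo *foo = container_of(work, struct foo, write_work);

	/* do the actual transmit here; this context may sleep */
	(void)foo;
}

static void foo_open(struct foo *foo)
{
	INIT_WORK(&foo->write_work, foo_write_work);
}

static void foo_wakeup(struct foo *foo)
{
	schedule_work(&foo->write_work);	/* safe from atomic context */
}

static void foo_close(struct foo *foo)
{
	cancel_work_sync(&foo->write_work);	/* callback cannot run afterwards */
}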
diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
index 7f656fac503f..5163d60756b7 100644
--- a/drivers/tty/serial/8250/8250_bcm7271.c
+++ b/drivers/tty/serial/8250/8250_bcm7271.c
@@ -237,6 +237,7 @@ struct brcmuart_priv {
u32 rx_err;
u32 rx_timeout;
u32 rx_abort;
+ u32 saved_mctrl;
};
static struct dentry *brcmuart_debugfs_root;
@@ -1133,16 +1134,27 @@ static int brcmuart_remove(struct platform_device *pdev)
static int __maybe_unused brcmuart_suspend(struct device *dev)
{
struct brcmuart_priv *priv = dev_get_drvdata(dev);
+ struct uart_8250_port *up = serial8250_get_port(priv->line);
+ struct uart_port *port = &up->port;
serial8250_suspend_port(priv->line);
clk_disable_unprepare(priv->baud_mux_clk);
+ /*
+ * This will prevent resume from enabling RTS before the
+ * baud rate has been restored.
+ */
+ priv->saved_mctrl = port->mctrl;
+ port->mctrl = 0;
+
return 0;
}
static int __maybe_unused brcmuart_resume(struct device *dev)
{
struct brcmuart_priv *priv = dev_get_drvdata(dev);
+ struct uart_8250_port *up = serial8250_get_port(priv->line);
+ struct uart_port *port = &up->port;
int ret;
ret = clk_prepare_enable(priv->baud_mux_clk);
@@ -1165,6 +1177,7 @@ static int __maybe_unused brcmuart_resume(struct device *dev)
start_rx_dma(serial8250_get_port(priv->line));
}
serial8250_resume_port(priv->line);
+ port->mctrl = priv->saved_mctrl;
return 0;
}
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index 31c9e83ea3cb..251f0018ae8c 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -290,25 +290,6 @@ static void fintek_8250_set_max_fifo(struct fintek_8250 *pdata)
}
}
-static void fintek_8250_goto_highspeed(struct uart_8250_port *uart,
- struct fintek_8250 *pdata)
-{
- sio_write_reg(pdata, LDN, pdata->index);
-
- switch (pdata->pid) {
- case CHIP_ID_F81966:
- case CHIP_ID_F81866: /* set uart clock for high speed serial mode */
- sio_write_mask_reg(pdata, F81866_UART_CLK,
- F81866_UART_CLK_MASK,
- F81866_UART_CLK_14_769MHZ);
-
- uart->port.uartclk = 921600 * 16;
- break;
- default: /* leave clock speed untouched */
- break;
- }
-}
-
static void fintek_8250_set_termios(struct uart_port *port,
struct ktermios *termios,
struct ktermios *old)
@@ -430,7 +411,6 @@ static int probe_setup_port(struct fintek_8250 *pdata,
fintek_8250_set_irq_mode(pdata, level_mode);
fintek_8250_set_max_fifo(pdata);
- fintek_8250_goto_highspeed(uart, pdata);
fintek_8250_exit_key(addr[i]);
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 5d43de143f33..60f8fffdfd77 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1324,29 +1324,33 @@ pericom_do_set_divisor(struct uart_port *port, unsigned int baud,
{
int scr;
int lcr;
- int actual_baud;
- int tolerance;
- for (scr = 5 ; scr <= 15 ; scr++) {
- actual_baud = 921600 * 16 / scr;
- tolerance = actual_baud / 50;
+ for (scr = 16; scr > 4; scr--) {
+ unsigned int maxrate = port->uartclk / scr;
+ unsigned int divisor = max(maxrate / baud, 1U);
+ int delta = maxrate / divisor - baud;
- if ((baud < actual_baud + tolerance) &&
- (baud > actual_baud - tolerance)) {
+ if (baud > maxrate + baud / 50)
+ continue;
+ if (delta > baud / 50)
+ divisor++;
+
+ if (divisor > 0xffff)
+ continue;
+
+ /* Update delta due to possible divisor change */
+ delta = maxrate / divisor - baud;
+ if (abs(delta) < baud / 50) {
lcr = serial_port_in(port, UART_LCR);
serial_port_out(port, UART_LCR, lcr | 0x80);
-
- serial_port_out(port, UART_DLL, 1);
- serial_port_out(port, UART_DLM, 0);
+ serial_port_out(port, UART_DLL, divisor & 0xff);
+ serial_port_out(port, UART_DLM, divisor >> 8 & 0xff);
serial_port_out(port, 2, 16 - scr);
serial_port_out(port, UART_LCR, lcr);
return;
- } else if (baud > actual_baud) {
- break;
}
}
- serial8250_do_set_divisor(port, baud, quot, quot_frac);
}
static int pci_pericom_setup(struct serial_private *priv,
const struct pciserial_board *board,
@@ -2291,7 +2295,7 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
.setup = pci_pericom_setup_four_at_eight,
},
{
- .vendor = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
+ .vendor = PCI_VENDOR_ID_ACCESIO,
.device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
@@ -2299,6 +2303,13 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
},
{
.vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup_four_at_eight,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
.device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
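The rewritten divisor search above walks the sample-clock prescaler from 16 down to 5 and accepts the first prescaler/divisor pair whose rate error stays within baud/50 (2%). For instance, with uartclk = 14745600 Hz and baud = 115200, scr = 16 gives maxrate = 921600 and divisor = 8 with zero error, so the first iteration already succeeds. A register-free sketch of the selection logic (hypothetical helper name):

/* Register-free version of the prescaler/divisor search used above. */
static int pericom_pick_divisor(unsigned int uartclk, unsigned int baud,
				unsigned int *scr_out, unsigned int *div_out)
{
	unsigned int scr;

	for (scr = 16; scr > 4; scr--) {
		unsigned int maxrate = uartclk / scr;
		unsigned int divisor = max(maxrate / baud, 1U);
		int delta = maxrate / divisor - baud;

		if (baud > maxrate + baud / 50)
			continue;		/* even divisor 1 is more than 2% slow */

		if (delta > (int)(baud / 50))
			divisor++;		/* overshooting; step to the nearer rate */

		if (divisor > 0xffff)
			continue;		/* does not fit in DLL/DLM */

		delta = maxrate / divisor - baud;
		if (abs(delta) < (int)(baud / 50)) {
			*scr_out = scr;
			*div_out = divisor;
			return 0;
		}
	}

	return -EINVAL;	/* nothing within 2% of the requested rate */
}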
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 5775cbff8f6e..46e2079ad1aa 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -2024,13 +2024,6 @@ void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl)
struct uart_8250_port *up = up_to_u8250p(port);
unsigned char mcr;
- if (port->rs485.flags & SER_RS485_ENABLED) {
- if (serial8250_in_MCR(up) & UART_MCR_RTS)
- mctrl |= TIOCM_RTS;
- else
- mctrl &= ~TIOCM_RTS;
- }
-
mcr = serial8250_TIOCM_to_MCR(mctrl);
mcr = (mcr & up->mcr_mask) | up->mcr_force | up->mcr;
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 6ff94cfcd9db..fc543ac97c13 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1533,7 +1533,7 @@ config SERIAL_LITEUART
tristate "LiteUART serial port support"
depends on HAS_IOMEM
depends on OF || COMPILE_TEST
- depends on LITEX
+ depends on LITEX || COMPILE_TEST
select SERIAL_CORE
help
This driver is for the FPGA-based LiteUART serial controller from LiteX
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index d361cd84ff8c..52518a606c06 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2947,6 +2947,7 @@ MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);
static const struct acpi_device_id __maybe_unused sbsa_uart_acpi_match[] = {
{ "ARMH0011", 0 },
+ { "ARMHB000", 0 },
{},
};
MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index b1e7190ae483..ac5112def40d 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -2625,6 +2625,7 @@ OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1028a-lpuart", ls1028a_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup);
+OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8qxp-lpuart", lpuart32_imx_early_console_setup);
EARLYCON_DECLARE(lpuart, lpuart_early_console_setup);
EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup);
diff --git a/drivers/tty/serial/liteuart.c b/drivers/tty/serial/liteuart.c
index dbc0559a9157..2941659e5274 100644
--- a/drivers/tty/serial/liteuart.c
+++ b/drivers/tty/serial/liteuart.c
@@ -270,8 +270,10 @@ static int liteuart_probe(struct platform_device *pdev)
/* get membase */
port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
- if (IS_ERR(port->membase))
- return PTR_ERR(port->membase);
+ if (IS_ERR(port->membase)) {
+ ret = PTR_ERR(port->membase);
+ goto err_erase_id;
+ }
/* values not from device tree */
port->dev = &pdev->dev;
@@ -285,7 +287,18 @@ static int liteuart_probe(struct platform_device *pdev)
port->line = dev_id;
spin_lock_init(&port->lock);
- return uart_add_one_port(&liteuart_driver, &uart->port);
+ platform_set_drvdata(pdev, port);
+
+ ret = uart_add_one_port(&liteuart_driver, &uart->port);
+ if (ret)
+ goto err_erase_id;
+
+ return 0;
+
+err_erase_id:
+ xa_erase(&liteuart_array, uart->id);
+
+ return ret;
}
static int liteuart_remove(struct platform_device *pdev)
@@ -293,6 +306,7 @@ static int liteuart_remove(struct platform_device *pdev)
struct uart_port *port = platform_get_drvdata(pdev);
struct liteuart_port *uart = to_liteuart_port(port);
+ uart_remove_one_port(&liteuart_driver, port);
xa_erase(&liteuart_array, uart->id);
return 0;
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index fcef7a961430..489d19274f9a 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -598,6 +598,9 @@ static void msm_start_rx_dma(struct msm_port *msm_port)
u32 val;
int ret;
+ if (IS_ENABLED(CONFIG_CONSOLE_POLL))
+ return;
+
if (!dma->chan)
return;
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index 45e2e4109acd..b6223fab0687 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -1506,7 +1506,7 @@ static struct tegra_uart_chip_data tegra20_uart_chip_data = {
.fifo_mode_enable_status = false,
.uart_max_port = 5,
.max_dma_burst_bytes = 4,
- .error_tolerance_low_range = 0,
+ .error_tolerance_low_range = -4,
.error_tolerance_high_range = 4,
};
@@ -1517,7 +1517,7 @@ static struct tegra_uart_chip_data tegra30_uart_chip_data = {
.fifo_mode_enable_status = false,
.uart_max_port = 5,
.max_dma_burst_bytes = 4,
- .error_tolerance_low_range = 0,
+ .error_tolerance_low_range = -4,
.error_tolerance_high_range = 4,
};
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 1e738f265eea..61e3dd0222af 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1075,6 +1075,11 @@ uart_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
goto out;
if (!tty_io_error(tty)) {
+ if (uport->rs485.flags & SER_RS485_ENABLED) {
+ set &= ~TIOCM_RTS;
+ clear &= ~TIOCM_RTS;
+ }
+
uart_update_mctrl(uport, set, clear);
ret = 0;
}
@@ -1549,6 +1554,7 @@ static void uart_tty_port_shutdown(struct tty_port *port)
{
struct uart_state *state = container_of(port, struct uart_state, port);
struct uart_port *uport = uart_port_check(state);
+ char *buf;
/*
* At this point, we stop accepting input. To do this, we
@@ -1570,8 +1576,18 @@ static void uart_tty_port_shutdown(struct tty_port *port)
*/
tty_port_set_suspended(port, 0);
- uart_change_pm(state, UART_PM_STATE_OFF);
+ /*
+ * Free the transmit buffer.
+ */
+ spin_lock_irq(&uport->lock);
+ buf = state->xmit.buf;
+ state->xmit.buf = NULL;
+ spin_unlock_irq(&uport->lock);
+ if (buf)
+ free_page((unsigned long)buf);
+
+ uart_change_pm(state, UART_PM_STATE_OFF);
}
static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index 1f3b4a142212..f9af7ebe003d 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -337,19 +337,6 @@ static void cdns3_ep_inc_deq(struct cdns3_endpoint *priv_ep)
cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs);
}
-static void cdns3_move_deq_to_next_trb(struct cdns3_request *priv_req)
-{
- struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
- int current_trb = priv_req->start_trb;
-
- while (current_trb != priv_req->end_trb) {
- cdns3_ep_inc_deq(priv_ep);
- current_trb = priv_ep->dequeue;
- }
-
- cdns3_ep_inc_deq(priv_ep);
-}
-
/**
* cdns3_allow_enable_l1 - enable/disable permits to transition to L1.
* @priv_dev: Extended gadget object
@@ -1517,10 +1504,11 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
trb = priv_ep->trb_pool + priv_ep->dequeue;
- /* Request was dequeued and TRB was changed to TRB_LINK. */
- if (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) {
+ /* The TRB was changed as link TRB, and the request was handled at ep_dequeue */
+ while (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) {
trace_cdns3_complete_trb(priv_ep, trb);
- cdns3_move_deq_to_next_trb(priv_req);
+ cdns3_ep_inc_deq(priv_ep);
+ trb = priv_ep->trb_pool + priv_ep->dequeue;
}
if (!request->stream_id) {
diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
index 27df0c697897..e85bf768c66d 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.c
+++ b/drivers/usb/cdns3/cdnsp-gadget.c
@@ -1541,15 +1541,27 @@ static int cdnsp_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
struct cdnsp_device *pdev = gadget_to_cdnsp(gadget);
struct cdns *cdns = dev_get_drvdata(pdev->dev);
+ unsigned long flags;
trace_cdnsp_pullup(is_on);
+ /*
+ * Disable event handling while the controller is being
+ * enabled/disabled.
+ */
+ disable_irq(cdns->dev_irq);
+ spin_lock_irqsave(&pdev->lock, flags);
+
if (!is_on) {
cdnsp_reset_device(pdev);
cdns_clear_vbus(cdns);
} else {
cdns_set_vbus(cdns);
}
+
+ spin_unlock_irqrestore(&pdev->lock, flags);
+ enable_irq(cdns->dev_irq);
+
return 0;
}
diff --git a/drivers/usb/cdns3/cdnsp-mem.c b/drivers/usb/cdns3/cdnsp-mem.c
index ad9aee3f1e39..97866bfb2da9 100644
--- a/drivers/usb/cdns3/cdnsp-mem.c
+++ b/drivers/usb/cdns3/cdnsp-mem.c
@@ -987,6 +987,9 @@ int cdnsp_endpoint_init(struct cdnsp_device *pdev,
/* Set up the endpoint ring. */
pep->ring = cdnsp_ring_alloc(pdev, 2, ring_type, max_packet, mem_flags);
+ if (!pep->ring)
+ return -ENOMEM;
+
pep->skip = false;
/* Fill the endpoint context */
diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
index 1b1438457fb0..e45c3d6e1536 100644
--- a/drivers/usb/cdns3/cdnsp-ring.c
+++ b/drivers/usb/cdns3/cdnsp-ring.c
@@ -1029,6 +1029,8 @@ static void cdnsp_process_ctrl_td(struct cdnsp_device *pdev,
return;
}
+ *status = 0;
+
cdnsp_finish_td(pdev, td, event, pep, status);
}
@@ -1523,7 +1525,14 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
spin_lock_irqsave(&pdev->lock, flags);
if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) {
- cdnsp_died(pdev);
+ /*
+ * While removing or stopping the driver there may still be a deferred,
+ * unhandled interrupt which should not be treated as an error. The
+ * driver should simply ignore it.
+ */
+ if (pdev->gadget_driver)
+ cdnsp_died(pdev);
+
spin_unlock_irqrestore(&pdev->lock, flags);
return IRQ_HANDLED;
}
diff --git a/drivers/usb/cdns3/cdnsp-trace.h b/drivers/usb/cdns3/cdnsp-trace.h
index 6a2571c6aa9e..5983dfb99653 100644
--- a/drivers/usb/cdns3/cdnsp-trace.h
+++ b/drivers/usb/cdns3/cdnsp-trace.h
@@ -57,9 +57,9 @@ DECLARE_EVENT_CLASS(cdnsp_log_ep,
__entry->first_prime_det = pep->stream_info.first_prime_det;
__entry->drbls_count = pep->stream_info.drbls_count;
),
- TP_printk("%s: SID: %08x ep state: %x stream: enabled: %d num %d "
+ TP_printk("%s: SID: %08x, ep state: %x, stream: enabled: %d num %d "
"tds %d, first prime: %d drbls %d",
- __get_str(name), __entry->state, __entry->stream_id,
+ __get_str(name), __entry->stream_id, __entry->state,
__entry->enabled, __entry->num_streams, __entry->td_count,
__entry->first_prime_det, __entry->drbls_count)
);
diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c
index 84dadfa726aa..9643b905e2d8 100644
--- a/drivers/usb/cdns3/host.c
+++ b/drivers/usb/cdns3/host.c
@@ -10,6 +10,7 @@
*/
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include "core.h"
#include "drd.h"
#include "host-export.h"
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index f1d100671ee6..097142ffb184 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -420,15 +420,15 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
data->phy = devm_usb_get_phy_by_phandle(dev, "fsl,usbphy", 0);
if (IS_ERR(data->phy)) {
ret = PTR_ERR(data->phy);
- if (ret == -ENODEV) {
- data->phy = devm_usb_get_phy_by_phandle(dev, "phys", 0);
- if (IS_ERR(data->phy)) {
- ret = PTR_ERR(data->phy);
- if (ret == -ENODEV)
- data->phy = NULL;
- else
- goto err_clk;
- }
+ if (ret != -ENODEV)
+ goto err_clk;
+ data->phy = devm_usb_get_phy_by_phandle(dev, "phys", 0);
+ if (IS_ERR(data->phy)) {
+ ret = PTR_ERR(data->phy);
+ if (ret == -ENODEV)
+ data->phy = NULL;
+ else
+ goto err_clk;
}
}
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 16b1fd9dc60c..48bc8a4814ac 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -406,7 +406,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
* the USB-2 spec requires such endpoints to have wMaxPacketSize = 0
* (see the end of section 5.6.3), so don't warn about them.
*/
- maxp = usb_endpoint_maxp(&endpoint->desc);
+ maxp = le16_to_cpu(endpoint->desc.wMaxPacketSize);
if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) {
dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n",
cfgno, inum, asnum, d->bEndpointAddress);
@@ -422,9 +422,9 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
maxpacket_maxes = full_speed_maxpacket_maxes;
break;
case USB_SPEED_HIGH:
- /* Bits 12..11 are allowed only for HS periodic endpoints */
+ /* Multiple-transactions bits are allowed only for HS periodic endpoints */
if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) {
- i = maxp & (BIT(12) | BIT(11));
+ i = maxp & USB_EP_MAXP_MULT_MASK;
maxp &= ~i;
}
fallthrough;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 86658a81d284..00070a8a6507 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4700,8 +4700,6 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
if (oldspeed == USB_SPEED_LOW)
delay = HUB_LONG_RESET_TIME;
- mutex_lock(hcd->address0_mutex);
-
/* Reset the device; full speed may morph to high speed */
/* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
retval = hub_port_reset(hub, port1, udev, delay, false);
@@ -5016,7 +5014,6 @@ fail:
hub_port_disable(hub, port1, 0);
update_devnum(udev, devnum); /* for disconnect processing */
}
- mutex_unlock(hcd->address0_mutex);
return retval;
}
@@ -5191,6 +5188,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_device *udev = port_dev->child;
static int unreliable_port = -1;
+ bool retry_locked;
/* Disconnect any existing devices under this port */
if (udev) {
@@ -5246,8 +5244,11 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
unit_load = 100;
status = 0;
- for (i = 0; i < PORT_INIT_TRIES; i++) {
+ for (i = 0; i < PORT_INIT_TRIES; i++) {
+ usb_lock_port(port_dev);
+ mutex_lock(hcd->address0_mutex);
+ retry_locked = true;
/* reallocate for each attempt, since references
* to the previous one can escape in various ways
*/
@@ -5255,6 +5256,8 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
if (!udev) {
dev_err(&port_dev->dev,
"couldn't allocate usb_device\n");
+ mutex_unlock(hcd->address0_mutex);
+ usb_unlock_port(port_dev);
goto done;
}
@@ -5276,12 +5279,14 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
}
/* reset (non-USB 3.0 devices) and get descriptor */
- usb_lock_port(port_dev);
status = hub_port_init(hub, udev, port1, i);
- usb_unlock_port(port_dev);
if (status < 0)
goto loop;
+ mutex_unlock(hcd->address0_mutex);
+ usb_unlock_port(port_dev);
+ retry_locked = false;
+
if (udev->quirks & USB_QUIRK_DELAY_INIT)
msleep(2000);
@@ -5374,6 +5379,10 @@ loop:
usb_ep0_reinit(udev);
release_devnum(udev);
hub_free_dev(udev);
+ if (retry_locked) {
+ mutex_unlock(hcd->address0_mutex);
+ usb_unlock_port(port_dev);
+ }
usb_put_dev(udev);
if ((status == -ENOTCONN) || (status == -ENOTSUPP))
break;
@@ -5915,6 +5924,8 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
bos = udev->bos;
udev->bos = NULL;
+ mutex_lock(hcd->address0_mutex);
+
for (i = 0; i < PORT_INIT_TRIES; ++i) {
/* ep0 maxpacket size may change; let the HCD know about it.
@@ -5924,6 +5935,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV)
break;
}
+ mutex_unlock(hcd->address0_mutex);
if (ret < 0)
goto re_enumerate;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 8239fe7129dd..d3c14b5ed4a1 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -434,6 +434,12 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x1532, 0x0116), .driver_info =
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+ /* Lenovo USB-C to Ethernet Adapter RTL8153-04 */
+ { USB_DEVICE(0x17ef, 0x720c), .driver_info = USB_QUIRK_NO_LPM },
+
+ /* Lenovo Powered USB-C Travel Hub (4X90S92381, RTL8153 GigE) */
+ { USB_DEVICE(0x17ef, 0x721e), .driver_info = USB_QUIRK_NO_LPM },
+
/* Lenovo ThinkCenter A630Z TI024Gen3 usb-audio */
{ USB_DEVICE(0x17ef, 0xa012), .driver_info =
USB_QUIRK_DISCONNECT_SUSPEND },
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 4ab4a1d5062b..ab8d7dad9f56 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -1198,6 +1198,8 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
}
ctrl |= DXEPCTL_CNAK;
} else {
+ hs_req->req.frame_number = hs_ep->target_frame;
+ hs_req->req.actual = 0;
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
return;
}
@@ -2857,9 +2859,12 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
do {
hs_req = get_ep_head(hs_ep);
- if (hs_req)
+ if (hs_req) {
+ hs_req->req.frame_number = hs_ep->target_frame;
+ hs_req->req.actual = 0;
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req,
-ENODATA);
+ }
dwc2_gadget_incr_frame_num(hs_ep);
/* Update current frame number value. */
hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
@@ -2912,8 +2917,11 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
while (dwc2_gadget_target_frame_elapsed(ep)) {
hs_req = get_ep_head(ep);
- if (hs_req)
+ if (hs_req) {
+ hs_req->req.frame_number = ep->target_frame;
+ hs_req->req.actual = 0;
dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA);
+ }
dwc2_gadget_incr_frame_num(ep);
/* Update current frame number value. */
@@ -3002,8 +3010,11 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
hs_req = get_ep_head(hs_ep);
- if (hs_req)
+ if (hs_req) {
+ hs_req->req.frame_number = hs_ep->target_frame;
+ hs_req->req.actual = 0;
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
+ }
dwc2_gadget_incr_frame_num(hs_ep);
/* Update current frame number value. */
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index 89a788326c56..24beff610cf2 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -59,7 +59,7 @@
#define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
/* If we get a NAK, wait this long before retrying */
-#define DWC2_RETRY_WAIT_DELAY (1 * 1E6L)
+#define DWC2_RETRY_WAIT_DELAY (1 * NSEC_PER_MSEC)
/**
* dwc2_periodic_channel_available() - Checks that a channel is available for a
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index c8f18f3ba9e3..c331a5128c2c 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -575,6 +575,9 @@ static int dwc2_driver_probe(struct platform_device *dev)
ggpio |= GGPIO_STM32_OTG_GCCFG_IDEN;
ggpio |= GGPIO_STM32_OTG_GCCFG_VBDEN;
dwc2_writel(hsotg, ggpio, GGPIO);
+
+ /* ID/VBUS detection startup time */
+ usleep_range(5000, 7000);
}
retval = dwc2_drd_init(hsotg);
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 643239d7d370..f4c09951b517 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1594,9 +1594,11 @@ static int dwc3_probe(struct platform_device *pdev)
dwc3_get_properties(dwc);
- ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
- if (ret)
- return ret;
+ if (!dwc->sysdev_is_parent) {
+ ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
+ }
dwc->reset = devm_reset_control_array_get_optional_shared(dev);
if (IS_ERR(dwc->reset))
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 620c8d3914d7..5c491d0a19d7 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -143,7 +143,7 @@
#define DWC3_GHWPARAMS8 0xc600
#define DWC3_GUCTL3 0xc60c
#define DWC3_GFLADJ 0xc630
-#define DWC3_GHWPARAMS9 0xc680
+#define DWC3_GHWPARAMS9 0xc6e0
/* Device Registers */
#define DWC3_DCFG 0xc700
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index 9abbd01028c5..3cb01cdd02c2 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -649,7 +649,6 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
struct device_node *np = pdev->dev.of_node, *dwc3_np;
struct device *dev = &pdev->dev;
- struct property *prop;
int ret;
dwc3_np = of_get_compatible_child(np, "snps,dwc3");
@@ -658,20 +657,6 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
return -ENODEV;
}
- prop = devm_kzalloc(dev, sizeof(*prop), GFP_KERNEL);
- if (!prop) {
- ret = -ENOMEM;
- dev_err(dev, "unable to allocate memory for property\n");
- goto node_put;
- }
-
- prop->name = "tx-fifo-resize";
- ret = of_add_property(dwc3_np, prop);
- if (ret) {
- dev_err(dev, "unable to add property\n");
- goto node_put;
- }
-
ret = of_platform_populate(np, NULL, NULL, dev);
if (ret) {
dev_err(dev, "failed to register dwc3 core - %d\n", ret);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 23de2a5a40d6..7e3db00e9759 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -310,13 +310,24 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
int link_state;
+ /*
+ * Initiate remote wakeup if the link state is in U3 when
+ * operating in SS/SSP or L1/L2 when operating in HS/FS. If the
+ * link state is in U1/U2, no remote wakeup is needed. The Start
+ * Transfer command will initiate the link recovery.
+ */
link_state = dwc3_gadget_get_link_state(dwc);
- if (link_state == DWC3_LINK_STATE_U1 ||
- link_state == DWC3_LINK_STATE_U2 ||
- link_state == DWC3_LINK_STATE_U3) {
+ switch (link_state) {
+ case DWC3_LINK_STATE_U2:
+ if (dwc->gadget->speed >= USB_SPEED_SUPER)
+ break;
+
+ fallthrough;
+ case DWC3_LINK_STATE_U3:
ret = __dwc3_gadget_wakeup(dwc);
dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
ret);
+ break;
}
}
@@ -3252,6 +3263,9 @@ static bool dwc3_gadget_endpoint_trbs_complete(struct dwc3_ep *dep,
struct dwc3 *dwc = dep->dwc;
bool no_started_trb = true;
+ if (!dep->endpoint.desc)
+ return no_started_trb;
+
dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
@@ -3299,6 +3313,9 @@ static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
{
int status = 0;
+ if (!dep->endpoint.desc)
+ return;
+
if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
dwc3_gadget_endpoint_frame_from_event(dep, event);
@@ -3352,6 +3369,14 @@ static void dwc3_gadget_endpoint_command_complete(struct dwc3_ep *dep,
if (cmd != DWC3_DEPCMD_ENDTRANSFER)
return;
+ /*
+ * The END_TRANSFER command will cause the controller to generate a
+ * NoStream Event, and it's not due to the host DP NoStream rejection.
+ * Ignore the next NoStream event.
+ */
+ if (dep->stream_capable)
+ dep->flags |= DWC3_EP_IGNORE_NEXT_NOSTREAM;
+
dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
dwc3_gadget_ep_cleanup_cancelled_requests(dep);
@@ -3574,14 +3599,6 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
WARN_ON_ONCE(ret);
dep->resource_index = 0;
- /*
- * The END_TRANSFER command will cause the controller to generate a
- * NoStream Event, and it's not due to the host DP NoStream rejection.
- * Ignore the next NoStream event.
- */
- if (dep->stream_capable)
- dep->flags |= DWC3_EP_IGNORE_NEXT_NOSTREAM;
-
if (!interrupt)
dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
else
diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
index 933d77ad0a64..4502108069cd 100644
--- a/drivers/usb/early/xhci-dbc.c
+++ b/drivers/usb/early/xhci-dbc.c
@@ -14,7 +14,6 @@
#include <linux/pci_ids.h>
#include <linux/memblock.h>
#include <linux/io.h>
-#include <linux/iopoll.h>
#include <asm/pci-direct.h>
#include <asm/fixmap.h>
#include <linux/bcd.h>
@@ -136,9 +135,17 @@ static int handshake(void __iomem *ptr, u32 mask, u32 done, int wait, int delay)
{
u32 result;
- return readl_poll_timeout_atomic(ptr, result,
- ((result & mask) == done),
- delay, wait);
+ /* Cannot use readl_poll_timeout_atomic() this early in the boot process */
+ do {
+ result = readl(ptr);
+ result &= mask;
+ if (result == done)
+ return 0;
+ udelay(delay);
+ wait -= delay;
+ } while (wait > 0);
+
+ return -ETIMEDOUT;
}
static void __init xdbc_bios_handoff(void)
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 504c1cbc255d..3789c329183c 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1679,6 +1679,18 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
struct usb_function *f = NULL;
u8 endp;
+ if (w_length > USB_COMP_EP0_BUFSIZ) {
+ if (ctrl->bRequestType & USB_DIR_IN) {
+ /* Cast away the const, we are going to overwrite on purpose. */
+ __le16 *temp = (__le16 *)&ctrl->wLength;
+
+ *temp = cpu_to_le16(USB_COMP_EP0_BUFSIZ);
+ w_length = USB_COMP_EP0_BUFSIZ;
+ } else {
+ goto done;
+ }
+ }
+
/* partial re-init of the response message; the function or the
* gadget might need to intercept e.g. a control-OUT completion
* when we delegate to it.
@@ -2209,7 +2221,7 @@ int composite_dev_prepare(struct usb_composite_driver *composite,
if (!cdev->req)
return -ENOMEM;
- cdev->req->buf = kmalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL);
+ cdev->req->buf = kzalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL);
if (!cdev->req->buf)
goto fail;
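The same length guard is added in three gadget setup paths in this series (composite_setup(), dbgp_setup() and gadgetfs_setup()): a control IN request whose wLength exceeds the ep0 buffer has its length clamped to the buffer size, while an oversized OUT request is refused outright. A stripped-down sketch of that check, with EP0_BUFSIZ standing in for the respective buffer size:

#include <linux/usb/ch9.h>

#define EP0_BUFSIZ	4096	/* stand-in for USB_COMP_EP0_BUFSIZ, DBGP_REQ_LEN or RBUF_SIZE */

static int clamp_ep0_wlength(struct usb_ctrlrequest *ctrl, u16 *w_length)
{
	if (*w_length <= EP0_BUFSIZ)
		return 0;

	/* OUT transfers larger than the buffer cannot be accepted at all */
	if (!(ctrl->bRequestType & USB_DIR_IN))
		return -EOVERFLOW;

	/* IN transfers: answer with at most one buffer's worth of data */
	ctrl->wLength = cpu_to_le16(EP0_BUFSIZ);
	*w_length = EP0_BUFSIZ;

	return 0;
}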
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index e20c19a0f106..a7e069b18544 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1773,11 +1773,15 @@ static void ffs_data_clear(struct ffs_data *ffs)
BUG_ON(ffs->gadget);
- if (ffs->epfiles)
+ if (ffs->epfiles) {
ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
+ ffs->epfiles = NULL;
+ }
- if (ffs->ffs_eventfd)
+ if (ffs->ffs_eventfd) {
eventfd_ctx_put(ffs->ffs_eventfd);
+ ffs->ffs_eventfd = NULL;
+ }
kfree(ffs->raw_descs_data);
kfree(ffs->raw_strings);
@@ -1790,7 +1794,6 @@ static void ffs_data_reset(struct ffs_data *ffs)
ffs_data_clear(ffs);
- ffs->epfiles = NULL;
ffs->raw_descs_data = NULL;
ffs->raw_descs = NULL;
ffs->raw_strings = NULL;
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index e0ad5aed6ac9..6f5d45ef2e39 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -17,6 +17,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
+#include <linux/etherdevice.h>
#include "u_ether.h"
@@ -863,19 +864,23 @@ int gether_register_netdev(struct net_device *net)
{
struct eth_dev *dev;
struct usb_gadget *g;
- struct sockaddr sa;
int status;
if (!net->dev.parent)
return -EINVAL;
dev = netdev_priv(net);
g = dev->gadget;
+
+ net->addr_assign_type = NET_ADDR_RANDOM;
+ eth_hw_addr_set(net, dev->dev_mac);
+
status = register_netdev(net);
if (status < 0) {
dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
return status;
} else {
INFO(dev, "HOST MAC %pM\n", dev->host_mac);
+ INFO(dev, "MAC %pM\n", dev->dev_mac);
/* two kinds of host-initiated state changes:
* - iff DATA transfer is active, carrier is "on"
@@ -883,15 +888,6 @@ int gether_register_netdev(struct net_device *net)
*/
netif_carrier_off(net);
}
- sa.sa_family = net->type;
- memcpy(sa.sa_data, dev->dev_mac, ETH_ALEN);
- rtnl_lock();
- status = dev_set_mac_address(net, &sa, NULL);
- rtnl_unlock();
- if (status)
- pr_warn("cannot set self ethernet address: %d\n", status);
- else
- INFO(dev, "MAC %pM\n", dev->dev_mac);
return status;
}
diff --git a/drivers/usb/gadget/legacy/dbgp.c b/drivers/usb/gadget/legacy/dbgp.c
index e1d566c9918a..6bcbad382580 100644
--- a/drivers/usb/gadget/legacy/dbgp.c
+++ b/drivers/usb/gadget/legacy/dbgp.c
@@ -137,7 +137,7 @@ static int dbgp_enable_ep_req(struct usb_ep *ep)
goto fail_1;
}
- req->buf = kmalloc(DBGP_REQ_LEN, GFP_KERNEL);
+ req->buf = kzalloc(DBGP_REQ_LEN, GFP_KERNEL);
if (!req->buf) {
err = -ENOMEM;
stp = 2;
@@ -345,6 +345,19 @@ static int dbgp_setup(struct usb_gadget *gadget,
void *data = NULL;
u16 len = 0;
+ if (length > DBGP_REQ_LEN) {
+ if (ctrl->bRequestType & USB_DIR_IN) {
+ /* Cast away the const, we are going to overwrite on purpose. */
+ __le16 *temp = (__le16 *)&ctrl->wLength;
+
+ *temp = cpu_to_le16(DBGP_REQ_LEN);
+ length = DBGP_REQ_LEN;
+ } else {
+ return err;
+ }
+ }
+
+
if (request == USB_REQ_GET_DESCRIPTOR) {
switch (value>>8) {
case USB_DT_DEVICE:
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 78be94750232..3b58f4fc0a80 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -110,6 +110,8 @@ enum ep0_state {
/* enough for the whole queue: most events invalidate others */
#define N_EVENT 5
+#define RBUF_SIZE 256
+
struct dev_data {
spinlock_t lock;
refcount_t count;
@@ -144,7 +146,7 @@ struct dev_data {
struct dentry *dentry;
/* except this scratch i/o buffer for ep0 */
- u8 rbuf [256];
+ u8 rbuf[RBUF_SIZE];
};
static inline void get_dev (struct dev_data *data)
@@ -1331,6 +1333,18 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
u16 w_value = le16_to_cpu(ctrl->wValue);
u16 w_length = le16_to_cpu(ctrl->wLength);
+ if (w_length > RBUF_SIZE) {
+ if (ctrl->bRequestType & USB_DIR_IN) {
+ /* Cast away the const, we are going to overwrite on purpose. */
+ __le16 *temp = (__le16 *)&ctrl->wLength;
+
+ *temp = cpu_to_le16(RBUF_SIZE);
+ w_length = RBUF_SIZE;
+ } else {
+ return value;
+ }
+ }
+
spin_lock (&dev->lock);
dev->setup_abort = 0;
if (dev->state == STATE_DEV_UNCONNECTED) {
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index f5ca670776a3..857159dd5ae0 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -2136,7 +2136,7 @@ static int xudc_probe(struct platform_device *pdev)
ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
if (ret)
- goto fail;
+ goto err_disable_unprepare_clk;
udc->dev = &udc->gadget.dev;
@@ -2155,6 +2155,9 @@ static int xudc_probe(struct platform_device *pdev)
udc->dma_enabled ? "with DMA" : "without DMA");
return 0;
+
+err_disable_unprepare_clk:
+ clk_disable_unprepare(udc->clk);
fail:
dev_err(&pdev->dev, "probe failed, %d\n", ret);
return ret;
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index af946c42b6f0..df3522dab31b 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -717,6 +717,7 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci,
continue;
retval = xhci_disable_slot(xhci, i);
+ xhci_free_virt_device(xhci, i);
if (retval)
xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n",
i, retval);
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index 1edef7527c11..edbfa82c6565 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -781,7 +781,7 @@ int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
ret = xhci_check_bandwidth(hcd, udev);
if (!ret)
- INIT_LIST_HEAD(&mtk->bw_ep_chk_list);
+ list_del_init(&mtk->bw_ep_chk_list);
return ret;
}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 92adf6107864..5c351970cdf1 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -71,6 +71,8 @@
#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_4 0x161e
#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_5 0x15d6
#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6 0x15d7
+#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_7 0x161c
+#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_8 0x161f
#define PCI_DEVICE_ID_ASMEDIA_1042_XHCI 0x1042
#define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142
@@ -121,7 +123,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
/* Look for vendor-specific quirks */
if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
(pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK ||
- pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1100 ||
pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1400)) {
if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
pdev->revision == 0x0) {
@@ -156,6 +157,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009)
xhci->quirks |= XHCI_BROKEN_STREAMS;
+ if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
+ pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1100)
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+
if (pdev->vendor == PCI_VENDOR_ID_NEC)
xhci->quirks |= XHCI_NEC_HOST;
@@ -330,7 +335,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_3 ||
pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_4 ||
pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_5 ||
- pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6))
+ pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6 ||
+ pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_7 ||
+ pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_8))
xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
if (xhci->quirks & XHCI_RESET_ON_RESUME)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 311597bba80e..d0b6806275e0 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -366,7 +366,9 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
/* Must be called with xhci->lock held, releases and aquires lock back */
static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
{
- u32 temp_32;
+ struct xhci_segment *new_seg = xhci->cmd_ring->deq_seg;
+ union xhci_trb *new_deq = xhci->cmd_ring->dequeue;
+ u64 crcr;
int ret;
xhci_dbg(xhci, "Abort command ring\n");
@@ -375,13 +377,18 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
/*
* The control bits like command stop, abort are located in lower
- * dword of the command ring control register. Limit the write
- * to the lower dword to avoid corrupting the command ring pointer
- * in case if the command ring is stopped by the time upper dword
- * is written.
+ * dword of the command ring control register.
+ * Some controllers require all 64 bits to be written to abort the ring.
+ * Make sure the upper dword is valid, pointing to the next command,
+ * to avoid corrupting the command ring pointer in case the command
+ * ring is stopped by the time the upper dword is written.
*/
- temp_32 = readl(&xhci->op_regs->cmd_ring);
- writel(temp_32 | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
+ next_trb(xhci, NULL, &new_seg, &new_deq);
+ if (trb_is_link(new_deq))
+ next_trb(xhci, NULL, &new_seg, &new_deq);
+
+ crcr = xhci_trb_virt_to_dma(new_seg, new_deq);
+ xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
/* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
* completion of the Command Abort operation. If CRR is not negated in 5
@@ -1518,7 +1525,6 @@ static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
/* Delete default control endpoint resources */
xhci_free_device_endpoint_resources(xhci, virt_dev, true);
- xhci_free_virt_device(xhci, slot_id);
}
static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 1bf494b649bd..c8af2cd2216d 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1400,6 +1400,7 @@ static void tegra_xusb_deinit_usb_phy(struct tegra_xusb *tegra)
static int tegra_xusb_probe(struct platform_device *pdev)
{
+ struct of_phandle_args args;
struct tegra_xusb *tegra;
struct device_node *np;
struct resource *regs;
@@ -1454,10 +1455,17 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto put_padctl;
}
- tegra->padctl_irq = of_irq_get(np, 0);
- if (tegra->padctl_irq <= 0) {
- err = (tegra->padctl_irq == 0) ? -ENODEV : tegra->padctl_irq;
- goto put_padctl;
+ /* Older device-trees don't have padctrl interrupt */
+ err = of_irq_parse_one(np, 0, &args);
+ if (!err) {
+ tegra->padctl_irq = of_irq_get(np, 0);
+ if (tegra->padctl_irq <= 0) {
+ err = (tegra->padctl_irq == 0) ? -ENODEV : tegra->padctl_irq;
+ goto put_padctl;
+ }
+ } else {
+ dev_dbg(&pdev->dev,
+ "%pOF is missing an interrupt, disabling PM support\n", np);
}
tegra->host_clk = devm_clk_get(&pdev->dev, "xusb_host");
@@ -1696,11 +1704,15 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto remove_usb3;
}
- err = devm_request_threaded_irq(&pdev->dev, tegra->padctl_irq, NULL, tegra_xusb_padctl_irq,
- IRQF_ONESHOT, dev_name(&pdev->dev), tegra);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to request padctl IRQ: %d\n", err);
- goto remove_usb3;
+ if (tegra->padctl_irq) {
+ err = devm_request_threaded_irq(&pdev->dev, tegra->padctl_irq,
+ NULL, tegra_xusb_padctl_irq,
+ IRQF_ONESHOT, dev_name(&pdev->dev),
+ tegra);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request padctl IRQ: %d\n", err);
+ goto remove_usb3;
+ }
}
err = tegra_xusb_enable_firmware_messages(tegra);
@@ -1718,13 +1730,16 @@ static int tegra_xusb_probe(struct platform_device *pdev)
/* Enable wake for both USB 2.0 and USB 3.0 roothubs */
device_init_wakeup(&tegra->hcd->self.root_hub->dev, true);
device_init_wakeup(&xhci->shared_hcd->self.root_hub->dev, true);
- device_init_wakeup(tegra->dev, true);
pm_runtime_use_autosuspend(tegra->dev);
pm_runtime_set_autosuspend_delay(tegra->dev, 2000);
pm_runtime_mark_last_busy(tegra->dev);
pm_runtime_set_active(tegra->dev);
- pm_runtime_enable(tegra->dev);
+
+ if (tegra->padctl_irq) {
+ device_init_wakeup(tegra->dev, true);
+ pm_runtime_enable(tegra->dev);
+ }
return 0;
@@ -1772,7 +1787,9 @@ static int tegra_xusb_remove(struct platform_device *pdev)
dma_free_coherent(&pdev->dev, tegra->fw.size, tegra->fw.virt,
tegra->fw.phys);
- pm_runtime_disable(&pdev->dev);
+ if (tegra->padctl_irq)
+ pm_runtime_disable(&pdev->dev);
+
pm_runtime_put(&pdev->dev);
tegra_xusb_powergate_partitions(tegra);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 902f410874e8..f5b1bcc875de 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3934,7 +3934,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
struct xhci_slot_ctx *slot_ctx;
int i, ret;
-#ifndef CONFIG_USB_DEFAULT_PERSIST
/*
* We called pm_runtime_get_noresume when the device was attached.
* Decrement the counter here to allow controller to runtime suspend
@@ -3942,7 +3941,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
*/
if (xhci->quirks & XHCI_RESET_ON_RESUME)
pm_runtime_put_noidle(hcd->self.controller);
-#endif
ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
/* If the host is halted due to driver unload, we still need to free the
@@ -3961,9 +3959,8 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
}
virt_dev->udev = NULL;
- ret = xhci_disable_slot(xhci, udev->slot_id);
- if (ret)
- xhci_free_virt_device(xhci, udev->slot_id);
+ xhci_disable_slot(xhci, udev->slot_id);
+ xhci_free_virt_device(xhci, udev->slot_id);
}
int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
@@ -3973,7 +3970,7 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
u32 state;
int ret = 0;
- command = xhci_alloc_command(xhci, false, GFP_KERNEL);
+ command = xhci_alloc_command(xhci, true, GFP_KERNEL);
if (!command)
return -ENOMEM;
@@ -3998,6 +3995,15 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
+
+ wait_for_completion(command->completion);
+
+ if (command->status != COMP_SUCCESS)
+ xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n",
+ slot_id, command->status);
+
+ xhci_free_command(xhci, command);
+
return ret;
}
@@ -4094,23 +4100,20 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
xhci_debugfs_create_slot(xhci, slot_id);
-#ifndef CONFIG_USB_DEFAULT_PERSIST
/*
* If resetting upon resume, we can't put the controller into runtime
* suspend if there is a device attached.
*/
if (xhci->quirks & XHCI_RESET_ON_RESUME)
pm_runtime_get_noresume(hcd->self.controller);
-#endif
/* Is this a LS or FS device under a HS hub? */
/* Hub or peripherial? */
return 1;
disable_slot:
- ret = xhci_disable_slot(xhci, udev->slot_id);
- if (ret)
- xhci_free_virt_device(xhci, udev->slot_id);
+ xhci_disable_slot(xhci, udev->slot_id);
+ xhci_free_virt_device(xhci, udev->slot_id);
return 0;
}
@@ -4240,6 +4243,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
mutex_unlock(&xhci->mutex);
ret = xhci_disable_slot(xhci, udev->slot_id);
+ xhci_free_virt_device(xhci, udev->slot_id);
if (!ret)
xhci_alloc_dev(hcd, udev);
kfree(command->completion);
diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
index a9a65b4bbfed..9977600616d7 100644
--- a/drivers/usb/mtu3/mtu3_gadget.c
+++ b/drivers/usb/mtu3/mtu3_gadget.c
@@ -77,7 +77,7 @@ static int mtu3_ep_enable(struct mtu3_ep *mep)
if (usb_endpoint_xfer_int(desc) ||
usb_endpoint_xfer_isoc(desc)) {
interval = desc->bInterval;
- interval = clamp_val(interval, 1, 16) - 1;
+ interval = clamp_val(interval, 1, 16);
if (usb_endpoint_xfer_isoc(desc) && comp_desc)
mult = comp_desc->bmAttributes;
}
@@ -89,10 +89,17 @@ static int mtu3_ep_enable(struct mtu3_ep *mep)
if (usb_endpoint_xfer_isoc(desc) ||
usb_endpoint_xfer_int(desc)) {
interval = desc->bInterval;
- interval = clamp_val(interval, 1, 16) - 1;
+ interval = clamp_val(interval, 1, 16);
mult = usb_endpoint_maxp_mult(desc) - 1;
}
break;
+ case USB_SPEED_FULL:
+ if (usb_endpoint_xfer_isoc(desc))
+ interval = clamp_val(desc->bInterval, 1, 16);
+ else if (usb_endpoint_xfer_int(desc))
+ interval = clamp_val(desc->bInterval, 1, 255);
+
+ break;
default:
break; /* others are ignored */
}
@@ -235,6 +242,7 @@ struct usb_request *mtu3_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
mreq->request.dma = DMA_ADDR_INVALID;
mreq->epnum = mep->epnum;
mreq->mep = mep;
+ INIT_LIST_HEAD(&mreq->list);
trace_mtu3_alloc_request(mreq);
return &mreq->request;
diff --git a/drivers/usb/mtu3/mtu3_qmu.c b/drivers/usb/mtu3/mtu3_qmu.c
index 3f414f91b589..2ea3157ddb6e 100644
--- a/drivers/usb/mtu3/mtu3_qmu.c
+++ b/drivers/usb/mtu3/mtu3_qmu.c
@@ -273,6 +273,8 @@ static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
gpd->dw3_info |= cpu_to_le32(GPD_EXT_FLAG_ZLP);
}
+ /* prevent reorder, make sure GPD's HWO is set last */
+ mb();
gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);
mreq->gpd = gpd;
@@ -306,6 +308,8 @@ static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
gpd->dw3_info = cpu_to_le32(ext_addr);
+ /* prevent reorder, make sure GPD's HWO is set last */
+ mb();
gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);
mreq->gpd = gpd;
@@ -445,7 +449,8 @@ static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
return;
}
mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);
-
+ /* prevent reorder, make sure GPD's HWO is set last */
+ mb();
/* bypass the current GPD */
gpd_current->dw0_info |= cpu_to_le32(GPD_FLAGS_BPS | GPD_FLAGS_HWO);
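
The three mb() barriers added above all enforce the same descriptor-handoff rule: fully populate the GPD, issue a memory barrier, and only then set the HWO (hardware ownership) bit, so the controller can never pick up a half-written descriptor. A rough standalone sketch of that publish ordering, using C11 release semantics in place of the kernel's mb() and an invented descriptor layout purely for illustration:

    #include <stdatomic.h>
    #include <stdint.h>

    struct fake_gpd {                       /* invented layout, not mtu3's */
            uint32_t buf_addr;
            uint32_t buf_len;
            _Atomic uint32_t dw0_info;      /* holds the ownership flag */
    };

    #define FAKE_FLAG_HWO (1u << 0)         /* "hardware owns this GPD" */

    static void fake_gpd_publish(struct fake_gpd *gpd, uint32_t addr, uint32_t len)
    {
            gpd->buf_addr = addr;
            gpd->buf_len = len;
            /*
             * The release store takes the place of mb(): every plain store
             * above must be visible before the ownership flag is observed.
             */
            atomic_fetch_or_explicit(&gpd->dw0_info, FAKE_FLAG_HWO,
                                     memory_order_release);
    }
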
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 7705328034ca..8a60c0d56863 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -1635,6 +1635,8 @@ static int cp2105_gpioconf_init(struct usb_serial *serial)
/* 2 banks of GPIO - One for the pins taken from each serial port */
if (intf_num == 0) {
+ priv->gc.ngpio = 2;
+
if (mode.eci == CP210X_PIN_MODE_MODEM) {
/* mark all GPIOs of this interface as reserved */
priv->gpio_altfunc = 0xff;
@@ -1645,8 +1647,9 @@ static int cp2105_gpioconf_init(struct usb_serial *serial)
priv->gpio_pushpull = (u8)((le16_to_cpu(config.gpio_mode) &
CP210X_ECI_GPIO_MODE_MASK) >>
CP210X_ECI_GPIO_MODE_OFFSET);
- priv->gc.ngpio = 2;
} else if (intf_num == 1) {
+ priv->gc.ngpio = 3;
+
if (mode.sci == CP210X_PIN_MODE_MODEM) {
/* mark all GPIOs of this interface as reserved */
priv->gpio_altfunc = 0xff;
@@ -1657,7 +1660,6 @@ static int cp2105_gpioconf_init(struct usb_serial *serial)
priv->gpio_pushpull = (u8)((le16_to_cpu(config.gpio_mode) &
CP210X_SCI_GPIO_MODE_MASK) >>
CP210X_SCI_GPIO_MODE_OFFSET);
- priv->gc.ngpio = 3;
} else {
return -ENODEV;
}
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index a484ff5e4ebf..42420bfc983c 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1219,6 +1219,14 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1063, 0xff), /* Telit LN920 (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1070, 0xff), /* Telit FN990 (rmnet) */
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1071, 0xff), /* Telit FN990 (MBIM) */
+ .driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1072, 0xff), /* Telit FN990 (RNDIS) */
+ .driver_info = NCTRL(2) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff), /* Telit FN990 (ECM) */
+ .driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -1267,6 +1275,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
.driver_info = NCTRL(0) | ZLP },
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */
+ .driver_info = NCTRL(0) | ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) },
@@ -2094,6 +2104,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
+ .driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index f45ca7ddf78e..a70fd86f735c 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -432,6 +432,7 @@ static int pl2303_detect_type(struct usb_serial *serial)
case 0x200:
switch (bcdDevice) {
case 0x100:
+ case 0x105:
case 0x305:
case 0x405:
/*
diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index 7a2a17866a82..72f9001b0792 100644
--- a/drivers/usb/typec/tcpm/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -669,25 +669,27 @@ static int tcpm_set_cc(struct tcpc_dev *dev, enum typec_cc_status cc)
ret = fusb302_i2c_mask_write(chip, FUSB_REG_MASK,
FUSB_REG_MASK_BC_LVL |
FUSB_REG_MASK_COMP_CHNG,
- FUSB_REG_MASK_COMP_CHNG);
+ FUSB_REG_MASK_BC_LVL);
if (ret < 0) {
fusb302_log(chip, "cannot set SRC interrupt, ret=%d",
ret);
goto done;
}
chip->intr_comp_chng = true;
+ chip->intr_bc_lvl = false;
break;
case TYPEC_CC_RD:
ret = fusb302_i2c_mask_write(chip, FUSB_REG_MASK,
FUSB_REG_MASK_BC_LVL |
FUSB_REG_MASK_COMP_CHNG,
- FUSB_REG_MASK_BC_LVL);
+ FUSB_REG_MASK_COMP_CHNG);
if (ret < 0) {
fusb302_log(chip, "cannot set SRC interrupt, ret=%d",
ret);
goto done;
}
chip->intr_bc_lvl = true;
+ chip->intr_comp_chng = false;
break;
default:
break;
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 7f2f3ff1b391..59d4fa2443f2 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -324,6 +324,7 @@ struct tcpm_port {
bool attached;
bool connected;
+ bool registered;
bool pd_supported;
enum typec_port_type port_type;
@@ -4110,11 +4111,7 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_try_src(port) ? SRC_TRY
: SNK_ATTACHED,
0);
- else
- /* Wait for VBUS, but not forever */
- tcpm_set_state(port, PORT_RESET, PD_T_PS_SOURCE_ON);
break;
-
case SRC_TRY:
port->try_src_count++;
tcpm_set_cc(port, tcpm_rp_cc(port));
@@ -6295,7 +6292,8 @@ static enum hrtimer_restart state_machine_timer_handler(struct hrtimer *timer)
{
struct tcpm_port *port = container_of(timer, struct tcpm_port, state_machine_timer);
- kthread_queue_work(port->wq, &port->state_machine);
+ if (port->registered)
+ kthread_queue_work(port->wq, &port->state_machine);
return HRTIMER_NORESTART;
}
@@ -6303,7 +6301,8 @@ static enum hrtimer_restart vdm_state_machine_timer_handler(struct hrtimer *timer)
{
struct tcpm_port *port = container_of(timer, struct tcpm_port, vdm_state_machine_timer);
- kthread_queue_work(port->wq, &port->vdm_state_machine);
+ if (port->registered)
+ kthread_queue_work(port->wq, &port->vdm_state_machine);
return HRTIMER_NORESTART;
}
@@ -6311,7 +6310,8 @@ static enum hrtimer_restart enable_frs_timer_handler(struct hrtimer *timer)
{
struct tcpm_port *port = container_of(timer, struct tcpm_port, enable_frs_timer);
- kthread_queue_work(port->wq, &port->enable_frs);
+ if (port->registered)
+ kthread_queue_work(port->wq, &port->enable_frs);
return HRTIMER_NORESTART;
}
@@ -6319,7 +6319,8 @@ static enum hrtimer_restart send_discover_timer_handler(struct hrtimer *timer)
{
struct tcpm_port *port = container_of(timer, struct tcpm_port, send_discover_timer);
- kthread_queue_work(port->wq, &port->send_discover_work);
+ if (port->registered)
+ kthread_queue_work(port->wq, &port->send_discover_work);
return HRTIMER_NORESTART;
}
@@ -6407,6 +6408,7 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
typec_port_register_altmodes(port->typec_port,
&tcpm_altmode_ops, port,
port->port_altmode, ALTMODE_DISCOVERY_MAX);
+ port->registered = true;
mutex_lock(&port->lock);
tcpm_init(port);
@@ -6428,6 +6430,9 @@ void tcpm_unregister_port(struct tcpm_port *port)
{
int i;
+ port->registered = false;
+ kthread_destroy_worker(port->wq);
+
hrtimer_cancel(&port->send_discover_timer);
hrtimer_cancel(&port->enable_frs_timer);
hrtimer_cancel(&port->vdm_state_machine_timer);
@@ -6439,7 +6444,6 @@ void tcpm_unregister_port(struct tcpm_port *port)
typec_unregister_port(port->typec_port);
usb_role_switch_put(port->role_sw);
tcpm_debugfs_exit(port);
- kthread_destroy_worker(port->wq);
}
EXPORT_SYMBOL_GPL(tcpm_unregister_port);
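
The tcpm hunks above close a teardown race: an hrtimer can still fire while tcpm_unregister_port() runs, so each timer handler now checks port->registered before queueing work, and the kthread worker is destroyed (after the flag is cleared) before the timers are cancelled. A hypothetical, stripped-down sketch of the same guard, with stand-in types instead of the kernel's kthread worker API:

    #include <stdbool.h>
    #include <stdio.h>

    struct fake_worker { const char *name; };   /* stand-in for kthread_worker */
    struct fake_work   { const char *name; };   /* stand-in for kthread_work */

    static void fake_queue_work(struct fake_worker *wq, struct fake_work *w)
    {
            printf("queue %s on %s\n", w->name, wq->name);
    }

    struct fake_port {
            bool registered;            /* cleared before the worker is destroyed */
            struct fake_worker *wq;
            struct fake_work state_machine;
    };

    /* Timer callback: must never touch the worker once teardown has begun. */
    static void fake_timer_handler(struct fake_port *port)
    {
            if (port->registered)
                    fake_queue_work(port->wq, &port->state_machine);
    }
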
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index fb8ef12bbe9c..6d27a5b5e3ca 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -653,7 +653,7 @@ static int cd321x_switch_power_state(struct tps6598x *tps, u8 target_state)
if (state == target_state)
return 0;
- ret = tps6598x_exec_cmd(tps, "SPSS", sizeof(u8), &target_state, 0, NULL);
+ ret = tps6598x_exec_cmd(tps, "SSPS", sizeof(u8), &target_state, 0, NULL);
if (ret)
return ret;
@@ -707,6 +707,7 @@ static int tps6598x_probe(struct i2c_client *client)
u32 conf;
u32 vid;
int ret;
+ u64 mask1;
tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
if (!tps)
@@ -730,11 +731,6 @@ static int tps6598x_probe(struct i2c_client *client)
if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
tps->i2c_protocol = true;
- /* Make sure the controller has application firmware running */
- ret = tps6598x_check_mode(tps);
- if (ret)
- return ret;
-
if (np && of_device_is_compatible(np, "apple,cd321x")) {
/* Switch CD321X chips to the correct system power state */
ret = cd321x_switch_power_state(tps, TPS_SYSTEM_POWER_STATE_S0);
@@ -742,24 +738,27 @@ static int tps6598x_probe(struct i2c_client *client)
return ret;
/* CD321X chips have all interrupts masked initially */
- ret = tps6598x_write64(tps, TPS_REG_INT_MASK1,
- APPLE_CD_REG_INT_POWER_STATUS_UPDATE |
- APPLE_CD_REG_INT_DATA_STATUS_UPDATE |
- APPLE_CD_REG_INT_PLUG_EVENT);
- if (ret)
- return ret;
+ mask1 = APPLE_CD_REG_INT_POWER_STATUS_UPDATE |
+ APPLE_CD_REG_INT_DATA_STATUS_UPDATE |
+ APPLE_CD_REG_INT_PLUG_EVENT;
irq_handler = cd321x_interrupt;
} else {
/* Enable power status, data status and plug event interrupts */
- ret = tps6598x_write64(tps, TPS_REG_INT_MASK1,
- TPS_REG_INT_POWER_STATUS_UPDATE |
- TPS_REG_INT_DATA_STATUS_UPDATE |
- TPS_REG_INT_PLUG_EVENT);
- if (ret)
- return ret;
+ mask1 = TPS_REG_INT_POWER_STATUS_UPDATE |
+ TPS_REG_INT_DATA_STATUS_UPDATE |
+ TPS_REG_INT_PLUG_EVENT;
}
+ /* Make sure the controller has application firmware running */
+ ret = tps6598x_check_mode(tps);
+ if (ret)
+ return ret;
+
+ ret = tps6598x_write64(tps, TPS_REG_INT_MASK1, mask1);
+ if (ret)
+ return ret;
+
ret = tps6598x_read32(tps, TPS_REG_STATUS, &status);
if (ret < 0)
return ret;
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 6aa28384f77f..08561bf7c40c 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -1150,7 +1150,9 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
ret = 0;
}
- if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) == UCSI_CONSTAT_PWR_OPMODE_PD) {
+ if (con->partner &&
+ UCSI_CONSTAT_PWR_OPMODE(con->status.flags) ==
+ UCSI_CONSTAT_PWR_OPMODE_PD) {
ucsi_get_src_pdos(con);
ucsi_check_altmodes(con);
}
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index 7332a74a4b00..09bbe53c3ac4 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -404,7 +404,8 @@ static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *m
goto msg_err;
while (mdev->id_table[i].device) {
- supported_classes |= BIT(mdev->id_table[i].device);
+ if (mdev->id_table[i].device <= 63)
+ supported_classes |= BIT_ULL(mdev->id_table[i].device);
i++;
}
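
The vdpa change matters on 32-bit builds, where BIT() shifts an unsigned long: a virtio device id of 32 or more would be an out-of-range shift (undefined behaviour), so the mask is now built with BIT_ULL() and ids above 63 are skipped. A standalone illustration of the corrected mask construction (the macros are re-declared locally and the id is made up):

    #include <stdint.h>
    #include <stdio.h>

    #define MY_BIT(n)     (1UL << (n))      /* width of unsigned long: 32 or 64 bits */
    #define MY_BIT_ULL(n) (1ULL << (n))     /* always 64 bits wide */

    int main(void)
    {
            uint64_t supported = 0;
            unsigned int id = 40;           /* hypothetical device id above 31 */

            if (id <= 63)                   /* mirror the <= 63 guard in the hunk */
                    supported |= MY_BIT_ULL(id);

            printf("supported_classes = %#llx\n", (unsigned long long)supported);
            return 0;
    }
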
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 5f484fff8dbe..41b0cd17fcba 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -591,8 +591,11 @@ static void vdpasim_free(struct vdpa_device *vdpa)
vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov);
}
- put_iova_domain(&vdpasim->iova);
- iova_cache_put();
+ if (vdpa_get_dma_dev(vdpa)) {
+ put_iova_domain(&vdpasim->iova);
+ iova_cache_put();
+ }
+
kvfree(vdpasim->buffer);
if (vdpasim->iommu)
vhost_iotlb_free(vdpasim->iommu);
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index c9204c62f339..eddcb64a910a 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -655,7 +655,8 @@ static void vduse_vdpa_get_config(struct vdpa_device *vdpa, unsigned int offset,
{
struct vduse_dev *dev = vdpa_to_vduse(vdpa);
- if (len > dev->config_size - offset)
+ if (offset > dev->config_size ||
+ len > dev->config_size - offset)
return;
memcpy(buf, dev->config + offset, len);
@@ -975,7 +976,8 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
break;
ret = -EINVAL;
- if (config.length == 0 ||
+ if (config.offset > dev->config_size ||
+ config.length == 0 ||
config.length > dev->config_size - config.offset)
break;
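
This vduse check (and the similar vhost-vdpa change further down) guards the subtraction itself: with unsigned arithmetic, dev->config_size - offset wraps around when offset exceeds config_size, so len > config_size - offset alone would accept wildly out-of-range offsets. A small self-contained version of the corrected range check, with hypothetical names:

    #include <stdbool.h>
    #include <stddef.h>

    /* Accept only a non-empty range [off, off + len) inside config_size bytes. */
    static bool config_range_ok(size_t config_size, size_t off, size_t len)
    {
            if (off > config_size)          /* reject before subtracting ... */
                    return false;
            if (len > config_size - off)    /* ... so this cannot wrap */
                    return false;
            return len != 0;                /* mirrors the ioctl path's length check */
    }
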
diff --git a/drivers/vfio/pci/vfio_pci_igd.c b/drivers/vfio/pci/vfio_pci_igd.c
index 56cd551e0e04..362f91ec8845 100644
--- a/drivers/vfio/pci/vfio_pci_igd.c
+++ b/drivers/vfio/pci/vfio_pci_igd.c
@@ -98,7 +98,8 @@ static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev,
version = cpu_to_le16(0x0201);
if (igd_opregion_shift_copy(buf, &off,
- &version + (pos - OPREGION_VERSION),
+ (u8 *)&version +
+ (pos - OPREGION_VERSION),
&pos, &remaining, bytes))
return -EFAULT;
}
@@ -121,7 +122,7 @@ static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev,
OPREGION_SIZE : 0);
if (igd_opregion_shift_copy(buf, &off,
- &rvda + (pos - OPREGION_RVDA),
+ (u8 *)&rvda + (pos - OPREGION_RVDA),
&pos, &remaining, bytes))
return -EFAULT;
}
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 82fb75464f92..735d1d344af9 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -232,7 +232,7 @@ static inline bool vfio_iommu_driver_allowed(struct vfio_container *container,
}
#endif /* CONFIG_VFIO_NOIOMMU */
-/**
+/*
* IOMMU driver registration
*/
int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
@@ -285,7 +285,7 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
unsigned long action, void *data);
static void vfio_group_get(struct vfio_group *group);
-/**
+/*
* Container objects - containers are created when /dev/vfio/vfio is
* opened, but their lifecycle extends until the last user is done, so
* it's freed via kref. Must support container/group/device being
@@ -309,7 +309,7 @@ static void vfio_container_put(struct vfio_container *container)
kref_put(&container->kref, vfio_container_release);
}
-/**
+/*
* Group objects - create, release, get, put, search
*/
static struct vfio_group *
@@ -488,7 +488,7 @@ static struct vfio_group *vfio_group_get_from_dev(struct device *dev)
return group;
}
-/**
+/*
* Device objects - create, release, get, put, search
*/
/* Device reference always implies a group reference */
@@ -595,7 +595,7 @@ static int vfio_dev_viable(struct device *dev, void *data)
return ret;
}
-/**
+/*
* Async device support
*/
static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
@@ -689,7 +689,7 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
-/**
+/*
* VFIO driver API
*/
void vfio_init_group_dev(struct vfio_device *device, struct device *dev,
@@ -831,7 +831,7 @@ int vfio_register_emulated_iommu_dev(struct vfio_device *device)
}
EXPORT_SYMBOL_GPL(vfio_register_emulated_iommu_dev);
-/**
+/*
* Get a reference to the vfio_device for a device. Even if the
* caller thinks they own the device, they could be racing with a
* release call path, so we can't trust drvdata for the shortcut.
@@ -965,7 +965,7 @@ void vfio_unregister_group_dev(struct vfio_device *device)
}
EXPORT_SYMBOL_GPL(vfio_unregister_group_dev);
-/**
+/*
* VFIO base fd, /dev/vfio/vfio
*/
static long vfio_ioctl_check_extension(struct vfio_container *container,
@@ -1183,7 +1183,7 @@ static const struct file_operations vfio_fops = {
.compat_ioctl = compat_ptr_ioctl,
};
-/**
+/*
* VFIO Group fd, /dev/vfio/$GROUP
*/
static void __vfio_group_unset_container(struct vfio_group *group)
@@ -1536,7 +1536,7 @@ static const struct file_operations vfio_group_fops = {
.release = vfio_group_fops_release,
};
-/**
+/*
* VFIO Device fd
*/
static int vfio_device_fops_release(struct inode *inode, struct file *filep)
@@ -1611,7 +1611,7 @@ static const struct file_operations vfio_device_fops = {
.mmap = vfio_device_fops_mmap,
};
-/**
+/*
* External user API, exported by symbols to be linked dynamically.
*
* The protocol includes:
@@ -1659,7 +1659,7 @@ struct vfio_group *vfio_group_get_external_user(struct file *filep)
}
EXPORT_SYMBOL_GPL(vfio_group_get_external_user);
-/**
+/*
* External user API, exported by symbols to be linked dynamically.
* The external user passes in a device pointer
* to verify that:
@@ -1725,7 +1725,7 @@ long vfio_external_check_extension(struct vfio_group *group, unsigned long arg)
}
EXPORT_SYMBOL_GPL(vfio_external_check_extension);
-/**
+/*
* Sub-module support
*/
/*
@@ -2272,7 +2272,7 @@ struct iommu_domain *vfio_group_iommu_domain(struct vfio_group *group)
}
EXPORT_SYMBOL_GPL(vfio_group_iommu_domain);
-/**
+/*
* Module/class support
*/
static char *vfio_devnode(struct device *dev, umode_t *mode)
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 01c59ce7e250..e3c4f059b21a 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -197,7 +197,7 @@ static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
struct vdpa_device *vdpa = v->vdpa;
long size = vdpa->config->get_config_size(vdpa);
- if (c->len == 0)
+ if (c->len == 0 || c->off > size)
return -EINVAL;
if (c->len > size - c->off)
@@ -1014,12 +1014,12 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
mutex_lock(&d->mutex);
filep->private_data = NULL;
+ vhost_vdpa_clean_irq(v);
vhost_vdpa_reset(v);
vhost_dev_stop(&v->vdev);
vhost_vdpa_iotlb_free(v);
vhost_vdpa_free_domain(v);
vhost_vdpa_config_put(v);
- vhost_vdpa_clean_irq(v);
vhost_dev_cleanup(&v->vdev);
kfree(v->vdev.vqs);
mutex_unlock(&d->mutex);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 938aefbc75ec..d6ca1c7ad513 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -511,8 +511,6 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
vhost_disable_notify(&vsock->dev, vq);
do {
- u32 len;
-
if (!vhost_vsock_more_replies(vsock)) {
/* Stop tx until the device processes already
* pending replies. Leave tx virtqueue
@@ -540,7 +538,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
continue;
}
- len = pkt->len;
+ total_len += sizeof(pkt->hdr) + pkt->len;
/* Deliver to monitoring devices all received packets */
virtio_transport_deliver_tap_pkt(pkt);
@@ -553,9 +551,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
else
virtio_transport_free_pkt(pkt);
- len += sizeof(pkt->hdr);
- vhost_add_used(vq, head, len);
- total_len += len;
+ vhost_add_used(vq, head, 0);
added = true;
} while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index d4320b147956..576612f18d59 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -345,11 +345,17 @@ static void vgacon_init(struct vc_data *c, int init)
struct uni_pagedir *p;
/*
- * We cannot be loaded as a module, therefore init is always 1,
- * but vgacon_init can be called more than once, and init will
- * not be 1.
+ * We cannot be loaded as a module, so init will be 1 if we are
+ * the default console. However, if we are a fallback console (for
+ * example because fbcon failed to register), init will be 0, so we
+ * must make sure our boot parameters have been copied to the
+ * console structure for vgacon_resize, which is ultimately called
+ * by vc_resize. Any subsequent calls to vgacon_init will have init
+ * set to 0 too.
*/
c->vc_can_do_color = vga_can_do_color;
+ c->vc_scan_lines = vga_scan_lines;
+ c->vc_font.height = c->vc_cell_height = vga_video_font_height;
/* set dimensions manually if init != 0 since vc_resize() will fail */
if (init) {
@@ -358,8 +364,6 @@ static void vgacon_init(struct vc_data *c, int init)
} else
vc_resize(c, vga_video_num_columns, vga_video_num_lines);
- c->vc_scan_lines = vga_scan_lines;
- c->vc_font.height = c->vc_cell_height = vga_video_font_height;
c->vc_complement_mask = 0x7700;
if (vga_512_chars)
c->vc_hi_font_mask = 0x0800;
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index b63074fd892e..57541887188b 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -541,26 +541,7 @@ static struct platform_driver simplefb_driver = {
.remove = simplefb_remove,
};
-static int __init simplefb_init(void)
-{
- int ret;
- struct device_node *np;
-
- ret = platform_driver_register(&simplefb_driver);
- if (ret)
- return ret;
-
- if (IS_ENABLED(CONFIG_OF_ADDRESS) && of_chosen) {
- for_each_child_of_node(of_chosen, np) {
- if (of_device_is_compatible(np, "simple-framebuffer"))
- of_platform_device_create(np, NULL, NULL);
- }
- }
-
- return 0;
-}
-
-fs_initcall(simplefb_init);
+module_platform_driver(simplefb_driver);
MODULE_AUTHOR("Stephen Warren <swarren@wwwdotorg.org>");
MODULE_DESCRIPTION("Simple framebuffer driver");
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index 5ec51445bee8..6826f986da43 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -695,6 +695,7 @@ static struct xenbus_driver xenfb_driver = {
.remove = xenfb_remove,
.resume = xenfb_resume,
.otherend_changed = xenfb_backend_changed,
+ .not_essential = true,
};
static int __init xenfb_init(void)
diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev.c b/drivers/virt/nitro_enclaves/ne_misc_dev.c
index 8939612ee0e0..6894ccb868a6 100644
--- a/drivers/virt/nitro_enclaves/ne_misc_dev.c
+++ b/drivers/virt/nitro_enclaves/ne_misc_dev.c
@@ -886,8 +886,9 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
goto put_pages;
}
- gup_rc = get_user_pages(mem_region.userspace_addr + memory_size, 1, FOLL_GET,
- ne_mem_region->pages + i, NULL);
+ gup_rc = get_user_pages_unlocked(mem_region.userspace_addr + memory_size, 1,
+ ne_mem_region->pages + i, FOLL_GET);
+
if (gup_rc < 0) {
rc = gup_rc;
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 00f64f2f8b72..028b05d44546 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -14,9 +14,6 @@
#include <linux/spinlock.h>
#include <xen/xen.h>
-static bool force_used_validation = false;
-module_param(force_used_validation, bool, 0444);
-
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...) \
@@ -185,9 +182,6 @@ struct vring_virtqueue {
} packed;
};
- /* Per-descriptor in buffer length */
- u32 *buflen;
-
/* How to notify other side. FIXME: commonalize hcalls! */
bool (*notify)(struct virtqueue *vq);
@@ -274,7 +268,7 @@ size_t virtio_max_dma_size(struct virtio_device *vdev)
size_t max_segment_size = SIZE_MAX;
if (vring_use_dma_api(vdev))
- max_segment_size = dma_max_mapping_size(&vdev->dev);
+ max_segment_size = dma_max_mapping_size(vdev->dev.parent);
return max_segment_size;
}
@@ -496,7 +490,6 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
unsigned int i, n, avail, descs_used, prev, err_idx;
int head;
bool indirect;
- u32 buflen = 0;
START_USE(vq);
@@ -578,7 +571,6 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
VRING_DESC_F_NEXT |
VRING_DESC_F_WRITE,
indirect);
- buflen += sg->length;
}
}
/* Last one doesn't continue. */
@@ -618,10 +610,6 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
else
vq->split.desc_state[head].indir_desc = ctx;
- /* Store in buffer length if necessary */
- if (vq->buflen)
- vq->buflen[head] = buflen;
-
/* Put entry in available array (but don't update avail->idx until they
* do sync). */
avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
@@ -796,11 +784,6 @@ static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
BAD_RING(vq, "id %u is not a head!\n", i);
return NULL;
}
- if (vq->buflen && unlikely(*len > vq->buflen[i])) {
- BAD_RING(vq, "used len %d is larger than in buflen %u\n",
- *len, vq->buflen[i]);
- return NULL;
- }
/* detach_buf_split clears data, so grab it now. */
ret = vq->split.desc_state[i].data;
@@ -1079,7 +1062,6 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
unsigned int i, n, err_idx;
u16 head, id;
dma_addr_t addr;
- u32 buflen = 0;
head = vq->packed.next_avail_idx;
desc = alloc_indirect_packed(total_sg, gfp);
@@ -1109,8 +1091,6 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
desc[i].addr = cpu_to_le64(addr);
desc[i].len = cpu_to_le32(sg->length);
i++;
- if (n >= out_sgs)
- buflen += sg->length;
}
}
@@ -1164,10 +1144,6 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
vq->packed.desc_state[id].indir_desc = desc;
vq->packed.desc_state[id].last = id;
- /* Store in buffer length if necessary */
- if (vq->buflen)
- vq->buflen[id] = buflen;
-
vq->num_added += 1;
pr_debug("Added buffer head %i to %p\n", head, vq);
@@ -1203,7 +1179,6 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
__le16 head_flags, flags;
u16 head, id, prev, curr, avail_used_flags;
int err;
- u32 buflen = 0;
START_USE(vq);
@@ -1283,8 +1258,6 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
1 << VRING_PACKED_DESC_F_AVAIL |
1 << VRING_PACKED_DESC_F_USED;
}
- if (n >= out_sgs)
- buflen += sg->length;
}
}
@@ -1304,10 +1277,6 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
vq->packed.desc_state[id].indir_desc = ctx;
vq->packed.desc_state[id].last = prev;
- /* Store in buffer length if necessary */
- if (vq->buflen)
- vq->buflen[id] = buflen;
-
/*
* A driver MUST NOT make the first descriptor in the list
* available before all subsequent descriptors comprising
@@ -1494,11 +1463,6 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
BAD_RING(vq, "id %u is not a head!\n", id);
return NULL;
}
- if (vq->buflen && unlikely(*len > vq->buflen[id])) {
- BAD_RING(vq, "used len %d is larger than in buflen %u\n",
- *len, vq->buflen[id]);
- return NULL;
- }
/* detach_buf_packed clears data, so grab it now. */
ret = vq->packed.desc_state[id].data;
@@ -1704,7 +1668,6 @@ static struct virtqueue *vring_create_virtqueue_packed(
struct vring_virtqueue *vq;
struct vring_packed_desc *ring;
struct vring_packed_desc_event *driver, *device;
- struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver);
dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
size_t ring_size_in_bytes, event_size_in_bytes;
@@ -1794,15 +1757,6 @@ static struct virtqueue *vring_create_virtqueue_packed(
if (!vq->packed.desc_extra)
goto err_desc_extra;
- if (!drv->suppress_used_validation || force_used_validation) {
- vq->buflen = kmalloc_array(num, sizeof(*vq->buflen),
- GFP_KERNEL);
- if (!vq->buflen)
- goto err_buflen;
- } else {
- vq->buflen = NULL;
- }
-
/* No callback? Tell other side not to bother us. */
if (!callback) {
vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
@@ -1815,8 +1769,6 @@ static struct virtqueue *vring_create_virtqueue_packed(
spin_unlock(&vdev->vqs_list_lock);
return &vq->vq;
-err_buflen:
- kfree(vq->packed.desc_extra);
err_desc_extra:
kfree(vq->packed.desc_state);
err_desc_state:
@@ -2224,7 +2176,6 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
void (*callback)(struct virtqueue *),
const char *name)
{
- struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver);
struct vring_virtqueue *vq;
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
@@ -2284,15 +2235,6 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
if (!vq->split.desc_extra)
goto err_extra;
- if (!drv->suppress_used_validation || force_used_validation) {
- vq->buflen = kmalloc_array(vring.num, sizeof(*vq->buflen),
- GFP_KERNEL);
- if (!vq->buflen)
- goto err_buflen;
- } else {
- vq->buflen = NULL;
- }
-
/* Put everything in free lists. */
vq->free_head = 0;
memset(vq->split.desc_state, 0, vring.num *
@@ -2303,8 +2245,6 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
spin_unlock(&vdev->vqs_list_lock);
return &vq->vq;
-err_buflen:
- kfree(vq->split.desc_extra);
err_extra:
kfree(vq->split.desc_state);
err_state:
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index a1b11c62da9e..33e941e40082 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -259,9 +259,15 @@ config XEN_SCSI_BACKEND
if guests need generic access to SCSI devices.
config XEN_PRIVCMD
- tristate
+ tristate "Xen hypercall passthrough driver"
depends on XEN
default m
+ help
+ The hypercall passthrough driver allows privileged user programs to
+ perform Xen hypercalls. This driver is normally required for systems
+ running as Dom0 to perform privileged operations, but in some
+ disaggregated Xen setups this driver might be needed for other
+ domains, too.
config XEN_ACPI_PROCESSOR
tristate "Xen ACPI processor"
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index a78704ae3618..46d9295d9a6e 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1251,6 +1251,12 @@ int bind_evtchn_to_irq(evtchn_port_t evtchn)
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
+int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn)
+{
+ return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip, NULL);
+}
+EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi);
+
static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
struct evtchn_bind_ipi bind_ipi;
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 7984645b5956..3c9ae156b597 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -1275,6 +1275,7 @@ static struct xenbus_driver pvcalls_front_driver = {
.probe = pvcalls_front_probe,
.remove = pvcalls_front_remove,
.otherend_changed = pvcalls_front_changed,
+ .not_essential = true,
};
static int __init pvcalls_frontend_init(void)
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index bd003ca8acbe..fe360c33ce71 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -909,7 +909,7 @@ static struct notifier_block xenbus_resume_nb = {
static int __init xenbus_init(void)
{
- int err = 0;
+ int err;
uint64_t v = 0;
xen_store_domain_type = XS_UNKNOWN;
@@ -949,6 +949,29 @@ static int __init xenbus_init(void)
err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
if (err)
goto out_error;
+ /*
+ * Uninitialized hvm_params are zero and return no error.
+ * Although it is theoretically possible to have
+ * HVM_PARAM_STORE_PFN set to zero on purpose, in reality it is
+ * not zero when valid. If zero, it means that Xenstore hasn't
+ * been properly initialized. Instead of attempting to map a
+ * wrong guest physical address, return an error.
+ *
+ * Also recognize all bits set as an invalid value.
+ */
+ if (!v || !~v) {
+ err = -ENOENT;
+ goto out_error;
+ }
+ /* Avoid truncation on 32-bit. */
+#if BITS_PER_LONG == 32
+ if (v > ULONG_MAX) {
+ pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n",
+ __func__, v);
+ err = -EINVAL;
+ goto out_error;
+ }
+#endif
xen_store_gfn = (unsigned long)v;
xen_store_interface =
xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
@@ -983,8 +1006,10 @@ static int __init xenbus_init(void)
*/
proc_create_mount_point("xen");
#endif
+ return 0;
out_error:
+ xen_store_domain_type = XS_UNKNOWN;
return err;
}
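
The new xenbus_init() checks treat an HVM_PARAM_STORE_PFN of 0 or all-ones as "Xenstore not set up" rather than mapping a bogus frame, and refuse values that would be truncated when narrowed to unsigned long on 32-bit. A hypothetical standalone version of that validation step:

    #include <errno.h>
    #include <limits.h>
    #include <stdint.h>

    /* Validate a 64-bit PFN parameter before narrowing it to unsigned long. */
    static int validate_store_pfn(uint64_t v, unsigned long *out)
    {
            if (v == 0 || v == UINT64_MAX)  /* unset, or all bits set: invalid */
                    return -ENOENT;
            if (v > ULONG_MAX)              /* would truncate on a 32-bit build */
                    return -EINVAL;
            *out = (unsigned long)v;
            return 0;
    }
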
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 480944606a3c..07b010a68fcf 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -211,19 +211,11 @@ static int is_device_connecting(struct device *dev, void *data, bool ignore_none
if (drv && (dev->driver != drv))
return 0;
- if (ignore_nonessential) {
- /* With older QEMU, for PVonHVM guests the guest config files
- * could contain: vfb = [ 'vnc=1, vnclisten=0.0.0.0']
- * which is nonsensical as there is no PV FB (there can be
- * a PVKB) running as HVM guest. */
+ xendrv = to_xenbus_driver(dev->driver);
- if ((strncmp(xendev->nodename, "device/vkbd", 11) == 0))
- return 0;
+ if (ignore_nonessential && xendrv->not_essential)
+ return 0;
- if ((strncmp(xendev->nodename, "device/vfb", 10) == 0))
- return 0;
- }
- xendrv = to_xenbus_driver(dev->driver);
return (xendev->state < XenbusStateConnected ||
(xendev->state == XenbusStateConnected &&
xendrv->is_ready && !xendrv->is_ready(xendev)));
diff --git a/fs/afs/file.c b/fs/afs/file.c
index cb6ad61eec3b..afe4b803f84b 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -514,8 +514,9 @@ static void afs_add_open_mmap(struct afs_vnode *vnode)
if (atomic_inc_return(&vnode->cb_nr_mmap) == 1) {
down_write(&vnode->volume->cell->fs_open_mmaps_lock);
- list_add_tail(&vnode->cb_mmap_link,
- &vnode->volume->cell->fs_open_mmaps);
+ if (list_empty(&vnode->cb_mmap_link))
+ list_add_tail(&vnode->cb_mmap_link,
+ &vnode->volume->cell->fs_open_mmaps);
up_write(&vnode->volume->cell->fs_open_mmaps_lock);
}
diff --git a/fs/afs/super.c b/fs/afs/super.c
index d110def8aa8e..34c68724c98b 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -667,6 +667,7 @@ static void afs_i_init_once(void *_vnode)
INIT_LIST_HEAD(&vnode->pending_locks);
INIT_LIST_HEAD(&vnode->granted_locks);
INIT_DELAYED_WORK(&vnode->lock_work, afs_lock_work);
+ INIT_LIST_HEAD(&vnode->cb_mmap_link);
seqlock_init(&vnode->cb_lock);
}
diff --git a/fs/aio.c b/fs/aio.c
index 9c81cf611d65..f6f1cbffef9e 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -181,8 +181,9 @@ struct poll_iocb {
struct file *file;
struct wait_queue_head *head;
__poll_t events;
- bool done;
bool cancelled;
+ bool work_scheduled;
+ bool work_need_resched;
struct wait_queue_entry wait;
struct work_struct work;
};
@@ -1619,6 +1620,51 @@ static void aio_poll_put_work(struct work_struct *work)
iocb_put(iocb);
}
+/*
+ * Safely lock the waitqueue which the request is on, synchronizing with the
+ * case where the ->poll() provider decides to free its waitqueue early.
+ *
+ * Returns true on success, meaning that req->head->lock was locked, req->wait
+ * is on req->head, and an RCU read lock was taken. Returns false if the
+ * request was already removed from its waitqueue (which might no longer exist).
+ */
+static bool poll_iocb_lock_wq(struct poll_iocb *req)
+{
+ wait_queue_head_t *head;
+
+ /*
+ * While we hold the waitqueue lock and the waitqueue is nonempty,
+ * wake_up_pollfree() will wait for us. However, taking the waitqueue
+ * lock in the first place can race with the waitqueue being freed.
+ *
+ * We solve this as eventpoll does: by taking advantage of the fact that
+ * all users of wake_up_pollfree() will RCU-delay the actual free. If
+ * we enter rcu_read_lock() and see that the pointer to the queue is
+ * non-NULL, we can then lock it without the memory being freed out from
+ * under us, then check whether the request is still on the queue.
+ *
+ * Keep holding rcu_read_lock() as long as we hold the queue lock, in
+ * case the caller deletes the entry from the queue, leaving it empty.
+ * In that case, only RCU prevents the queue memory from being freed.
+ */
+ rcu_read_lock();
+ head = smp_load_acquire(&req->head);
+ if (head) {
+ spin_lock(&head->lock);
+ if (!list_empty(&req->wait.entry))
+ return true;
+ spin_unlock(&head->lock);
+ }
+ rcu_read_unlock();
+ return false;
+}
+
+static void poll_iocb_unlock_wq(struct poll_iocb *req)
+{
+ spin_unlock(&req->head->lock);
+ rcu_read_unlock();
+}
+
static void aio_poll_complete_work(struct work_struct *work)
{
struct poll_iocb *req = container_of(work, struct poll_iocb, work);
@@ -1638,14 +1684,27 @@ static void aio_poll_complete_work(struct work_struct *work)
* avoid further branches in the fast path.
*/
spin_lock_irq(&ctx->ctx_lock);
- if (!mask && !READ_ONCE(req->cancelled)) {
- add_wait_queue(req->head, &req->wait);
- spin_unlock_irq(&ctx->ctx_lock);
- return;
- }
+ if (poll_iocb_lock_wq(req)) {
+ if (!mask && !READ_ONCE(req->cancelled)) {
+ /*
+ * The request isn't actually ready to be completed yet.
+ * Reschedule completion if another wakeup came in.
+ */
+ if (req->work_need_resched) {
+ schedule_work(&req->work);
+ req->work_need_resched = false;
+ } else {
+ req->work_scheduled = false;
+ }
+ poll_iocb_unlock_wq(req);
+ spin_unlock_irq(&ctx->ctx_lock);
+ return;
+ }
+ list_del_init(&req->wait.entry);
+ poll_iocb_unlock_wq(req);
+ } /* else, POLLFREE has freed the waitqueue, so we must complete */
list_del_init(&iocb->ki_list);
iocb->ki_res.res = mangle_poll(mask);
- req->done = true;
spin_unlock_irq(&ctx->ctx_lock);
iocb_put(iocb);
@@ -1657,13 +1716,14 @@ static int aio_poll_cancel(struct kiocb *iocb)
struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
struct poll_iocb *req = &aiocb->poll;
- spin_lock(&req->head->lock);
- WRITE_ONCE(req->cancelled, true);
- if (!list_empty(&req->wait.entry)) {
- list_del_init(&req->wait.entry);
- schedule_work(&aiocb->poll.work);
- }
- spin_unlock(&req->head->lock);
+ if (poll_iocb_lock_wq(req)) {
+ WRITE_ONCE(req->cancelled, true);
+ if (!req->work_scheduled) {
+ schedule_work(&aiocb->poll.work);
+ req->work_scheduled = true;
+ }
+ poll_iocb_unlock_wq(req);
+ } /* else, the request was force-cancelled by POLLFREE already */
return 0;
}
@@ -1680,21 +1740,27 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
if (mask && !(mask & req->events))
return 0;
- list_del_init(&req->wait.entry);
-
- if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
+ /*
+ * Complete the request inline if possible. This requires that three
+ * conditions be met:
+ * 1. An event mask must have been passed. If a plain wakeup was done
+ * instead, then mask == 0 and we have to call vfs_poll() to get
+ * the events, so inline completion isn't possible.
+ * 2. The completion work must not have already been scheduled.
+ * 3. ctx_lock must not be busy. We have to use trylock because we
+ * already hold the waitqueue lock, so this inverts the normal
+ * locking order. Use irqsave/irqrestore because not all
+ * filesystems (e.g. fuse) call this function with IRQs disabled,
+ * yet IRQs have to be disabled before ctx_lock is obtained.
+ */
+ if (mask && !req->work_scheduled &&
+ spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
struct kioctx *ctx = iocb->ki_ctx;
- /*
- * Try to complete the iocb inline if we can. Use
- * irqsave/irqrestore because not all filesystems (e.g. fuse)
- * call this function with IRQs disabled and because IRQs
- * have to be disabled before ctx_lock is obtained.
- */
+ list_del_init(&req->wait.entry);
list_del(&iocb->ki_list);
iocb->ki_res.res = mangle_poll(mask);
- req->done = true;
- if (iocb->ki_eventfd && eventfd_signal_allowed()) {
+ if (iocb->ki_eventfd && !eventfd_signal_allowed()) {
iocb = NULL;
INIT_WORK(&req->work, aio_poll_put_work);
schedule_work(&req->work);
@@ -1703,7 +1769,43 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
if (iocb)
iocb_put(iocb);
} else {
- schedule_work(&req->work);
+ /*
+ * Schedule the completion work if needed. If it was already
+ * scheduled, record that another wakeup came in.
+ *
+ * Don't remove the request from the waitqueue here, as it might
+ * not actually be complete yet (we won't know until vfs_poll()
+ * is called), and we must not miss any wakeups. POLLFREE is an
+ * exception to this; see below.
+ */
+ if (req->work_scheduled) {
+ req->work_need_resched = true;
+ } else {
+ schedule_work(&req->work);
+ req->work_scheduled = true;
+ }
+
+ /*
+ * If the waitqueue is being freed early but we can't complete
+ * the request inline, we have to tear down the request as best
+ * we can. That means immediately removing the request from its
+ * waitqueue and preventing all further accesses to the
+ * waitqueue via the request. We also need to schedule the
+ * completion work (done above). Also mark the request as
+ * cancelled, to potentially skip an unneeded call to ->poll().
+ */
+ if (mask & POLLFREE) {
+ WRITE_ONCE(req->cancelled, true);
+ list_del_init(&req->wait.entry);
+
+ /*
+ * Careful: this *must* be the last step, since as soon
+ * as req->head is NULL'ed out, the request can be
+ * completed and freed, since aio_poll_complete_work()
+ * will no longer need to take the waitqueue lock.
+ */
+ smp_store_release(&req->head, NULL);
+ }
}
return 1;
}
@@ -1711,6 +1813,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
struct aio_poll_table {
struct poll_table_struct pt;
struct aio_kiocb *iocb;
+ bool queued;
int error;
};
@@ -1721,11 +1824,12 @@ aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);
/* multiple wait queues per file are not supported */
- if (unlikely(pt->iocb->poll.head)) {
+ if (unlikely(pt->queued)) {
pt->error = -EINVAL;
return;
}
+ pt->queued = true;
pt->error = 0;
pt->iocb->poll.head = head;
add_wait_queue(head, &pt->iocb->poll.wait);
@@ -1750,12 +1854,14 @@ static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
req->head = NULL;
- req->done = false;
req->cancelled = false;
+ req->work_scheduled = false;
+ req->work_need_resched = false;
apt.pt._qproc = aio_poll_queue_proc;
apt.pt._key = req->events;
apt.iocb = aiocb;
+ apt.queued = false;
apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
/* initialized the list so that we can do list_empty checks */
@@ -1764,23 +1870,35 @@ static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
mask = vfs_poll(req->file, &apt.pt) & req->events;
spin_lock_irq(&ctx->ctx_lock);
- if (likely(req->head)) {
- spin_lock(&req->head->lock);
- if (unlikely(list_empty(&req->wait.entry))) {
- if (apt.error)
+ if (likely(apt.queued)) {
+ bool on_queue = poll_iocb_lock_wq(req);
+
+ if (!on_queue || req->work_scheduled) {
+ /*
+ * aio_poll_wake() already either scheduled the async
+ * completion work, or completed the request inline.
+ */
+ if (apt.error) /* unsupported case: multiple queues */
cancel = true;
apt.error = 0;
mask = 0;
}
if (mask || apt.error) {
+ /* Steal to complete synchronously. */
list_del_init(&req->wait.entry);
} else if (cancel) {
+ /* Cancel if possible (may be too late though). */
WRITE_ONCE(req->cancelled, true);
- } else if (!req->done) { /* actually waiting for an event */
+ } else if (on_queue) {
+ /*
+ * Actually waiting for an event, so add the request to
+ * active_reqs so that it can be cancelled if needed.
+ */
list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
aiocb->ki_cancel = aio_poll_cancel;
}
- spin_unlock(&req->head->lock);
+ if (on_queue)
+ poll_iocb_unlock_wq(req);
}
if (mask) { /* no async, we'd stolen it */
aiocb->ki_res.res = mangle_poll(mask);
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index c3983bdaf4b8..f704339c6b86 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -463,8 +463,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
BUG_ON(ret < 0);
rcu_assign_pointer(root->node, cow);
- btrfs_free_tree_block(trans, root, buf, parent_start,
- last_ref);
+ btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
+ parent_start, last_ref);
free_extent_buffer(buf);
add_root_to_dirty_list(root);
} else {
@@ -485,8 +485,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
return ret;
}
}
- btrfs_free_tree_block(trans, root, buf, parent_start,
- last_ref);
+ btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
+ parent_start, last_ref);
}
if (unlock_orig)
btrfs_tree_unlock(buf);
@@ -927,7 +927,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
free_extent_buffer(mid);
root_sub_used(root, mid->len);
- btrfs_free_tree_block(trans, root, mid, 0, 1);
+ btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
/* once for the root ptr */
free_extent_buffer_stale(mid);
return 0;
@@ -986,7 +986,8 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
btrfs_tree_unlock(right);
del_ptr(root, path, level + 1, pslot + 1);
root_sub_used(root, right->len);
- btrfs_free_tree_block(trans, root, right, 0, 1);
+ btrfs_free_tree_block(trans, btrfs_root_id(root), right,
+ 0, 1);
free_extent_buffer_stale(right);
right = NULL;
} else {
@@ -1031,7 +1032,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
btrfs_tree_unlock(mid);
del_ptr(root, path, level + 1, pslot);
root_sub_used(root, mid->len);
- btrfs_free_tree_block(trans, root, mid, 0, 1);
+ btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
free_extent_buffer_stale(mid);
mid = NULL;
} else {
@@ -4032,7 +4033,7 @@ static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
root_sub_used(root, leaf->len);
atomic_inc(&leaf->refs);
- btrfs_free_tree_block(trans, root, leaf, 0, 1);
+ btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1);
free_extent_buffer_stale(leaf);
}
/*
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 7553e9dc5f93..5fe5eccb3c87 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2257,6 +2257,11 @@ static inline bool btrfs_root_dead(const struct btrfs_root *root)
return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0;
}
+static inline u64 btrfs_root_id(const struct btrfs_root *root)
+{
+ return root->root_key.objectid;
+}
+
/* struct btrfs_root_backup */
BTRFS_SETGET_STACK_FUNCS(backup_tree_root, struct btrfs_root_backup,
tree_root, 64);
@@ -2719,7 +2724,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
u64 empty_size,
enum btrfs_lock_nesting nest);
void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ u64 root_id,
struct extent_buffer *buf,
u64 parent, int last_ref);
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
index 2059d1504149..40c4d6ba3fb9 100644
--- a/fs/btrfs/delalloc-space.c
+++ b/fs/btrfs/delalloc-space.c
@@ -143,10 +143,13 @@ int btrfs_check_data_free_space(struct btrfs_inode *inode,
/* Use new btrfs_qgroup_reserve_data to reserve precious data space. */
ret = btrfs_qgroup_reserve_data(inode, reserved, start, len);
- if (ret < 0)
+ if (ret < 0) {
btrfs_free_reserved_data_space_noquota(fs_info, len);
- else
+ extent_changeset_free(*reserved);
+ *reserved = NULL;
+ } else {
ret = 0;
+ }
return ret;
}
@@ -452,8 +455,11 @@ int btrfs_delalloc_reserve_space(struct btrfs_inode *inode,
if (ret < 0)
return ret;
ret = btrfs_delalloc_reserve_metadata(inode, len);
- if (ret < 0)
+ if (ret < 0) {
btrfs_free_reserved_data_space(inode, *reserved, start, len);
+ extent_changeset_free(*reserved);
+ *reserved = NULL;
+ }
return ret;
}
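
Both delalloc-space hunks fix the same ownership rule: when the function fails after allocating the changeset behind *reserved, it now frees it and NULLs the caller's pointer so the caller's own cleanup cannot double-free it. A generic sketch of that error-path convention (the names are invented):

    #include <stdlib.h>

    struct changeset { int entries; };

    /* On success *out owns a fresh changeset; on any failure *out stays NULL. */
    static int reserve_space(struct changeset **out, int simulate_error)
    {
            struct changeset *cs = calloc(1, sizeof(*cs));

            if (!cs)
                    return -1;

            if (simulate_error) {
                    free(cs);
                    *out = NULL;            /* never hand back a freed pointer */
                    return -1;
            }

            *out = cs;
            return 0;
    }
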
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 514ead6e93b6..b3f2e2232326 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1732,6 +1732,14 @@ again:
}
return root;
fail:
+ /*
+ * If our caller provided us an anonymous device, then it is the caller's
+ * responsibility to free it in case we fail. So we have to set our
+ * root's anon_dev to 0 to avoid a double free, once by btrfs_put_root()
+ * and once again by our caller.
+ */
+ if (anon_dev)
+ root->anon_dev = 0;
btrfs_put_root(root);
return ERR_PTR(ret);
}
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 3fd736a02c1e..25ef6e3fd306 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3275,20 +3275,20 @@ out_delayed_unlock:
}
void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ u64 root_id,
struct extent_buffer *buf,
u64 parent, int last_ref)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_ref generic_ref = { 0 };
int ret;
btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF,
buf->start, buf->len, parent);
btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf),
- root->root_key.objectid, 0, false);
+ root_id, 0, false);
- if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
+ if (root_id != BTRFS_TREE_LOG_OBJECTID) {
btrfs_ref_tree_mod(fs_info, &generic_ref);
ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL);
BUG_ON(ret); /* -ENOMEM */
@@ -3298,7 +3298,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_block_group *cache;
bool must_pin = false;
- if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
+ if (root_id != BTRFS_TREE_LOG_OBJECTID) {
ret = check_ref_cleanup(trans, buf->start);
if (!ret) {
btrfs_redirty_list_add(trans->transaction, buf);
@@ -5472,7 +5472,8 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
goto owner_mismatch;
}
- btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
+ btrfs_free_tree_block(trans, btrfs_root_id(root), eb, parent,
+ wc->refs[level] == 1);
out:
wc->refs[level] = 0;
wc->flags[level] = 0;
@@ -6051,6 +6052,9 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
int dev_ret = 0;
int ret = 0;
+ if (range->start == U64_MAX)
+ return -EINVAL;
+
/*
* Check range overflow if range->len is set.
* The default range->len is U64_MAX.
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 4e03a6d3aa32..9234d96a7fd5 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4314,6 +4314,20 @@ static void set_btree_ioerr(struct page *page, struct extent_buffer *eb)
return;
/*
+ * A read may stumble upon this buffer later, make sure that it gets an
+ * error and knows there was an error.
+ */
+ clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
+
+ /*
+ * We need to set the mapping with the io error as well because a write
+ * error will flip the file system readonly, and then syncfs() will
+ * return a 0 because we are readonly if we don't modify the err seq for
+ * the superblock.
+ */
+ mapping_set_error(page->mapping, -EIO);
+
+ /*
* If we error out, we should add back the dirty_metadata_bytes
* to make it consistent.
*/
@@ -6597,6 +6611,14 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
return 0;
+ /*
+ * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
+ * operation, which could potentially still be in flight. In this case
+ * we simply want to return an error.
+ */
+ if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
+ return -EIO;
+
if (eb->fs_info->sectorsize < PAGE_SIZE)
return read_extent_buffer_subpage(eb, wait, mirror_num);
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
index a33bca94d133..3abec44c6255 100644
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -1256,8 +1256,8 @@ int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
btrfs_tree_lock(free_space_root->node);
btrfs_clean_tree_block(free_space_root->node);
btrfs_tree_unlock(free_space_root->node);
- btrfs_free_tree_block(trans, free_space_root, free_space_root->node,
- 0, 1);
+ btrfs_free_tree_block(trans, btrfs_root_id(free_space_root),
+ free_space_root->node, 0, 1);
btrfs_put_root(free_space_root);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 92138ac2a4e2..edfecfe62b4b 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -617,11 +617,13 @@ static noinline int create_subvol(struct user_namespace *mnt_userns,
* Since we don't abort the transaction in this case, free the
* tree block so that we don't leak space and leave the
* filesystem in an inconsistent state (an extent item in the
- * extent tree without backreferences). Also no need to have
- * the tree block locked since it is not in any tree at this
- * point, so no other task can find it and use it.
+ * extent tree with a backreference for a root that does not
+ * exist).
*/
- btrfs_free_tree_block(trans, root, leaf, 0, 1);
+ btrfs_tree_lock(leaf);
+ btrfs_clean_tree_block(leaf);
+ btrfs_tree_unlock(leaf);
+ btrfs_free_tree_block(trans, objectid, leaf, 0, 1);
free_extent_buffer(leaf);
goto fail;
}
@@ -3187,10 +3189,8 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
return -EPERM;
vol_args = memdup_user(arg, sizeof(*vol_args));
- if (IS_ERR(vol_args)) {
- ret = PTR_ERR(vol_args);
- goto out;
- }
+ if (IS_ERR(vol_args))
+ return PTR_ERR(vol_args);
if (vol_args->flags & ~BTRFS_DEVICE_REMOVE_ARGS_MASK) {
ret = -EOPNOTSUPP;
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 9febb8025825..0fb90cbe7669 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -290,6 +290,8 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
*total_out = cur_out;
*total_in = cur_in - start;
out:
+ if (page_in)
+ put_page(page_in);
*out_pages = DIV_ROUND_UP(cur_out, PAGE_SIZE);
return ret;
}
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index db680f5be745..6c037f1252b7 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1219,7 +1219,8 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
btrfs_tree_lock(quota_root->node);
btrfs_clean_tree_block(quota_root->node);
btrfs_tree_unlock(quota_root->node);
- btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);
+ btrfs_free_tree_block(trans, btrfs_root_id(quota_root),
+ quota_root->node, 0, 1);
btrfs_put_root(quota_root);
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 12ceb14a1141..d20166336557 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -334,7 +334,8 @@ int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
key.offset = ref_id;
again:
ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
- BUG_ON(ret < 0);
+ if (ret < 0)
+ goto out;
if (ret == 0) {
leaf = path->nodes[0];
ref = btrfs_item_ptr(leaf, path->slots[0],
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 8ab33caf016f..6993dcdba6f1 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -1181,6 +1181,7 @@ again:
parent_objectid, victim_name,
victim_name_len);
if (ret < 0) {
+ kfree(victim_name);
return ret;
} else if (!ret) {
ret = -ENOENT;
@@ -2908,6 +2909,8 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
path->nodes[*level]->len);
if (ret)
return ret;
+ btrfs_redirty_list_add(trans->transaction,
+ next);
} else {
if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
clear_extent_buffer_dirty(next);
@@ -2988,6 +2991,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
next->start, next->len);
if (ret)
goto out;
+ btrfs_redirty_list_add(trans->transaction, next);
} else {
if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
clear_extent_buffer_dirty(next);
@@ -3438,8 +3442,6 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
extent_io_tree_release(&log->log_csum_range);
- if (trans && log->node)
- btrfs_redirty_list_add(trans->transaction, log->node);
btrfs_put_root(log);
}
@@ -3976,6 +3978,7 @@ search:
goto done;
}
if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
+ ctx->last_dir_item_offset = min_key.offset;
ret = overwrite_item(trans, log, dst_path,
path->nodes[0], path->slots[0],
&min_key);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 0997e3cd74e9..fd0ced829edb 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1370,8 +1370,10 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
bytenr_orig = btrfs_sb_offset(0);
ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
- if (ret)
- return ERR_PTR(ret);
+ if (ret) {
+ device = ERR_PTR(ret);
+ goto error_bdev_put;
+ }
disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
if (IS_ERR(disk_super)) {
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 67d932d70798..678a29469511 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1860,6 +1860,7 @@ int btrfs_zone_finish(struct btrfs_block_group *block_group)
block_group->alloc_offset = block_group->zone_capacity;
block_group->free_space_ctl->free_space = 0;
btrfs_clear_treelog_bg(block_group);
+ btrfs_clear_data_reloc_bg(block_group);
spin_unlock(&block_group->lock);
ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
@@ -1942,6 +1943,7 @@ void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 len
ASSERT(block_group->alloc_offset == block_group->zone_capacity);
ASSERT(block_group->free_space_ctl->free_space == 0);
btrfs_clear_treelog_bg(block_group);
+ btrfs_clear_data_reloc_bg(block_group);
spin_unlock(&block_group->lock);
map = block_group->physical_map;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index b9460b6fb76f..c447fa2e2d1f 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -4350,7 +4350,7 @@ void ceph_get_fmode(struct ceph_inode_info *ci, int fmode, int count)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->vfs_inode.i_sb);
int bits = (fmode << 1) | 1;
- bool is_opened = false;
+ bool already_opened = false;
int i;
if (count == 1)
@@ -4358,19 +4358,19 @@ void ceph_get_fmode(struct ceph_inode_info *ci, int fmode, int count)
spin_lock(&ci->i_ceph_lock);
for (i = 0; i < CEPH_FILE_MODE_BITS; i++) {
- if (bits & (1 << i))
- ci->i_nr_by_mode[i] += count;
-
/*
- * If any of the mode ref is larger than 1,
+ * If any of the mode refs is larger than 0,
* that means it has been already opened by
* others. Just skip checking the PIN ref.
*/
- if (i && ci->i_nr_by_mode[i] > 1)
- is_opened = true;
+ if (i && ci->i_nr_by_mode[i])
+ already_opened = true;
+
+ if (bits & (1 << i))
+ ci->i_nr_by_mode[i] += count;
}
- if (!is_opened)
+ if (!already_opened)
percpu_counter_inc(&mdsc->metric.opened_inodes);
spin_unlock(&ci->i_ceph_lock);
}
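
The reordering above makes ceph_get_fmode() decide whether the inode was already open before it adds the caller's own mode references; with the old order the first opener counted itself and the opened-inodes metric was never bumped. A minimal user-space sketch of that ordering, using plain counters as stand-ins for Ceph's per-inode state:

#include <stdbool.h>

#define MODE_BITS 4			/* stand-in for CEPH_FILE_MODE_BITS */

static int nr_by_mode[MODE_BITS];	/* per-mode reference counts */
static long opened_inodes;		/* "newly opened inode" metric */

static void get_fmode_sketch(unsigned int bits, int count)
{
	bool already_opened = false;
	int i;

	for (i = 0; i < MODE_BITS; i++) {
		/* check first: a pre-existing non-PIN ref means it was open */
		if (i && nr_by_mode[i])
			already_opened = true;

		/* only then add our own references */
		if (bits & (1 << i))
			nr_by_mode[i] += count;
	}
	if (!already_opened)
		opened_inodes++;
}
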
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 02a0a0fd9ccd..c138e8126286 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -605,13 +605,25 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
in.cap.realm = cpu_to_le64(ci->i_snap_realm->ino);
in.cap.flags = CEPH_CAP_FLAG_AUTH;
in.ctime = in.mtime = in.atime = iinfo.btime;
- in.mode = cpu_to_le32((u32)mode);
in.truncate_seq = cpu_to_le32(1);
in.truncate_size = cpu_to_le64(-1ULL);
in.xattr_version = cpu_to_le64(1);
in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid()));
- in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_mode & S_ISGID ?
- dir->i_gid : current_fsgid()));
+ if (dir->i_mode & S_ISGID) {
+ in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_gid));
+
+ /* Directories always inherit the setgid bit. */
+ if (S_ISDIR(mode))
+ mode |= S_ISGID;
+ else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
+ !in_group_p(dir->i_gid) &&
+ !capable_wrt_inode_uidgid(&init_user_ns, dir, CAP_FSETID))
+ mode &= ~S_ISGID;
+ } else {
+ in.gid = cpu_to_le32(from_kgid(&init_user_ns, current_fsgid()));
+ }
+ in.mode = cpu_to_le32((u32)mode);
+
in.nlink = cpu_to_le32(1);
in.max_size = cpu_to_le64(lo->stripe_unit);
@@ -847,7 +859,7 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
ssize_t ret;
u64 off = iocb->ki_pos;
u64 len = iov_iter_count(to);
- u64 i_size;
+ u64 i_size = i_size_read(inode);
dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
(file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
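
The ceph_finish_async_create() hunk above applies the usual VFS setgid-directory inheritance rules when building the new inode locally. A user-space sketch of those rules, assuming a bare mode/gid pair instead of the kernel's inode and namespace structures:

#include <stdbool.h>
#include <sys/stat.h>		/* S_ISGID, S_IXGRP, S_ISDIR */
#include <sys/types.h>

struct new_inode_ids {
	gid_t gid;
	mode_t mode;
};

/*
 * dir_mode/dir_gid describe the parent directory; caller_gid_ok stands in for
 * "the creator is in dir_gid or may keep setgid anyway"
 * (in_group_p() || capable_wrt_inode_uidgid(..., CAP_FSETID) in the kernel).
 */
static struct new_inode_ids inherit_ids(mode_t dir_mode, gid_t dir_gid,
					gid_t caller_gid, bool caller_gid_ok,
					mode_t mode)
{
	struct new_inode_ids out;

	if (dir_mode & S_ISGID) {
		out.gid = dir_gid;		/* group comes from the directory */
		if (S_ISDIR(mode))
			mode |= S_ISGID;	/* subdirectories stay setgid */
		else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
			 !caller_gid_ok)
			mode &= ~S_ISGID;	/* strip setgid from group-executable files */
	} else {
		out.gid = caller_gid;		/* otherwise use the caller's fsgid */
	}
	out.mode = mode;
	return out;
}
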
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 250aad330a10..c30eefc0ac19 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -3683,7 +3683,7 @@ static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
struct ceph_pagelist *pagelist = recon_state->pagelist;
struct dentry *dentry;
char *path;
- int pathlen, err;
+ int pathlen = 0, err;
u64 pathbase;
u64 snap_follows;
@@ -3703,7 +3703,6 @@ static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
}
} else {
path = NULL;
- pathlen = 0;
pathbase = 0;
}
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index b50da1901ebd..9e5d9e192ef0 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -152,5 +152,5 @@ extern struct dentry *cifs_smb3_do_mount(struct file_system_type *fs_type,
extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
-#define CIFS_VERSION "2.33"
+#define CIFS_VERSION "2.34"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 67e4c5548e9d..1060164b984a 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1271,10 +1271,8 @@ static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *
{
struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
- if (ctx->nosharesock) {
- server->nosharesock = true;
+ if (ctx->nosharesock)
return 0;
- }
/* this server does not share socket */
if (server->nosharesock)
@@ -1438,6 +1436,9 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
goto out_err;
}
+ if (ctx->nosharesock)
+ tcp_ses->nosharesock = true;
+
tcp_ses->ops = ctx->ops;
tcp_ses->vals = ctx->vals;
cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
@@ -1561,6 +1562,10 @@ smbd_connected:
/* fscache server cookies are based on primary channel only */
if (!CIFS_SERVER_IS_CHAN(tcp_ses))
cifs_fscache_get_client_cookie(tcp_ses);
+#ifdef CONFIG_CIFS_FSCACHE
+ else
+ tcp_ses->fscache = tcp_ses->primary_server->fscache;
+#endif /* CONFIG_CIFS_FSCACHE */
/* queue echo request delayed work */
queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval);
@@ -3045,12 +3050,6 @@ static int mount_get_conns(struct mount_ctx *mnt_ctx)
cifs_dbg(VFS, "read only mount of RW share\n");
/* no need to log a RW mount of a typical RW share */
}
- /*
- * The cookie is initialized from volume info returned above.
- * Inside cifs_fscache_get_super_cookie it checks
- * that we do not get super cookie twice.
- */
- cifs_fscache_get_super_cookie(tcon);
}
/*
@@ -3065,6 +3064,13 @@ static int mount_get_conns(struct mount_ctx *mnt_ctx)
(cifs_sb->ctx->rsize > server->ops->negotiate_rsize(tcon, ctx)))
cifs_sb->ctx->rsize = server->ops->negotiate_rsize(tcon, ctx);
+ /*
+ * The cookie is initialized from volume info returned above.
+ * Inside cifs_fscache_get_super_cookie it checks
+ * that we do not get super cookie twice.
+ */
+ cifs_fscache_get_super_cookie(tcon);
+
out:
mnt_ctx->server = server;
mnt_ctx->ses = ses;
@@ -3425,6 +3431,7 @@ static int connect_dfs_root(struct mount_ctx *mnt_ctx, struct dfs_cache_tgt_list
*/
mount_put_conns(mnt_ctx);
mount_get_dfs_conns(mnt_ctx);
+ set_root_ses(mnt_ctx);
full_path = build_unc_path_to_root(ctx, cifs_sb, true);
if (IS_ERR(full_path))
diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
index 6a179ae753c1..e3ed25dc6f3f 100644
--- a/fs/cifs/fs_context.c
+++ b/fs/cifs/fs_context.c
@@ -435,6 +435,42 @@ out:
}
/*
+ * Remove duplicate path delimiters. Windows is supposed to do that
+ * but there are some bugs that prevent rename from working if there are
+ * multiple delimiters.
+ *
+ * Returns a sanitized duplicate of @path. The caller is responsible for
+ * cleaning up the original.
+ */
+#define IS_DELIM(c) ((c) == '/' || (c) == '\\')
+static char *sanitize_path(char *path)
+{
+ char *cursor1 = path, *cursor2 = path;
+
+ /* skip all prepended delimiters */
+ while (IS_DELIM(*cursor1))
+ cursor1++;
+
+ /* copy the first letter */
+ *cursor2 = *cursor1;
+
+ /* copy the remainder... */
+ while (*(cursor1++)) {
+ /* ... skipping all duplicated delimiters */
+ if (IS_DELIM(*cursor1) && IS_DELIM(*cursor2))
+ continue;
+ *(++cursor2) = *cursor1;
+ }
+
+ /* if the last character is a delimiter, skip it */
+ if (IS_DELIM(*(cursor2 - 1)))
+ cursor2--;
+
+ *(cursor2) = '\0';
+ return kstrdup(path, GFP_KERNEL);
+}
+
+/*
* Parse a devname into substrings and populate the ctx->UNC and ctx->prepath
* fields with the result. Returns 0 on success and an error otherwise
* (e.g. ENOMEM or EINVAL)
@@ -493,7 +529,7 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
if (!*pos)
return 0;
- ctx->prepath = kstrdup(pos, GFP_KERNEL);
+ ctx->prepath = sanitize_path(pos);
if (!ctx->prepath)
return -ENOMEM;
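
sanitize_path() above collapses runs of '/' and '\' in the share prepath before it is stored in the mount context. A stand-alone sketch of the same collapsing, written against a plain heap string rather than the kernel's fs_context (the function name is made up for the example):

#include <stdlib.h>
#include <string.h>

#define IS_DELIM(c) ((c) == '/' || (c) == '\\')

/* Returns a newly allocated copy of @path with leading, trailing and
 * duplicated delimiters removed; the caller owns both strings. */
static char *sanitize_path_sketch(const char *path)
{
	size_t len = strlen(path);
	char *out = malloc(len + 1);
	size_t o = 0;

	if (!out)
		return NULL;

	for (size_t i = 0; i < len; i++) {
		if (IS_DELIM(path[i]) && (o == 0 || IS_DELIM(out[o - 1])))
			continue;	/* drop leading and repeated delimiters */
		out[o++] = path[i];
	}
	if (o && IS_DELIM(out[o - 1]))
		o--;			/* drop a single trailing delimiter */
	out[o] = '\0';
	return out;
}

/* e.g. sanitize_path_sketch("//server//share///dir/") -> "server/share/dir" */
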
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index 7e409a38a2d7..003c5f1f4dfb 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -16,14 +16,7 @@
* Key layout of CIFS server cache index object
*/
struct cifs_server_key {
- struct {
- uint16_t family; /* address family */
- __be16 port; /* IP port */
- } hdr;
- union {
- struct in_addr ipv4_addr;
- struct in6_addr ipv6_addr;
- };
+ __u64 conn_id;
} __packed;
/*
@@ -31,42 +24,23 @@ struct cifs_server_key {
*/
void cifs_fscache_get_client_cookie(struct TCP_Server_Info *server)
{
- const struct sockaddr *sa = (struct sockaddr *) &server->dstaddr;
- const struct sockaddr_in *addr = (struct sockaddr_in *) sa;
- const struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *) sa;
struct cifs_server_key key;
- uint16_t key_len = sizeof(key.hdr);
-
- memset(&key, 0, sizeof(key));
/*
- * Should not be a problem as sin_family/sin6_family overlays
- * sa_family field
+ * Check if the cookie was already initialized so we don't reinitialize it.
+ * In the future, as we integrate with newer fscache features,
+ * we may want to instead add a check if cookie has changed
*/
- key.hdr.family = sa->sa_family;
- switch (sa->sa_family) {
- case AF_INET:
- key.hdr.port = addr->sin_port;
- key.ipv4_addr = addr->sin_addr;
- key_len += sizeof(key.ipv4_addr);
- break;
-
- case AF_INET6:
- key.hdr.port = addr6->sin6_port;
- key.ipv6_addr = addr6->sin6_addr;
- key_len += sizeof(key.ipv6_addr);
- break;
-
- default:
- cifs_dbg(VFS, "Unknown network family '%d'\n", sa->sa_family);
- server->fscache = NULL;
+ if (server->fscache)
return;
- }
+
+ memset(&key, 0, sizeof(key));
+ key.conn_id = server->conn_id;
server->fscache =
fscache_acquire_cookie(cifs_fscache_netfs.primary_index,
&cifs_fscache_server_index_def,
- &key, key_len,
+ &key, sizeof(key),
NULL, 0,
server, 0, true);
cifs_dbg(FYI, "%s: (0x%p/0x%p)\n",
@@ -92,7 +66,7 @@ void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
* In the future, as we integrate with newer fscache features,
* we may want to instead add a check if cookie has changed
*/
- if (tcon->fscache == NULL)
+ if (tcon->fscache)
return;
sharename = extract_sharename(tcon->treeName);
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 82848412ad85..279622e4eb1c 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1356,11 +1356,6 @@ iget_no_retry:
goto out;
}
-#ifdef CONFIG_CIFS_FSCACHE
- /* populate tcon->resource_id */
- tcon->resource_id = CIFS_I(inode)->uniqueid;
-#endif
-
if (rc && tcon->pipe) {
cifs_dbg(FYI, "ipc connection - fake read inode\n");
spin_lock(&inode->i_lock);
@@ -1375,7 +1370,6 @@ iget_no_retry:
iget_failed(inode);
inode = ERR_PTR(rc);
}
-
out:
kfree(path);
free_xid(xid);
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 8ad2993785af..035dc3e245dc 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -222,6 +222,7 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
/* Auth */
ctx.domainauto = ses->domainAuto;
ctx.domainname = ses->domainName;
+ ctx.server_hostname = ses->server->hostname;
ctx.username = ses->user_name;
ctx.password = ses->password;
ctx.sectype = ses->sectype;
@@ -589,8 +590,8 @@ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
{
unsigned int tioffset; /* challenge message target info area */
unsigned int tilen; /* challenge message target info area length */
-
CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr;
+ __u32 server_flags;
if (blob_len < sizeof(CHALLENGE_MESSAGE)) {
cifs_dbg(VFS, "challenge blob len %d too small\n", blob_len);
@@ -608,12 +609,37 @@ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
return -EINVAL;
}
+ server_flags = le32_to_cpu(pblob->NegotiateFlags);
+ cifs_dbg(FYI, "%s: negotiate=0x%08x challenge=0x%08x\n", __func__,
+ ses->ntlmssp->client_flags, server_flags);
+
+ if ((ses->ntlmssp->client_flags & (NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN)) &&
+ (!(server_flags & NTLMSSP_NEGOTIATE_56) && !(server_flags & NTLMSSP_NEGOTIATE_128))) {
+ cifs_dbg(VFS, "%s: requested signing/encryption but server did not return either 56-bit or 128-bit session key size\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (!(server_flags & NTLMSSP_NEGOTIATE_NTLM) && !(server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC)) {
+ cifs_dbg(VFS, "%s: server does not seem to support either NTLMv1 or NTLMv2\n", __func__);
+ return -EINVAL;
+ }
+ if (ses->server->sign && !(server_flags & NTLMSSP_NEGOTIATE_SIGN)) {
+ cifs_dbg(VFS, "%s: forced packet signing but server does not seem to support it\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+ if ((ses->ntlmssp->client_flags & NTLMSSP_NEGOTIATE_KEY_XCH) &&
+ !(server_flags & NTLMSSP_NEGOTIATE_KEY_XCH))
+ pr_warn_once("%s: authentication has been weakened as server does not support key exchange\n",
+ __func__);
+
+ ses->ntlmssp->server_flags = server_flags;
+
memcpy(ses->ntlmssp->cryptkey, pblob->Challenge, CIFS_CRYPTO_KEY_SIZE);
- /* BB we could decode pblob->NegotiateFlags; some may be useful */
/* In particular we can examine sign flags */
/* BB spec says that if AvId field of MsvAvTimestamp is populated then
we must set the MIC field of the AUTHENTICATE_MESSAGE */
- ses->ntlmssp->server_flags = le32_to_cpu(pblob->NegotiateFlags);
+
tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset);
tilen = le16_to_cpu(pblob->TargetInfoArray.Length);
if (tioffset > blob_len || tioffset + tilen > blob_len) {
@@ -720,13 +746,13 @@ int build_ntlmssp_negotiate_blob(unsigned char **pbuffer,
flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET |
NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC |
- NTLMSSP_NEGOTIATE_SEAL;
- if (server->sign)
- flags |= NTLMSSP_NEGOTIATE_SIGN;
+ NTLMSSP_NEGOTIATE_ALWAYS_SIGN | NTLMSSP_NEGOTIATE_SEAL |
+ NTLMSSP_NEGOTIATE_SIGN;
if (!server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
tmp = *pbuffer + sizeof(NEGOTIATE_MESSAGE);
+ ses->ntlmssp->client_flags = flags;
sec_blob->NegotiateFlags = cpu_to_le32(flags);
/* these fields should be null in negotiate phase MS-NLMP 3.1.5.1.1 */
@@ -778,15 +804,8 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
sec_blob->MessageType = NtLmAuthenticate;
- flags = NTLMSSP_NEGOTIATE_56 |
- NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO |
- NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
- NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC |
- NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED;
- if (ses->server->sign)
- flags |= NTLMSSP_NEGOTIATE_SIGN;
- if (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
- flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
+ flags = ses->ntlmssp->server_flags | NTLMSSP_REQUEST_TARGET |
+ NTLMSSP_NEGOTIATE_TARGET_INFO | NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED;
tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
sec_blob->NegotiateFlags = cpu_to_le32(flags);
@@ -833,9 +852,9 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
*pbuffer, &tmp,
nls_cp);
- if (((ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) ||
- (ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
- && !calc_seckey(ses)) {
+ if ((ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) &&
+ (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess) &&
+ !calc_seckey(ses)) {
memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE);
sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
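
The added checks in decode_ntlmssp_challenge() reject a server whose NegotiateFlags cannot honour what the client requested (no usable key size for signing/sealing, no NTLM flavour at all, or signing enforced locally but refused). A compact sketch of that flag arithmetic; the bit values follow MS-NLMP but are copied here only for illustration, real code should use the definitions in the kernel's ntlmssp.h:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative copies of a few MS-NLMP NEGOTIATE flag bits. */
#define NEG_SIGN	0x00000010u
#define NEG_SEAL	0x00000020u
#define NEG_NTLM	0x00000200u
#define NEG_EXT_SEC	0x00080000u
#define NEG_128		0x20000000u
#define NEG_KEY_XCH	0x40000000u
#define NEG_56		0x80000000u

/* 0: acceptable, 1: acceptable but weakened (no key exchange), <0: reject. */
static int check_challenge_flags(uint32_t client, uint32_t server, bool must_sign)
{
	if ((client & (NEG_SEAL | NEG_SIGN)) && !(server & (NEG_56 | NEG_128)))
		return -1;	/* wanted signing/sealing but no 56/128-bit key size */
	if (!(server & NEG_NTLM) && !(server & NEG_EXT_SEC))
		return -1;	/* neither NTLMv1 nor NTLMv2 on offer */
	if (must_sign && !(server & NEG_SIGN))
		return -2;	/* signing required locally, refused by the server */
	return ((client & NEG_KEY_XCH) && !(server & NEG_KEY_XCH)) ? 1 : 0;
}
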
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 2f5f2c4c6183..8b3670388cda 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -142,7 +142,7 @@ static int
smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
struct TCP_Server_Info *server)
{
- int rc;
+ int rc = 0;
struct nls_table *nls_codepage;
struct cifs_ses *ses;
int retries;
diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
index 84da2c280012..ec9a1d780dc1 100644
--- a/fs/erofs/utils.c
+++ b/fs/erofs/utils.c
@@ -150,7 +150,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
* however in order to avoid some race conditions, add a
* DBG_BUGON to observe this in advance.
*/
- DBG_BUGON(xa_erase(&sbi->managed_pslots, grp->index) != grp);
+ DBG_BUGON(__xa_erase(&sbi->managed_pslots, grp->index) != grp);
/* last refcount should be connected with its managed pslot. */
erofs_workgroup_unfreeze(grp, 0);
@@ -165,15 +165,19 @@ static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
unsigned int freed = 0;
unsigned long index;
+ xa_lock(&sbi->managed_pslots);
xa_for_each(&sbi->managed_pslots, index, grp) {
/* try to shrink each valid workgroup */
if (!erofs_try_to_release_workgroup(sbi, grp))
continue;
+ xa_unlock(&sbi->managed_pslots);
++freed;
if (!--nr_shrink)
- break;
+ return freed;
+ xa_lock(&sbi->managed_pslots);
}
+ xa_unlock(&sbi->managed_pslots);
return freed;
}
diff --git a/fs/file.c b/fs/file.c
index 8627dacfc424..97d212a9b814 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -841,24 +841,68 @@ void do_close_on_exec(struct files_struct *files)
spin_unlock(&files->file_lock);
}
+static inline struct file *__fget_files_rcu(struct files_struct *files,
+ unsigned int fd, fmode_t mask, unsigned int refs)
+{
+ for (;;) {
+ struct file *file;
+ struct fdtable *fdt = rcu_dereference_raw(files->fdt);
+ struct file __rcu **fdentry;
+
+ if (unlikely(fd >= fdt->max_fds))
+ return NULL;
+
+ fdentry = fdt->fd + array_index_nospec(fd, fdt->max_fds);
+ file = rcu_dereference_raw(*fdentry);
+ if (unlikely(!file))
+ return NULL;
+
+ if (unlikely(file->f_mode & mask))
+ return NULL;
+
+ /*
+ * Ok, we have a file pointer. However, because we do
+ * this all locklessly under RCU, we may be racing with
+ * that file being closed.
+ *
+ * Such a race can take two forms:
+ *
+ * (a) the file ref already went down to zero,
+ * and get_file_rcu_many() fails. Just try
+ * again:
+ */
+ if (unlikely(!get_file_rcu_many(file, refs)))
+ continue;
+
+ /*
+ * (b) the file table entry has changed under us.
+ * Note that we don't need to re-check the 'fdt->fd'
+ * pointer having changed, because it always goes
+ * hand-in-hand with 'fdt'.
+ *
+ * If so, we need to put our refs and try again.
+ */
+ if (unlikely(rcu_dereference_raw(files->fdt) != fdt) ||
+ unlikely(rcu_dereference_raw(*fdentry) != file)) {
+ fput_many(file, refs);
+ continue;
+ }
+
+ /*
+ * Ok, we have a ref to the file, and checked that it
+ * still exists.
+ */
+ return file;
+ }
+}
+
static struct file *__fget_files(struct files_struct *files, unsigned int fd,
fmode_t mask, unsigned int refs)
{
struct file *file;
rcu_read_lock();
-loop:
- file = files_lookup_fd_rcu(files, fd);
- if (file) {
- /* File object ref couldn't be taken.
- * dup2() atomicity guarantee is the reason
- * we loop to catch the new file (or NULL pointer)
- */
- if (file->f_mode & mask)
- file = NULL;
- else if (!get_file_rcu_many(file, refs))
- goto loop;
- }
+ file = __fget_files_rcu(files, fd, mask, refs);
rcu_read_unlock();
return file;
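
__fget_files_rcu() above replaces the old lookup loop with an explicit read / take-reference / re-check / retry sequence. The same pattern in a stripped-down user-space form, using C11 atomics as a stand-in for RCU plus the file refcount (real code also needs RCU or another deferred-free scheme so the object cannot be reused while its count is examined; that part is deliberately omitted here):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct obj {
	atomic_int refs;		/* 0 means the object is going away */
};

/* Mirrors get_file_rcu(): refuse to resurrect a count that already hit zero. */
static bool try_get(struct obj *o)
{
	int r = atomic_load(&o->refs);

	while (r > 0) {
		if (atomic_compare_exchange_weak(&o->refs, &r, r + 1))
			return true;
	}
	return false;
}

static struct obj *lookup_get(struct obj *_Atomic *slot)
{
	for (;;) {
		struct obj *o = atomic_load(slot);

		if (!o)
			return NULL;
		if (!try_get(o))
			continue;	/* raced with the final put; retry */
		if (atomic_load(slot) != o) {
			/* slot was replaced under us (dup2()-style); undo, retry */
			atomic_fetch_sub(&o->refs, 1);
			continue;
		}
		return o;		/* stable reference to the current object */
	}
}
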
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 79f7eda49e06..cd54a529460d 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -847,17 +847,17 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
replace_page_cache_page(oldpage, newpage);
+ get_page(newpage);
+
+ if (!(buf->flags & PIPE_BUF_FLAG_LRU))
+ lru_cache_add(newpage);
+
/*
* Release while we have extra ref on stolen page. Otherwise
* anon_pipe_buf_release() might think the page can be reused.
*/
pipe_buf_release(cs->pipe, buf);
- get_page(newpage);
-
- if (!(buf->flags & PIPE_BUF_FLAG_LRU))
- lru_cache_add(newpage);
-
err = 0;
spin_lock(&cs->req->waitq.lock);
if (test_bit(FR_ABORTED, &cs->req->flags))
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 8dbd6fe66420..44a7a4288956 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1857,7 +1857,6 @@ void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
- struct gfs2_holder mock_gh = { .gh_gl = gl, .gh_state = state, };
unsigned long delay = 0;
unsigned long holdtime;
unsigned long now = jiffies;
@@ -1890,8 +1889,13 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
* keep the glock until the last strong holder is done with it.
*/
if (!find_first_strong_holder(gl)) {
- if (state == LM_ST_UNLOCKED)
- mock_gh.gh_state = LM_ST_EXCLUSIVE;
+ struct gfs2_holder mock_gh = {
+ .gh_gl = gl,
+ .gh_state = (state == LM_ST_UNLOCKED) ?
+ LM_ST_EXCLUSIVE : state,
+ .gh_iflags = BIT(HIF_HOLDER)
+ };
+
demote_incompat_holders(gl, &mock_gh);
}
handle_callback(gl, state, delay, true);
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 6424b903e885..89905f4f29bb 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -40,37 +40,6 @@ static const struct inode_operations gfs2_file_iops;
static const struct inode_operations gfs2_dir_iops;
static const struct inode_operations gfs2_symlink_iops;
-static int iget_test(struct inode *inode, void *opaque)
-{
- u64 no_addr = *(u64 *)opaque;
-
- return GFS2_I(inode)->i_no_addr == no_addr;
-}
-
-static int iget_set(struct inode *inode, void *opaque)
-{
- u64 no_addr = *(u64 *)opaque;
-
- GFS2_I(inode)->i_no_addr = no_addr;
- inode->i_ino = no_addr;
- return 0;
-}
-
-static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
-{
- struct inode *inode;
-
-repeat:
- inode = iget5_locked(sb, no_addr, iget_test, iget_set, &no_addr);
- if (!inode)
- return inode;
- if (is_bad_inode(inode)) {
- iput(inode);
- goto repeat;
- }
- return inode;
-}
-
/**
* gfs2_set_iop - Sets inode operations
* @inode: The inode with correct i_mode filled in
@@ -104,6 +73,22 @@ static void gfs2_set_iop(struct inode *inode)
}
}
+static int iget_test(struct inode *inode, void *opaque)
+{
+ u64 no_addr = *(u64 *)opaque;
+
+ return GFS2_I(inode)->i_no_addr == no_addr;
+}
+
+static int iget_set(struct inode *inode, void *opaque)
+{
+ u64 no_addr = *(u64 *)opaque;
+
+ GFS2_I(inode)->i_no_addr = no_addr;
+ inode->i_ino = no_addr;
+ return 0;
+}
+
/**
* gfs2_inode_lookup - Lookup an inode
* @sb: The super block
@@ -132,12 +117,11 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
{
struct inode *inode;
struct gfs2_inode *ip;
- struct gfs2_glock *io_gl = NULL;
struct gfs2_holder i_gh;
int error;
gfs2_holder_mark_uninitialized(&i_gh);
- inode = gfs2_iget(sb, no_addr);
+ inode = iget5_locked(sb, no_addr, iget_test, iget_set, &no_addr);
if (!inode)
return ERR_PTR(-ENOMEM);
@@ -145,22 +129,16 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
if (inode->i_state & I_NEW) {
struct gfs2_sbd *sdp = GFS2_SB(inode);
+ struct gfs2_glock *io_gl;
error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
if (unlikely(error))
goto fail;
- flush_delayed_work(&ip->i_gl->gl_work);
-
- error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
- if (unlikely(error))
- goto fail;
- if (blktype != GFS2_BLKST_UNLINKED)
- gfs2_cancel_delete_work(io_gl);
if (type == DT_UNKNOWN || blktype != GFS2_BLKST_FREE) {
/*
* The GL_SKIP flag indicates to skip reading the inode
- * block. We read the inode with gfs2_inode_refresh
+ * block. We read the inode when instantiating it
* after possibly checking the block type.
*/
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE,
@@ -181,24 +159,31 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
}
}
- glock_set_object(ip->i_gl, ip);
set_bit(GLF_INSTANTIATE_NEEDED, &ip->i_gl->gl_flags);
- error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
+
+ error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
if (unlikely(error))
goto fail;
- glock_set_object(ip->i_iopen_gh.gh_gl, ip);
+ if (blktype != GFS2_BLKST_UNLINKED)
+ gfs2_cancel_delete_work(io_gl);
+ error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
gfs2_glock_put(io_gl);
- io_gl = NULL;
+ if (unlikely(error))
+ goto fail;
/* Lowest possible timestamp; will be overwritten in gfs2_dinode_in. */
inode->i_atime.tv_sec = 1LL << (8 * sizeof(inode->i_atime.tv_sec) - 1);
inode->i_atime.tv_nsec = 0;
+ glock_set_object(ip->i_gl, ip);
+
if (type == DT_UNKNOWN) {
/* Inode glock must be locked already */
error = gfs2_instantiate(&i_gh);
- if (error)
+ if (error) {
+ glock_clear_object(ip->i_gl, ip);
goto fail;
+ }
} else {
ip->i_no_formal_ino = no_formal_ino;
inode->i_mode = DT2IF(type);
@@ -206,31 +191,23 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
if (gfs2_holder_initialized(&i_gh))
gfs2_glock_dq_uninit(&i_gh);
+ glock_set_object(ip->i_iopen_gh.gh_gl, ip);
gfs2_set_iop(inode);
+ unlock_new_inode(inode);
}
if (no_formal_ino && ip->i_no_formal_ino &&
no_formal_ino != ip->i_no_formal_ino) {
- error = -ESTALE;
- if (inode->i_state & I_NEW)
- goto fail;
iput(inode);
- return ERR_PTR(error);
+ return ERR_PTR(-ESTALE);
}
- if (inode->i_state & I_NEW)
- unlock_new_inode(inode);
-
return inode;
fail:
- if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
- glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
+ if (gfs2_holder_initialized(&ip->i_iopen_gh))
gfs2_glock_dq_uninit(&ip->i_iopen_gh);
- }
- if (io_gl)
- gfs2_glock_put(io_gl);
if (gfs2_holder_initialized(&i_gh))
gfs2_glock_dq_uninit(&i_gh);
iget_failed(inode);
@@ -730,18 +707,19 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
if (error)
goto fail_free_inode;
- flush_delayed_work(&ip->i_gl->gl_work);
error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
if (error)
goto fail_free_inode;
gfs2_cancel_delete_work(io_gl);
+ error = insert_inode_locked4(inode, ip->i_no_addr, iget_test, &ip->i_no_addr);
+ BUG_ON(error);
+
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
if (error)
goto fail_gunlock2;
- glock_set_object(ip->i_gl, ip);
error = gfs2_trans_begin(sdp, blocks, 0);
if (error)
goto fail_gunlock2;
@@ -757,9 +735,9 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
if (error)
goto fail_gunlock2;
+ glock_set_object(ip->i_gl, ip);
glock_set_object(io_gl, ip);
gfs2_set_iop(inode);
- insert_inode_hash(inode);
free_vfs_inode = 0; /* After this point, the inode is no longer
considered free. Any failures need to undo
@@ -801,17 +779,17 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
gfs2_glock_dq_uninit(ghs + 1);
gfs2_glock_put(io_gl);
gfs2_qa_put(dip);
+ unlock_new_inode(inode);
return error;
fail_gunlock3:
+ glock_clear_object(ip->i_gl, ip);
glock_clear_object(io_gl, ip);
gfs2_glock_dq_uninit(&ip->i_iopen_gh);
fail_gunlock2:
- glock_clear_object(io_gl, ip);
gfs2_glock_put(io_gl);
fail_free_inode:
if (ip->i_gl) {
- glock_clear_object(ip->i_gl, ip);
if (free_vfs_inode) /* else evict will do the put for us */
gfs2_glock_put(ip->i_gl);
}
@@ -829,7 +807,10 @@ fail_gunlock:
mark_inode_dirty(inode);
set_bit(free_vfs_inode ? GIF_FREE_VFS_INODE : GIF_ALLOC_FAILED,
&GFS2_I(inode)->i_flags);
- iput(inode);
+ if (inode->i_state & I_NEW)
+ iget_failed(inode);
+ else
+ iput(inode);
}
if (gfs2_holder_initialized(ghs + 1))
gfs2_glock_dq_uninit(ghs + 1);
diff --git a/fs/inode.c b/fs/inode.c
index 3eba0940ffcf..6b80a51129d5 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -180,8 +180,6 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
mapping->a_ops = &empty_aops;
mapping->host = inode;
mapping->flags = 0;
- if (sb->s_type->fs_flags & FS_THP_SUPPORT)
- __set_bit(AS_THP_SUPPORT, &mapping->flags);
mapping->wb_err = 0;
atomic_set(&mapping->i_mmap_writable, 0);
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 88202de519f6..5c4f582d6549 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -142,6 +142,7 @@ static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
struct io_wqe_acct *acct,
struct io_cb_cancel_data *match);
static void create_worker_cb(struct callback_head *cb);
+static void io_wq_cancel_tw_create(struct io_wq *wq);
static bool io_worker_get(struct io_worker *worker)
{
@@ -357,12 +358,22 @@ static bool io_queue_worker_create(struct io_worker *worker,
test_and_set_bit_lock(0, &worker->create_state))
goto fail_release;
+ atomic_inc(&wq->worker_refs);
init_task_work(&worker->create_work, func);
worker->create_index = acct->index;
if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
- clear_bit_unlock(0, &worker->create_state);
+ /*
+ * EXIT may have been set after checking it above, check after
+ * adding the task_work and remove any creation item if it is
+ * now set. wq exit does that too, but we can have added this
+ * work item after we canceled in io_wq_exit_workers().
+ */
+ if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
+ io_wq_cancel_tw_create(wq);
+ io_worker_ref_put(wq);
return true;
}
+ io_worker_ref_put(wq);
clear_bit_unlock(0, &worker->create_state);
fail_release:
io_worker_release(worker);
@@ -384,7 +395,9 @@ static void io_wqe_dec_running(struct io_worker *worker)
if (atomic_dec_and_test(&acct->nr_running) && io_acct_run_queue(acct)) {
atomic_inc(&acct->nr_running);
atomic_inc(&wqe->wq->worker_refs);
+ raw_spin_unlock(&wqe->lock);
io_queue_worker_create(worker, acct, create_worker_cb);
+ raw_spin_lock(&wqe->lock);
}
}
@@ -714,6 +727,13 @@ static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
static inline bool io_should_retry_thread(long err)
{
+ /*
+ * Prevent perpetual task_work retry, if the task (or its group) is
+ * exiting.
+ */
+ if (fatal_signal_pending(current))
+ return false;
+
switch (err) {
case -EAGAIN:
case -ERESTARTSYS:
@@ -1191,13 +1211,9 @@ void io_wq_exit_start(struct io_wq *wq)
set_bit(IO_WQ_BIT_EXIT, &wq->state);
}
-static void io_wq_exit_workers(struct io_wq *wq)
+static void io_wq_cancel_tw_create(struct io_wq *wq)
{
struct callback_head *cb;
- int node;
-
- if (!wq->task)
- return;
while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
struct io_worker *worker;
@@ -1205,6 +1221,16 @@ static void io_wq_exit_workers(struct io_wq *wq)
worker = container_of(cb, struct io_worker, create_work);
io_worker_cancel_cb(worker);
}
+}
+
+static void io_wq_exit_workers(struct io_wq *wq)
+{
+ int node;
+
+ if (!wq->task)
+ return;
+
+ io_wq_cancel_tw_create(wq);
rcu_read_lock();
for_each_node(node) {
diff --git a/fs/io_uring.c b/fs/io_uring.c
index b07196b4511c..fb2a0cb4aaf8 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1278,6 +1278,7 @@ static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
bool cancel_all)
+ __must_hold(&req->ctx->timeout_lock)
{
struct io_kiocb *req;
@@ -1293,6 +1294,44 @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
return false;
}
+static bool io_match_linked(struct io_kiocb *head)
+{
+ struct io_kiocb *req;
+
+ io_for_each_link(req, head) {
+ if (req->flags & REQ_F_INFLIGHT)
+ return true;
+ }
+ return false;
+}
+
+/*
+ * As io_match_task() but protected against racing with linked timeouts.
+ * User must not hold timeout_lock.
+ */
+static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
+ bool cancel_all)
+{
+ bool matched;
+
+ if (task && head->task != task)
+ return false;
+ if (cancel_all)
+ return true;
+
+ if (head->flags & REQ_F_LINK_TIMEOUT) {
+ struct io_ring_ctx *ctx = head->ctx;
+
+ /* protect against races with linked timeouts */
+ spin_lock_irq(&ctx->timeout_lock);
+ matched = io_match_linked(head);
+ spin_unlock_irq(&ctx->timeout_lock);
+ } else {
+ matched = io_match_linked(head);
+ }
+ return matched;
+}
+
static inline bool req_has_async_data(struct io_kiocb *req)
{
return req->flags & REQ_F_ASYNC_DATA;
@@ -1502,10 +1541,10 @@ static void io_prep_async_link(struct io_kiocb *req)
if (req->flags & REQ_F_LINK_TIMEOUT) {
struct io_ring_ctx *ctx = req->ctx;
- spin_lock(&ctx->completion_lock);
+ spin_lock_irq(&ctx->timeout_lock);
io_for_each_link(cur, req)
io_prep_async_work(cur);
- spin_unlock(&ctx->completion_lock);
+ spin_unlock_irq(&ctx->timeout_lock);
} else {
io_for_each_link(cur, req)
io_prep_async_work(cur);
@@ -2852,9 +2891,13 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;
kiocb->ki_pos = READ_ONCE(sqe->off);
- if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) {
- req->flags |= REQ_F_CUR_POS;
- kiocb->ki_pos = file->f_pos;
+ if (kiocb->ki_pos == -1) {
+ if (!(file->f_mode & FMODE_STREAM)) {
+ req->flags |= REQ_F_CUR_POS;
+ kiocb->ki_pos = file->f_pos;
+ } else {
+ kiocb->ki_pos = 0;
+ }
}
kiocb->ki_flags = iocb_flags(file);
ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
@@ -4327,6 +4370,7 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
kfree(nxt);
if (++i == nbufs)
return i;
+ cond_resched();
}
i++;
kfree(buf);
@@ -5704,7 +5748,7 @@ static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx,
list = &ctx->cancel_hash[i];
hlist_for_each_entry_safe(req, tmp, list, hash_node) {
- if (io_match_task(req, tsk, cancel_all))
+ if (io_match_task_safe(req, tsk, cancel_all))
posted += io_poll_remove_one(req);
}
}
@@ -6156,6 +6200,9 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
return -EFAULT;
+ if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
+ return -EINVAL;
+
data->mode = io_translate_timeout_mode(flags);
hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
@@ -6880,10 +6927,11 @@ static inline struct file *io_file_get(struct io_ring_ctx *ctx,
static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
{
struct io_kiocb *prev = req->timeout.prev;
- int ret;
+ int ret = -ENOENT;
if (prev) {
- ret = io_try_cancel_userdata(req, prev->user_data);
+ if (!(req->task->flags & PF_EXITING))
+ ret = io_try_cancel_userdata(req, prev->user_data);
io_req_complete_post(req, ret ?: -ETIME, 0);
io_put_req(prev);
} else {
@@ -9255,10 +9303,8 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx)
struct io_buffer *buf;
unsigned long index;
- xa_for_each(&ctx->io_buffers, index, buf) {
+ xa_for_each(&ctx->io_buffers, index, buf)
__io_remove_buffers(ctx, buf, index, -1U);
- cond_resched();
- }
}
static void io_req_caches_free(struct io_ring_ctx *ctx)
@@ -9562,19 +9608,8 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
{
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
struct io_task_cancel *cancel = data;
- bool ret;
- if (!cancel->all && (req->flags & REQ_F_LINK_TIMEOUT)) {
- struct io_ring_ctx *ctx = req->ctx;
-
- /* protect against races with linked timeouts */
- spin_lock(&ctx->completion_lock);
- ret = io_match_task(req, cancel->task, cancel->all);
- spin_unlock(&ctx->completion_lock);
- } else {
- ret = io_match_task(req, cancel->task, cancel->all);
- }
- return ret;
+ return io_match_task_safe(req, cancel->task, cancel->all);
}
static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
@@ -9586,7 +9621,7 @@ static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
spin_lock(&ctx->completion_lock);
list_for_each_entry_reverse(de, &ctx->defer_list, list) {
- if (io_match_task(de->req, task, cancel_all)) {
+ if (io_match_task_safe(de->req, task, cancel_all)) {
list_cut_position(&list, &ctx->defer_list, &de->list);
break;
}
@@ -9764,7 +9799,7 @@ static __cold void io_uring_clean_tctx(struct io_uring_task *tctx)
}
if (wq) {
/*
- * Must be after io_uring_del_task_file() (removes nodes under
+ * Must be after io_uring_del_tctx_node() (removes nodes under
* uring_lock) to avoid race with io_uring_try_cancel_iowq().
*/
io_wq_put_and_exit(wq);
@@ -9793,7 +9828,7 @@ static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
/*
* Find any io_uring ctx that this task has registered or done IO on, and cancel
- * requests. @sqd should be not-null IIF it's an SQPOLL thread cancellation.
+ * requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation.
*/
static __cold void io_uring_cancel_generic(bool cancel_all,
struct io_sq_data *sqd)
@@ -9835,8 +9870,10 @@ static __cold void io_uring_cancel_generic(bool cancel_all,
cancel_all);
}
- prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
+ io_run_task_work();
io_uring_drop_tctx_refs(current);
+
/*
* If we've seen completions, retry without waiting. This
* avoids a race where a completion comes in before we did
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 1753c26c8e76..71a36ae120ee 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -205,7 +205,16 @@ struct iomap_readpage_ctx {
struct readahead_control *rac;
};
-static loff_t iomap_read_inline_data(const struct iomap_iter *iter,
+/**
+ * iomap_read_inline_data - copy inline data into the page cache
+ * @iter: iteration structure
+ * @page: page to copy to
+ *
+ * Copy the inline data in @iter into @page and zero out the rest of the page.
+ * Only a single IOMAP_INLINE extent is allowed at the end of each file.
+ * Returns zero for success to complete the read, or the usual negative errno.
+ */
+static int iomap_read_inline_data(const struct iomap_iter *iter,
struct page *page)
{
const struct iomap *iomap = iomap_iter_srcmap(iter);
@@ -214,7 +223,7 @@ static loff_t iomap_read_inline_data(const struct iomap_iter *iter,
void *addr;
if (PageUptodate(page))
- return PAGE_SIZE - poff;
+ return 0;
if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
return -EIO;
@@ -231,7 +240,7 @@ static loff_t iomap_read_inline_data(const struct iomap_iter *iter,
memset(addr + size, 0, PAGE_SIZE - poff - size);
kunmap_local(addr);
iomap_set_range_uptodate(page, poff, PAGE_SIZE - poff);
- return PAGE_SIZE - poff;
+ return 0;
}
static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
@@ -257,7 +266,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
sector_t sector;
if (iomap->type == IOMAP_INLINE)
- return min(iomap_read_inline_data(iter, page), length);
+ return iomap_read_inline_data(iter, page);
/* zero post-eof blocks as the page may be mapped */
iop = iomap_page_create(iter->inode, page);
@@ -370,6 +379,8 @@ static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
ctx->cur_page_in_bio = false;
}
ret = iomap_readpage_iter(iter, ctx, done);
+ if (ret <= 0)
+ return ret;
}
return done;
@@ -580,15 +591,10 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
static int iomap_write_begin_inline(const struct iomap_iter *iter,
struct page *page)
{
- int ret;
-
/* needs more work for the tailpacking case; disable for now */
if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
return -EIO;
- ret = iomap_read_inline_data(iter, page);
- if (ret < 0)
- return ret;
- return 0;
+ return iomap_read_inline_data(iter, page);
}
static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
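
After this patch iomap_read_inline_data() returns 0 on success instead of a byte count: it copies the inline extent into the page, zero-fills the tail and marks the range uptodate. A tiny user-space sketch of that copy-and-zero step, with a fixed 4 KiB buffer standing in for the page cache page:

#include <errno.h>
#include <stddef.h>
#include <string.h>

#define SKETCH_PAGE_SIZE 4096u

static int read_inline_sketch(unsigned char *page, size_t poff,
			      const void *inline_data, size_t size)
{
	if (size > SKETCH_PAGE_SIZE - poff)
		return -EIO;	/* inline extent does not fit in the page tail */

	memcpy(page + poff, inline_data, size);
	memset(page + poff + size, 0, SKETCH_PAGE_SIZE - poff - size);
	return 0;		/* 0 == read completed, as in the patched helper */
}
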
diff --git a/fs/ksmbd/ndr.c b/fs/ksmbd/ndr.c
index 8317f7ca402b..5052be9261d9 100644
--- a/fs/ksmbd/ndr.c
+++ b/fs/ksmbd/ndr.c
@@ -148,7 +148,7 @@ static int ndr_read_int16(struct ndr *n, __u16 *value)
static int ndr_read_int32(struct ndr *n, __u32 *value)
{
if (n->offset + sizeof(__u32) > n->length)
- return 0;
+ return -EINVAL;
if (value)
*value = le32_to_cpu(*(__le32 *)ndr_get_field(n));
diff --git a/fs/ksmbd/smb2ops.c b/fs/ksmbd/smb2ops.c
index 0a5d8450e835..02a44d28bdaf 100644
--- a/fs/ksmbd/smb2ops.c
+++ b/fs/ksmbd/smb2ops.c
@@ -271,9 +271,6 @@ int init_smb3_11_server(struct ksmbd_conn *conn)
if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
- if (conn->cipher_type)
- conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
-
if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL)
conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL;
diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
index 121f8e8c70ac..b8b3a4c28b74 100644
--- a/fs/ksmbd/smb2pdu.c
+++ b/fs/ksmbd/smb2pdu.c
@@ -915,6 +915,25 @@ static void decode_encrypt_ctxt(struct ksmbd_conn *conn,
}
}
+/**
+ * smb3_encryption_negotiated() - checks if server and client agreed on enabling encryption
+ * @conn: smb connection
+ *
+ * Return: true if connection should be encrypted, else false
+ */
+static bool smb3_encryption_negotiated(struct ksmbd_conn *conn)
+{
+ if (!conn->ops->generate_encryptionkey)
+ return false;
+
+ /*
+ * SMB 3.0 and 3.0.2 dialects use the SMB2_GLOBAL_CAP_ENCRYPTION flag.
+ * SMB 3.1.1 uses the cipher_type field.
+ */
+ return (conn->vals->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) ||
+ conn->cipher_type;
+}
+
static void decode_compress_ctxt(struct ksmbd_conn *conn,
struct smb2_compression_capabilities_context *pneg_ctxt)
{
@@ -1469,8 +1488,7 @@ static int ntlm_authenticate(struct ksmbd_work *work)
(req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED))
sess->sign = true;
- if (conn->vals->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION &&
- conn->ops->generate_encryptionkey &&
+ if (smb3_encryption_negotiated(conn) &&
!(req->Flags & SMB2_SESSION_REQ_FLAG_BINDING)) {
rc = conn->ops->generate_encryptionkey(sess);
if (rc) {
@@ -1559,8 +1577,7 @@ static int krb5_authenticate(struct ksmbd_work *work)
(req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED))
sess->sign = true;
- if ((conn->vals->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) &&
- conn->ops->generate_encryptionkey) {
+ if (smb3_encryption_negotiated(conn)) {
retval = conn->ops->generate_encryptionkey(sess);
if (retval) {
ksmbd_debug(SMB,
@@ -1697,8 +1714,10 @@ int smb2_sess_setup(struct ksmbd_work *work)
negblob_off = le16_to_cpu(req->SecurityBufferOffset);
negblob_len = le16_to_cpu(req->SecurityBufferLength);
if (negblob_off < offsetof(struct smb2_sess_setup_req, Buffer) ||
- negblob_len < offsetof(struct negotiate_message, NegotiateFlags))
- return -EINVAL;
+ negblob_len < offsetof(struct negotiate_message, NegotiateFlags)) {
+ rc = -EINVAL;
+ goto out_err;
+ }
negblob = (struct negotiate_message *)((char *)&req->hdr.ProtocolId +
negblob_off);
@@ -2960,6 +2979,10 @@ int smb2_open(struct ksmbd_work *work)
&pntsd_size, &fattr);
posix_acl_release(fattr.cf_acls);
posix_acl_release(fattr.cf_dacls);
+ if (rc) {
+ kfree(pntsd);
+ goto err_out;
+ }
rc = ksmbd_vfs_set_sd_xattr(conn,
user_ns,
@@ -4457,6 +4480,12 @@ static void get_file_stream_info(struct ksmbd_work *work,
&stat);
file_info = (struct smb2_file_stream_info *)rsp->Buffer;
+ buf_free_len =
+ smb2_calc_max_out_buf_len(work, 8,
+ le32_to_cpu(req->OutputBufferLength));
+ if (buf_free_len < 0)
+ goto out;
+
xattr_list_len = ksmbd_vfs_listxattr(path->dentry, &xattr_list);
if (xattr_list_len < 0) {
goto out;
@@ -4465,12 +4494,6 @@ static void get_file_stream_info(struct ksmbd_work *work,
goto out;
}
- buf_free_len =
- smb2_calc_max_out_buf_len(work, 8,
- le32_to_cpu(req->OutputBufferLength));
- if (buf_free_len < 0)
- goto out;
-
while (idx < xattr_list_len) {
stream_name = xattr_list + idx;
streamlen = strlen(stream_name);
@@ -4496,8 +4519,10 @@ static void get_file_stream_info(struct ksmbd_work *work,
":%s", &stream_name[XATTR_NAME_STREAM_LEN]);
next = sizeof(struct smb2_file_stream_info) + streamlen * 2;
- if (next > buf_free_len)
+ if (next > buf_free_len) {
+ kfree(stream_buf);
break;
+ }
file_info = (struct smb2_file_stream_info *)&rsp->Buffer[nbytes];
streamlen = smbConvertToUTF16((__le16 *)file_info->StreamName,
@@ -4514,6 +4539,7 @@ static void get_file_stream_info(struct ksmbd_work *work,
file_info->NextEntryOffset = cpu_to_le32(next);
}
+out:
if (!S_ISDIR(stat.mode) &&
buf_free_len >= sizeof(struct smb2_file_stream_info) + 7 * 2) {
file_info = (struct smb2_file_stream_info *)
@@ -4522,14 +4548,13 @@ static void get_file_stream_info(struct ksmbd_work *work,
"::$DATA", 7, conn->local_nls, 0);
streamlen *= 2;
file_info->StreamNameLength = cpu_to_le32(streamlen);
- file_info->StreamSize = 0;
- file_info->StreamAllocationSize = 0;
+ file_info->StreamSize = cpu_to_le64(stat.size);
+ file_info->StreamAllocationSize = cpu_to_le64(stat.blocks << 9);
nbytes += sizeof(struct smb2_file_stream_info) + streamlen;
}
/* last entry offset should be 0 */
file_info->NextEntryOffset = 0;
-out:
kvfree(xattr_list);
rsp->OutputBufferLength = cpu_to_le32(nbytes);
@@ -5068,7 +5093,7 @@ static int smb2_get_info_sec(struct ksmbd_work *work,
if (addition_info & ~(OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO |
PROTECTED_DACL_SECINFO |
UNPROTECTED_DACL_SECINFO)) {
- pr_err("Unsupported addition info: 0x%x)\n",
+ ksmbd_debug(SMB, "Unsupported addition info: 0x%x)\n",
addition_info);
pntsd->revision = cpu_to_le16(1);
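
smb3_encryption_negotiated() above folds the two dialect-specific conditions into one helper. A stand-alone sketch of the same decision, with a made-up connection struct and an illustrative capability bit rather than ksmbd's real types:

#include <stdbool.h>
#include <stdint.h>

#define CAP_ENCRYPTION	0x00000040u	/* illustrative, see SMB2_GLOBAL_CAP_ENCRYPTION */

struct conn_sketch {
	uint32_t capabilities;		/* negotiated SMB2 capabilities */
	uint16_t cipher_type;		/* non-zero once a 3.1.1 cipher was chosen */
	bool can_generate_key;		/* server side can derive encryption keys */
};

static bool encryption_negotiated(const struct conn_sketch *c)
{
	if (!c->can_generate_key)
		return false;
	/* 3.0/3.0.2 advertise a capability bit, 3.1.1 negotiates a cipher */
	return (c->capabilities & CAP_ENCRYPTION) || c->cipher_type;
}
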
diff --git a/fs/namespace.c b/fs/namespace.c
index 659a8f39c61a..b696543adab8 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -4263,12 +4263,11 @@ SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
return err;
err = user_path_at(dfd, path, kattr.lookup_flags, &target);
- if (err)
- return err;
-
- err = do_mount_setattr(&target, &kattr);
+ if (!err) {
+ err = do_mount_setattr(&target, &kattr);
+ path_put(&target);
+ }
finish_mount_kattr(&kattr);
- path_put(&target);
return err;
}
diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c
index 9320a42dfaf9..75c76cbb27cc 100644
--- a/fs/netfs/read_helper.c
+++ b/fs/netfs/read_helper.c
@@ -354,16 +354,11 @@ static void netfs_rreq_write_to_cache_work(struct work_struct *work)
netfs_rreq_do_write_to_cache(rreq);
}
-static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq,
- bool was_async)
+static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq)
{
- if (was_async) {
- rreq->work.func = netfs_rreq_write_to_cache_work;
- if (!queue_work(system_unbound_wq, &rreq->work))
- BUG();
- } else {
- netfs_rreq_do_write_to_cache(rreq);
- }
+ rreq->work.func = netfs_rreq_write_to_cache_work;
+ if (!queue_work(system_unbound_wq, &rreq->work))
+ BUG();
}
/*
@@ -558,7 +553,7 @@ again:
wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags))
- return netfs_rreq_write_to_cache(rreq, was_async);
+ return netfs_rreq_write_to_cache(rreq);
netfs_rreq_completed(rreq, was_async);
}
@@ -960,7 +955,7 @@ int netfs_readpage(struct file *file,
rreq = netfs_alloc_read_request(ops, netfs_priv, file);
if (!rreq) {
if (netfs_priv)
- ops->cleanup(netfs_priv, folio_file_mapping(folio));
+ ops->cleanup(folio_file_mapping(folio), netfs_priv);
folio_unlock(folio);
return -ENOMEM;
}
@@ -1008,8 +1003,8 @@ out:
}
EXPORT_SYMBOL(netfs_readpage);
-/**
- * netfs_skip_folio_read - prep a folio for writing without reading first
+/*
+ * Prepare a folio for writing without reading first
* @folio: The folio being prepared
* @pos: starting position for the write
* @len: length of write
@@ -1191,7 +1186,7 @@ have_folio:
goto error;
have_folio_no_wait:
if (netfs_priv)
- ops->cleanup(netfs_priv, mapping);
+ ops->cleanup(mapping, netfs_priv);
*_folio = folio;
_leave(" = 0");
return 0;
@@ -1202,7 +1197,7 @@ error:
folio_unlock(folio);
folio_put(folio);
if (netfs_priv)
- ops->cleanup(netfs_priv, mapping);
+ ops->cleanup(mapping, netfs_priv);
_leave(" = %d", ret);
return ret;
}
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index dd53704c3f40..fda530d5e764 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -219,6 +219,7 @@ void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
NFS_INO_DATA_INVAL_DEFER);
else if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
nfsi->cache_validity &= ~NFS_INO_DATA_INVAL_DEFER;
+ trace_nfs_set_cache_invalid(inode, 0);
}
EXPORT_SYMBOL_GPL(nfs_set_cache_invalid);
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 08355b66e7cb..8b21ff1be717 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -289,7 +289,9 @@ static void nfs42_copy_dest_done(struct inode *inode, loff_t pos, loff_t len)
loff_t newsize = pos + len;
loff_t end = newsize - 1;
- truncate_pagecache_range(inode, pos, end);
+ WARN_ON_ONCE(invalidate_inode_pages2_range(inode->i_mapping,
+ pos >> PAGE_SHIFT, end >> PAGE_SHIFT));
+
spin_lock(&inode->i_lock);
if (newsize > i_size_read(inode))
i_size_write(inode, newsize);
diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
index c8bad735e4c1..271e5f92ed01 100644
--- a/fs/nfs/nfs42xdr.c
+++ b/fs/nfs/nfs42xdr.c
@@ -1434,8 +1434,7 @@ static int nfs4_xdr_dec_clone(struct rpc_rqst *rqstp,
status = decode_clone(xdr);
if (status)
goto out;
- status = decode_getfattr(xdr, res->dst_fattr, res->server);
-
+ decode_getfattr(xdr, res->dst_fattr, res->server);
out:
res->rpc_status = status;
return status;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index ecc4594299d6..f63dfa01001c 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1998,6 +1998,10 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
dprintk("%s: exit with error %d for server %s\n",
__func__, -EPROTONOSUPPORT, clp->cl_hostname);
return -EPROTONOSUPPORT;
+ case -ENOSPC:
+ if (clp->cl_cons_state == NFS_CS_SESSION_INITING)
+ nfs_mark_client_ready(clp, -EIO);
+ return -EIO;
case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
* in nfs4_exchange_id */
default:
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index 21dac847f1e4..b3aee261801e 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -162,6 +162,7 @@ DEFINE_NFS_INODE_EVENT_DONE(nfs_writeback_inode_exit);
DEFINE_NFS_INODE_EVENT(nfs_fsync_enter);
DEFINE_NFS_INODE_EVENT_DONE(nfs_fsync_exit);
DEFINE_NFS_INODE_EVENT(nfs_access_enter);
+DEFINE_NFS_INODE_EVENT_DONE(nfs_set_cache_invalid);
TRACE_EVENT(nfs_access_exit,
TP_PROTO(
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 4418517f6f12..15dac36ca852 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -438,22 +438,19 @@ nfsd3_proc_link(struct svc_rqst *rqstp)
static void nfsd3_init_dirlist_pages(struct svc_rqst *rqstp,
struct nfsd3_readdirres *resp,
- int count)
+ u32 count)
{
struct xdr_buf *buf = &resp->dirlist;
struct xdr_stream *xdr = &resp->xdr;
- count = min_t(u32, count, svc_max_payload(rqstp));
+ count = clamp(count, (u32)(XDR_UNIT * 2), svc_max_payload(rqstp));
memset(buf, 0, sizeof(*buf));
/* Reserve room for the NULL ptr & eof flag (-2 words) */
buf->buflen = count - XDR_UNIT * 2;
buf->pages = rqstp->rq_next_page;
- while (count > 0) {
- rqstp->rq_next_page++;
- count -= PAGE_SIZE;
- }
+ rqstp->rq_next_page += (buf->buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* This is xdr_init_encode(), but it assumes that
* the head kvec has already been consumed. */
@@ -462,7 +459,7 @@ static void nfsd3_init_dirlist_pages(struct svc_rqst *rqstp,
xdr->page_ptr = buf->pages;
xdr->iov = NULL;
xdr->p = page_address(*buf->pages);
- xdr->end = xdr->p + (PAGE_SIZE >> 2);
+ xdr->end = (void *)xdr->p + min_t(u32, buf->buflen, PAGE_SIZE);
xdr->rqst = NULL;
}
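
nfsd3_init_dirlist_pages() now clamps the client-supplied count into a sane range and advances rq_next_page with a round-up division instead of the old open-coded loop. The same arithmetic in a small stand-alone form (page size fixed at 4096 purely for the example):

#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096u
#define XDR_UNIT 4u

static inline uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Pages needed for the dirlist once the two reserved words
 * (NULL pointer + eof flag) are taken out of the clamped count. */
static uint32_t dirlist_pages(uint32_t count, uint32_t max_payload)
{
	uint32_t buflen;

	count = clamp_u32(count, 2 * XDR_UNIT, max_payload);
	buflen = count - 2 * XDR_UNIT;
	return (buflen + SKETCH_PAGE_SIZE - 1) / SKETCH_PAGE_SIZE;	/* DIV_ROUND_UP */
}
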
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 6fedc49726bf..c634483d85d2 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -2156,6 +2156,7 @@ static struct notifier_block nfsd4_cld_block = {
int
register_cld_notifier(void)
{
+ WARN_ON(!nfsd_net_id);
return rpc_pipefs_notifier_register(&nfsd4_cld_block);
}
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index bfad94c70b84..1956d377d1a6 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1207,6 +1207,11 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
return 0;
}
+static bool delegation_hashed(struct nfs4_delegation *dp)
+{
+ return !(list_empty(&dp->dl_perfile));
+}
+
static bool
unhash_delegation_locked(struct nfs4_delegation *dp)
{
@@ -1214,7 +1219,7 @@ unhash_delegation_locked(struct nfs4_delegation *dp)
lockdep_assert_held(&state_lock);
- if (list_empty(&dp->dl_perfile))
+ if (!delegation_hashed(dp))
return false;
dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
@@ -4598,7 +4603,7 @@ static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
* queued for a lease break. Don't queue it again.
*/
spin_lock(&state_lock);
- if (dp->dl_time == 0) {
+ if (delegation_hashed(dp) && dp->dl_time == 0) {
dp->dl_time = ktime_get_boottime_seconds();
list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
}
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index af8531c3854a..51a49e0cfe37 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1521,12 +1521,9 @@ static int __init init_nfsd(void)
int retval;
printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n");
- retval = register_cld_notifier();
- if (retval)
- return retval;
retval = nfsd4_init_slabs();
if (retval)
- goto out_unregister_notifier;
+ return retval;
retval = nfsd4_init_pnfs();
if (retval)
goto out_free_slabs;
@@ -1545,9 +1542,14 @@ static int __init init_nfsd(void)
goto out_free_exports;
retval = register_pernet_subsys(&nfsd_net_ops);
if (retval < 0)
+ goto out_free_filesystem;
+ retval = register_cld_notifier();
+ if (retval)
goto out_free_all;
return 0;
out_free_all:
+ unregister_pernet_subsys(&nfsd_net_ops);
+out_free_filesystem:
unregister_filesystem(&nfsd_fs_type);
out_free_exports:
remove_proc_entry("fs/nfs/exports", NULL);
@@ -1561,13 +1563,12 @@ out_free_pnfs:
nfsd4_exit_pnfs();
out_free_slabs:
nfsd4_free_slabs();
-out_unregister_notifier:
- unregister_cld_notifier();
return retval;
}
static void __exit exit_nfsd(void)
{
+ unregister_cld_notifier();
unregister_pernet_subsys(&nfsd_net_ops);
nfsd_drc_slab_free();
remove_proc_entry("fs/nfs/exports", NULL);
@@ -1577,7 +1578,6 @@ static void __exit exit_nfsd(void)
nfsd4_free_slabs();
nfsd4_exit_pnfs();
unregister_filesystem(&nfsd_fs_type);
- unregister_cld_notifier();
}
MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index eea5b59b6a6c..de282f3273c5 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -556,17 +556,17 @@ nfsd_proc_rmdir(struct svc_rqst *rqstp)
static void nfsd_init_dirlist_pages(struct svc_rqst *rqstp,
struct nfsd_readdirres *resp,
- int count)
+ u32 count)
{
struct xdr_buf *buf = &resp->dirlist;
struct xdr_stream *xdr = &resp->xdr;
- count = min_t(u32, count, PAGE_SIZE);
+ count = clamp(count, (u32)(XDR_UNIT * 2), svc_max_payload(rqstp));
memset(buf, 0, sizeof(*buf));
/* Reserve room for the NULL ptr & eof flag (-2 words) */
- buf->buflen = count - sizeof(__be32) * 2;
+ buf->buflen = count - XDR_UNIT * 2;
buf->pages = rqstp->rq_next_page;
rqstp->rq_next_page++;
@@ -577,7 +577,7 @@ static void nfsd_init_dirlist_pages(struct svc_rqst *rqstp,
xdr->page_ptr = buf->pages;
xdr->iov = NULL;
xdr->p = page_address(*buf->pages);
- xdr->end = xdr->p + (PAGE_SIZE >> 2);
+ xdr->end = (void *)xdr->p + min_t(u32, buf->buflen, PAGE_SIZE);
xdr->rqst = NULL;
}
diff --git a/fs/ntfs/Kconfig b/fs/ntfs/Kconfig
index 1667a7e590d8..f93e69a61283 100644
--- a/fs/ntfs/Kconfig
+++ b/fs/ntfs/Kconfig
@@ -52,6 +52,7 @@ config NTFS_DEBUG
config NTFS_RW
bool "NTFS write support"
depends on NTFS_FS
+ depends on PAGE_SIZE_LESS_THAN_64KB
help
This enables the partial, but safe, write support in the NTFS driver.
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 040e1cf90528..65ce0e72e7b9 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -35,17 +35,7 @@
void signalfd_cleanup(struct sighand_struct *sighand)
{
- wait_queue_head_t *wqh = &sighand->signalfd_wqh;
- /*
- * The lockless check can race with remove_wait_queue() in progress,
- * but in this case its caller should run under rcu_read_lock() and
- * sighand_cachep is SLAB_TYPESAFE_BY_RCU, we can safely return.
- */
- if (likely(!waitqueue_active(wqh)))
- return;
-
- /* wait_queue_entry_t->func(POLLFREE) should do remove_wait_queue() */
- wake_up_poll(wqh, EPOLLHUP | POLLFREE);
+ wake_up_pollfree(&sighand->signalfd_wqh);
}
struct signalfd_ctx {
diff --git a/fs/smbfs_common/cifs_arc4.c b/fs/smbfs_common/cifs_arc4.c
index 85ba15a60b13..043e4cb839fa 100644
--- a/fs/smbfs_common/cifs_arc4.c
+++ b/fs/smbfs_common/cifs_arc4.c
@@ -72,16 +72,3 @@ void cifs_arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int l
ctx->y = y;
}
EXPORT_SYMBOL_GPL(cifs_arc4_crypt);
-
-static int __init
-init_smbfs_common(void)
-{
- return 0;
-}
-static void __init
-exit_smbfs_common(void)
-{
-}
-
-module_init(init_smbfs_common)
-module_exit(exit_smbfs_common)
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index 925a621b432e..3616839c5c4b 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -161,6 +161,77 @@ struct tracefs_fs_info {
struct tracefs_mount_opts mount_opts;
};
+static void change_gid(struct dentry *dentry, kgid_t gid)
+{
+ if (!dentry->d_inode)
+ return;
+ dentry->d_inode->i_gid = gid;
+}
+
+/*
+ * Taken from d_walk, but without he need for handling renames.
+ * Nothing can be renamed while walking the list, as tracefs
+ * does not support renames. This is only called when mounting
+ * or remounting the file system, to set all the files to
+ * the given gid.
+ */
+static void set_gid(struct dentry *parent, kgid_t gid)
+{
+ struct dentry *this_parent;
+ struct list_head *next;
+
+ this_parent = parent;
+ spin_lock(&this_parent->d_lock);
+
+ change_gid(this_parent, gid);
+repeat:
+ next = this_parent->d_subdirs.next;
+resume:
+ while (next != &this_parent->d_subdirs) {
+ struct list_head *tmp = next;
+ struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
+ next = tmp->next;
+
+ spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+
+ change_gid(dentry, gid);
+
+ if (!list_empty(&dentry->d_subdirs)) {
+ spin_unlock(&this_parent->d_lock);
+ spin_release(&dentry->d_lock.dep_map, _RET_IP_);
+ this_parent = dentry;
+ spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
+ goto repeat;
+ }
+ spin_unlock(&dentry->d_lock);
+ }
+ /*
+ * All done at this level ... ascend and resume the search.
+ */
+ rcu_read_lock();
+ascend:
+ if (this_parent != parent) {
+ struct dentry *child = this_parent;
+ this_parent = child->d_parent;
+
+ spin_unlock(&child->d_lock);
+ spin_lock(&this_parent->d_lock);
+
+ /* go into the first sibling still alive */
+ do {
+ next = child->d_child.next;
+ if (next == &this_parent->d_subdirs)
+ goto ascend;
+ child = list_entry(next, struct dentry, d_child);
+ } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
+ rcu_read_unlock();
+ goto resume;
+ }
+ rcu_read_unlock();
+ spin_unlock(&this_parent->d_lock);
+ return;
+}
+
static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
{
substring_t args[MAX_OPT_ARGS];
@@ -193,6 +264,7 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
if (!gid_valid(gid))
return -EINVAL;
opts->gid = gid;
+ set_gid(tracefs_mount->mnt_root, gid);
break;
case Opt_mode:
if (match_octal(&args[0], &option))
@@ -414,6 +486,8 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode,
inode->i_mode = mode;
inode->i_fop = fops ? fops : &tracefs_file_operations;
inode->i_private = data;
+ inode->i_uid = d_inode(dentry->d_parent)->i_uid;
+ inode->i_gid = d_inode(dentry->d_parent)->i_gid;
d_instantiate(dentry, inode);
fsnotify_create(dentry->d_parent->d_inode, dentry);
return end_creating(dentry);
@@ -436,6 +510,8 @@ static struct dentry *__create_dir(const char *name, struct dentry *parent,
inode->i_mode = S_IFDIR | S_IRWXU | S_IRUSR| S_IRGRP | S_IXUSR | S_IXGRP;
inode->i_op = ops;
inode->i_fop = &simple_dir_operations;
+ inode->i_uid = d_inode(dentry->d_parent)->i_uid;
+ inode->i_gid = d_inode(dentry->d_parent)->i_gid;
/* directory inodes start off with i_nlink == 2 (for "." entry) */
inc_nlink(inode);
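
The set_gid() helper added above walks the dentry tree iteratively, descending into children first and climbing back via the parent pointer, which is the same shape as d_walk() minus the rename handling. A user-space sketch of that traversal pattern, stripped of all kernel locking and using made-up node types, may help follow the control flow:

#include <stdio.h>

struct node {
	const char *name;
	struct node *parent;
	struct node *first_child;
	struct node *next_sibling;
};

/* Visit every node without recursion: descend into children first,
 * then climb back to the parent and continue with the next sibling. */
static void walk(struct node *root, void (*visit)(struct node *))
{
	struct node *cur = root;

	visit(cur);
	while (1) {
		if (cur->first_child) {
			cur = cur->first_child;
		} else {
			while (cur != root && !cur->next_sibling)
				cur = cur->parent;
			if (cur == root)
				break;
			cur = cur->next_sibling;
		}
		visit(cur);
	}
}

static void print_name(struct node *n)
{
	printf("%s\n", n->name);
}

int main(void)
{
	struct node root = { "root", NULL, NULL, NULL };
	struct node a    = { "a",  &root, NULL, NULL };
	struct node b    = { "b",  &root, NULL, NULL };
	struct node a1   = { "a1", &a,    NULL, NULL };

	root.first_child = &a;
	a.next_sibling = &b;
	a.first_child = &a1;

	walk(&root, print_name);	/* prints: root a a1 b */
	return 0;
}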
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index fbc9d816882c..23523b802539 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -1077,21 +1077,18 @@ xfs_attr_node_hasname(
state = xfs_da_state_alloc(args);
if (statep != NULL)
- *statep = NULL;
+ *statep = state;
/*
* Search to see if name exists, and get back a pointer to it.
*/
error = xfs_da3_node_lookup_int(state, &retval);
- if (error) {
- xfs_da_state_free(state);
- return error;
- }
+ if (error)
+ retval = error;
- if (statep != NULL)
- *statep = state;
- else
+ if (!statep)
xfs_da_state_free(state);
+
return retval;
}
@@ -1112,7 +1109,7 @@ xfs_attr_node_addname_find_attr(
*/
retval = xfs_attr_node_hasname(args, &dac->da_state);
if (retval != -ENOATTR && retval != -EEXIST)
- return retval;
+ goto error;
if (retval == -ENOATTR && (args->attr_flags & XATTR_REPLACE))
goto error;
@@ -1337,7 +1334,7 @@ int xfs_attr_node_removename_setup(
error = xfs_attr_node_hasname(args, state);
if (error != -EEXIST)
- return error;
+ goto out;
error = 0;
ASSERT((*state)->path.blk[(*state)->path.active - 1].bp != NULL);
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index e1472004170e..da4af2142a2b 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -289,22 +289,6 @@ xfs_perag_clear_inode_tag(
trace_xfs_perag_clear_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
}
-static inline void
-xfs_inew_wait(
- struct xfs_inode *ip)
-{
- wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
- DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);
-
- do {
- prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
- if (!xfs_iflags_test(ip, XFS_INEW))
- break;
- schedule();
- } while (true);
- finish_wait(wq, &wait.wq_entry);
-}
-
/*
* When we recycle a reclaimable inode, we need to re-initialise the VFS inode
* part of the structure. This is made more complex by the fact we store
@@ -368,18 +352,13 @@ xfs_iget_recycle(
ASSERT(!rwsem_is_locked(&inode->i_rwsem));
error = xfs_reinit_inode(mp, inode);
if (error) {
- bool wake;
-
/*
* Re-initializing the inode failed, and we are in deep
* trouble. Try to re-add it to the reclaim list.
*/
rcu_read_lock();
spin_lock(&ip->i_flags_lock);
- wake = !!__xfs_iflags_test(ip, XFS_INEW);
ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
- if (wake)
- wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
spin_unlock(&ip->i_flags_lock);
rcu_read_unlock();
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 64b9bf334806..6771f357ad2c 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -3122,7 +3122,6 @@ xfs_rename(
* appropriately.
*/
if (flags & RENAME_WHITEOUT) {
- ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
error = xfs_rename_alloc_whiteout(mnt_userns, target_dp, &wip);
if (error)
return error;
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index e635a3d64cba..c447bf04205a 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -231,8 +231,7 @@ static inline bool xfs_inode_has_bigtime(struct xfs_inode *ip)
#define XFS_IRECLAIM (1 << 0) /* started reclaiming this inode */
#define XFS_ISTALE (1 << 1) /* inode has been staled */
#define XFS_IRECLAIMABLE (1 << 2) /* inode can be reclaimed */
-#define __XFS_INEW_BIT 3 /* inode has just been allocated */
-#define XFS_INEW (1 << __XFS_INEW_BIT)
+#define XFS_INEW (1 << 3) /* inode has just been allocated */
#define XFS_IPRESERVE_DM_FIELDS (1 << 4) /* has legacy DMAPI fields set */
#define XFS_ITRUNCATED (1 << 5) /* truncated down so flush-on-close */
#define XFS_IDIRTY_RELEASE (1 << 6) /* dirty release already seen */
@@ -492,7 +491,6 @@ static inline void xfs_finish_inode_setup(struct xfs_inode *ip)
xfs_iflags_clear(ip, XFS_INEW);
barrier();
unlock_new_inode(VFS_I(ip));
- wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
}
static inline void xfs_setup_existing_inode(struct xfs_inode *ip)
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index e21459f9923a..778b57b1f020 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1765,7 +1765,10 @@ static int
xfs_remount_ro(
struct xfs_mount *mp)
{
- int error;
+ struct xfs_icwalk icw = {
+ .icw_flags = XFS_ICWALK_FLAG_SYNC,
+ };
+ int error;
/*
* Cancel background eofb scanning so it cannot race with the final
@@ -1773,8 +1776,13 @@ xfs_remount_ro(
*/
xfs_blockgc_stop(mp);
- /* Get rid of any leftover CoW reservations... */
- error = xfs_blockgc_free_space(mp, NULL);
+ /*
+ * Clear out all remaining COW staging extents and speculative post-EOF
+ * preallocations so that we don't leave inodes requiring inactivation
+ * cleanups during reclaim on a read-only mount. We must process every
+ * cached inode, so this requires a synchronous cache scan.
+ */
+ error = xfs_blockgc_free_space(mp, &icw);
if (error) {
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
return error;
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index 259ee2bda492..b76dfb310ab6 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -1787,5 +1787,6 @@ static void __exit zonefs_exit(void)
MODULE_AUTHOR("Damien Le Moal");
MODULE_DESCRIPTION("Zone file system for zoned block devices");
MODULE_LICENSE("GPL");
+MODULE_ALIAS_FS("zonefs");
module_init(zonefs_init);
module_exit(zonefs_exit);
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index fedc0dfa4877..4f07afacbc23 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -50,13 +50,7 @@ static inline void flush_dcache_page(struct page *page)
{
}
-static inline void flush_dcache_folio(struct folio *folio) { }
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
-#endif
-
-#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
-void flush_dcache_folio(struct folio *folio);
#endif
#ifndef flush_dcache_mmap_lock
diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h
index bda8aa7c2280..5286a53a1875 100644
--- a/include/drm/bridge/dw_mipi_dsi.h
+++ b/include/drm/bridge/dw_mipi_dsi.h
@@ -51,7 +51,9 @@ struct dw_mipi_dsi_plat_data {
unsigned int max_data_lanes;
enum drm_mode_status (*mode_valid)(void *priv_data,
- const struct drm_display_mode *mode);
+ const struct drm_display_mode *mode,
+ unsigned long mode_flags,
+ u32 lanes, u32 format);
const struct dw_mipi_dsi_phy_ops *phy_ops;
const struct dw_mipi_dsi_host_ops *host_ops;
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 604b1d1b2d72..9923c7a6885e 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -6,16 +6,13 @@
#include <linux/mutex.h>
#include <linux/idr.h>
-#include <drm/drm_hashtab.h>
+#include <drm/drm_legacy.h>
#include <drm/drm_mode_config.h>
struct drm_driver;
struct drm_minor;
struct drm_master;
-struct drm_device_dma;
struct drm_vblank_crtc;
-struct drm_sg_mem;
-struct drm_local_map;
struct drm_vma_offset_manager;
struct drm_vram_mm;
struct drm_fb_helper;
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 8b2ed4199284..30359e434c3f 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -1784,6 +1784,13 @@ drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
}
static inline bool
+drm_dp_max_downspread(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x11 ||
+ dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5;
+}
+
+static inline bool
drm_dp_tps4_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
return dpcd[DP_DPCD_REV] >= 0x14 &&
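
drm_dp_max_downspread() above reports whether the sink accepts up to 0.5% down-spreading of the link clock. A hedged sketch of how a driver might consult it after reading the receiver capabilities (the surrounding driver structures and the enable hook are assumptions, not part of this patch):

u8 dpcd[DP_RECEIVER_CAP_SIZE];
ssize_t err;

err = drm_dp_dpcd_read(&my_dp->aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
if (err < 0)
	return err;

if (drm_dp_max_downspread(dpcd))
	my_dp_enable_ssc(my_dp);	/* hypothetical driver hook */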
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index da0c836fe8e1..f6159acb8856 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -291,8 +291,9 @@ struct drm_driver {
/**
* @gem_create_object: constructor for gem objects
*
- * Hook for allocating the GEM object struct, for use by the CMA and
- * SHMEM GEM helpers.
+ * Hook for allocating the GEM object struct, for use by the CMA
+ * and SHMEM GEM helpers. Returns a GEM object on success, or an
+ * ERR_PTR()-encoded error code otherwise.
*/
struct drm_gem_object *(*gem_create_object)(struct drm_device *dev,
size_t size);
diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
index 97e4c3223af3..b30ed5de0a33 100644
--- a/include/drm/drm_format_helper.h
+++ b/include/drm/drm_format_helper.h
@@ -33,6 +33,9 @@ void drm_fb_xrgb8888_to_rgb888(void *dst, unsigned int dst_pitch, const void *sr
void drm_fb_xrgb8888_to_rgb888_toio(void __iomem *dst, unsigned int dst_pitch,
const void *vaddr, const struct drm_framebuffer *fb,
const struct drm_rect *clip);
+void drm_fb_xrgb8888_to_xrgb2101010_toio(void __iomem *dst, unsigned int dst_pitch,
+ const void *vaddr, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip);
void drm_fb_xrgb8888_to_gray8(void *dst, unsigned int dst_pitch, const void *vaddr,
const struct drm_framebuffer *fb, const struct drm_rect *clip);
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index cd13508acbc1..adb507a9dbf0 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -32,42 +32,108 @@ struct drm_gem_cma_object {
#define to_drm_gem_cma_obj(gem_obj) \
container_of(gem_obj, struct drm_gem_cma_object, base)
-#ifndef CONFIG_MMU
-#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
- .get_unmapped_area = drm_gem_cma_get_unmapped_area,
-#else
-#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS
-#endif
+struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
+ size_t size);
+void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj);
+void drm_gem_cma_print_info(const struct drm_gem_cma_object *cma_obj,
+ struct drm_printer *p, unsigned int indent);
+struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_cma_object *cma_obj);
+int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj, struct dma_buf_map *map);
+int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *vma);
+
+extern const struct vm_operations_struct drm_gem_cma_vm_ops;
+
+/*
+ * GEM object functions
+ */
/**
- * DEFINE_DRM_GEM_CMA_FOPS() - macro to generate file operations for CMA drivers
- * @name: name for the generated structure
+ * drm_gem_cma_object_free - GEM object function for drm_gem_cma_free()
+ * @obj: GEM object to free
*
- * This macro autogenerates a suitable &struct file_operations for CMA based
- * drivers, which can be assigned to &drm_driver.fops. Note that this structure
- * cannot be shared between drivers, because it contains a reference to the
- * current module using THIS_MODULE.
+ * This function wraps drm_gem_cma_free(). Drivers that employ the CMA helpers
+ * should use it as their &drm_gem_object_funcs.free handler.
+ */
+static inline void drm_gem_cma_object_free(struct drm_gem_object *obj)
+{
+ struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+
+ drm_gem_cma_free(cma_obj);
+}
+
+/**
+ * drm_gem_cma_object_print_info() - Print &drm_gem_cma_object info for debugfs
+ * @p: DRM printer
+ * @indent: Tab indentation level
+ * @obj: GEM object
*
- * Note that the declaration is already marked as static - if you need a
- * non-static version of this you're probably doing it wrong and will break the
- * THIS_MODULE reference by accident.
+ * This function wraps drm_gem_cma_print_info(). Drivers that employ the CMA helpers
+ * should use this function as their &drm_gem_object_funcs.print_info handler.
*/
-#define DEFINE_DRM_GEM_CMA_FOPS(name) \
- static const struct file_operations name = {\
- .owner = THIS_MODULE,\
- .open = drm_open,\
- .release = drm_release,\
- .unlocked_ioctl = drm_ioctl,\
- .compat_ioctl = drm_compat_ioctl,\
- .poll = drm_poll,\
- .read = drm_read,\
- .llseek = noop_llseek,\
- .mmap = drm_gem_mmap,\
- DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
- }
+static inline void drm_gem_cma_object_print_info(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *obj)
+{
+ const struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+
+ drm_gem_cma_print_info(cma_obj, p, indent);
+}
+
+/**
+ * drm_gem_cma_object_get_sg_table - GEM object function for drm_gem_cma_get_sg_table()
+ * @obj: GEM object
+ *
+ * This function wraps drm_gem_cma_get_sg_table(). Drivers that employ the CMA helpers should
+ * use it as their &drm_gem_object_funcs.get_sg_table handler.
+ *
+ * Returns:
+ * A pointer to the scatter/gather table of pinned pages or NULL on failure.
+ */
+static inline struct sg_table *drm_gem_cma_object_get_sg_table(struct drm_gem_object *obj)
+{
+ struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+
+ return drm_gem_cma_get_sg_table(cma_obj);
+}
+
+/**
+ * drm_gem_cma_object_vmap - GEM object function for drm_gem_cma_vmap()
+ * @obj: GEM object
+ * @map: Returns the kernel virtual address of the CMA GEM object's backing store.
+ *
+ * This function wraps drm_gem_cma_vmap(). Drivers that employ the CMA helpers should
+ * use it as their &drm_gem_object_funcs.vmap handler.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_gem_cma_object_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
+{
+ struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+
+ return drm_gem_cma_vmap(cma_obj, map);
+}
+
+/**
+ * drm_gem_cma_object_mmap - GEM object function for drm_gem_cma_mmap()
+ * @obj: GEM object
+ * @vma: VMA for the area to be mapped
+ *
+ * This function wraps drm_gem_cma_mmap(). Drivers that employ the CMA helpers should
+ * use it as their &drm_gem_object_funcs.mmap handler.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_gem_cma_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+
+ return drm_gem_cma_mmap(cma_obj, vma);
+}
-/* free GEM object */
-void drm_gem_cma_free_object(struct drm_gem_object *gem_obj);
+/*
+ * Driver ops
+ */
/* create memory region for DRM framebuffer */
int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
@@ -79,30 +145,10 @@ int drm_gem_cma_dumb_create(struct drm_file *file_priv,
struct drm_device *drm,
struct drm_mode_create_dumb *args);
-/* allocate physical memory */
-struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
- size_t size);
-
-extern const struct vm_operations_struct drm_gem_cma_vm_ops;
-
-#ifndef CONFIG_MMU
-unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
- unsigned long addr,
- unsigned long len,
- unsigned long pgoff,
- unsigned long flags);
-#endif
-
-void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent,
- const struct drm_gem_object *obj);
-
-struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
-int drm_gem_cma_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
-int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
/**
* DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE - CMA GEM driver operations
@@ -185,4 +231,47 @@ drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *drm,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
+/*
+ * File ops
+ */
+
+#ifndef CONFIG_MMU
+unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags);
+#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
+ .get_unmapped_area = drm_gem_cma_get_unmapped_area,
+#else
+#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS
+#endif
+
+/**
+ * DEFINE_DRM_GEM_CMA_FOPS() - macro to generate file operations for CMA drivers
+ * @name: name for the generated structure
+ *
+ * This macro autogenerates a suitable &struct file_operations for CMA based
+ * drivers, which can be assigned to &drm_driver.fops. Note that this structure
+ * cannot be shared between drivers, because it contains a reference to the
+ * current module using THIS_MODULE.
+ *
+ * Note that the declaration is already marked as static - if you need a
+ * non-static version of this you're probably doing it wrong and will break the
+ * THIS_MODULE reference by accident.
+ */
+#define DEFINE_DRM_GEM_CMA_FOPS(name) \
+ static const struct file_operations name = {\
+ .owner = THIS_MODULE,\
+ .open = drm_open,\
+ .release = drm_release,\
+ .unlocked_ioctl = drm_ioctl,\
+ .compat_ioctl = drm_compat_ioctl,\
+ .poll = drm_poll,\
+ .read = drm_read,\
+ .llseek = noop_llseek,\
+ .mmap = drm_gem_mmap,\
+ DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
+ }
+
#endif /* __DRM_GEM_CMA_HELPER_H__ */
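
With the wrappers above, a CMA-based driver that allocates its own GEM objects can point them at a &drm_gem_object_funcs table built entirely from the helpers, and DEFINE_DRM_GEM_CMA_FOPS() covers the file_operations. A minimal sketch of the wiring (driver names are hypothetical; many drivers instead rely on DRM_GEM_CMA_DRIVER_OPS alone):

static const struct drm_gem_object_funcs my_gem_funcs = {
	.free		= drm_gem_cma_object_free,
	.print_info	= drm_gem_cma_object_print_info,
	.get_sg_table	= drm_gem_cma_object_get_sg_table,
	.vmap		= drm_gem_cma_object_vmap,
	.mmap		= drm_gem_cma_object_mmap,
	.vm_ops		= &drm_gem_cma_vm_ops,
};

DEFINE_DRM_GEM_CMA_FOPS(my_driver_fops);

static const struct drm_driver my_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.fops			= &my_driver_fops,
	DRM_GEM_CMA_DRIVER_OPS,		/* dumb_create, prime import, ... */
};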
diff --git a/include/drm/drm_gem_ttm_helper.h b/include/drm/drm_gem_ttm_helper.h
index c1aa02bd4c89..78040f6cc6f3 100644
--- a/include/drm/drm_gem_ttm_helper.h
+++ b/include/drm/drm_gem_ttm_helper.h
@@ -3,7 +3,7 @@
#ifndef DRM_GEM_TTM_HELPER_H
#define DRM_GEM_TTM_HELPER_H
-#include <linux/kernel.h>
+#include <linux/container_of.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index d3cf06c9af65..b4ce27a72773 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -11,8 +11,8 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
+#include <linux/container_of.h>
#include <linux/dma-buf-map.h>
-#include <linux/kernel.h> /* for container_of() */
struct drm_mode_create_dumb;
struct drm_plane;
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
index 58dc8d8cc907..0fc85418aad8 100644
--- a/include/drm/drm_legacy.h
+++ b/include/drm/drm_legacy.h
@@ -37,7 +37,6 @@
#include <drm/drm.h>
#include <drm/drm_auth.h>
-#include <drm/drm_hashtab.h>
struct drm_device;
struct drm_driver;
@@ -51,6 +50,20 @@ struct pci_driver;
* you're doing it terribly wrong.
*/
+/*
+ * Hash-table Support
+ */
+
+struct drm_hash_item {
+ struct hlist_node head;
+ unsigned long key;
+};
+
+struct drm_open_hash {
+ struct hlist_head *table;
+ u8 order;
+};
+
/**
* DMA buffer.
*/
diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h
index 05e194958265..6fe13cce2670 100644
--- a/include/drm/drm_mipi_dbi.h
+++ b/include/drm/drm_mipi_dbi.h
@@ -194,7 +194,7 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
#ifdef CONFIG_DEBUG_FS
void mipi_dbi_debugfs_init(struct drm_minor *minor);
#else
-#define mipi_dbi_debugfs_init NULL
+static inline void mipi_dbi_debugfs_init(struct drm_minor *minor) {}
#endif
#endif /* __LINUX_MIPI_DBI_H */
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 9b4292f229c6..ac33ba1b18bc 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -39,13 +39,15 @@
*/
#include <linux/bug.h>
#include <linux/rbtree.h>
-#include <linux/kernel.h>
+#include <linux/limits.h>
#include <linux/mm_types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>
#endif
+#include <linux/types.h>
+
#include <drm/drm_print.h>
#ifdef CONFIG_DRM_DEBUG_MM
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 0c1102dc4d88..06759badf99f 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -516,7 +516,7 @@ struct drm_plane_funcs {
* This optional hook is used for the DRM to determine if the given
* format/modifier combination is valid for the plane. This allows the
* DRM to generate the correct format bitmask (which formats apply to
- * which modifier), and to valdiate modifiers at atomic_check time.
+ * which modifier), and to validate modifiers at atomic_check time.
*
* If not present, then any modifier in the plane's modifier
* list is allowed with any of the plane's formats.
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index cd785cfa3123..c17b2df9178b 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -32,7 +32,6 @@
#define _TTM_BO_API_H_
#include <drm/drm_gem.h>
-#include <drm/drm_hashtab.h>
#include <drm/drm_vma_manager.h>
#include <linux/kref.h>
#include <linux/list.h>
diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h
index 76d1b9119a2b..8074d0f6cae5 100644
--- a/include/drm/ttm/ttm_placement.h
+++ b/include/drm/ttm/ttm_placement.h
@@ -35,6 +35,17 @@
/*
* Memory regions for data placement.
+ *
+ * Buffers placed in TTM_PL_SYSTEM are considered under TTM's control and can
+ * be swapped out whenever TTM thinks it is a good idea.
+ * In cases where drivers would like to use TTM_PL_SYSTEM as a valid
+ * placement, they need to be able to handle the issues that arise from this
+ * behaviour themselves.
+ *
+ * For BOs which reside in system memory but for which the accelerator
+ * requires direct access (i.e. their usage needs to be synchronized
+ * between the CPU and accelerator via fences), a new, driver-private
+ * placement that can handle such scenarios is a good idea.
*/
#define TTM_PL_SYSTEM 0
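
As the new comment explains, TTM_PL_SYSTEM buffers may be swapped out behind the driver's back, so drivers needing fence-synchronized CPU/accelerator access to system memory are expected to declare their own placement. A hedged sketch of what such a driver-private domain might look like (all names here are illustrative only):

/* Hypothetical driver-private domain: system memory that the accelerator
 * accesses directly and whose use is synchronized via fences. */
#define MYDRV_PL_GART	(TTM_PL_PRIV + 0)

static const struct ttm_place mydrv_gart_place = {
	.fpfn		= 0,
	.lpfn		= 0,
	.mem_type	= MYDRV_PL_GART,
	.flags		= 0,
};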
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 668d007f0917..b28f8790192a 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1182,7 +1182,6 @@ int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname,
struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
struct fwnode_handle *child);
-struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode);
struct acpi_probe_entry;
typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *,
@@ -1288,12 +1287,6 @@ acpi_get_next_subnode(const struct fwnode_handle *fwnode,
}
static inline struct fwnode_handle *
-acpi_node_get_parent(const struct fwnode_handle *fwnode)
-{
- return NULL;
-}
-
-static inline struct fwnode_handle *
acpi_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_handle *prev)
{
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index e7a163a3146b..755f38e893be 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -732,6 +732,7 @@ int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
struct bpf_trampoline *bpf_trampoline_get(u64 key,
struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
+int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs);
#define BPF_DISPATCHER_INIT(_name) { \
.mutex = __MUTEX_INITIALIZER(_name.mutex), \
.func = &_name##_func, \
@@ -1352,28 +1353,16 @@ extern struct mutex bpf_stats_enabled_mutex;
* kprobes, tracepoints) to prevent deadlocks on map operations as any of
* these events can happen inside a region which holds a map bucket lock
* and can deadlock on it.
- *
- * Use the preemption safe inc/dec variants on RT because migrate disable
- * is preemptible on RT and preemption in the middle of the RMW operation
- * might lead to inconsistent state. Use the raw variants for non RT
- * kernels as migrate_disable() maps to preempt_disable() so the slightly
- * more expensive save operation can be avoided.
*/
static inline void bpf_disable_instrumentation(void)
{
migrate_disable();
- if (IS_ENABLED(CONFIG_PREEMPT_RT))
- this_cpu_inc(bpf_prog_active);
- else
- __this_cpu_inc(bpf_prog_active);
+ this_cpu_inc(bpf_prog_active);
}
static inline void bpf_enable_instrumentation(void)
{
- if (IS_ENABLED(CONFIG_PREEMPT_RT))
- this_cpu_dec(bpf_prog_active);
- else
- __this_cpu_dec(bpf_prog_active);
+ this_cpu_dec(bpf_prog_active);
migrate_enable();
}
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 203eef993d76..0e1b6281fd8f 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -245,7 +245,10 @@ struct kfunc_btf_id_set {
struct module *owner;
};
-struct kfunc_btf_id_list;
+struct kfunc_btf_id_list {
+ struct list_head list;
+ struct mutex mutex;
+};
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
@@ -254,6 +257,9 @@ void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
struct kfunc_btf_id_set *s);
bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id,
struct module *owner);
+
+extern struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list;
+extern struct kfunc_btf_id_list prog_test_kfunc_list;
#else
static inline void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
struct kfunc_btf_id_set *s)
@@ -268,13 +274,13 @@ static inline bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist,
{
return false;
}
+
+static struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list __maybe_unused;
+static struct kfunc_btf_id_list prog_test_kfunc_list __maybe_unused;
#endif
#define DEFINE_KFUNC_BTF_ID_SET(set, name) \
struct kfunc_btf_id_set name = { LIST_HEAD_INIT(name.list), (set), \
THIS_MODULE }
-extern struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list;
-extern struct kfunc_btf_id_list prog_test_kfunc_list;
-
#endif
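
Exposing struct kfunc_btf_id_list here gives the built-in lists storage even without CONFIG_DEBUG_INFO_BTF_MODULES. A module exporting kfuncs registers its set against one of those lists roughly as follows, based on the DEFINE_KFUNC_BTF_ID_SET() macro shown above; the module and function names are made up:

BTF_SET_START(my_module_kfunc_ids)
BTF_ID(func, my_module_kfunc)
BTF_SET_END(my_module_kfunc_ids)

DEFINE_KFUNC_BTF_ID_SET(&my_module_kfunc_ids, my_module_kfunc_btf_set);

static int __init my_module_init(void)
{
	/* Make the kfuncs callable from BPF test programs. */
	register_kfunc_btf_id_set(&prog_test_kfunc_list,
				  &my_module_kfunc_btf_set);
	return 0;
}

static void __exit my_module_exit(void)
{
	unregister_kfunc_btf_id_set(&prog_test_kfunc_list,
				    &my_module_kfunc_btf_set);
}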
diff --git a/include/linux/cacheflush.h b/include/linux/cacheflush.h
new file mode 100644
index 000000000000..fef8b607f97e
--- /dev/null
+++ b/include/linux/cacheflush.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CACHEFLUSH_H
+#define _LINUX_CACHEFLUSH_H
+
+#include <asm/cacheflush.h>
+
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
+void flush_dcache_folio(struct folio *folio);
+#endif
+#else
+static inline void flush_dcache_folio(struct folio *folio)
+{
+}
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO 0
+#endif /* ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE */
+
+#endif /* _LINUX_CACHEFLUSH_H */
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index 2f909ed084c6..4ff37cb763ae 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -3,7 +3,6 @@
#define _LINUX_CACHEINFO_H
#include <linux/bitops.h>
-#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 3d5af56337bd..429dcebe2b99 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -121,7 +121,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
asm volatile(__stringify_label(c) ":\n\t" \
".pushsection .discard.reachable\n\t" \
".long " __stringify_label(c) "b - .\n\t" \
- ".popsection\n\t"); \
+ ".popsection\n\t" : : "i" (c)); \
})
#define annotate_reachable() __annotate_reachable(__COUNTER__)
@@ -129,7 +129,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
asm volatile(__stringify_label(c) ":\n\t" \
".pushsection .discard.unreachable\n\t" \
".long " __stringify_label(c) "b - .\n\t" \
- ".popsection\n\t"); \
+ ".popsection\n\t" : : "i" (c)); \
})
#define annotate_unreachable() __annotate_unreachable(__COUNTER__)
diff --git a/include/linux/delay.h b/include/linux/delay.h
index 8eacf67eb212..039e7e0c7378 100644
--- a/include/linux/delay.h
+++ b/include/linux/delay.h
@@ -20,6 +20,7 @@
*/
#include <linux/math.h>
+#include <linux/sched.h>
extern unsigned long loops_per_jiffy;
@@ -58,7 +59,18 @@ void calibrate_delay(void);
void __attribute__((weak)) calibration_delay_done(void);
void msleep(unsigned int msecs);
unsigned long msleep_interruptible(unsigned int msecs);
-void usleep_range(unsigned long min, unsigned long max);
+void usleep_range_state(unsigned long min, unsigned long max,
+ unsigned int state);
+
+static inline void usleep_range(unsigned long min, unsigned long max)
+{
+ usleep_range_state(min, max, TASK_UNINTERRUPTIBLE);
+}
+
+static inline void usleep_idle_range(unsigned long min, unsigned long max)
+{
+ usleep_range_state(min, max, TASK_IDLE);
+}
static inline void ssleep(unsigned int seconds)
{
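
usleep_range() keeps the old TASK_UNINTERRUPTIBLE behaviour, while the new usleep_idle_range() sleeps in TASK_IDLE so the wait does not inflate the load average. A short sketch of the intended use in a polling loop; the device-ready check is a placeholder:

/* Poll for a status bit without contributing to load average. */
while (!my_device_ready(dev)) {
	/* Sketch only: 1-2 ms window, sleeping in TASK_IDLE. */
	usleep_idle_range(1000, 2000);
}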
diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h
index a498ebcf4993..15e7c5e15d62 100644
--- a/include/linux/device/driver.h
+++ b/include/linux/device/driver.h
@@ -18,6 +18,7 @@
#include <linux/klist.h>
#include <linux/pm.h>
#include <linux/device/bus.h>
+#include <linux/module.h>
/**
* enum probe_type - device driver probe type to try
diff --git a/include/linux/efi.h b/include/linux/efi.h
index dbd39b20e034..ef8dbc0a1522 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1283,4 +1283,10 @@ static inline struct efi_mokvar_table_entry *efi_mokvar_entry_find(
}
#endif
+#ifdef CONFIG_SYSFB
+extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
+#else
+static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt) { }
+#endif
+
#endif /* _LINUX_EFI_H */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 24b7ed2677af..7f1e88e3e2b5 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -6,6 +6,7 @@
#define __LINUX_FILTER_H__
#include <linux/atomic.h>
+#include <linux/bpf.h>
#include <linux/refcount.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
@@ -26,7 +27,6 @@
#include <asm/byteorder.h>
#include <uapi/linux/filter.h>
-#include <uapi/linux/bpf.h>
struct sk_buff;
struct sock;
@@ -640,9 +640,6 @@ static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void
* This uses migrate_disable/enable() explicitly to document that the
* invocation of a BPF program does not require reentrancy protection
* against a BPF program which is invoked from a preempting task.
- *
- * For non RT enabled kernels migrate_disable/enable() maps to
- * preempt_disable/enable(), i.e. it disables also preemption.
*/
static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
const void *ctx)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 1cb616fc1105..bbf812ce89a8 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2518,7 +2518,6 @@ struct file_system_type {
#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */
#define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */
#define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */
-#define FS_THP_SUPPORT 8192 /* Remove once all fs converted */
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
int (*init_fs_context)(struct fs_context *);
const struct fs_parameter_spec *parameters;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index b976c4177299..8fcc38467af6 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -624,7 +624,7 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask);
void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __alloc_size(1);
void free_pages_exact(void *virt, size_t size);
-__meminit void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __alloc_size(1);
+__meminit void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2);
#define __get_free_page(gfp_mask) \
__get_free_pages((gfp_mask), 0)
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 9e067f937dbc..f453be385bd4 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -840,6 +840,11 @@ static inline bool hid_is_using_ll_driver(struct hid_device *hdev,
return hdev->ll_driver == driver;
}
+static inline bool hid_is_usb(struct hid_device *hdev)
+{
+ return hid_is_using_ll_driver(hdev, &usb_hid_driver);
+}
+
#define PM_HINT_FULLON 1<<5
#define PM_HINT_NORMAL 1<<1
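
hid_is_usb() gives HID drivers a direct way to reject devices that are not really backed by the USB transport before they dereference USB-specific parents. A hedged probe-time sketch (the driver itself is hypothetical):

static int my_hid_probe(struct hid_device *hdev,
			const struct hid_device_id *id)
{
	struct usb_interface *intf;

	/* Refuse anything that is not a genuine USB-HID device. */
	if (!hid_is_usb(hdev))
		return -EINVAL;

	intf = to_usb_interface(hdev->dev.parent);
	/* ... rest of the probe, omitted ... */
	return 0;
}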
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 25aff0f2ed0b..39bb9b47fa9c 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -5,12 +5,11 @@
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
+#include <linux/cacheflush.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
-#include <asm/cacheflush.h>
-
#include "highmem-internal.h"
/**
@@ -231,10 +230,10 @@ static inline void tag_clear_highpage(struct page *page)
* If we pass in a base or tail page, we can zero up to PAGE_SIZE.
* If we pass in a head page, we can zero up to the size of the compound page.
*/
-#if defined(CONFIG_HIGHMEM) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
+#ifdef CONFIG_HIGHMEM
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
unsigned start2, unsigned end2);
-#else /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */
+#else
static inline void zero_user_segments(struct page *page,
unsigned start1, unsigned end1,
unsigned start2, unsigned end2)
@@ -254,7 +253,7 @@ static inline void zero_user_segments(struct page *page,
for (i = 0; i < compound_nr(page); i++)
flush_dcache_page(page + i);
}
-#endif /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */
+#endif
static inline void zero_user_segment(struct page *page,
unsigned start, unsigned end)
@@ -364,4 +363,42 @@ static inline void memzero_page(struct page *page, size_t offset, size_t len)
kunmap_local(addr);
}
+/**
+ * folio_zero_segments() - Zero two byte ranges in a folio.
+ * @folio: The folio to write to.
+ * @start1: The first byte to zero.
+ * @xend1: One more than the last byte in the first range.
+ * @start2: The first byte to zero in the second range.
+ * @xend2: One more than the last byte in the second range.
+ */
+static inline void folio_zero_segments(struct folio *folio,
+ size_t start1, size_t xend1, size_t start2, size_t xend2)
+{
+ zero_user_segments(&folio->page, start1, xend1, start2, xend2);
+}
+
+/**
+ * folio_zero_segment() - Zero a byte range in a folio.
+ * @folio: The folio to write to.
+ * @start: The first byte to zero.
+ * @xend: One more than the last byte to zero.
+ */
+static inline void folio_zero_segment(struct folio *folio,
+ size_t start, size_t xend)
+{
+ zero_user_segments(&folio->page, start, xend, 0, 0);
+}
+
+/**
+ * folio_zero_range() - Zero a byte range in a folio.
+ * @folio: The folio to write to.
+ * @start: The first byte to zero.
+ * @length: The number of bytes to zero.
+ */
+static inline void folio_zero_range(struct folio *folio,
+ size_t start, size_t length)
+{
+ zero_user_segments(&folio->page, start, start + length, 0, 0);
+}
+
#endif /* _LINUX_HIGHMEM_H */
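
The folio variants mirror zero_user_segments() but take byte offsets relative to the whole folio rather than a single page. For example, a filesystem zeroing the parts of a folio outside a written range might do something like this; the offsets are illustrative only:

size_t from = 0;			/* assumed write start */
size_t to = 3000;			/* assumed write end */

/* Zero everything in the folio outside the written range. */
folio_zero_segments(folio, 0, from, to, folio_size(folio));

/* Or just clear the tail beyond the write. */
folio_zero_segment(folio, to, folio_size(folio));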
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index 7bccf589aba7..e8dc5bc41f79 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -7,6 +7,8 @@
#define __LINUX_HOST1X_H
#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/spinlock.h>
#include <linux/types.h>
enum host1x_class {
@@ -15,6 +17,8 @@ enum host1x_class {
HOST1X_CLASS_GR2D_SB = 0x52,
HOST1X_CLASS_VIC = 0x5D,
HOST1X_CLASS_GR3D = 0x60,
+ HOST1X_CLASS_NVDEC = 0xF0,
+ HOST1X_CLASS_NVDEC1 = 0xF5,
};
struct host1x;
@@ -24,6 +28,28 @@ struct iommu_group;
u64 host1x_get_dma_mask(struct host1x *host1x);
/**
+ * struct host1x_bo_cache - host1x buffer object cache
+ * @mappings: list of mappings
+ * @lock: synchronizes accesses to the list of mappings
+ */
+struct host1x_bo_cache {
+ struct list_head mappings;
+ struct mutex lock;
+};
+
+static inline void host1x_bo_cache_init(struct host1x_bo_cache *cache)
+{
+ INIT_LIST_HEAD(&cache->mappings);
+ mutex_init(&cache->lock);
+}
+
+static inline void host1x_bo_cache_destroy(struct host1x_bo_cache *cache)
+{
+ /* XXX warn if not empty? */
+ mutex_destroy(&cache->lock);
+}
+
+/**
* struct host1x_client_ops - host1x client operations
* @early_init: host1x client early initialization code
* @init: host1x client initialization code
@@ -73,6 +99,8 @@ struct host1x_client {
struct host1x_client *parent;
unsigned int usecount;
struct mutex lock;
+
+ struct host1x_bo_cache cache;
};
/*
@@ -82,23 +110,48 @@ struct host1x_client {
struct host1x_bo;
struct sg_table;
+struct host1x_bo_mapping {
+ struct kref ref;
+ struct dma_buf_attachment *attach;
+ enum dma_data_direction direction;
+ struct list_head list;
+ struct host1x_bo *bo;
+ struct sg_table *sgt;
+ unsigned int chunks;
+ struct device *dev;
+ dma_addr_t phys;
+ size_t size;
+
+ struct host1x_bo_cache *cache;
+ struct list_head entry;
+};
+
+static inline struct host1x_bo_mapping *to_host1x_bo_mapping(struct kref *ref)
+{
+ return container_of(ref, struct host1x_bo_mapping, ref);
+}
+
struct host1x_bo_ops {
struct host1x_bo *(*get)(struct host1x_bo *bo);
void (*put)(struct host1x_bo *bo);
- struct sg_table *(*pin)(struct device *dev, struct host1x_bo *bo,
- dma_addr_t *phys);
- void (*unpin)(struct device *dev, struct sg_table *sgt);
+ struct host1x_bo_mapping *(*pin)(struct device *dev, struct host1x_bo *bo,
+ enum dma_data_direction dir);
+ void (*unpin)(struct host1x_bo_mapping *map);
void *(*mmap)(struct host1x_bo *bo);
void (*munmap)(struct host1x_bo *bo, void *addr);
};
struct host1x_bo {
const struct host1x_bo_ops *ops;
+ struct list_head mappings;
+ spinlock_t lock;
};
static inline void host1x_bo_init(struct host1x_bo *bo,
const struct host1x_bo_ops *ops)
{
+ INIT_LIST_HEAD(&bo->mappings);
+ spin_lock_init(&bo->lock);
bo->ops = ops;
}
@@ -112,18 +165,10 @@ static inline void host1x_bo_put(struct host1x_bo *bo)
bo->ops->put(bo);
}
-static inline struct sg_table *host1x_bo_pin(struct device *dev,
- struct host1x_bo *bo,
- dma_addr_t *phys)
-{
- return bo->ops->pin(dev, bo, phys);
-}
-
-static inline void host1x_bo_unpin(struct device *dev, struct host1x_bo *bo,
- struct sg_table *sgt)
-{
- bo->ops->unpin(dev, sgt);
-}
+struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
+ enum dma_data_direction dir,
+ struct host1x_bo_cache *cache);
+void host1x_bo_unpin(struct host1x_bo_mapping *map);
static inline void *host1x_bo_mmap(struct host1x_bo *bo)
{
@@ -181,6 +226,7 @@ struct host1x_job;
struct host1x_channel *host1x_channel_request(struct host1x_client *client);
struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
+void host1x_channel_stop(struct host1x_channel *channel);
void host1x_channel_put(struct host1x_channel *channel);
int host1x_job_submit(struct host1x_job *job);
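
host1x_bo_pin() now returns a struct host1x_bo_mapping instead of a bare sg_table, and that mapping (optionally cached per client) is what gets unpinned. A hedged sketch of the new calling convention from a host1x client, with error handling trimmed and the engine-programming helper invented for illustration:

struct host1x_bo_mapping *map;

map = host1x_bo_pin(client->dev, bo, DMA_TO_DEVICE, &client->cache);
if (IS_ERR(map))
	return PTR_ERR(map);

/* Program the engine with the DMA address of the mapping. */
my_engine_set_iova(engine, map->phys);

/* Drop the reference; cached mappings may outlive this call. */
host1x_bo_unpin(map);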
diff --git a/include/linux/instrumentation.h b/include/linux/instrumentation.h
index fa2cd8c63dcc..24359b4a9605 100644
--- a/include/linux/instrumentation.h
+++ b/include/linux/instrumentation.h
@@ -11,7 +11,7 @@
asm volatile(__stringify(c) ": nop\n\t" \
".pushsection .discard.instr_begin\n\t" \
".long " __stringify(c) "b - .\n\t" \
- ".popsection\n\t"); \
+ ".popsection\n\t" : : "i" (c)); \
})
#define instrumentation_begin() __instrumentation_begin(__COUNTER__)
@@ -50,7 +50,7 @@
asm volatile(__stringify(c) ": nop\n\t" \
".pushsection .discard.instr_end\n\t" \
".long " __stringify(c) "b - .\n\t" \
- ".popsection\n\t"); \
+ ".popsection\n\t" : : "i" (c)); \
})
#define instrumentation_end() __instrumentation_end(__COUNTER__)
#else
diff --git a/include/linux/intel-ish-client-if.h b/include/linux/intel-ish-client-if.h
index aee8ff4739b1..f45f13304add 100644
--- a/include/linux/intel-ish-client-if.h
+++ b/include/linux/intel-ish-client-if.h
@@ -9,7 +9,7 @@
#define _INTEL_ISH_CLIENT_IF_H_
#include <linux/device.h>
-#include <linux/uuid.h>
+#include <linux/mod_devicetable.h>
struct ishtp_cl_device;
struct ishtp_device;
@@ -40,7 +40,7 @@ enum cl_state {
struct ishtp_cl_driver {
struct device_driver driver;
const char *name;
- const guid_t *guid;
+ const struct ishtp_device_id *id;
int (*probe)(struct ishtp_cl_device *dev);
void (*remove)(struct ishtp_cl_device *dev);
int (*reset)(struct ishtp_cl_device *dev);
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index e974caf39d3e..8c8f7a4d93af 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -153,6 +153,8 @@ struct kretprobe {
struct kretprobe_holder *rph;
};
+#define KRETPROBE_MAX_DATA_SIZE 4096
+
struct kretprobe_instance {
union {
struct freelist_node freelist;
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 8adcf1fa8096..9dc7cb239d21 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -405,8 +405,8 @@ phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
phys_addr_t end, int nid, bool exact_nid);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
-static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
- phys_addr_t align)
+static __always_inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
+ phys_addr_t align)
{
return memblock_phys_alloc_range(size, align, 0,
MEMBLOCK_ALLOC_ACCESSIBLE);
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index 723985879035..a5cc4cdf9cc8 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -664,6 +664,19 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);
int mhi_pm_resume(struct mhi_controller *mhi_cntrl);
/**
+ * mhi_pm_resume_force - Force resume MHI from suspended state
+ * @mhi_cntrl: MHI controller
+ *
+ * Resume the device irrespective of its MHI state. As per the MHI spec,
+ * devices have to be in the M3 state during resume. But some devices are
+ * found in an MHI state other than M3, yet continue to work fine if resumed
+ * anyway. This API is intended to be used for such devices.
+ *
+ * Return: 0 if the resume succeeds, a negative error code otherwise
+ */
+int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl);
+
+/**
* mhi_download_rddm_image - Download ramdump image from device for
* debugging purpose.
* @mhi_cntrl: MHI controller
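
mhi_pm_resume_force() exists for devices that report an MHI state other than M3 after suspend but still resume correctly. A controller driver would typically fall back to it only when the normal resume path fails; the following is a sketch of that pattern, not a prescription:

int ret;

ret = mhi_pm_resume(mhi_cntrl);
if (ret) {
	/* Device is not in M3; force the resume as a last resort. */
	ret = mhi_pm_resume_force(mhi_cntrl);
}
return ret;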
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 3636df90899a..fbaab440a484 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -9698,7 +9698,10 @@ struct mlx5_ifc_mcam_access_reg_bits {
u8 regs_84_to_68[0x11];
u8 tracer_registers[0x4];
- u8 regs_63_to_32[0x20];
+ u8 regs_63_to_46[0x12];
+ u8 mrtc[0x1];
+ u8 regs_44_to_32[0xd];
+
u8 regs_31_to_0[0x20];
};
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 58e744b78c2c..936dc0b6c226 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -277,6 +277,7 @@ enum vmscan_throttle_state {
VMSCAN_THROTTLE_WRITEBACK,
VMSCAN_THROTTLE_ISOLATED,
VMSCAN_THROTTLE_NOPROGRESS,
+ VMSCAN_THROTTLE_CONGESTED,
NR_VMSCAN_THROTTLE,
};
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index ae2e75d15b21..4bb71979a8fd 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -895,4 +895,18 @@ struct dfl_device_id {
kernel_ulong_t driver_data;
};
+/* ISHTP (Integrated Sensor Hub Transport Protocol) */
+
+#define ISHTP_MODULE_PREFIX "ishtp:"
+
+/**
+ * struct ishtp_device_id - ISHTP device identifier
+ * @guid: GUID of the device.
+ * @driver_data: pointer to driver specific data
+ */
+struct ishtp_device_id {
+ guid_t guid;
+ kernel_ulong_t driver_data;
+};
+
#endif /* LINUX_MOD_DEVICETABLE_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3ec42495a43a..6aadcc0ecb5b 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1937,7 +1937,7 @@ enum netdev_ml_priv_type {
* @udp_tunnel_nic: UDP tunnel offload state
* @xdp_state: stores info on attached XDP BPF programs
*
- * @nested_level: Used as as a parameter of spin_lock_nested() of
+ * @nested_level: Used as a parameter of spin_lock_nested() of
* dev->addr_list_lock.
* @unlink_list: As netif_addr_lock() can be called recursively,
* keep a list of interfaces to be deleted.
@@ -4404,7 +4404,8 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
spin_lock(&txq->_xmit_lock);
- txq->xmit_lock_owner = cpu;
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, cpu);
}
static inline bool __netif_tx_acquire(struct netdev_queue *txq)
@@ -4421,26 +4422,32 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
spin_lock_bh(&txq->_xmit_lock);
- txq->xmit_lock_owner = smp_processor_id();
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
}
static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
bool ok = spin_trylock(&txq->_xmit_lock);
- if (likely(ok))
- txq->xmit_lock_owner = smp_processor_id();
+
+ if (likely(ok)) {
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+ }
return ok;
}
static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
- txq->xmit_lock_owner = -1;
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, -1);
spin_unlock(&txq->_xmit_lock);
}
static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
- txq->xmit_lock_owner = -1;
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, -1);
spin_unlock_bh(&txq->_xmit_lock);
}
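
The WRITE_ONCE() stores above pair with a lockless READ_ONCE() of xmit_lock_owner on the transmit path, so the "am I recursing on my own queue?" check never sees a torn or compiler-reloaded value. A hedged sketch of the reader side, modelled on the check in __dev_queue_xmit():

/* Lockless recursion check before taking the xmit lock (sketch). */
if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
	HARD_TX_LOCK(dev, txq, cpu);
	/* ... transmit ... */
	HARD_TX_UNLOCK(dev, txq);
} else {
	/* Dead loop on the same queue; drop instead of deadlocking. */
	kfree_skb(skb);
}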
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 52ec4b5e5615..b5f14d581113 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -686,13 +686,13 @@ static inline bool test_set_page_writeback(struct page *page)
__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)
-/* Whether there are one or multiple pages in a folio */
-static inline bool folio_test_single(struct folio *folio)
-{
- return !folio_test_head(folio);
-}
-
-static inline bool folio_test_multi(struct folio *folio)
+/**
+ * folio_test_large() - Does this folio contain more than one page?
+ * @folio: The folio to test.
+ *
+ * Return: True if the folio is larger than one page.
+ */
+static inline bool folio_test_large(struct folio *folio)
{
return folio_test_head(folio);
}
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 1a0c646eb6ff..d150a9082b31 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -84,7 +84,7 @@ enum mapping_flags {
AS_EXITING = 4, /* final truncate in progress */
/* writeback related tags are not used */
AS_NO_WRITEBACK_TAGS = 5,
- AS_THP_SUPPORT = 6, /* THPs supported */
+ AS_LARGE_FOLIO_SUPPORT = 6,
};
/**
@@ -176,9 +176,25 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
m->gfp_mask = mask;
}
-static inline bool mapping_thp_support(struct address_space *mapping)
+/**
+ * mapping_set_large_folios() - Indicate the file supports large folios.
+ * @mapping: The file.
+ *
+ * The filesystem should call this function in its inode constructor to
+ * indicate that the VFS can use large folios to cache the contents of
+ * the file.
+ *
+ * Context: This should not be called while the inode is active as it
+ * is non-atomic.
+ */
+static inline void mapping_set_large_folios(struct address_space *mapping)
+{
+ __set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
+}
+
+static inline bool mapping_large_folio_support(struct address_space *mapping)
{
- return test_bit(AS_THP_SUPPORT, &mapping->flags);
+ return test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
}
static inline int filemap_nr_thps(struct address_space *mapping)
@@ -193,7 +209,7 @@ static inline int filemap_nr_thps(struct address_space *mapping)
static inline void filemap_nr_thps_inc(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
- if (!mapping_thp_support(mapping))
+ if (!mapping_large_folio_support(mapping))
atomic_inc(&mapping->nr_thps);
#else
WARN_ON_ONCE(1);
@@ -203,7 +219,7 @@ static inline void filemap_nr_thps_inc(struct address_space *mapping)
static inline void filemap_nr_thps_dec(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
- if (!mapping_thp_support(mapping))
+ if (!mapping_large_folio_support(mapping))
atomic_dec(&mapping->nr_thps);
#else
WARN_ON_ONCE(1);
@@ -269,7 +285,6 @@ static inline struct inode *folio_inode(struct folio *folio)
static inline bool page_cache_add_speculative(struct page *page, int count)
{
- VM_BUG_ON_PAGE(PageTail(page), page);
return folio_ref_try_add_rcu((struct folio *)page, count);
}
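
mapping_set_large_folios() replaces the old AS_THP_SUPPORT flag and is meant to be called once, while the inode is being set up. A hedged sketch of where a filesystem would place the call; the function and ops-table names are illustrative:

static void my_fs_set_inode_ops(struct inode *inode)
{
	inode->i_op = &my_fs_inode_ops;		/* assumed ops tables */
	inode->i_fop = &my_fs_file_ops;

	/* Opt this file's page cache into large folios. */
	mapping_set_large_folios(inode->i_mapping);
}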
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index b31d3f3312ce..d73a1c08c3e3 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -51,9 +51,9 @@
#define _LINUX_PERCPU_REFCOUNT_H
#include <linux/atomic.h>
-#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
+#include <linux/types.h>
#include <linux/gfp.h>
struct percpu_ref;
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 96e43fbb2dd8..cbf03a5f9cf5 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -538,11 +538,12 @@ struct macsec_ops;
 * @mac_managed_pm: Set true if MAC driver takes care of suspending/resuming PHY
* @state: State of the PHY for management purposes
* @dev_flags: Device-specific flags used by the PHY driver.
- * Bits [15:0] are free to use by the PHY driver to communicate
- * driver specific behavior.
- * Bits [23:16] are currently reserved for future use.
- * Bits [31:24] are reserved for defining generic
- * PHY driver behavior.
+ *
+ * - Bits [15:0] are free to use by the PHY driver to communicate
+ * driver specific behavior.
+ * - Bits [23:16] are currently reserved for future use.
+ * - Bits [31:24] are reserved for defining generic
+ * PHY driver behavior.
* @irq: IRQ number of the PHY's interrupt (-1 if none)
* @phy_timer: The timer for handling the state machine
* @phylink: Pointer to phylink instance for this PHY
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 222da43b7096..eddd66d426ca 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -129,7 +129,7 @@ static inline bool pm_runtime_suspended(struct device *dev)
* pm_runtime_active - Check whether or not a device is runtime-active.
* @dev: Target device.
*
- * Return %true if runtime PM is enabled for @dev and its runtime PM status is
+ * Return %true if runtime PM is disabled for @dev or its runtime PM status is
* %RPM_ACTIVE, or %false otherwise.
*
* Note that the return value of this function can only be trusted if it is
diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h
index ae04968a3a47..9afd34a2d36c 100644
--- a/include/linux/ptp_classify.h
+++ b/include/linux/ptp_classify.h
@@ -37,6 +37,7 @@
#define PTP_MSGTYPE_PDELAY_RESP 0x3
#define PTP_EV_PORT 319
+#define PTP_GEN_PORT 320
#define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */
#define OFF_PTP_SOURCE_UUID 22 /* PTPv1 only */
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index bd7a73db2e66..54cf566616ae 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -499,7 +499,8 @@ struct regulator_irq_data {
* best to shut-down regulator(s) or reboot the SOC if error
* handling is repeatedly failing. If fatal_cnt is given the IRQ
* handling is aborted if it fails for fatal_cnt times and die()
- * callback (if populated) or BUG() is called to try to prevent
+ * callback (if populated) is called. If die() is not populated, a
+ * system poweroff is attempted in order to prevent any
* further damage.
* @reread_ms: The time which is waited before attempting to re-read status
* at the worker if IC reading fails. Immediate re-read is done
@@ -516,11 +517,12 @@ struct regulator_irq_data {
* @data: Driver private data pointer which will be passed as such to
* the renable, map_event and die callbacks in regulator_irq_data.
* @die: Protection callback. If IC status reading or recovery actions
- * fail fatal_cnt times this callback or BUG() is called. This
- * callback should implement a final protection attempt like
- * disabling the regulator. If protection succeeded this may
- * return 0. If anything else is returned the core assumes final
- * protection failed and calls BUG() as a last resort.
+ * fail fatal_cnt times, this callback is called or the system is
+ * powered off. This callback should implement a final protection
+ * attempt like disabling the regulator. If protection succeeds,
+ * die() may return 0. If anything else is returned, the core
+ * assumes final protection failed and attempts to perform a
+ * poweroff as a last resort.
* @map_event: Driver callback to map IRQ status into regulator devices with
* events / errors. NOTE: callback MUST initialize both the
* errors and notifs for all rdevs which it signals having
diff --git a/include/linux/sched/cputime.h b/include/linux/sched/cputime.h
index 6c9f19a33865..ce3c58286062 100644
--- a/include/linux/sched/cputime.h
+++ b/include/linux/sched/cputime.h
@@ -18,15 +18,16 @@
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-extern void task_cputime(struct task_struct *t,
+extern bool task_cputime(struct task_struct *t,
u64 *utime, u64 *stime);
extern u64 task_gtime(struct task_struct *t);
#else
-static inline void task_cputime(struct task_struct *t,
+static inline bool task_cputime(struct task_struct *t,
u64 *utime, u64 *stime)
{
*utime = t->utime;
*stime = t->stime;
+ return false;
}
static inline u64 task_gtime(struct task_struct *t)
diff --git a/include/linux/siphash.h b/include/linux/siphash.h
index bf21591a9e5e..0cda61855d90 100644
--- a/include/linux/siphash.h
+++ b/include/linux/siphash.h
@@ -27,9 +27,7 @@ static inline bool siphash_key_is_zero(const siphash_key_t *key)
}
u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
-#endif
u64 siphash_1u64(const u64 a, const siphash_key_t *key);
u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
@@ -82,10 +80,9 @@ static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
static inline u64 siphash(const void *data, size_t len,
const siphash_key_t *key)
{
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+ !IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
return __siphash_unaligned(data, len, key);
-#endif
return ___siphash_aligned(data, len, key);
}
@@ -96,10 +93,8 @@ typedef struct {
u32 __hsiphash_aligned(const void *data, size_t len,
const hsiphash_key_t *key);
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_unaligned(const void *data, size_t len,
const hsiphash_key_t *key);
-#endif
u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
@@ -135,10 +130,9 @@ static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
static inline u32 hsiphash(const void *data, size_t len,
const hsiphash_key_t *key)
{
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+ !IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
return __hsiphash_unaligned(data, len, key);
-#endif
return ___hsiphash_aligned(data, len, key);
}
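
With the #ifdef pairs replaced by an IS_ENABLED() check, callers no longer need
to know whether CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set; the dispatch now
happens inside siphash() itself. A minimal caller sketch (not part of this
patch; the key value is made up):

#include <linux/siphash.h>

/* Hash an arbitrary, possibly unaligned buffer. The aligned/unaligned
 * choice is resolved inside siphash() above, so this caller looks the
 * same on all architectures.
 */
static u64 example_hash_buf(const void *buf, size_t len)
{
	static const siphash_key_t example_key = {
		{ 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }
	};

	return siphash(buf, len, &example_key);
}
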
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index c8cb7e697d47..4507d77d6941 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -286,6 +286,7 @@ struct nf_bridge_info {
struct tc_skb_ext {
__u32 chain;
__u16 mru;
+ __u16 zone;
bool post_ct;
};
#endif
@@ -1380,7 +1381,7 @@ skb_flow_dissect_ct(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container,
u16 *ctinfo_map, size_t mapsize,
- bool post_ct);
+ bool post_ct, u16 zone);
void
skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index a1f03461369b..cf5999626e28 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -195,7 +195,7 @@ int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
* @offset: offset of buffer in user space
* @pages: locked pages from userspace
* @num_pages: number of locked pages
- * @dmabuf: dmabuf used to for exporting to user space
+ * @refcount: reference counter
* @flags: defined by TEE_SHM_* in tee_drv.h
* @id: unique id of a shared memory object on this device, shared
* with user space
@@ -214,7 +214,7 @@ struct tee_shm {
unsigned int offset;
struct page **pages;
size_t num_pages;
- struct dma_buf *dmabuf;
+ refcount_t refcount;
u32 flags;
int id;
u64 sec_world_id;
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 44d0e09da2d9..41edbc01ffa4 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -152,7 +152,6 @@ size_t virtio_max_dma_size(struct virtio_device *vdev);
* @feature_table_size: number of entries in the feature table array.
* @feature_table_legacy: same as feature_table but when working in legacy mode.
* @feature_table_size_legacy: number of entries in feature table legacy array.
- * @suppress_used_validation: set to not have core validate used length
* @probe: the function to call when a device is found. Returns 0 or -errno.
* @scan: optional function to call after successful probe; intended
* for virtio-scsi to invoke a scan.
@@ -169,7 +168,6 @@ struct virtio_driver {
unsigned int feature_table_size;
const unsigned int *feature_table_legacy;
unsigned int feature_table_size_legacy;
- bool suppress_used_validation;
int (*validate)(struct virtio_device *dev);
int (*probe)(struct virtio_device *dev);
void (*scan)(struct virtio_device *dev);
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 04e87f4b9417..a960de68ac69 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -7,9 +7,27 @@
#include <uapi/linux/udp.h>
#include <uapi/linux/virtio_net.h>
+static inline bool virtio_net_hdr_match_proto(__be16 protocol, __u8 gso_type)
+{
+ switch (gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+ case VIRTIO_NET_HDR_GSO_TCPV4:
+ return protocol == cpu_to_be16(ETH_P_IP);
+ case VIRTIO_NET_HDR_GSO_TCPV6:
+ return protocol == cpu_to_be16(ETH_P_IPV6);
+ case VIRTIO_NET_HDR_GSO_UDP:
+ return protocol == cpu_to_be16(ETH_P_IP) ||
+ protocol == cpu_to_be16(ETH_P_IPV6);
+ default:
+ return false;
+ }
+}
+
static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
const struct virtio_net_hdr *hdr)
{
+ if (skb->protocol)
+ return 0;
+
switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV4:
case VIRTIO_NET_HDR_GSO_UDP:
@@ -88,9 +106,12 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
if (!skb->protocol) {
__be16 protocol = dev_parse_header_protocol(skb);
- virtio_net_hdr_set_proto(skb, hdr);
- if (protocol && protocol != skb->protocol)
+ if (!protocol)
+ virtio_net_hdr_set_proto(skb, hdr);
+ else if (!virtio_net_hdr_match_proto(protocol, hdr->gso_type))
return -EINVAL;
+ else
+ skb->protocol = protocol;
}
retry:
if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 2d0df57c9902..851e07da2583 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -217,6 +217,7 @@ void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
+void __wake_up_pollfree(struct wait_queue_head *wq_head);
#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
@@ -245,6 +246,31 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
#define wake_up_interruptible_sync_poll_locked(x, m) \
__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
+/**
+ * wake_up_pollfree - signal that a polled waitqueue is going away
+ * @wq_head: the wait queue head
+ *
+ * In the very rare cases where a ->poll() implementation uses a waitqueue whose
+ * lifetime is tied to a task rather than to the 'struct file' being polled,
+ * this function must be called before the waitqueue is freed so that
+ * non-blocking polls (e.g. epoll) are notified that the queue is going away.
+ *
+ * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
+ * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
+ */
+static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
+{
+ /*
+ * For performance reasons, we don't always take the queue lock here.
+ * Therefore, we might race with someone removing the last entry from
+ * the queue, and proceed while they still hold the queue lock.
+ * However, rcu_read_lock() is required to be held in such cases, so we
+ * can safely proceed with an RCU-delayed free.
+ */
+ if (waitqueue_active(wq_head))
+ __wake_up_pollfree(wq_head);
+}
+
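
The kernel-doc above prescribes a two-step teardown: call wake_up_pollfree()
first, then RCU-delay the free of the structure containing the waitqueue. A
hedged sketch of that ordering (all names here are illustrative and not part
of this patch):

#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

struct example_obj {
	wait_queue_head_t wqh;	/* lifetime tied to a task, not a file */
	struct rcu_head rcu;
};

static void example_obj_destroy(struct example_obj *obj)
{
	wake_up_pollfree(&obj->wqh);	/* detach any epoll watchers */
	kfree_rcu(obj, rcu);		/* satisfy the RCU-delay requirement */
}
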
#define ___wait_cond_timeout(condition) \
({ \
bool __cond = (condition); \
diff --git a/include/net/bond_alb.h b/include/net/bond_alb.h
index f6af76c87a6c..191c36afa1f4 100644
--- a/include/net/bond_alb.h
+++ b/include/net/bond_alb.h
@@ -126,7 +126,7 @@ struct tlb_slave_info {
struct alb_bond_info {
struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */
u32 unbalanced_load;
- int tx_rebalance_counter;
+ atomic_t tx_rebalance_counter;
int lp_counter;
/* -------- rlb parameters -------- */
int rlb_enabled;
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 4202c609bb0b..c4898fcbf923 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -133,6 +133,19 @@ static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
if (unlikely(READ_ONCE(sk->sk_napi_id) != skb->napi_id))
WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
+ sk_rx_queue_update(sk, skb);
+}
+
+/* Variant of sk_mark_napi_id() for passive flow setup,
+ * as sk->sk_napi_id and sk->sk_rx_queue_mapping content
+ * needs to be set.
+ */
+static inline void sk_mark_napi_id_set(struct sock *sk,
+ const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
+#endif
sk_rx_queue_set(sk, skb);
}
diff --git a/include/net/dst_cache.h b/include/net/dst_cache.h
index 67634675e919..df6622a5fe98 100644
--- a/include/net/dst_cache.h
+++ b/include/net/dst_cache.h
@@ -80,6 +80,17 @@ static inline void dst_cache_reset(struct dst_cache *dst_cache)
}
/**
+ * dst_cache_reset_now - invalidate the cache contents immediately
+ * @dst_cache: the cache
+ *
+ * The caller must be sure there are no concurrent users, as this frees
+ * all dst_cache users immediately, rather than waiting for the next
+ * per-cpu usage like dst_cache_reset does. Most callers should use the
+ * higher speed lazily-freed dst_cache_reset function instead.
+ */
+void dst_cache_reset_now(struct dst_cache *dst_cache);
+
+/**
* dst_cache_init - initialize the cache, allocating the required storage
* @dst_cache: the cache
* @gfp: allocation flags
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 4b10676c69d1..bd07484ab9dd 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -69,7 +69,7 @@ struct fib_rules_ops {
int (*action)(struct fib_rule *,
struct flowi *, int,
struct fib_lookup_arg *);
- bool (*suppress)(struct fib_rule *,
+ bool (*suppress)(struct fib_rule *, int,
struct fib_lookup_arg *);
int (*match)(struct fib_rule *,
struct flowi *, int);
@@ -218,7 +218,9 @@ INDIRECT_CALLABLE_DECLARE(int fib4_rule_action(struct fib_rule *rule,
struct fib_lookup_arg *arg));
INDIRECT_CALLABLE_DECLARE(bool fib6_rule_suppress(struct fib_rule *rule,
+ int flags,
struct fib_lookup_arg *arg));
INDIRECT_CALLABLE_DECLARE(bool fib4_rule_suppress(struct fib_rule *rule,
+ int flags,
struct fib_lookup_arg *arg));
#endif
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index c412dde4d67d..83b8070d1cc9 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -485,6 +485,7 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
struct fib6_config *cfg, gfp_t gfp_flags,
struct netlink_ext_ack *extack);
void fib6_nh_release(struct fib6_nh *fib6_nh);
+void fib6_nh_release_dsts(struct fib6_nh *fib6_nh);
int call_fib6_entry_notifiers(struct net *net,
enum fib_event_type event_type,
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index ab5348e57db1..3417ba2d27ad 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -438,7 +438,7 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
#ifdef CONFIG_IP_ROUTE_CLASSID
static inline int fib_num_tclassid_users(struct net *net)
{
- return net->ipv4.fib_num_tclassid_users;
+ return atomic_read(&net->ipv4.fib_num_tclassid_users);
}
#else
static inline int fib_num_tclassid_users(struct net *net)
diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h
index afbce90c4480..45e0339be6fa 100644
--- a/include/net/ipv6_stubs.h
+++ b/include/net/ipv6_stubs.h
@@ -47,6 +47,7 @@ struct ipv6_stub {
struct fib6_config *cfg, gfp_t gfp_flags,
struct netlink_ext_ack *extack);
void (*fib6_nh_release)(struct fib6_nh *fib6_nh);
+ void (*fib6_nh_release_dsts)(struct fib6_nh *fib6_nh);
void (*fib6_update_sernum)(struct net *net, struct fib6_info *rt);
int (*ip6_del_rt)(struct net *net, struct fib6_info *rt, bool skip_notify);
void (*fib6_rt_update)(struct net *net, struct fib6_info *rt,
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index cc663c68ddc4..d24b0a34c8f0 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -276,14 +276,14 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
/* jiffies until ct expires, 0 if already expired */
static inline unsigned long nf_ct_expires(const struct nf_conn *ct)
{
- s32 timeout = ct->timeout - nfct_time_stamp;
+ s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp;
return timeout > 0 ? timeout : 0;
}
static inline bool nf_ct_is_expired(const struct nf_conn *ct)
{
- return (__s32)(ct->timeout - nfct_time_stamp) <= 0;
+ return (__s32)(READ_ONCE(ct->timeout) - nfct_time_stamp) <= 0;
}
/* use after obtaining a reference count */
@@ -302,7 +302,7 @@ static inline bool nf_ct_should_gc(const struct nf_conn *ct)
static inline void nf_ct_offload_timeout(struct nf_conn *ct)
{
if (nf_ct_expires(ct) < NF_CT_DAY / 2)
- ct->timeout = nfct_time_stamp + NF_CT_DAY;
+ WRITE_ONCE(ct->timeout, nfct_time_stamp + NF_CT_DAY);
}
struct kernel_param;
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 2f65701a43c9..6c5b2efc4f17 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -65,7 +65,7 @@ struct netns_ipv4 {
bool fib_has_custom_local_routes;
bool fib_offload_disabled;
#ifdef CONFIG_IP_ROUTE_CLASSID
- int fib_num_tclassid_users;
+ atomic_t fib_num_tclassid_users;
#endif
struct hlist_head *fib_table_hash;
struct sock *fibnl;
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index ddcee128f5d9..145acb8f2509 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -19,6 +19,8 @@
*
*/
+#include <linux/types.h>
+
#define NL802154_GENL_NAME "nl802154"
enum nl802154_commands {
@@ -150,10 +152,9 @@ enum nl802154_attrs {
};
enum nl802154_iftype {
- /* for backwards compatibility TODO */
- NL802154_IFTYPE_UNSPEC = -1,
+ NL802154_IFTYPE_UNSPEC = (~(__u32)0),
- NL802154_IFTYPE_NODE,
+ NL802154_IFTYPE_NODE = 0,
NL802154_IFTYPE_MONITOR,
NL802154_IFTYPE_COORD,
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index bf79f3a890af..9e71691c491b 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -193,4 +193,20 @@ static inline void skb_txtime_consumed(struct sk_buff *skb)
skb->tstamp = ktime_set(0, 0);
}
+struct tc_skb_cb {
+ struct qdisc_skb_cb qdisc_cb;
+
+ u16 mru;
+ bool post_ct;
+ u16 zone; /* Only valid if post_ct = true */
+};
+
+static inline struct tc_skb_cb *tc_skb_cb(const struct sk_buff *skb)
+{
+ struct tc_skb_cb *cb = (struct tc_skb_cb *)skb->cb;
+
+ BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
+ return cb;
+}
+
#endif
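
Since tc_skb_cb() simply reinterprets skb->cb (with the BUILD_BUG_ON guarding
the size), readers of the relocated fields go through the wrapper. An
illustrative accessor, not taken from this patch:

#include <net/pkt_sched.h>

/* The zone value is only meaningful when post_ct is set, as noted in the
 * struct definition above.
 */
static u16 example_ct_zone(const struct sk_buff *skb)
{
	struct tc_skb_cb *cb = tc_skb_cb(skb);

	return cb->post_ct ? cb->zone : 0;
}
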
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 22179b2fda72..c70e6d2b2fdd 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -447,8 +447,6 @@ struct qdisc_skb_cb {
};
#define QDISC_CB_PRIV_LEN 20
unsigned char data[QDISC_CB_PRIV_LEN];
- u16 mru;
- bool post_ct;
};
typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 189fdb9db162..d314a180ab93 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -105,6 +105,7 @@ extern struct percpu_counter sctp_sockets_allocated;
int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
+typedef int (*sctp_callback_t)(struct sctp_endpoint *, struct sctp_transport *, void *);
void sctp_transport_walk_start(struct rhashtable_iter *iter);
void sctp_transport_walk_stop(struct rhashtable_iter *iter);
struct sctp_transport *sctp_transport_get_next(struct net *net,
@@ -115,9 +116,8 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
struct net *net,
const union sctp_addr *laddr,
const union sctp_addr *paddr, void *p);
-int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
- int (*cb_done)(struct sctp_transport *, void *),
- struct net *net, int *pos, void *p);
+int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
+ struct net *net, int *pos, void *p);
int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p);
int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
struct sctp_info *info);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 899c29c326ba..8dabd8800006 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1355,6 +1355,7 @@ struct sctp_endpoint {
reconf_enable:1;
__u8 strreset_enable;
+ struct rcu_head rcu;
};
/* Recover the outer endpoint structure. */
@@ -1370,7 +1371,7 @@ static inline struct sctp_endpoint *sctp_ep(struct sctp_ep_common *base)
struct sctp_endpoint *sctp_endpoint_new(struct sock *, gfp_t);
void sctp_endpoint_free(struct sctp_endpoint *);
void sctp_endpoint_put(struct sctp_endpoint *);
-void sctp_endpoint_hold(struct sctp_endpoint *);
+int sctp_endpoint_hold(struct sctp_endpoint *ep);
void sctp_endpoint_add_asoc(struct sctp_endpoint *, struct sctp_association *);
struct sctp_association *sctp_endpoint_lookup_assoc(
const struct sctp_endpoint *ep,
diff --git a/include/net/sock.h b/include/net/sock.h
index b32906e1ab55..d47e9658da28 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -431,7 +431,7 @@ struct sock {
#ifdef CONFIG_XFRM
struct xfrm_policy __rcu *sk_policy[2];
#endif
- struct dst_entry *sk_rx_dst;
+ struct dst_entry __rcu *sk_rx_dst;
int sk_rx_dst_ifindex;
u32 sk_rx_dst_cookie;
@@ -1913,18 +1913,31 @@ static inline int sk_tx_queue_get(const struct sock *sk)
return -1;
}
-static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
+static inline void __sk_rx_queue_set(struct sock *sk,
+ const struct sk_buff *skb,
+ bool force_set)
{
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
if (skb_rx_queue_recorded(skb)) {
u16 rx_queue = skb_get_rx_queue(skb);
- if (unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue))
+ if (force_set ||
+ unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue))
WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue);
}
#endif
}
+static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
+{
+ __sk_rx_queue_set(sk, skb, true);
+}
+
+static inline void sk_rx_queue_update(struct sock *sk, const struct sk_buff *skb)
+{
+ __sk_rx_queue_set(sk, skb, false);
+}
+
static inline void sk_rx_queue_clear(struct sock *sk)
{
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
@@ -2430,19 +2443,22 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
* @sk: socket
*
* Use the per task page_frag instead of the per socket one for
- * optimization when we know that we're in the normal context and owns
+ * optimization when we know that we're in process context and own
* everything that's associated with %current.
*
- * gfpflags_allow_blocking() isn't enough here as direct reclaim may nest
- * inside other socket operations and end up recursing into sk_page_frag()
- * while it's already in use.
+ * Both direct reclaim and page faults can nest inside other
+ * socket operations and end up recursing into sk_page_frag()
+ * while it's already in use: explicitly avoid task page_frag
+ * usage if the caller is potentially doing any of them.
+ * This assumes that page fault handlers use the GFP_NOFS flags.
*
* Return: a per task page_frag if context allows that,
* otherwise a per socket one.
*/
static inline struct page_frag *sk_page_frag(struct sock *sk)
{
- if (gfpflags_normal_context(sk->sk_allocation))
+ if ((sk->sk_allocation & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC | __GFP_FS)) ==
+ (__GFP_DIRECT_RECLAIM | __GFP_FS))
return &current->task_frag;
return &sk->sk_frag;
diff --git a/include/soc/mscc/ocelot_vcap.h b/include/soc/mscc/ocelot_vcap.h
index eeb1142aa1b1..4d1dfa1136b2 100644
--- a/include/soc/mscc/ocelot_vcap.h
+++ b/include/soc/mscc/ocelot_vcap.h
@@ -703,6 +703,8 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
struct netlink_ext_ack *extack);
int ocelot_vcap_filter_del(struct ocelot *ocelot,
struct ocelot_vcap_filter *rule);
+int ocelot_vcap_filter_replace(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *filter);
struct ocelot_vcap_filter *
ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block,
unsigned long cookie, bool tc_offload);
diff --git a/include/soc/tegra/common.h b/include/soc/tegra/common.h
index af41ad80ec21..8ec1ac07fc85 100644
--- a/include/soc/tegra/common.h
+++ b/include/soc/tegra/common.h
@@ -39,4 +39,19 @@ devm_tegra_core_dev_init_opp_table(struct device *dev,
}
#endif
+static inline int
+devm_tegra_core_dev_init_opp_table_common(struct device *dev)
+{
+ struct tegra_core_opp_params opp_params = {};
+ int err;
+
+ opp_params.init_state = true;
+
+ err = devm_tegra_core_dev_init_opp_table(dev, &opp_params);
+ if (err != -ENODEV)
+ return err;
+
+ return 0;
+}
+
#endif /* __SOC_TEGRA_COMMON_H__ */
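
The new wrapper enables a default OPP table state and swallows -ENODEV, so
drivers whose device may lack an OPP table only see real failures. A
hypothetical probe-time use (function name is illustrative):

#include <soc/tegra/common.h>

static int example_tegra_probe(struct device *dev)
{
	int err;

	/* -ENODEV (no OPP table described for this device) is treated as
	 * success by the helper above; other errors abort the probe.
	 */
	err = devm_tegra_core_dev_init_opp_table_common(dev);
	if (err)
		return err;

	return 0;
}
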
diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
index 31f4c4f9aeea..ac0893df9c76 100644
--- a/include/sound/soc-acpi.h
+++ b/include/sound/soc-acpi.h
@@ -147,7 +147,7 @@ struct snd_soc_acpi_link_adr {
*/
/* Descriptor for SST ASoC machine driver */
struct snd_soc_acpi_mach {
- const u8 id[ACPI_ID_LEN];
+ u8 id[ACPI_ID_LEN];
const struct snd_soc_acpi_codecs *comp_ids;
const u32 link_mask;
const struct snd_soc_acpi_link_adr *links;
diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h
index 3ba63319af3c..c9048f3e471b 100644
--- a/include/trace/events/rpcgss.h
+++ b/include/trace/events/rpcgss.h
@@ -8,7 +8,7 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rpcgss
-#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
+#if !defined(_TRACE_RPCGSS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RPCGSS_H
#include <linux/tracepoint.h>
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index f25a6149d3ba..ca2e9009a651 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -30,12 +30,14 @@
#define _VMSCAN_THROTTLE_WRITEBACK (1 << VMSCAN_THROTTLE_WRITEBACK)
#define _VMSCAN_THROTTLE_ISOLATED (1 << VMSCAN_THROTTLE_ISOLATED)
#define _VMSCAN_THROTTLE_NOPROGRESS (1 << VMSCAN_THROTTLE_NOPROGRESS)
+#define _VMSCAN_THROTTLE_CONGESTED (1 << VMSCAN_THROTTLE_CONGESTED)
#define show_throttle_flags(flags) \
(flags) ? __print_flags(flags, "|", \
{_VMSCAN_THROTTLE_WRITEBACK, "VMSCAN_THROTTLE_WRITEBACK"}, \
{_VMSCAN_THROTTLE_ISOLATED, "VMSCAN_THROTTLE_ISOLATED"}, \
- {_VMSCAN_THROTTLE_NOPROGRESS, "VMSCAN_THROTTLE_NOPROGRESS"} \
+ {_VMSCAN_THROTTLE_NOPROGRESS, "VMSCAN_THROTTLE_NOPROGRESS"}, \
+ {_VMSCAN_THROTTLE_CONGESTED, "VMSCAN_THROTTLE_CONGESTED"} \
) : "VMSCAN_THROTTLE_NONE"
diff --git a/include/uapi/asm-generic/poll.h b/include/uapi/asm-generic/poll.h
index 41b509f410bf..f9c520ce4bf4 100644
--- a/include/uapi/asm-generic/poll.h
+++ b/include/uapi/asm-generic/poll.h
@@ -29,7 +29,7 @@
#define POLLRDHUP 0x2000
#endif
-#define POLLFREE (__force __poll_t)0x4000 /* currently only for epoll */
+#define POLLFREE (__force __poll_t)0x4000
#define POLL_BUSY_LOOP (__force __poll_t)0x8000
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 26e45fc5eb1a..0b94ec7b73e7 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -80,7 +80,7 @@ extern "C" {
*
* %AMDGPU_GEM_DOMAIN_GTT GPU accessible system memory, mapped into the
* GPU's virtual address space via gart. Gart memory linearizes non-contiguous
- * pages of system memory, allows GPU access system memory in a linezrized
+ * pages of system memory, allows GPU access system memory in a linearized
* fashion.
*
* %AMDGPU_GEM_DOMAIN_VRAM Local video memory. For APUs, it is memory
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 3b810b53ba8b..642808520d92 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -1096,6 +1096,24 @@ extern "C" {
#define DRM_IOCTL_SYNCOBJ_TRANSFER DRM_IOWR(0xCC, struct drm_syncobj_transfer)
#define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)
+/**
+ * DRM_IOCTL_MODE_GETFB2 - Get framebuffer metadata.
+ *
+ * This queries metadata about a framebuffer. User-space fills
+ * &drm_mode_fb_cmd2.fb_id as the input, and the kernel fills the rest of the
+ * struct as the output.
+ *
+ * If the client is DRM master or has &CAP_SYS_ADMIN, &drm_mode_fb_cmd2.handles
+ * will be filled with GEM buffer handles. Planes are valid until one has a
+ * zero handle -- this can be used to compute the number of planes.
+ *
+ * Otherwise, &drm_mode_fb_cmd2.handles will be zeroed and planes are valid
+ * until one has a zero &drm_mode_fb_cmd2.pitches.
+ *
+ * If the framebuffer has a format modifier, &DRM_MODE_FB_MODIFIERS will be set
+ * in &drm_mode_fb_cmd2.flags and &drm_mode_fb_cmd2.modifier will contain the
+ * modifier. Otherwise, user-space must ignore &drm_mode_fb_cmd2.modifier.
+ */
#define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
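
A user-space sketch of the lookup described above, assuming libdrm's
drmIoctl() wrapper (illustrative only; error handling and the handle/pitch
termination rules are left to the caller):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>

static int example_getfb2(int drm_fd, uint32_t fb_id,
			  struct drm_mode_fb_cmd2 *out)
{
	memset(out, 0, sizeof(*out));
	out->fb_id = fb_id;	/* only input field */

	return drmIoctl(drm_fd, DRM_IOCTL_MODE_GETFB2, out);
}
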
/*
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 7f652c96845b..fc0c1454d275 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -314,6 +314,13 @@ extern "C" {
*/
#define DRM_FORMAT_P016 fourcc_code('P', '0', '1', '6') /* 2x2 subsampled Cr:Cb plane 16 bits per channel */
+/* 2 plane YCbCr420.
+ * 3 10 bit components and 2 padding bits packed into 4 bytes.
+ * index 0 = Y plane, [31:0] x:Y2:Y1:Y0 2:10:10:10 little endian
+ * index 1 = Cr:Cb plane, [63:0] x:Cr2:Cb2:Cr1:x:Cb1:Cr0:Cb0 [2:10:10:10:2:10:10:10] little endian
+ */
+#define DRM_FORMAT_P030 fourcc_code('P', '0', '3', '0') /* 2x2 subsampled Cr:Cb plane 10 bits per channel packed */
+
/* 3 plane non-subsampled (444) YCbCr
* 16 bits per component, but only 10 bits are used and 6 bits are padded
* index 0: Y plane, [15:0] Y:x [10:6] little endian
@@ -854,6 +861,10 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
* and UV. Some SAND-using hardware stores UV in a separate tiled
* image from Y to reduce the column height, which is not supported
* with these modifiers.
+ *
+ * The DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT modifier is also
+ * supported for DRM_FORMAT_P030 where the columns remain as 128 bytes
+ * wide, but as this is a 10 bpp format that translates to 96 pixels.
*/
#define DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(v) \
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index a13e20cc66b4..0512fde5e697 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -196,6 +196,13 @@ struct drm_virtgpu_context_init {
__u64 ctx_set_params;
};
+/*
+ * Event code that's given when VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK is in
+ * effect. The event size is sizeof(drm_event), since there is no additional
+ * payload.
+ */
+#define VIRTGPU_EVENT_FENCE_SIGNALED 0x90000000
+
#define DRM_IOCTL_VIRTGPU_MAP \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index 9078775feb51..8277644c1144 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -110,6 +110,7 @@ extern "C" {
#define DRM_VMW_PARAM_HW_CAPS2 13
#define DRM_VMW_PARAM_SM4_1 14
#define DRM_VMW_PARAM_SM5 15
+#define DRM_VMW_PARAM_GL43 16
/**
* enum drm_vmw_handle_type - handle type for ref ioctls
diff --git a/include/uapi/linux/byteorder/big_endian.h b/include/uapi/linux/byteorder/big_endian.h
index 2199adc6a6c2..80aa5c41a763 100644
--- a/include/uapi/linux/byteorder/big_endian.h
+++ b/include/uapi/linux/byteorder/big_endian.h
@@ -9,6 +9,7 @@
#define __BIG_ENDIAN_BITFIELD
#endif
+#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/swab.h>
diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
index 601c904fd5cd..cd98982e7523 100644
--- a/include/uapi/linux/byteorder/little_endian.h
+++ b/include/uapi/linux/byteorder/little_endian.h
@@ -9,6 +9,7 @@
#define __LITTLE_ENDIAN_BITFIELD
#endif
+#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/swab.h>
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 5da4ee234e0b..c0c2f3ed5729 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -117,7 +117,7 @@
#define ETH_P_IFE 0xED3E /* ForCES inter-FE LFB type */
#define ETH_P_AF_IUCV 0xFBFB /* IBM af_iucv [ NOT AN OFFICIALLY REGISTERED ID ] */
-#define ETH_P_802_3_MIN 0x0600 /* If the value in the ethernet type is less than this value
+#define ETH_P_802_3_MIN 0x0600 /* If the value in the ethernet type is more than this value
* then the frame is Ethernet II. Else it is 802.3 */
/*
diff --git a/include/uapi/linux/kfd_sysfs.h b/include/uapi/linux/kfd_sysfs.h
new file mode 100644
index 000000000000..e1fb78b4bf09
--- /dev/null
+++ b/include/uapi/linux/kfd_sysfs.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT WITH Linux-syscall-note */
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef KFD_SYSFS_H_INCLUDED
+#define KFD_SYSFS_H_INCLUDED
+
+/* Capability bits in node properties */
+#define HSA_CAP_HOT_PLUGGABLE 0x00000001
+#define HSA_CAP_ATS_PRESENT 0x00000002
+#define HSA_CAP_SHARED_WITH_GRAPHICS 0x00000004
+#define HSA_CAP_QUEUE_SIZE_POW2 0x00000008
+#define HSA_CAP_QUEUE_SIZE_32BIT 0x00000010
+#define HSA_CAP_QUEUE_IDLE_EVENT 0x00000020
+#define HSA_CAP_VA_LIMIT 0x00000040
+#define HSA_CAP_WATCH_POINTS_SUPPORTED 0x00000080
+#define HSA_CAP_WATCH_POINTS_TOTALBITS_MASK 0x00000f00
+#define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT 8
+#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK 0x00003000
+#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT 12
+
+#define HSA_CAP_DOORBELL_TYPE_PRE_1_0 0x0
+#define HSA_CAP_DOORBELL_TYPE_1_0 0x1
+#define HSA_CAP_DOORBELL_TYPE_2_0 0x2
+#define HSA_CAP_AQL_QUEUE_DOUBLE_MAP 0x00004000
+
+/* Old buggy user mode depends on this being 0 */
+#define HSA_CAP_RESERVED_WAS_SRAM_EDCSUPPORTED 0x00080000
+
+#define HSA_CAP_MEM_EDCSUPPORTED 0x00100000
+#define HSA_CAP_RASEVENTNOTIFY 0x00200000
+#define HSA_CAP_ASIC_REVISION_MASK 0x03c00000
+#define HSA_CAP_ASIC_REVISION_SHIFT 22
+#define HSA_CAP_SRAM_EDCSUPPORTED 0x04000000
+#define HSA_CAP_SVMAPI_SUPPORTED 0x08000000
+#define HSA_CAP_FLAGS_COHERENTHOSTACCESS 0x10000000
+#define HSA_CAP_RESERVED 0xe00f8000
+
+/* Heap types in memory properties */
+#define HSA_MEM_HEAP_TYPE_SYSTEM 0
+#define HSA_MEM_HEAP_TYPE_FB_PUBLIC 1
+#define HSA_MEM_HEAP_TYPE_FB_PRIVATE 2
+#define HSA_MEM_HEAP_TYPE_GPU_GDS 3
+#define HSA_MEM_HEAP_TYPE_GPU_LDS 4
+#define HSA_MEM_HEAP_TYPE_GPU_SCRATCH 5
+
+/* Flag bits in memory properties */
+#define HSA_MEM_FLAGS_HOT_PLUGGABLE 0x00000001
+#define HSA_MEM_FLAGS_NON_VOLATILE 0x00000002
+#define HSA_MEM_FLAGS_RESERVED 0xfffffffc
+
+/* Cache types in cache properties */
+#define HSA_CACHE_TYPE_DATA 0x00000001
+#define HSA_CACHE_TYPE_INSTRUCTION 0x00000002
+#define HSA_CACHE_TYPE_CPU 0x00000004
+#define HSA_CACHE_TYPE_HSACU 0x00000008
+#define HSA_CACHE_TYPE_RESERVED 0xfffffff0
+
+/* Link types in IO link properties (matches CRAT link types) */
+#define HSA_IOLINK_TYPE_UNDEFINED 0
+#define HSA_IOLINK_TYPE_HYPERTRANSPORT 1
+#define HSA_IOLINK_TYPE_PCIEXPRESS 2
+#define HSA_IOLINK_TYPE_AMBA 3
+#define HSA_IOLINK_TYPE_MIPI 4
+#define HSA_IOLINK_TYPE_QPI_1_1 5
+#define HSA_IOLINK_TYPE_RESERVED1 6
+#define HSA_IOLINK_TYPE_RESERVED2 7
+#define HSA_IOLINK_TYPE_RAPID_IO 8
+#define HSA_IOLINK_TYPE_INFINIBAND 9
+#define HSA_IOLINK_TYPE_RESERVED3 10
+#define HSA_IOLINK_TYPE_XGMI 11
+#define HSA_IOLINK_TYPE_XGOP 12
+#define HSA_IOLINK_TYPE_GZ 13
+#define HSA_IOLINK_TYPE_ETHERNET_RDMA 14
+#define HSA_IOLINK_TYPE_RDMA_OTHER 15
+#define HSA_IOLINK_TYPE_OTHER 16
+
+/* Flag bits in IO link properties (matches CRAT flags, excluding the
+ * bi-directional flag, which is not officially part of the CRAT spec, and
+ * only used internally in KFD)
+ */
+#define HSA_IOLINK_FLAGS_ENABLED (1 << 0)
+#define HSA_IOLINK_FLAGS_NON_COHERENT (1 << 1)
+#define HSA_IOLINK_FLAGS_NO_ATOMICS_32_BIT (1 << 2)
+#define HSA_IOLINK_FLAGS_NO_ATOMICS_64_BIT (1 << 3)
+#define HSA_IOLINK_FLAGS_NO_PEER_TO_PEER_DMA (1 << 4)
+#define HSA_IOLINK_FLAGS_RESERVED 0xffffffe0
+
+#endif
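
The *_MASK/*_SHIFT pairs above pack multi-bit fields into the single
capability word exported through sysfs. An illustrative decode (not part of
the patch):

#include <linux/kfd_sysfs.h>

/* Extract the ASIC revision sub-field from a node's "capability" value. */
static unsigned int example_asic_revision(unsigned int caps)
{
	return (caps & HSA_CAP_ASIC_REVISION_MASK) >>
	       HSA_CAP_ASIC_REVISION_SHIFT;
}
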
diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h
index c8cc46f80a16..f106a3941cdf 100644
--- a/include/uapi/linux/mptcp.h
+++ b/include/uapi/linux/mptcp.h
@@ -136,19 +136,21 @@ struct mptcp_info {
* MPTCP_EVENT_REMOVED: token, rem_id
* An address has been lost by the peer.
*
- * MPTCP_EVENT_SUB_ESTABLISHED: token, family, saddr4 | saddr6,
- * daddr4 | daddr6, sport, dport, backup,
- * if_idx [, error]
+ * MPTCP_EVENT_SUB_ESTABLISHED: token, family, loc_id, rem_id,
+ * saddr4 | saddr6, daddr4 | daddr6, sport,
+ * dport, backup, if_idx [, error]
* A new subflow has been established. 'error' should not be set.
*
- * MPTCP_EVENT_SUB_CLOSED: token, family, saddr4 | saddr6, daddr4 | daddr6,
- * sport, dport, backup, if_idx [, error]
+ * MPTCP_EVENT_SUB_CLOSED: token, family, loc_id, rem_id, saddr4 | saddr6,
+ * daddr4 | daddr6, sport, dport, backup, if_idx
+ * [, error]
* A subflow has been closed. An error (copy of sk_err) could be set if an
* error has been detected for this subflow.
*
- * MPTCP_EVENT_SUB_PRIORITY: token, family, saddr4 | saddr6, daddr4 | daddr6,
- * sport, dport, backup, if_idx [, error]
- * The priority of a subflow has changed. 'error' should not be set.
+ * MPTCP_EVENT_SUB_PRIORITY: token, family, loc_id, rem_id, saddr4 | saddr6,
+ * daddr4 | daddr6, sport, dport, backup, if_idx
+ * [, error]
+ * The priority of a subflow has changed. 'error' should not be set.
*/
enum mptcp_event_type {
MPTCP_EVENT_UNSPEC = 0,
diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h
index f6e3c8c9c744..4fa4e979e948 100644
--- a/include/uapi/linux/nfc.h
+++ b/include/uapi/linux/nfc.h
@@ -263,7 +263,7 @@ enum nfc_sdp_attr {
#define NFC_SE_ENABLED 0x1
struct sockaddr_nfc {
- sa_family_t sa_family;
+ __kernel_sa_family_t sa_family;
__u32 dev_idx;
__u32 target_idx;
__u32 nfc_protocol;
@@ -271,14 +271,14 @@ struct sockaddr_nfc {
#define NFC_LLCP_MAX_SERVICE_NAME 63
struct sockaddr_nfc_llcp {
- sa_family_t sa_family;
+ __kernel_sa_family_t sa_family;
__u32 dev_idx;
__u32 target_idx;
__u32 nfc_protocol;
__u8 dsap; /* Destination SAP, if known */
__u8 ssap; /* Source SAP to be bound to */
char service_name[NFC_LLCP_MAX_SERVICE_NAME]; /* Service name URI */;
- size_t service_name_len;
+ __kernel_size_t service_name_len;
};
/* NFC socket protocols */
diff --git a/include/uapi/linux/resource.h b/include/uapi/linux/resource.h
index 74ef57b38f9f..ac5d6a3031db 100644
--- a/include/uapi/linux/resource.h
+++ b/include/uapi/linux/resource.h
@@ -66,10 +66,17 @@ struct rlimit64 {
#define _STK_LIM (8*1024*1024)
/*
- * GPG2 wants 64kB of mlocked memory, to make sure pass phrases
- * and other sensitive information are never written to disk.
+ * Limit the amount of locked memory by some sane default:
+ * root can always increase this limit if needed.
+ *
+ * The main use-cases are (1) preventing sensitive memory
+ * from being swapped; (2) real-time operations; (3) via
+ * IOURING_REGISTER_BUFFERS.
+ *
+ * The first two don't need much. The latter will take as
+ * much as it can get. 8MB is a reasonably sane default.
*/
-#define MLOCK_LIMIT ((PAGE_SIZE > 64*1024) ? PAGE_SIZE : 64*1024)
+#define MLOCK_LIMIT (8*1024*1024)
/*
* Due to binary compatibility, the actual resource numbers
diff --git a/include/xen/events.h b/include/xen/events.h
index c204262d9fc2..344081e71584 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -17,6 +17,7 @@ struct xenbus_device;
unsigned xen_evtchn_nr_channels(void);
int bind_evtchn_to_irq(evtchn_port_t evtchn);
+int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn);
int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
irq_handler_t handler,
unsigned long irqflags, const char *devname,
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index b94074c82772..b13eb86395e0 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -112,6 +112,7 @@ struct xenbus_driver {
const char *name; /* defaults to ids[0].devicetype */
const struct xenbus_device_id *ids;
bool allow_rebind; /* avoid setting xenstore closed during remove */
+ bool not_essential; /* is not mandatory for boot progress */
int (*probe)(struct xenbus_device *dev,
const struct xenbus_device_id *id);
void (*otherend_changed)(struct xenbus_device *dev,
diff --git a/kernel/audit.c b/kernel/audit.c
index 121d37e700a6..4cebadb5f30d 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -718,7 +718,7 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
{
int rc = 0;
struct sk_buff *skb;
- static unsigned int failed = 0;
+ unsigned int failed = 0;
/* NOTE: kauditd_thread takes care of all our locking, we just use
* the netlink info passed to us (e.g. sk and portid) */
@@ -735,32 +735,30 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
continue;
}
+retry:
/* grab an extra skb reference in case of error */
skb_get(skb);
rc = netlink_unicast(sk, skb, portid, 0);
if (rc < 0) {
- /* fatal failure for our queue flush attempt? */
+ /* send failed - try a few times unless fatal error */
if (++failed >= retry_limit ||
rc == -ECONNREFUSED || rc == -EPERM) {
- /* yes - error processing for the queue */
sk = NULL;
if (err_hook)
(*err_hook)(skb);
- if (!skb_hook)
- goto out;
- /* keep processing with the skb_hook */
+ if (rc == -EAGAIN)
+ rc = 0;
+ /* continue to drain the queue */
continue;
} else
- /* no - requeue to preserve ordering */
- skb_queue_head(queue, skb);
+ goto retry;
} else {
- /* it worked - drop the extra reference and continue */
+ /* skb sent - drop the extra reference and continue */
consume_skb(skb);
failed = 0;
}
}
-out:
return (rc >= 0 ? 0 : rc);
}
@@ -1609,7 +1607,8 @@ static int __net_init audit_net_init(struct net *net)
audit_panic("cannot initialize netlink socket in namespace");
return -ENOMEM;
}
- aunet->sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
+ /* limit the timeout in case auditd is blocked/stopped */
+ aunet->sk->sk_sndtimeo = HZ / 10;
return 0;
}
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index dbc3ad07e21b..9bdb03767db5 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -6346,11 +6346,6 @@ BTF_ID_LIST_GLOBAL_SINGLE(btf_task_struct_ids, struct, task_struct)
/* BTF ID set registration API for modules */
-struct kfunc_btf_id_list {
- struct list_head list;
- struct mutex mutex;
-};
-
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
@@ -6376,8 +6371,6 @@ bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id,
{
struct kfunc_btf_id_set *s;
- if (!owner)
- return false;
mutex_lock(&klist->mutex);
list_for_each_entry(s, &klist->list, list) {
if (s->owner == owner && btf_id_set_contains(s->set, kfunc_id)) {
@@ -6389,8 +6382,6 @@ bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id,
return false;
}
-#endif
-
#define DEFINE_KFUNC_BTF_ID_LIST(name) \
struct kfunc_btf_id_list name = { LIST_HEAD_INIT(name.list), \
__MUTEX_INITIALIZER(name.mutex) }; \
@@ -6398,3 +6389,5 @@ bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id,
DEFINE_KFUNC_BTF_ID_LIST(bpf_tcp_ca_kfunc_list);
DEFINE_KFUNC_BTF_ID_LIST(prog_test_kfunc_list);
+
+#endif
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 50efda51515b..b532f1058d35 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1366,22 +1366,28 @@ static void __reg_bound_offset(struct bpf_reg_state *reg)
reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
}
+static bool __reg32_bound_s64(s32 a)
+{
+ return a >= 0 && a <= S32_MAX;
+}
+
static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
{
reg->umin_value = reg->u32_min_value;
reg->umax_value = reg->u32_max_value;
- /* Attempt to pull 32-bit signed bounds into 64-bit bounds
- * but must be positive otherwise set to worse case bounds
- * and refine later from tnum.
+
+ /* Attempt to pull 32-bit signed bounds into 64-bit bounds but must
+ * be positive otherwise set to worse case bounds and refine later
+ * from tnum.
*/
- if (reg->s32_min_value >= 0 && reg->s32_max_value >= 0)
- reg->smax_value = reg->s32_max_value;
- else
- reg->smax_value = U32_MAX;
- if (reg->s32_min_value >= 0)
+ if (__reg32_bound_s64(reg->s32_min_value) &&
+ __reg32_bound_s64(reg->s32_max_value)) {
reg->smin_value = reg->s32_min_value;
- else
+ reg->smax_value = reg->s32_max_value;
+ } else {
reg->smin_value = 0;
+ reg->smax_value = U32_MAX;
+ }
}
static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
@@ -2379,8 +2385,6 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
*/
if (insn->src_reg != BPF_REG_FP)
return 0;
- if (BPF_SIZE(insn->code) != BPF_DW)
- return 0;
/* dreg = *(u64 *)[fp - off] was a fill from the stack.
* that [fp - off] slot contains scalar that needs to be
@@ -2403,8 +2407,6 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
/* scalars can only be spilled into stack */
if (insn->dst_reg != BPF_REG_FP)
return 0;
- if (BPF_SIZE(insn->code) != BPF_DW)
- return 0;
spi = (-insn->off - 1) / BPF_REG_SIZE;
if (spi >= 64) {
verbose(env, "BUG spi %d\n", spi);
@@ -4551,9 +4553,16 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
if (insn->imm == BPF_CMPXCHG) {
/* Check comparison of R0 with memory location */
- err = check_reg_arg(env, BPF_REG_0, SRC_OP);
+ const u32 aux_reg = BPF_REG_0;
+
+ err = check_reg_arg(env, aux_reg, SRC_OP);
if (err)
return err;
+
+ if (is_pointer_value(env, aux_reg)) {
+ verbose(env, "R%d leaks addr into mem\n", aux_reg);
+ return -EACCES;
+ }
}
if (is_pointer_value(env, insn->src_reg)) {
@@ -4588,13 +4597,19 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
load_reg = -1;
}
- /* check whether we can read the memory */
+ /* Check whether we can read the memory, with second call for fetch
+ * case to simulate the register fill.
+ */
err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
- BPF_SIZE(insn->code), BPF_READ, load_reg, true);
+ BPF_SIZE(insn->code), BPF_READ, -1, true);
+ if (!err && load_reg >= 0)
+ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
+ BPF_SIZE(insn->code), BPF_READ, load_reg,
+ true);
if (err)
return err;
- /* check whether we can write into the same memory */
+ /* Check whether we can write into the same memory. */
err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_WRITE, -1, true);
if (err)
@@ -8308,6 +8323,10 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
insn->dst_reg);
}
zext_32_to_64(dst_reg);
+
+ __update_reg_bounds(dst_reg);
+ __reg_deduce_bounds(dst_reg);
+ __reg_bound_offset(dst_reg);
}
} else {
/* case: R = imm
@@ -8422,7 +8441,7 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
new_range = dst_reg->off;
if (range_right_open)
- new_range--;
+ new_range++;
/* Examples for register markings:
*
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 192e43a87407..407a2568f35e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -31,6 +31,7 @@
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
+#include <linux/scs.h>
#include <linux/percpu-rwsem.h>
#include <linux/cpuset.h>
@@ -588,6 +589,12 @@ static int bringup_cpu(unsigned int cpu)
int ret;
/*
+ * Reset stale stack state from the last time this CPU was online.
+ */
+ scs_task_reset(idle);
+ kasan_unpoison_task_stack(idle);
+
+ /*
* Some architectures have to walk the irq descriptors to
* setup the vector space for the cpu which comes online.
* Prevent irq alloc/free across the bringup.
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index eb53f5ec62c9..256cf6db573c 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -6,6 +6,7 @@
#include <linux/buildid.h>
#include <linux/crash_core.h>
+#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/vmalloc.h>
@@ -295,6 +296,16 @@ int __init parse_crashkernel_low(char *cmdline,
"crashkernel=", suffix_tbl[SUFFIX_LOW]);
}
+/*
+ * Add a dummy early_param handler to mark crashkernel= as a known command line
+ * parameter and suppress incorrect warnings in init/main.c.
+ */
+static int __init parse_crashkernel_dummy(char *arg)
+{
+ return 0;
+}
+early_param("crashkernel", parse_crashkernel_dummy);
+
Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
void *data, size_t data_len)
{
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 523106a506ee..30d94f68c5bd 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -9759,6 +9759,9 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
continue;
if (event->attr.config != entry->type)
continue;
+ /* Cannot deliver synchronous signal to other task. */
+ if (event->attr.sigtrap)
+ continue;
if (perf_tp_event_match(event, &data, regs))
perf_swevent_event(event, count, &data, regs);
}
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index e9db0c810554..21eccc961bba 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2086,6 +2086,9 @@ int register_kretprobe(struct kretprobe *rp)
}
}
+ if (rp->data_size > KRETPROBE_MAX_DATA_SIZE)
+ return -E2BIG;
+
rp->kp.pre_handler = pre_handler_kretprobe;
rp->kp.post_handler = NULL;
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 0c6a48dfcecb..1f25a4d7de27 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1380,7 +1380,7 @@ static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
* - the VCPU on which owner runs is preempted
*/
if (!owner->on_cpu || need_resched() ||
- rt_mutex_waiter_is_top_waiter(lock, waiter) ||
+ !rt_mutex_waiter_is_top_waiter(lock, waiter) ||
vcpu_is_preempted(task_cpu(owner))) {
res = false;
break;
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index c51387a43265..04a74d040a6d 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -105,9 +105,9 @@
* atomic_long_cmpxchg() will be used to obtain writer lock.
*
* There are three places where the lock handoff bit may be set or cleared.
- * 1) rwsem_mark_wake() for readers.
- * 2) rwsem_try_write_lock() for writers.
- * 3) Error path of rwsem_down_write_slowpath().
+ * 1) rwsem_mark_wake() for readers -- set, clear
+ * 2) rwsem_try_write_lock() for writers -- set, clear
+ * 3) rwsem_del_waiter() -- clear
*
* For all the above cases, wait_lock will be held. A writer must also
* be the first one in the wait_list to be eligible for setting the handoff
@@ -334,6 +334,9 @@ struct rwsem_waiter {
struct task_struct *task;
enum rwsem_waiter_type type;
unsigned long timeout;
+
+ /* Writer only, not initialized in reader */
+ bool handoff_set;
};
#define rwsem_first_waiter(sem) \
list_first_entry(&sem->wait_list, struct rwsem_waiter, list)
@@ -344,12 +347,6 @@ enum rwsem_wake_type {
RWSEM_WAKE_READ_OWNED /* Waker thread holds the read lock */
};
-enum writer_wait_state {
- WRITER_NOT_FIRST, /* Writer is not first in wait list */
- WRITER_FIRST, /* Writer is first in wait list */
- WRITER_HANDOFF /* Writer is first & handoff needed */
-};
-
/*
* The typical HZ value is either 250 or 1000. So set the minimum waiting
* time to at least 4ms or 1 jiffy (if it is higher than 4ms) in the wait
@@ -365,6 +362,31 @@ enum writer_wait_state {
*/
#define MAX_READERS_WAKEUP 0x100
+static inline void
+rwsem_add_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
+{
+ lockdep_assert_held(&sem->wait_lock);
+ list_add_tail(&waiter->list, &sem->wait_list);
+ /* caller will set RWSEM_FLAG_WAITERS */
+}
+
+/*
+ * Remove a waiter from the wait_list and clear flags.
+ *
+ * Both rwsem_mark_wake() and rwsem_try_write_lock() contain a full 'copy' of
+ * this function. Modify with care.
+ */
+static inline void
+rwsem_del_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
+{
+ lockdep_assert_held(&sem->wait_lock);
+ list_del(&waiter->list);
+ if (likely(!list_empty(&sem->wait_list)))
+ return;
+
+ atomic_long_andnot(RWSEM_FLAG_HANDOFF | RWSEM_FLAG_WAITERS, &sem->count);
+}
+
/*
* handle the lock release when processes blocked on it that can now run
* - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
@@ -376,6 +398,8 @@ enum writer_wait_state {
* preferably when the wait_lock is released
* - woken process blocks are discarded from the list after having task zeroed
* - writers are only marked woken if downgrading is false
+ *
+ * Implies rwsem_del_waiter() for all woken readers.
*/
static void rwsem_mark_wake(struct rw_semaphore *sem,
enum rwsem_wake_type wake_type,
@@ -490,18 +514,25 @@ static void rwsem_mark_wake(struct rw_semaphore *sem,
adjustment = woken * RWSEM_READER_BIAS - adjustment;
lockevent_cond_inc(rwsem_wake_reader, woken);
+
+ oldcount = atomic_long_read(&sem->count);
if (list_empty(&sem->wait_list)) {
- /* hit end of list above */
+ /*
+ * Combined with list_move_tail() above, this implies
+ * rwsem_del_waiter().
+ */
adjustment -= RWSEM_FLAG_WAITERS;
+ if (oldcount & RWSEM_FLAG_HANDOFF)
+ adjustment -= RWSEM_FLAG_HANDOFF;
+ } else if (woken) {
+ /*
+ * When we've woken a reader, we no longer need to force
+ * writers to give up the lock and we can clear HANDOFF.
+ */
+ if (oldcount & RWSEM_FLAG_HANDOFF)
+ adjustment -= RWSEM_FLAG_HANDOFF;
}
- /*
- * When we've woken a reader, we no longer need to force writers
- * to give up the lock and we can clear HANDOFF.
- */
- if (woken && (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF))
- adjustment -= RWSEM_FLAG_HANDOFF;
-
if (adjustment)
atomic_long_add(adjustment, &sem->count);
@@ -532,12 +563,12 @@ static void rwsem_mark_wake(struct rw_semaphore *sem,
* race conditions between checking the rwsem wait list and setting the
* sem->count accordingly.
*
- * If wstate is WRITER_HANDOFF, it will make sure that either the handoff
- * bit is set or the lock is acquired with handoff bit cleared.
+ * Implies rwsem_del_waiter() on success.
*/
static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
- enum writer_wait_state wstate)
+ struct rwsem_waiter *waiter)
{
+ bool first = rwsem_first_waiter(sem) == waiter;
long count, new;
lockdep_assert_held(&sem->wait_lock);
@@ -546,13 +577,19 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
do {
bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
- if (has_handoff && wstate == WRITER_NOT_FIRST)
- return false;
+ if (has_handoff) {
+ if (!first)
+ return false;
+
+ /* First waiter inherits a previously set handoff bit */
+ waiter->handoff_set = true;
+ }
new = count;
if (count & RWSEM_LOCK_MASK) {
- if (has_handoff || (wstate != WRITER_HANDOFF))
+ if (has_handoff || (!rt_task(waiter->task) &&
+ !time_after(jiffies, waiter->timeout)))
return false;
new |= RWSEM_FLAG_HANDOFF;
@@ -569,9 +606,17 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
* We have either acquired the lock with handoff bit cleared or
* set the handoff bit.
*/
- if (new & RWSEM_FLAG_HANDOFF)
+ if (new & RWSEM_FLAG_HANDOFF) {
+ waiter->handoff_set = true;
+ lockevent_inc(rwsem_wlock_handoff);
return false;
+ }
+ /*
+ * Have rwsem_try_write_lock() fully imply rwsem_del_waiter() on
+ * success.
+ */
+ list_del(&waiter->list);
rwsem_set_owner(sem);
return true;
}
@@ -956,7 +1001,7 @@ queue:
}
adjustment += RWSEM_FLAG_WAITERS;
}
- list_add_tail(&waiter.list, &sem->wait_list);
+ rwsem_add_waiter(sem, &waiter);
/* we're now waiting on the lock, but no longer actively locking */
count = atomic_long_add_return(adjustment, &sem->count);
@@ -1002,11 +1047,7 @@ queue:
return sem;
out_nolock:
- list_del(&waiter.list);
- if (list_empty(&sem->wait_list)) {
- atomic_long_andnot(RWSEM_FLAG_WAITERS|RWSEM_FLAG_HANDOFF,
- &sem->count);
- }
+ rwsem_del_waiter(sem, &waiter);
raw_spin_unlock_irq(&sem->wait_lock);
__set_current_state(TASK_RUNNING);
lockevent_inc(rwsem_rlock_fail);
@@ -1020,9 +1061,7 @@ static struct rw_semaphore *
rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
{
long count;
- enum writer_wait_state wstate;
struct rwsem_waiter waiter;
- struct rw_semaphore *ret = sem;
DEFINE_WAKE_Q(wake_q);
/* do optimistic spinning and steal lock if possible */
@@ -1038,16 +1077,13 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
waiter.task = current;
waiter.type = RWSEM_WAITING_FOR_WRITE;
waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
+ waiter.handoff_set = false;
raw_spin_lock_irq(&sem->wait_lock);
-
- /* account for this before adding a new element to the list */
- wstate = list_empty(&sem->wait_list) ? WRITER_FIRST : WRITER_NOT_FIRST;
-
- list_add_tail(&waiter.list, &sem->wait_list);
+ rwsem_add_waiter(sem, &waiter);
/* we're now waiting on the lock */
- if (wstate == WRITER_NOT_FIRST) {
+ if (rwsem_first_waiter(sem) != &waiter) {
count = atomic_long_read(&sem->count);
/*
@@ -1083,13 +1119,16 @@ wait:
/* wait until we successfully acquire the lock */
set_current_state(state);
for (;;) {
- if (rwsem_try_write_lock(sem, wstate)) {
+ if (rwsem_try_write_lock(sem, &waiter)) {
/* rwsem_try_write_lock() implies ACQUIRE on success */
break;
}
raw_spin_unlock_irq(&sem->wait_lock);
+ if (signal_pending_state(state, current))
+ goto out_nolock;
+
/*
* After setting the handoff bit and failing to acquire
* the lock, attempt to spin on owner to accelerate lock
@@ -1098,7 +1137,7 @@ wait:
* In this case, we attempt to acquire the lock again
* without sleeping.
*/
- if (wstate == WRITER_HANDOFF) {
+ if (waiter.handoff_set) {
enum owner_state owner_state;
preempt_disable();
@@ -1109,66 +1148,26 @@ wait:
goto trylock_again;
}
- /* Block until there are no active lockers. */
- for (;;) {
- if (signal_pending_state(state, current))
- goto out_nolock;
-
- schedule();
- lockevent_inc(rwsem_sleep_writer);
- set_current_state(state);
- /*
- * If HANDOFF bit is set, unconditionally do
- * a trylock.
- */
- if (wstate == WRITER_HANDOFF)
- break;
-
- if ((wstate == WRITER_NOT_FIRST) &&
- (rwsem_first_waiter(sem) == &waiter))
- wstate = WRITER_FIRST;
-
- count = atomic_long_read(&sem->count);
- if (!(count & RWSEM_LOCK_MASK))
- break;
-
- /*
- * The setting of the handoff bit is deferred
- * until rwsem_try_write_lock() is called.
- */
- if ((wstate == WRITER_FIRST) && (rt_task(current) ||
- time_after(jiffies, waiter.timeout))) {
- wstate = WRITER_HANDOFF;
- lockevent_inc(rwsem_wlock_handoff);
- break;
- }
- }
+ schedule();
+ lockevent_inc(rwsem_sleep_writer);
+ set_current_state(state);
trylock_again:
raw_spin_lock_irq(&sem->wait_lock);
}
__set_current_state(TASK_RUNNING);
- list_del(&waiter.list);
raw_spin_unlock_irq(&sem->wait_lock);
lockevent_inc(rwsem_wlock);
-
- return ret;
+ return sem;
out_nolock:
__set_current_state(TASK_RUNNING);
raw_spin_lock_irq(&sem->wait_lock);
- list_del(&waiter.list);
-
- if (unlikely(wstate == WRITER_HANDOFF))
- atomic_long_add(-RWSEM_FLAG_HANDOFF, &sem->count);
-
- if (list_empty(&sem->wait_list))
- atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
- else
+ rwsem_del_waiter(sem, &waiter);
+ if (!list_empty(&sem->wait_list))
rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
raw_spin_unlock_irq(&sem->wait_lock);
wake_up_q(&wake_q);
lockevent_inc(rwsem_wlock_fail);
-
return ERR_PTR(-EINTR);
}
@@ -1249,17 +1248,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
- /*
- * Optimize for the case when the rwsem is not locked at all.
- */
- tmp = RWSEM_UNLOCKED_VALUE;
- do {
+ tmp = atomic_long_read(&sem->count);
+ while (!(tmp & RWSEM_READ_FAILED_MASK)) {
if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
- tmp + RWSEM_READER_BIAS)) {
+ tmp + RWSEM_READER_BIAS)) {
rwsem_set_reader_owned(sem);
return 1;
}
- } while (!(tmp & RWSEM_READ_FAILED_MASK));
+ }
return 0;
}
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 9ed9b744876c..e6af502c2fd7 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -693,7 +693,7 @@ static int load_image_and_restore(void)
goto Unlock;
error = swsusp_read(&flags);
- swsusp_close(FMODE_READ);
+ swsusp_close(FMODE_READ | FMODE_EXCL);
if (!error)
error = hibernation_restore(flags & SF_PLATFORM_MODE);
@@ -983,7 +983,7 @@ static int software_resume(void)
/* The snapshot device should not be opened while we're running */
if (!hibernate_acquire()) {
error = -EBUSY;
- swsusp_close(FMODE_READ);
+ swsusp_close(FMODE_READ | FMODE_EXCL);
goto Unlock;
}
@@ -1018,7 +1018,7 @@ static int software_resume(void)
pm_pr_dbg("Hibernation image not present or could not be loaded.\n");
return error;
Close_Finish:
- swsusp_close(FMODE_READ);
+ swsusp_close(FMODE_READ | FMODE_EXCL);
goto Finish;
}
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 740723bb3885..ad241b4ff64c 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -177,7 +177,7 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
if (res <= 0)
goto unlock;
} else {
- res = PAGE_SIZE - pg_offp;
+ res = PAGE_SIZE;
}
if (!data_of(data->handle)) {
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3c9b0fda64ac..77563109c0ea 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1918,7 +1918,7 @@ static void __init init_uclamp_rq(struct rq *rq)
};
}
- rq->uclamp_flags = 0;
+ rq->uclamp_flags = UCLAMP_FLAG_IDLE;
}
static void __init init_uclamp(void)
@@ -6617,11 +6617,11 @@ static int __init setup_preempt_mode(char *str)
int mode = sched_dynamic_mode(str);
if (mode < 0) {
pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
- return 1;
+ return 0;
}
sched_dynamic_update(mode);
- return 0;
+ return 1;
}
__setup("preempt=", setup_preempt_mode);
@@ -8619,9 +8619,6 @@ void __init init_idle(struct task_struct *idle, int cpu)
idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY;
kthread_set_per_cpu(idle, cpu);
- scs_task_reset(idle);
- kasan_unpoison_task_stack(idle);
-
#ifdef CONFIG_SMP
/*
* It's possible that init_idle() gets called multiple times on a task,
@@ -8777,7 +8774,6 @@ void idle_task_exit(void)
finish_arch_post_lock_switch();
}
- scs_task_reset(current);
/* finish_cpu(), as ran on the BP, will clean up the active_mm state */
}
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 872e481d5098..9392aea1804e 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -615,7 +615,8 @@ void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
.sum_exec_runtime = p->se.sum_exec_runtime,
};
- task_cputime(p, &cputime.utime, &cputime.stime);
+ if (task_cputime(p, &cputime.utime, &cputime.stime))
+ cputime.sum_exec_runtime = task_sched_runtime(p);
cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);
@@ -828,19 +829,21 @@ u64 task_gtime(struct task_struct *t)
* add up the pending nohz execution time since the last
* cputime snapshot.
*/
-void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
+bool task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
{
struct vtime *vtime = &t->vtime;
unsigned int seq;
u64 delta;
+ int ret;
if (!vtime_accounting_enabled()) {
*utime = t->utime;
*stime = t->stime;
- return;
+ return false;
}
do {
+ ret = false;
seq = read_seqcount_begin(&vtime->seqcount);
*utime = t->utime;
@@ -850,6 +853,7 @@ void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
if (vtime->state < VTIME_SYS)
continue;
+ ret = true;
delta = vtime_delta(vtime);
/*
@@ -861,6 +865,8 @@ void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
else
*utime += vtime->utime + delta;
} while (read_seqcount_retry(&vtime->seqcount, seq));
+
+ return ret;
}
static int vtime_state_fetch(struct vtime *vtime, int cpu)
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 76577d1642a5..eca38107b32f 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -238,6 +238,13 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
}
EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
+void __wake_up_pollfree(struct wait_queue_head *wq_head)
+{
+ __wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
+ /* POLLFREE must have cleared the queue. */
+ WARN_ON_ONCE(waitqueue_active(wq_head));
+}
+
/*
* Note: we use "set_current_state()" _after_ the wait-queue add,
* because we need a memory barrier there on SMP, so that any
diff --git a/kernel/signal.c b/kernel/signal.c
index a629b11bf3e0..dfcee3888b00 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -4185,6 +4185,15 @@ do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
ss_mode != 0))
return -EINVAL;
+ /*
+ * Return before taking any locks if no actual
+ * sigaltstack changes were requested.
+ */
+ if (t->sas_ss_sp == (unsigned long)ss_sp &&
+ t->sas_ss_size == ss_size &&
+ t->sas_ss_flags == ss_flags)
+ return 0;
+
sigaltstack_lock();
if (ss_mode == SS_DISABLE) {
ss_size = 0;
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 322b65d45676..41f470929e99 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -595,7 +595,8 @@ void irq_enter_rcu(void)
{
__irq_enter_raw();
- if (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET))
+ if (tick_nohz_full_cpu(smp_processor_id()) ||
+ (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
tick_irq_enter();
account_hardirq_enter(current);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 6bffe5af8cb1..17a283ce2b20 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1375,6 +1375,13 @@ static inline void tick_nohz_irq_enter(void)
now = ktime_get();
if (ts->idle_active)
tick_nohz_stop_idle(ts, now);
+ /*
+ * If all CPUs are idle, we may need to update a stale jiffies value.
+ * Note nohz_full is a special case: a timekeeper is guaranteed to stay
+ * alive but it might be busy looping with interrupts disabled in some
+ * rare case (typically stop machine). So we must make sure we have a
+ * last resort.
+ */
if (ts->tick_stopped)
tick_nohz_update_jiffies(now);
}
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index b348749a9fc6..dcdcb85121e4 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1306,8 +1306,7 @@ int do_settimeofday64(const struct timespec64 *ts)
timekeeping_forward_now(tk);
xt = tk_xtime(tk);
- ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
- ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;
+ ts_delta = timespec64_sub(*ts, xt);
if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
ret = -EINVAL;
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index e3d2c23c413d..85f1021ad459 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -2054,26 +2054,28 @@ unsigned long msleep_interruptible(unsigned int msecs)
EXPORT_SYMBOL(msleep_interruptible);
/**
- * usleep_range - Sleep for an approximate time
- * @min: Minimum time in usecs to sleep
- * @max: Maximum time in usecs to sleep
+ * usleep_range_state - Sleep for an approximate time in a given state
+ * @min: Minimum time in usecs to sleep
+ * @max: Maximum time in usecs to sleep
+ * @state: State the current task will be set to while sleeping
*
* In non-atomic context where the exact wakeup time is flexible, use
- * usleep_range() instead of udelay(). The sleep improves responsiveness
+ * usleep_range_state() instead of udelay(). The sleep improves responsiveness
* by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
* power usage by allowing hrtimers to take advantage of an already-
* scheduled interrupt instead of scheduling a new one just for this sleep.
*/
-void __sched usleep_range(unsigned long min, unsigned long max)
+void __sched usleep_range_state(unsigned long min, unsigned long max,
+ unsigned int state)
{
ktime_t exp = ktime_add_us(ktime_get(), min);
u64 delta = (u64)(max - min) * NSEC_PER_USEC;
for (;;) {
- __set_current_state(TASK_UNINTERRUPTIBLE);
+ __set_current_state(state);
/* Do not return before the requested sleep time has elapsed */
if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
break;
}
}
-EXPORT_SYMBOL(usleep_range);
+EXPORT_SYMBOL(usleep_range_state);
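
Editorial note: with the timer hunk above, the old entry point can be expressed as a thin wrapper around usleep_range_state(). A hedged sketch of what such header wrappers might look like (the usleep_idle_range() name is an assumption here, mirroring the DAMON hunk further down in this diff):

	static inline void usleep_range(unsigned long min, unsigned long max)
	{
		/* keep the historical uninterruptible behaviour */
		usleep_range_state(min, max, TASK_UNINTERRUPTIBLE);
	}

	static inline void usleep_idle_range(unsigned long min, unsigned long max)
	{
		/* sleep without contributing to load average */
		usleep_range_state(min, max, TASK_IDLE);
	}
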
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 30bc880c3849..be5f6b32a012 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -5217,6 +5217,7 @@ int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
{
struct ftrace_direct_func *direct;
struct ftrace_func_entry *entry;
+ struct ftrace_hash *hash;
int ret = -ENODEV;
mutex_lock(&direct_mutex);
@@ -5225,7 +5226,8 @@ int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
if (!entry)
goto out_unlock;
- if (direct_functions->count == 1)
+ hash = direct_ops.func_hash->filter_hash;
+ if (hash->count == 1)
unregister_ftrace_function(&direct_ops);
ret = ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
@@ -5540,6 +5542,10 @@ int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
err = unregister_ftrace_function(ops);
remove_direct_functions_hash(hash, addr);
mutex_unlock(&direct_mutex);
+
+ /* clean up for a possible later register call */
+ ops->func = NULL;
+ ops->trampoline = 0;
return err;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_direct_multi);
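
Editorial note: the two ftrace hunks above let the same direct-call ftrace_ops be registered again after it has been unregistered. A hedged usage sketch (my_ops, my_trampoline and traced_ip are illustrative names, not from this patch):

	static struct ftrace_ops my_ops;	/* illustrative ops */

	/* populate the filter hash before registering */
	ftrace_set_filter_ip(&my_ops, traced_ip, 0, 0);
	err = register_ftrace_direct_multi(&my_ops, (unsigned long)my_trampoline);
	/* ... later ... */
	err = unregister_ftrace_direct_multi(&my_ops, (unsigned long)my_trampoline);
	/* ops->func and ops->trampoline are now cleared, so re-registering works */
	err = register_ftrace_direct_multi(&my_ops, (unsigned long)my_trampoline);
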
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 6b60ab9475ed..38715aa6cfdf 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -1366,14 +1366,26 @@ __event_trigger_test_discard(struct trace_event_file *file,
if (eflags & EVENT_FILE_FL_TRIGGER_COND)
*tt = event_triggers_call(file, buffer, entry, event);
- if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
- (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
- !filter_match_preds(file->filter, entry))) {
- __trace_event_discard_commit(buffer, event);
- return true;
- }
+ if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
+ EVENT_FILE_FL_FILTERED |
+ EVENT_FILE_FL_PID_FILTER))))
+ return false;
+
+ if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
+ goto discard;
+
+ if (file->flags & EVENT_FILE_FL_FILTERED &&
+ !filter_match_preds(file->filter, entry))
+ goto discard;
+
+ if ((file->flags & EVENT_FILE_FL_PID_FILTER) &&
+ trace_event_ignore_this_pid(file))
+ goto discard;
return false;
+ discard:
+ __trace_event_discard_commit(buffer, event);
+ return true;
}
/**
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 4021b9a79f93..92be9cb1d7d4 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2678,12 +2678,24 @@ static struct trace_event_file *
trace_create_new_event(struct trace_event_call *call,
struct trace_array *tr)
{
+ struct trace_pid_list *no_pid_list;
+ struct trace_pid_list *pid_list;
struct trace_event_file *file;
+ unsigned int first;
file = kmem_cache_alloc(file_cachep, GFP_TRACE);
if (!file)
return NULL;
+ pid_list = rcu_dereference_protected(tr->filtered_pids,
+ lockdep_is_held(&event_mutex));
+ no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
+ lockdep_is_held(&event_mutex));
+
+ if (!trace_pid_list_first(pid_list, &first) ||
+ !trace_pid_list_first(no_pid_list, &first))
+ file->flags |= EVENT_FILE_FL_PID_FILTER;
+
file->event_call = call;
file->tr = tr;
atomic_set(&file->sm_ref, 0);
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 9555b8e1d1e3..319f9c8ca7e7 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -3757,7 +3757,7 @@ static int check_synth_field(struct synth_event *event,
if (strcmp(field->type, hist_field->type) != 0) {
if (field->size != hist_field->size ||
- field->is_signed != hist_field->is_signed)
+ (!field->is_string && field->is_signed != hist_field->is_signed))
return -EINVAL;
}
diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
index 22db3ce95e74..ca9c13b2ecf4 100644
--- a/kernel/trace/trace_events_synth.c
+++ b/kernel/trace/trace_events_synth.c
@@ -1237,9 +1237,8 @@ static int __create_synth_event(const char *name, const char *raw_fields)
argv + consumed, &consumed,
&field_version);
if (IS_ERR(field)) {
- argv_free(argv);
ret = PTR_ERR(field);
- goto err;
+ goto err_free_arg;
}
/*
@@ -1262,18 +1261,19 @@ static int __create_synth_event(const char *name, const char *raw_fields)
if (cmd_version > 1 && n_fields_this_loop >= 1) {
synth_err(SYNTH_ERR_INVALID_CMD, errpos(field_str));
ret = -EINVAL;
- goto err;
+ goto err_free_arg;
}
fields[n_fields++] = field;
if (n_fields == SYNTH_FIELDS_MAX) {
synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0);
ret = -EINVAL;
- goto err;
+ goto err_free_arg;
}
n_fields_this_loop++;
}
+ argv_free(argv);
if (consumed < argc) {
synth_err(SYNTH_ERR_INVALID_CMD, 0);
@@ -1281,7 +1281,6 @@ static int __create_synth_event(const char *name, const char *raw_fields)
goto err;
}
- argv_free(argv);
}
if (n_fields == 0) {
@@ -1307,6 +1306,8 @@ static int __create_synth_event(const char *name, const char *raw_fields)
kfree(saved_fields);
return ret;
+ err_free_arg:
+ argv_free(argv);
err:
for (i = 0; i < n_fields; i++)
free_synth_field(fields[i]);
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 0a5c0db3137e..f5f0039d31e5 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -1313,6 +1313,7 @@ static int uprobe_perf_open(struct trace_event_call *call,
return 0;
list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
+ tu = container_of(pos, struct trace_uprobe, tp);
err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
if (err) {
uprobe_perf_close(call, event);
diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
index 39bb56d2dcbe..9628b5571846 100644
--- a/kernel/trace/tracing_map.c
+++ b/kernel/trace/tracing_map.c
@@ -15,6 +15,7 @@
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/sort.h>
+#include <linux/kmemleak.h>
#include "tracing_map.h"
#include "trace.h"
@@ -307,6 +308,7 @@ static void tracing_map_array_free(struct tracing_map_array *a)
for (i = 0; i < a->n_pages; i++) {
if (!a->pages[i])
break;
+ kmemleak_free(a->pages[i]);
free_page((unsigned long)a->pages[i]);
}
@@ -342,6 +344,7 @@ static struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
a->pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
if (!a->pages[i])
goto free;
+ kmemleak_alloc(a->pages[i], PAGE_SIZE, 1, GFP_KERNEL);
}
out:
return a;
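
Editorial note: the tracing_map hunks above register raw pages with kmemleak. As a hedged sketch of the general pairing (illustrative only, mirroring the hunks rather than adding behaviour): memory taken straight from the page allocator is invisible to the kmemleak scanner, so pointers stored only inside such pages would be reported as leaks unless the page is registered and later unregistered.

	void *page = (void *)get_zeroed_page(GFP_KERNEL);

	if (page)
		kmemleak_alloc(page, PAGE_SIZE, 1, GFP_KERNEL);	/* make it scannable */
	/* ... store object pointers in the page ... */
	kmemleak_free(page);					/* unregister before freeing */
	free_page((unsigned long)page);
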
diff --git a/kernel/ucount.c b/kernel/ucount.c
index 4f5613dac227..7b32c356ebc5 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -264,15 +264,16 @@ void dec_ucount(struct ucounts *ucounts, enum ucount_type type)
long inc_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v)
{
struct ucounts *iter;
+ long max = LONG_MAX;
long ret = 0;
for (iter = ucounts; iter; iter = iter->ns->ucounts) {
- long max = READ_ONCE(iter->ns->ucount_max[type]);
long new = atomic_long_add_return(v, &iter->ucount[type]);
if (new < 0 || new > max)
ret = LONG_MAX;
else if (iter == ucounts)
ret = new;
+ max = READ_ONCE(iter->ns->ucount_max[type]);
}
return ret;
}
@@ -312,15 +313,16 @@ long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum ucount_type type)
{
/* Caller must hold a reference to ucounts */
struct ucounts *iter;
+ long max = LONG_MAX;
long dec, ret = 0;
for (iter = ucounts; iter; iter = iter->ns->ucounts) {
- long max = READ_ONCE(iter->ns->ucount_max[type]);
long new = atomic_long_add_return(1, &iter->ucount[type]);
if (new < 0 || new > max)
goto unwind;
if (iter == ucounts)
ret = new;
+ max = READ_ONCE(iter->ns->ucount_max[type]);
/*
* Grab an extra ucount reference for the caller when
* the rlimit count was previously 0.
@@ -339,15 +341,16 @@ unwind:
return 0;
}
-bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long max)
+bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long rlimit)
{
struct ucounts *iter;
- if (get_ucounts_value(ucounts, type) > max)
- return true;
+ long max = rlimit;
+ if (rlimit > LONG_MAX)
+ max = LONG_MAX;
for (iter = ucounts; iter; iter = iter->ns->ucounts) {
- max = READ_ONCE(iter->ns->ucount_max[type]);
if (get_ucounts_value(iter, type) > max)
return true;
+ max = READ_ONCE(iter->ns->ucount_max[type]);
}
return false;
}
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 9ef7ce18b4f5..5e14e32056ad 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -316,6 +316,7 @@ config DEBUG_INFO_BTF
bool "Generate BTF typeinfo"
depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED
depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
+ depends on BPF_SYSCALL
help
Generate deduplicated BTF type information from DWARF debug info.
Turning this on expects presence of pahole tool, which will convert
@@ -346,8 +347,9 @@ config FRAME_WARN
int "Warn for stack frames larger than"
range 0 8192
default 2048 if GCC_PLUGIN_LATENT_ENTROPY
- default 1536 if (!64BIT && (PARISC || XTENSA))
- default 1024 if (!64BIT && !PARISC)
+ default 2048 if PARISC
+ default 1536 if (!64BIT && XTENSA)
+ default 1024 if !64BIT
default 2048 if 64BIT
help
Tell gcc to warn at build time for stack frames larger than this.
diff --git a/lib/siphash.c b/lib/siphash.c
index a90112ee72a1..72b9068ab57b 100644
--- a/lib/siphash.c
+++ b/lib/siphash.c
@@ -49,6 +49,7 @@
SIPROUND; \
return (v0 ^ v1) ^ (v2 ^ v3);
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
{
const u8 *end = data + len - (len % sizeof(u64));
@@ -80,8 +81,8 @@ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
POSTAMBLE
}
EXPORT_SYMBOL(__siphash_aligned);
+#endif
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
{
const u8 *end = data + len - (len % sizeof(u64));
@@ -113,7 +114,6 @@ u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
POSTAMBLE
}
EXPORT_SYMBOL(__siphash_unaligned);
-#endif
/**
* siphash_1u64 - compute 64-bit siphash PRF value of a u64
@@ -250,6 +250,7 @@ EXPORT_SYMBOL(siphash_3u32);
HSIPROUND; \
return (v0 ^ v1) ^ (v2 ^ v3);
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
{
const u8 *end = data + len - (len % sizeof(u64));
@@ -280,8 +281,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
HPOSTAMBLE
}
EXPORT_SYMBOL(__hsiphash_aligned);
+#endif
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_unaligned(const void *data, size_t len,
const hsiphash_key_t *key)
{
@@ -313,7 +314,6 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
HPOSTAMBLE
}
EXPORT_SYMBOL(__hsiphash_unaligned);
-#endif
/**
* hsiphash_1u32 - compute 64-bit hsiphash PRF value of a u32
@@ -418,6 +418,7 @@ EXPORT_SYMBOL(hsiphash_4u32);
HSIPROUND; \
return v1 ^ v3;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
{
const u8 *end = data + len - (len % sizeof(u32));
@@ -438,8 +439,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
HPOSTAMBLE
}
EXPORT_SYMBOL(__hsiphash_aligned);
+#endif
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_unaligned(const void *data, size_t len,
const hsiphash_key_t *key)
{
@@ -461,7 +462,6 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
HPOSTAMBLE
}
EXPORT_SYMBOL(__hsiphash_unaligned);
-#endif
/**
* hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
diff --git a/mm/Kconfig b/mm/Kconfig
index 28edafc820ad..356f4f2c779e 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -428,7 +428,7 @@ config THP_SWAP
# UP and nommu archs use km based percpu allocator
#
config NEED_PER_CPU_KM
- depends on !SMP
+ depends on !SMP || !MMU
bool
default y
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 1eead4761011..eae96dfe0261 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -945,6 +945,13 @@ void bdi_unregister(struct backing_dev_info *bdi)
wb_shutdown(&bdi->wb);
cgwb_bdi_unregister(bdi);
+ /*
+ * If this BDI's min ratio has been set, use bdi_set_min_ratio() to
+ * update the global bdi_min_ratio.
+ */
+ if (bdi->min_ratio)
+ bdi_set_min_ratio(bdi, 0);
+
if (bdi->dev) {
bdi_debug_unregister(bdi);
device_unregister(bdi->dev);
diff --git a/mm/damon/core.c b/mm/damon/core.c
index c381b3c525d0..e92497895202 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -282,7 +282,6 @@ int damon_set_targets(struct damon_ctx *ctx,
for (i = 0; i < nr_ids; i++) {
t = damon_new_target(ids[i]);
if (!t) {
- pr_err("Failed to alloc damon_target\n");
/* The caller should do cleanup of the ids itself */
damon_for_each_target_safe(t, next, ctx)
damon_destroy_target(t);
@@ -312,16 +311,10 @@ int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
unsigned long aggr_int, unsigned long primitive_upd_int,
unsigned long min_nr_reg, unsigned long max_nr_reg)
{
- if (min_nr_reg < 3) {
- pr_err("min_nr_regions (%lu) must be at least 3\n",
- min_nr_reg);
+ if (min_nr_reg < 3)
return -EINVAL;
- }
- if (min_nr_reg > max_nr_reg) {
- pr_err("invalid nr_regions. min (%lu) > max (%lu)\n",
- min_nr_reg, max_nr_reg);
+ if (min_nr_reg > max_nr_reg)
return -EINVAL;
- }
ctx->sample_interval = sample_int;
ctx->aggr_interval = aggr_int;
@@ -980,10 +973,11 @@ static unsigned long damos_wmark_wait_us(struct damos *scheme)
static void kdamond_usleep(unsigned long usecs)
{
- if (usecs > 100 * 1000)
- schedule_timeout_interruptible(usecs_to_jiffies(usecs));
+ /* See Documentation/timers/timers-howto.rst for the thresholds */
+ if (usecs > 20 * USEC_PER_MSEC)
+ schedule_timeout_idle(usecs_to_jiffies(usecs));
else
- usleep_range(usecs, usecs + 1);
+ usleep_idle_range(usecs, usecs + 1);
}
/* Returns negative error code if it's not activated but should return */
@@ -1038,7 +1032,7 @@ static int kdamond_fn(void *data)
ctx->callback.after_sampling(ctx))
done = true;
- usleep_range(ctx->sample_interval, ctx->sample_interval + 1);
+ kdamond_usleep(ctx->sample_interval);
if (ctx->primitive.check_accesses)
max_nr_accesses = ctx->primitive.check_accesses(ctx);
diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c
index 9b520bb4a3e7..ad65436756af 100644
--- a/mm/damon/dbgfs.c
+++ b/mm/damon/dbgfs.c
@@ -210,10 +210,8 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
&wmarks.low, &parsed);
if (ret != 18)
break;
- if (!damos_action_valid(action)) {
- pr_err("wrong action %d\n", action);
+ if (!damos_action_valid(action))
goto fail;
- }
pos += parsed;
scheme = damon_new_scheme(min_sz, max_sz, min_nr_a, max_nr_a,
@@ -355,6 +353,7 @@ static ssize_t dbgfs_target_ids_write(struct file *file,
const char __user *buf, size_t count, loff_t *ppos)
{
struct damon_ctx *ctx = file->private_data;
+ struct damon_target *t, *next_t;
bool id_is_pid = true;
char *kbuf, *nrs;
unsigned long *targets;
@@ -399,8 +398,12 @@ static ssize_t dbgfs_target_ids_write(struct file *file,
goto unlock_out;
}
- /* remove targets with previously-set primitive */
- damon_set_targets(ctx, NULL, 0);
+ /* remove previously set targets */
+ damon_for_each_target_safe(t, next_t, ctx) {
+ if (targetid_is_pid(ctx))
+ put_pid((struct pid *)t->id);
+ damon_destroy_target(t);
+ }
/* Configure the context for the address space type */
if (id_is_pid)
@@ -652,10 +655,12 @@ static void dbgfs_before_terminate(struct damon_ctx *ctx)
if (!targetid_is_pid(ctx))
return;
+ mutex_lock(&ctx->kdamond_lock);
damon_for_each_target_safe(t, next, ctx) {
put_pid((struct pid *)t->id);
damon_destroy_target(t);
}
+ mutex_unlock(&ctx->kdamond_lock);
}
static struct damon_ctx *dbgfs_new_ctx(void)
diff --git a/mm/damon/vaddr-test.h b/mm/damon/vaddr-test.h
index ecfd0b2ed222..6a1b9272ea12 100644
--- a/mm/damon/vaddr-test.h
+++ b/mm/damon/vaddr-test.h
@@ -135,7 +135,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
struct damon_addr_range *three_regions,
unsigned long *expected, int nr_expected)
{
- struct damon_ctx *ctx = damon_new_ctx();
struct damon_target *t;
struct damon_region *r;
int i;
@@ -145,7 +144,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
r = damon_new_region(regions[i * 2], regions[i * 2 + 1]);
damon_add_region(r, t);
}
- damon_add_target(ctx, t);
damon_va_apply_three_regions(t, three_regions);
@@ -154,8 +152,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]);
KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]);
}
-
- damon_destroy_ctx(ctx);
}
/*
@@ -252,60 +248,59 @@ static void damon_test_apply_three_regions4(struct kunit *test)
new_three_regions, expected, ARRAY_SIZE(expected));
}
-static void damon_test_split_evenly(struct kunit *test)
+static void damon_test_split_evenly_fail(struct kunit *test,
+ unsigned long start, unsigned long end, unsigned int nr_pieces)
{
- struct damon_ctx *c = damon_new_ctx();
- struct damon_target *t;
- struct damon_region *r;
- unsigned long i;
-
- KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5),
- -EINVAL);
-
- t = damon_new_target(42);
- r = damon_new_region(0, 100);
- KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 0), -EINVAL);
+ struct damon_target *t = damon_new_target(42);
+ struct damon_region *r = damon_new_region(start, end);
damon_add_region(r, t);
- KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 10), 0);
- KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 10u);
+ KUNIT_EXPECT_EQ(test,
+ damon_va_evenly_split_region(t, r, nr_pieces), -EINVAL);
+ KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u);
- i = 0;
damon_for_each_region(r, t) {
- KUNIT_EXPECT_EQ(test, r->ar.start, i++ * 10);
- KUNIT_EXPECT_EQ(test, r->ar.end, i * 10);
+ KUNIT_EXPECT_EQ(test, r->ar.start, start);
+ KUNIT_EXPECT_EQ(test, r->ar.end, end);
}
+
damon_free_target(t);
+}
+
+static void damon_test_split_evenly_succ(struct kunit *test,
+ unsigned long start, unsigned long end, unsigned int nr_pieces)
+{
+ struct damon_target *t = damon_new_target(42);
+ struct damon_region *r = damon_new_region(start, end);
+ unsigned long expected_width = (end - start) / nr_pieces;
+ unsigned long i = 0;
- t = damon_new_target(42);
- r = damon_new_region(5, 59);
damon_add_region(r, t);
- KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 5), 0);
- KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u);
+ KUNIT_EXPECT_EQ(test,
+ damon_va_evenly_split_region(t, r, nr_pieces), 0);
+ KUNIT_EXPECT_EQ(test, damon_nr_regions(t), nr_pieces);
- i = 0;
damon_for_each_region(r, t) {
- if (i == 4)
+ if (i == nr_pieces - 1)
break;
- KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i++);
- KUNIT_EXPECT_EQ(test, r->ar.end, 5 + 10 * i);
+ KUNIT_EXPECT_EQ(test,
+ r->ar.start, start + i++ * expected_width);
+ KUNIT_EXPECT_EQ(test, r->ar.end, start + i * expected_width);
}
- KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i);
- KUNIT_EXPECT_EQ(test, r->ar.end, 59ul);
+ KUNIT_EXPECT_EQ(test, r->ar.start, start + i * expected_width);
+ KUNIT_EXPECT_EQ(test, r->ar.end, end);
damon_free_target(t);
+}
- t = damon_new_target(42);
- r = damon_new_region(5, 6);
- damon_add_region(r, t);
- KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 2), -EINVAL);
- KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u);
+static void damon_test_split_evenly(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5),
+ -EINVAL);
- damon_for_each_region(r, t) {
- KUNIT_EXPECT_EQ(test, r->ar.start, 5ul);
- KUNIT_EXPECT_EQ(test, r->ar.end, 6ul);
- }
- damon_free_target(t);
- damon_destroy_ctx(c);
+ damon_test_split_evenly_fail(test, 0, 100, 0);
+ damon_test_split_evenly_succ(test, 0, 100, 10);
+ damon_test_split_evenly_succ(test, 5, 59, 5);
+ damon_test_split_evenly_fail(test, 5, 6, 2);
}
static struct kunit_case damon_test_cases[] = {
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 35fe49080ee9..20a9a9d69eb1 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -13,6 +13,7 @@
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagewalk.h>
+#include <linux/sched/mm.h>
#include "prmtv-common.h"
@@ -626,7 +627,6 @@ int damon_va_apply_scheme(struct damon_ctx *ctx, struct damon_target *t,
case DAMOS_STAT:
return 0;
default:
- pr_warn("Wrong action %d\n", scheme->action);
return -EINVAL;
}
diff --git a/mm/filemap.c b/mm/filemap.c
index daa0e23a6ee6..39c4c46c6133 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3253,8 +3253,6 @@ static struct page *next_uptodate_page(struct page *page,
goto skip;
if (!PageUptodate(page) || PageReadahead(page))
goto skip;
- if (PageHWPoison(page))
- goto skip;
if (!trylock_page(page))
goto skip;
if (page->mapping != mapping)
diff --git a/mm/highmem.c b/mm/highmem.c
index ca9fa8c92593..762679050c9a 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -359,7 +359,6 @@ void kunmap_high(struct page *page)
}
EXPORT_SYMBOL(kunmap_high);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
unsigned start2, unsigned end2)
{
@@ -416,7 +415,6 @@ void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
BUG_ON((start1 | start2 | end1 | end2) != 0);
}
EXPORT_SYMBOL(zero_user_segments);
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_KMAP_LOCAL
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f025d234522f..a1baa198519a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2973,7 +2973,7 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid)
struct huge_bootmem_page *m = NULL; /* initialize for clang */
int nr_nodes, node;
- if (nid >= nr_online_nodes)
+ if (nid != NUMA_NO_NODE && nid >= nr_online_nodes)
return 0;
/* do node specific alloc */
if (nid != NUMA_NO_NODE) {
@@ -4919,9 +4919,9 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
move_huge_pte(vma, old_addr, new_addr, src_pte);
}
- i_mmap_unlock_write(mapping);
flush_tlb_range(vma, old_end - len, old_end);
mmu_notifier_invalidate_range_end(&range);
+ i_mmap_unlock_write(mapping);
return len + old_addr - old_end;
}
@@ -4939,6 +4939,7 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
struct hstate *h = hstate_vma(vma);
unsigned long sz = huge_page_size(h);
struct mmu_notifier_range range;
+ bool force_flush = false;
WARN_ON(!is_vm_hugetlb_page(vma));
BUG_ON(start & ~huge_page_mask(h));
@@ -4967,10 +4968,8 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
ptl = huge_pte_lock(h, mm, ptep);
if (huge_pmd_unshare(mm, vma, &address, ptep)) {
spin_unlock(ptl);
- /*
- * We just unmapped a page of PMDs by clearing a PUD.
- * The caller's TLB flush range should cover this area.
- */
+ tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
+ force_flush = true;
continue;
}
@@ -5027,6 +5026,22 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
}
mmu_notifier_invalidate_range_end(&range);
tlb_end_vma(tlb, vma);
+
+ /*
+ * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
+ * could defer the flush until now, since by holding i_mmap_rwsem we
+ * guaranteed that the last reference would not be dropped. But we must
+ * do the flushing before we return, as otherwise i_mmap_rwsem will be
+ * dropped and the last reference to the shared PMDs page might be
+ * dropped as well.
+ *
+ * In theory we could defer the freeing of the PMD pages as well, but
+ * huge_pmd_unshare() relies on the exact page_count for the PMD page to
+ * detect sharing, so we cannot defer the release of the page either.
+ * Instead, do flush now.
+ */
+ if (force_flush)
+ tlb_flush_mmu_tlbonly(tlb);
}
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 09945784df9e..a19154a8d196 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -683,6 +683,7 @@ static const struct file_operations objects_fops = {
.open = open_objects,
.read = seq_read,
.llseek = seq_lseek,
+ .release = seq_release,
};
static int __init kfence_debugfs_init(void)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 781605e92015..2ed5f2a0879d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -776,24 +776,6 @@ void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
rcu_read_unlock();
}
-/*
- * mod_objcg_mlstate() may be called with irq enabled, so
- * mod_memcg_lruvec_state() should be used.
- */
-static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
- struct pglist_data *pgdat,
- enum node_stat_item idx, int nr)
-{
- struct mem_cgroup *memcg;
- struct lruvec *lruvec;
-
- rcu_read_lock();
- memcg = obj_cgroup_memcg(objcg);
- lruvec = mem_cgroup_lruvec(memcg, pgdat);
- mod_memcg_lruvec_state(lruvec, idx, nr);
- rcu_read_unlock();
-}
-
/**
* __count_memcg_events - account VM events in a cgroup
* @memcg: the memory cgroup
@@ -2137,41 +2119,6 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
}
#endif
-/*
- * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
- * sequence used in this case to access content from object stock is slow.
- * To optimize for user context access, there are now two object stocks for
- * task context and interrupt context access respectively.
- *
- * The task context object stock can be accessed by disabling preemption only
- * which is cheap in non-preempt kernel. The interrupt context object stock
- * can only be accessed after disabling interrupt. User context code can
- * access interrupt object stock, but not vice versa.
- */
-static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
-{
- struct memcg_stock_pcp *stock;
-
- if (likely(in_task())) {
- *pflags = 0UL;
- preempt_disable();
- stock = this_cpu_ptr(&memcg_stock);
- return &stock->task_obj;
- }
-
- local_irq_save(*pflags);
- stock = this_cpu_ptr(&memcg_stock);
- return &stock->irq_obj;
-}
-
-static inline void put_obj_stock(unsigned long flags)
-{
- if (likely(in_task()))
- preempt_enable();
- else
- local_irq_restore(flags);
-}
-
/**
* consume_stock: Try to consume stocked charge on this cpu.
* @memcg: memcg to consume from.
@@ -2816,6 +2763,59 @@ retry:
*/
#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
+/*
+ * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
+ * sequence used in this case to access content from object stock is slow.
+ * To optimize for user context access, there are now two object stocks for
+ * task context and interrupt context access respectively.
+ *
+ * The task context object stock can be accessed by disabling preemption only
+ * which is cheap in non-preempt kernel. The interrupt context object stock
+ * can only be accessed after disabling interrupt. User context code can
+ * access interrupt object stock, but not vice versa.
+ */
+static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
+{
+ struct memcg_stock_pcp *stock;
+
+ if (likely(in_task())) {
+ *pflags = 0UL;
+ preempt_disable();
+ stock = this_cpu_ptr(&memcg_stock);
+ return &stock->task_obj;
+ }
+
+ local_irq_save(*pflags);
+ stock = this_cpu_ptr(&memcg_stock);
+ return &stock->irq_obj;
+}
+
+static inline void put_obj_stock(unsigned long flags)
+{
+ if (likely(in_task()))
+ preempt_enable();
+ else
+ local_irq_restore(flags);
+}
+
+/*
+ * mod_objcg_mlstate() may be called with irq enabled, so
+ * mod_memcg_lruvec_state() should be used.
+ */
+static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
+ struct pglist_data *pgdat,
+ enum node_stat_item idx, int nr)
+{
+ struct mem_cgroup *memcg;
+ struct lruvec *lruvec;
+
+ rcu_read_lock();
+ memcg = obj_cgroup_memcg(objcg);
+ lruvec = mem_cgroup_lruvec(memcg, pgdat);
+ mod_memcg_lruvec_state(lruvec, idx, nr);
+ rcu_read_unlock();
+}
+
int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
gfp_t gfp, bool new_page)
{
@@ -5558,7 +5558,7 @@ static int mem_cgroup_move_account(struct page *page,
VM_BUG_ON(from == to);
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
- VM_BUG_ON(compound && !folio_test_multi(folio));
+ VM_BUG_ON(compound && !folio_test_large(folio));
/*
* Prevent mem_cgroup_migrate() from looking at
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 07c875fdeaf0..3a274468f193 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1470,17 +1470,12 @@ static int memory_failure_hugetlb(unsigned long pfn, int flags)
if (!(flags & MF_COUNT_INCREASED)) {
res = get_hwpoison_page(p, flags);
if (!res) {
- /*
- * Check "filter hit" and "race with other subpage."
- */
lock_page(head);
- if (PageHWPoison(head)) {
- if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
- || (p != head && TestSetPageHWPoison(head))) {
+ if (hwpoison_filter(p)) {
+ if (TestClearPageHWPoison(head))
num_poisoned_pages_dec();
- unlock_page(head);
- return 0;
- }
+ unlock_page(head);
+ return 0;
}
unlock_page(head);
res = MF_FAILED;
@@ -2239,6 +2234,7 @@ retry:
} else if (ret == 0) {
if (soft_offline_free_page(page) && try_again) {
try_again = false;
+ flags &= ~MF_COUNT_INCREASED;
goto retry;
}
}
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 852041f6be41..2a9627dc784c 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -35,6 +35,7 @@
#include <linux/memblock.h>
#include <linux/compaction.h>
#include <linux/rmap.h>
+#include <linux/module.h>
#include <asm/tlbflush.h>
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 10e9c87260ed..f6248affaf38 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2140,8 +2140,7 @@ struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
* memory with both reclaim and compact as well.
*/
if (!page && (gfp & __GFP_DIRECT_RECLAIM))
- page = __alloc_pages_node(hpage_node,
- gfp, order);
+ page = __alloc_pages(gfp, order, hpage_node, nmask);
goto out;
}
diff --git a/mm/shmem.c b/mm/shmem.c
index dc038ce78700..18f93c2d68f1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2303,6 +2303,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
INIT_LIST_HEAD(&info->swaplist);
simple_xattrs_init(&info->xattrs);
cache_no_acl(inode);
+ mapping_set_large_folios(inode->i_mapping);
switch (mode & S_IFMT) {
default:
@@ -3870,7 +3871,7 @@ static struct file_system_type shmem_fs_type = {
.parameters = shmem_fs_parameters,
#endif
.kill_sb = kill_litter_super,
- .fs_flags = FS_USERNS_MOUNT | FS_THP_SUPPORT,
+ .fs_flags = FS_USERNS_MOUNT,
};
int __init shmem_init(void)
diff --git a/mm/slub.c b/mm/slub.c
index a8626825a829..abe7db581d68 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5081,6 +5081,7 @@ struct loc_track {
unsigned long max;
unsigned long count;
struct location *loc;
+ loff_t idx;
};
static struct dentry *slab_debugfs_root;
@@ -6052,11 +6053,11 @@ __initcall(slab_sysfs_init);
#if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
static int slab_debugfs_show(struct seq_file *seq, void *v)
{
-
- struct location *l;
- unsigned int idx = *(unsigned int *)v;
struct loc_track *t = seq->private;
+ struct location *l;
+ unsigned long idx;
+ idx = (unsigned long) t->idx;
if (idx < t->count) {
l = &t->loc[idx];
@@ -6105,16 +6106,18 @@ static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
{
struct loc_track *t = seq->private;
- v = ppos;
- ++*ppos;
+ t->idx = ++(*ppos);
if (*ppos <= t->count)
- return v;
+ return ppos;
return NULL;
}
static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
{
+ struct loc_track *t = seq->private;
+
+ t->idx = *ppos;
return ppos;
}
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index 16f706c55d92..2b5531840583 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -30,6 +30,7 @@
#include <linux/swap_slots.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
+#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
diff --git a/mm/util.c b/mm/util.c
index e58151a61255..741ba32a43ac 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -670,7 +670,7 @@ bool folio_mapped(struct folio *folio)
{
long i, nr;
- if (folio_test_single(folio))
+ if (!folio_test_large(folio))
return atomic_read(&folio->_mapcount) >= 0;
if (atomic_read(folio_mapcount_ptr(folio)) >= 0)
return true;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index fb9584641ac7..700434db5735 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1021,6 +1021,39 @@ static void handle_write_error(struct address_space *mapping,
unlock_page(page);
}
+static bool skip_throttle_noprogress(pg_data_t *pgdat)
+{
+ int reclaimable = 0, write_pending = 0;
+ int i;
+
+ /*
+ * If kswapd is disabled, reschedule if necessary but do not
+ * throttle as the system is likely near OOM.
+ */
+ if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
+ return true;
+
+ /*
+ * If there are a lot of dirty/writeback pages then do not
+ * throttle as throttling will occur when the pages cycle
+ * towards the end of the LRU if still under writeback.
+ */
+ for (i = 0; i < MAX_NR_ZONES; i++) {
+ struct zone *zone = pgdat->node_zones + i;
+
+ if (!populated_zone(zone))
+ continue;
+
+ reclaimable += zone_reclaimable_pages(zone);
+ write_pending += zone_page_state_snapshot(zone,
+ NR_ZONE_WRITE_PENDING);
+ }
+ if (2 * write_pending <= reclaimable)
+ return true;
+
+ return false;
+}
+
void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason)
{
wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason];
@@ -1056,8 +1089,16 @@ void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason)
}
break;
+ case VMSCAN_THROTTLE_CONGESTED:
+ fallthrough;
case VMSCAN_THROTTLE_NOPROGRESS:
- timeout = HZ/2;
+ if (skip_throttle_noprogress(pgdat)) {
+ cond_resched();
+ return;
+ }
+
+ timeout = 1;
+
break;
case VMSCAN_THROTTLE_ISOLATED:
timeout = HZ/50;
@@ -3321,7 +3362,7 @@ again:
if (!current_is_kswapd() && current_may_throttle() &&
!sc->hibernation_mode &&
test_bit(LRUVEC_CONGESTED, &target_lruvec->flags))
- reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
+ reclaim_throttle(pgdat, VMSCAN_THROTTLE_CONGESTED);
if (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
sc))
@@ -3386,16 +3427,16 @@ static void consider_reclaim_throttle(pg_data_t *pgdat, struct scan_control *sc)
}
/*
- * Do not throttle kswapd on NOPROGRESS as it will throttle on
- * VMSCAN_THROTTLE_WRITEBACK if there are too many pages under
- * writeback and marked for immediate reclaim at the tail of
- * the LRU.
+ * Do not throttle kswapd or cgroup reclaim on NOPROGRESS as it will
+ * throttle on VMSCAN_THROTTLE_WRITEBACK if there are too many pages
+ * under writeback and marked for immediate reclaim at the tail of the
+ * LRU.
*/
- if (current_is_kswapd())
+ if (current_is_kswapd() || cgroup_reclaim(sc))
return;
/* Throttle if making no progress at high prioities. */
- if (sc->priority < DEF_PRIORITY - 2)
+ if (sc->priority == 1 && !sc->nr_reclaimed)
reclaim_throttle(pgdat, VMSCAN_THROTTLE_NOPROGRESS);
}
@@ -3415,6 +3456,7 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
unsigned long nr_soft_scanned;
gfp_t orig_mask;
pg_data_t *last_pgdat = NULL;
+ pg_data_t *first_pgdat = NULL;
/*
* If the number of buffer_heads in the machine exceeds the maximum
@@ -3478,14 +3520,19 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
/* need some check for avoid more shrink_zone() */
}
+ if (!first_pgdat)
+ first_pgdat = zone->zone_pgdat;
+
/* See comment about same check for global reclaim above */
if (zone->zone_pgdat == last_pgdat)
continue;
last_pgdat = zone->zone_pgdat;
shrink_node(zone->zone_pgdat, sc);
- consider_reclaim_throttle(zone->zone_pgdat, sc);
}
+ if (first_pgdat)
+ consider_reclaim_throttle(first_pgdat, sc);
+
/*
* Restore to original mask to avoid the impact on the caller if we
* promoted it to __GFP_HIGHMEM.
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index a3a0a5e994f5..abaa5d96ded2 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -184,9 +184,6 @@ int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack)
if (err)
goto out_unregister_netdev;
- /* Account for reference in struct vlan_dev_priv */
- dev_hold(real_dev);
-
vlan_stacked_transfer_operstate(real_dev, dev, vlan);
linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index ab6dee28536d..a54535cbcf4c 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -615,6 +615,9 @@ static int vlan_dev_init(struct net_device *dev)
if (!vlan->vlan_pcpu_stats)
return -ENOMEM;
+ /* Get vlan's reference to real_dev */
+ dev_hold(real_dev);
+
return 0;
}
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 2f34bbdde0e8..cfca99e295b8 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -85,8 +85,10 @@ static void ax25_kill_by_device(struct net_device *dev)
again:
ax25_for_each(s, &ax25_list) {
if (s->ax25_dev == ax25_dev) {
- s->ax25_dev = NULL;
spin_unlock_bh(&ax25_list_lock);
+ lock_sock(s->sk);
+ s->ax25_dev = NULL;
+ release_sock(s->sk);
ax25_disconnect(s, ENETUNREACH);
spin_lock_bh(&ax25_list_lock);
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index db4ab2c2ce18..891cfcf45644 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -337,7 +337,7 @@ static int old_deviceless(struct net *net, void __user *uarg)
args[2] = get_bridge_ifindices(net, indices, args[2]);
- ret = copy_to_user(uarg, indices,
+ ret = copy_to_user((void __user *)args[1], indices,
array_size(args[2], sizeof(int)))
? -EFAULT : args[2];
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index f3d751105343..de2409889489 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -4522,6 +4522,38 @@ int br_multicast_set_mld_version(struct net_bridge_mcast *brmctx,
}
#endif
+void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
+ unsigned long val)
+{
+ unsigned long intvl_jiffies = clock_t_to_jiffies(val);
+
+ if (intvl_jiffies < BR_MULTICAST_QUERY_INTVL_MIN) {
+ br_info(brmctx->br,
+ "trying to set multicast query interval below minimum, setting to %lu (%ums)\n",
+ jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MIN),
+ jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MIN));
+ intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
+ }
+
+ brmctx->multicast_query_interval = intvl_jiffies;
+}
+
+void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
+ unsigned long val)
+{
+ unsigned long intvl_jiffies = clock_t_to_jiffies(val);
+
+ if (intvl_jiffies < BR_MULTICAST_STARTUP_QUERY_INTVL_MIN) {
+ br_info(brmctx->br,
+ "trying to set multicast startup query interval below minimum, setting to %lu (%ums)\n",
+ jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN),
+ jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN));
+ intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
+ }
+
+ brmctx->multicast_startup_query_interval = intvl_jiffies;
+}
+
/**
* br_multicast_list_adjacent - Returns snooped multicast addresses
* @dev: The bridge port adjacent to which to retrieve addresses
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 0c8b5f1a15bc..2ff83d84230d 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -1357,7 +1357,7 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);
- br->multicast_ctx.multicast_query_interval = clock_t_to_jiffies(val);
+ br_multicast_set_query_intvl(&br->multicast_ctx, val);
}
if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
@@ -1369,7 +1369,7 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);
- br->multicast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val);
+ br_multicast_set_startup_query_intvl(&br->multicast_ctx, val);
}
if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index c0efd697865a..e8c6ee322c71 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -28,6 +28,8 @@
#define BR_MAX_PORTS (1<<BR_PORT_BITS)
#define BR_MULTICAST_DEFAULT_HASH_MAX 4096
+#define BR_MULTICAST_QUERY_INTVL_MIN msecs_to_jiffies(1000)
+#define BR_MULTICAST_STARTUP_QUERY_INTVL_MIN BR_MULTICAST_QUERY_INTVL_MIN
#define BR_HWDOM_MAX BITS_PER_LONG
@@ -963,6 +965,10 @@ int br_multicast_dump_querier_state(struct sk_buff *skb,
int nest_attr);
size_t br_multicast_querier_state_size(void);
size_t br_rports_size(const struct net_bridge_mcast *brmctx);
+void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
+ unsigned long val);
+void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
+ unsigned long val);
static inline bool br_group_is_l2(const struct br_ip *group)
{
@@ -1147,9 +1153,9 @@ br_multicast_port_ctx_get_global(const struct net_bridge_mcast_port *pmctx)
static inline bool
br_multicast_ctx_vlan_global_disabled(const struct net_bridge_mcast *brmctx)
{
- return br_opt_get(brmctx->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) &&
- br_multicast_ctx_is_vlan(brmctx) &&
- !(brmctx->vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED);
+ return br_multicast_ctx_is_vlan(brmctx) &&
+ (!br_opt_get(brmctx->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) ||
+ !(brmctx->vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED));
}
static inline bool
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index d9a89ddd0331..7b0c19772111 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -658,7 +658,7 @@ static ssize_t multicast_query_interval_show(struct device *d,
static int set_query_interval(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
- br->multicast_ctx.multicast_query_interval = clock_t_to_jiffies(val);
+ br_multicast_set_query_intvl(&br->multicast_ctx, val);
return 0;
}
@@ -706,7 +706,7 @@ static ssize_t multicast_startup_query_interval_show(
static int set_startup_query_interval(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
- br->multicast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val);
+ br_multicast_set_startup_query_intvl(&br->multicast_ctx, val);
return 0;
}
diff --git a/net/bridge/br_vlan_options.c b/net/bridge/br_vlan_options.c
index 8ffd4ed2563c..a6382973b3e7 100644
--- a/net/bridge/br_vlan_options.c
+++ b/net/bridge/br_vlan_options.c
@@ -521,7 +521,7 @@ static int br_vlan_process_global_one_opts(const struct net_bridge *br,
u64 val;
val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL]);
- v->br_mcast_ctx.multicast_query_interval = clock_t_to_jiffies(val);
+ br_multicast_set_query_intvl(&v->br_mcast_ctx, val);
*changed = true;
}
if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL]) {
@@ -535,7 +535,7 @@ static int br_vlan_process_global_one_opts(const struct net_bridge *br,
u64 val;
val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL]);
- v->br_mcast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val);
+ br_multicast_set_startup_query_intvl(&v->br_mcast_ctx, val);
*changed = true;
}
if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER]) {
diff --git a/net/core/dev.c b/net/core/dev.c
index 15ac064b5562..c4708e2487fb 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3941,8 +3941,8 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
return skb;
/* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
- qdisc_skb_cb(skb)->mru = 0;
- qdisc_skb_cb(skb)->post_ct = false;
+ tc_skb_cb(skb)->mru = 0;
+ tc_skb_cb(skb)->post_ct = false;
mini_qdisc_bstats_cpu_update(miniq, skb);
switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) {
@@ -4210,7 +4210,10 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
if (dev->flags & IFF_UP) {
int cpu = smp_processor_id(); /* ok because BHs are off */
- if (txq->xmit_lock_owner != cpu) {
+ /* Other cpus might concurrently change txq->xmit_lock_owner
+ * to -1 or to their cpu id, but not to our id.
+ */
+ if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
if (dev_xmit_recursion())
goto recursion_alert;
@@ -5100,8 +5103,8 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
}
qdisc_skb_cb(skb)->pkt_len = skb->len;
- qdisc_skb_cb(skb)->mru = 0;
- qdisc_skb_cb(skb)->post_ct = false;
+ tc_skb_cb(skb)->mru = 0;
+ tc_skb_cb(skb)->post_ct = false;
skb->tc_at_ingress = 1;
mini_qdisc_bstats_cpu_update(miniq, skb);
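
Editorial note: the __dev_queue_xmit hunk above annotates a lockless read of txq->xmit_lock_owner. A hedged sketch of the pattern it relies on (the writer-side annotation is an assumption here and is not shown in this diff): the lockless reader uses READ_ONCE() and the lock holder updates the field with WRITE_ONCE(), so the compiler can neither tear nor refetch either access.

	/* writer, while holding txq->_xmit_lock */
	WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
	/* ... transmit ... */
	WRITE_ONCE(txq->xmit_lock_owner, -1);

	/* lockless reader: other CPUs never set the owner to our id */
	if (READ_ONCE(txq->xmit_lock_owner) != smp_processor_id())
		do_transmit();	/* illustrative helper */
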
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 5ad72dbfcd07..c06c9ba6e8c5 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -4110,14 +4110,6 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
return err;
}
- if (info->attrs[DEVLINK_ATTR_NETNS_PID] ||
- info->attrs[DEVLINK_ATTR_NETNS_FD] ||
- info->attrs[DEVLINK_ATTR_NETNS_ID]) {
- dest_net = devlink_netns_get(skb, info);
- if (IS_ERR(dest_net))
- return PTR_ERR(dest_net);
- }
-
if (info->attrs[DEVLINK_ATTR_RELOAD_ACTION])
action = nla_get_u8(info->attrs[DEVLINK_ATTR_RELOAD_ACTION]);
else
@@ -4160,6 +4152,14 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
}
+ if (info->attrs[DEVLINK_ATTR_NETNS_PID] ||
+ info->attrs[DEVLINK_ATTR_NETNS_FD] ||
+ info->attrs[DEVLINK_ATTR_NETNS_ID]) {
+ dest_net = devlink_netns_get(skb, info);
+ if (IS_ERR(dest_net))
+ return PTR_ERR(dest_net);
+ }
+
err = devlink_reload(devlink, dest_net, action, limit, &actions_performed, info->extack);
if (dest_net)
diff --git a/net/core/dst_cache.c b/net/core/dst_cache.c
index be74ab4551c2..0ccfd5fa5cb9 100644
--- a/net/core/dst_cache.c
+++ b/net/core/dst_cache.c
@@ -162,3 +162,22 @@ void dst_cache_destroy(struct dst_cache *dst_cache)
free_percpu(dst_cache->cache);
}
EXPORT_SYMBOL_GPL(dst_cache_destroy);
+
+void dst_cache_reset_now(struct dst_cache *dst_cache)
+{
+ int i;
+
+ if (!dst_cache->cache)
+ return;
+
+ dst_cache->reset_ts = jiffies;
+ for_each_possible_cpu(i) {
+ struct dst_cache_pcpu *idst = per_cpu_ptr(dst_cache->cache, i);
+ struct dst_entry *dst = idst->dst;
+
+ idst->cookie = 0;
+ idst->dst = NULL;
+ dst_release(dst);
+ }
+}
+EXPORT_SYMBOL_GPL(dst_cache_reset_now);
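
Editorial note: dst_cache_reset_now(), added above, drops every per-CPU cached entry immediately instead of only bumping the reset timestamp. A hedged usage sketch (the tunnel structure and callback name are illustrative, not from this patch):

	static void my_tunnel_config_changed(struct my_tunnel *t)
	{
		/* routes cached before the change may now be stale: flush them */
		dst_cache_reset_now(&t->dst_cache);
	}
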
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 79df7cd9dbc1..1bb567a3b329 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -323,7 +323,7 @@ jumped:
if (!err && ops->suppress && INDIRECT_CALL_MT(ops->suppress,
fib6_rule_suppress,
fib4_rule_suppress,
- rule, arg))
+ rule, flags, arg))
continue;
if (err != -EAGAIN) {
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 3255f57f5131..1b094c481f1d 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -238,7 +238,7 @@ void
skb_flow_dissect_ct(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container, u16 *ctinfo_map,
- size_t mapsize, bool post_ct)
+ size_t mapsize, bool post_ct, u16 zone)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
struct flow_dissector_key_ct *key;
@@ -260,6 +260,7 @@ skb_flow_dissect_ct(const struct sk_buff *skb,
if (!ct) {
key->ct_state = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
TCA_FLOWER_KEY_CT_FLAGS_INVALID;
+ key->ct_zone = zone;
return;
}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 47931c8be04b..dda12fbd177b 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -763,11 +763,10 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
ASSERT_RTNL();
- n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
+ n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
if (!n)
goto out;
- n->protocol = 0;
write_pnet(&n->net, net);
memcpy(n->key, pkey, key_len);
n->dev = dev;
@@ -1779,6 +1778,7 @@ int neigh_table_clear(int index, struct neigh_table *tbl)
{
neigh_tables[index] = NULL;
/* It is not clean... Fix it to unload IPv6 module safely */
+ cancel_delayed_work_sync(&tbl->managed_work);
cancel_delayed_work_sync(&tbl->gc_work);
del_timer_sync(&tbl->proxy_timer);
pneigh_queue_purge(&tbl->proxy_queue);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ba2f38246f07..909db87d7383 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -832,7 +832,7 @@ void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);
if (dev)
- printk("%sdev name=%s feat=0x%pNF\n",
+ printk("%sdev name=%s feat=%pNF\n",
level, dev->name, &dev->features);
if (sk)
printk("%ssk family=%hu type=%u proto=%u\n",
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 1ae52ac943f6..8eb671c827f9 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -1124,6 +1124,8 @@ void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
+ psock_set_prog(&psock->progs.stream_parser, NULL);
+
if (!psock->saved_data_ready)
return;
@@ -1212,6 +1214,9 @@ void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
+ psock_set_prog(&psock->progs.stream_verdict, NULL);
+ psock_set_prog(&psock->progs.skb_verdict, NULL);
+
if (!psock->saved_data_ready)
return;
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index f39ef79ced67..4ca4b11f4e5f 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -167,8 +167,11 @@ static void sock_map_del_link(struct sock *sk,
write_lock_bh(&sk->sk_callback_lock);
if (strp_stop)
sk_psock_stop_strp(sk, psock);
- else
+ if (verdict_stop)
sk_psock_stop_verdict(sk, psock);
+
+ if (psock->psock_update_sk_prot)
+ psock->psock_update_sk_prot(sk, psock, false);
write_unlock_bh(&sk->sk_callback_lock);
}
}
@@ -282,6 +285,12 @@ static int sock_map_link(struct bpf_map *map, struct sock *sk)
if (msg_parser)
psock_set_prog(&psock->progs.msg_parser, msg_parser);
+ if (stream_parser)
+ psock_set_prog(&psock->progs.stream_parser, stream_parser);
+ if (stream_verdict)
+ psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
+ if (skb_verdict)
+ psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
ret = sock_map_init_proto(sk, psock);
if (ret < 0)
@@ -292,14 +301,10 @@ static int sock_map_link(struct bpf_map *map, struct sock *sk)
ret = sk_psock_init_strp(sk, psock);
if (ret)
goto out_unlock_drop;
- psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
- psock_set_prog(&psock->progs.stream_parser, stream_parser);
sk_psock_start_strp(sk, psock);
} else if (!stream_parser && stream_verdict && !psock->saved_data_ready) {
- psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
sk_psock_start_verdict(sk,psock);
} else if (!stream_verdict && skb_verdict && !psock->saved_data_ready) {
- psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
sk_psock_start_verdict(sk, psock);
}
write_unlock_bh(&sk->sk_callback_lock);
diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c
index de1c849a0a70..4ed74d509d6a 100644
--- a/net/dsa/tag_ocelot.c
+++ b/net/dsa/tag_ocelot.c
@@ -47,9 +47,13 @@ static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
void *injection;
__be32 *prefix;
u32 rew_op = 0;
+ u64 qos_class;
ocelot_xmit_get_vlan_info(skb, dp, &vlan_tci, &tag_type);
+ qos_class = netdev_get_num_tc(netdev) ?
+ netdev_get_prio_tc_map(netdev, skb->priority) : skb->priority;
+
injection = skb_push(skb, OCELOT_TAG_LEN);
prefix = skb_push(skb, OCELOT_SHORT_PREFIX_LEN);
@@ -57,7 +61,7 @@ static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
memset(injection, 0, OCELOT_TAG_LEN);
ocelot_ifh_set_bypass(injection, 1);
ocelot_ifh_set_src(injection, ds->num_ports);
- ocelot_ifh_set_qos_class(injection, skb->priority);
+ ocelot_ifh_set_qos_class(injection, qos_class);
ocelot_ifh_set_vlan_tci(injection, vlan_tci);
ocelot_ifh_set_tag_type(injection, tag_type);
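The ocelot change above stops feeding skb->priority straight into the injection header: when the netdev has traffic classes configured, the priority is first translated through the prio-to-TC map, and only falls back to the raw priority otherwise. A hedged sketch of that selection logic, with simplified types and illustrative names:

/* Simplified sketch of the qos_class selection, not the driver code. */
#include <stdint.h>

struct fake_netdev {
	int num_tc;                 /* 0 means no traffic classes configured */
	uint8_t prio_tc_map[16];    /* priority -> traffic class */
};

static uint64_t pick_qos_class(const struct fake_netdev *dev, uint32_t skb_priority)
{
	if (dev->num_tc)
		return dev->prio_tc_map[skb_priority & 0xf];  /* mapped class */
	return skb_priority;                                  /* legacy behaviour */
}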
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 65e9bc1058b5..20bcf86970ff 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -1719,7 +1719,7 @@ static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev,
struct ethtool_coalesce coalesce;
int ret;
- if (!dev->ethtool_ops->set_coalesce && !dev->ethtool_ops->get_coalesce)
+ if (!dev->ethtool_ops->set_coalesce || !dev->ethtool_ops->get_coalesce)
return -EOPNOTSUPP;
ret = dev->ethtool_ops->get_coalesce(dev, &coalesce, &kernel_coalesce,
diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
index 38b44c0291b1..96f4180aabd2 100644
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -40,7 +40,8 @@ int ethnl_ops_begin(struct net_device *dev)
if (dev->dev.parent)
pm_runtime_get_sync(dev->dev.parent);
- if (!netif_device_present(dev)) {
+ if (!netif_device_present(dev) ||
+ dev->reg_state == NETREG_UNREGISTERING) {
ret = -ENODEV;
goto err;
}
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 0189e3cd4a7d..5f70ffdae1b5 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -154,7 +154,7 @@ void inet_sock_destruct(struct sock *sk)
kfree(rcu_dereference_protected(inet->inet_opt, 1));
dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
- dst_release(sk->sk_rx_dst);
+ dst_release(rcu_dereference_protected(sk->sk_rx_dst, 1));
sk_refcnt_debug_dec(sk);
}
EXPORT_SYMBOL(inet_sock_destruct);
@@ -1994,6 +1994,10 @@ static int __init inet_init(void)
ip_init();
+ /* Initialise per-cpu ipv4 mibs */
+ if (init_ipv4_mibs())
+ panic("%s: Cannot init ipv4 mibs\n", __func__);
+
/* Setup TCP slab cache for open requests. */
tcp_init();
@@ -2024,12 +2028,6 @@ static int __init inet_init(void)
if (init_inet_pernet_ops())
pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);
- /*
- * Initialise per-cpu ipv4 mibs
- */
-
- if (init_ipv4_mibs())
- pr_crit("%s: Cannot init ipv4 mibs\n", __func__);
ipv4_proc_init();
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 9fe13e4f5d08..4d61ddd8a0ec 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1582,7 +1582,7 @@ static int __net_init fib_net_init(struct net *net)
int error;
#ifdef CONFIG_IP_ROUTE_CLASSID
- net->ipv4.fib_num_tclassid_users = 0;
+ atomic_set(&net->ipv4.fib_num_tclassid_users, 0);
#endif
error = ip_fib_net_init(net);
if (error < 0)
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index ce54a30c2ef1..d279cb8ac158 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -141,6 +141,7 @@ INDIRECT_CALLABLE_SCOPE int fib4_rule_action(struct fib_rule *rule,
}
INDIRECT_CALLABLE_SCOPE bool fib4_rule_suppress(struct fib_rule *rule,
+ int flags,
struct fib_lookup_arg *arg)
{
struct fib_result *result = (struct fib_result *) arg->result;
@@ -263,7 +264,7 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
if (tb[FRA_FLOW]) {
rule4->tclassid = nla_get_u32(tb[FRA_FLOW]);
if (rule4->tclassid)
- net->ipv4.fib_num_tclassid_users++;
+ atomic_inc(&net->ipv4.fib_num_tclassid_users);
}
#endif
@@ -295,7 +296,7 @@ static int fib4_rule_delete(struct fib_rule *rule)
#ifdef CONFIG_IP_ROUTE_CLASSID
if (((struct fib4_rule *)rule)->tclassid)
- net->ipv4.fib_num_tclassid_users--;
+ atomic_dec(&net->ipv4.fib_num_tclassid_users);
#endif
net->ipv4.fib_has_custom_rules = true;
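fib_num_tclassid_users is bumped and dropped from paths that are not all serialized by one lock, so the plain ++/-- above (and in fib_semantics.c below) becomes atomic_inc()/atomic_dec(); the hazard is the classic lost update where two writers read the same old value. A userspace analogue of the conversion using C11 atomics, illustrative only:

#include <stdatomic.h>

static atomic_int tclassid_users;   /* plays the role of fib_num_tclassid_users */

static void tclassid_user_get(void)
{
	/* atomic read-modify-write; a plain counter++ could lose increments */
	atomic_fetch_add_explicit(&tclassid_users, 1, memory_order_relaxed);
}

static void tclassid_user_put(void)
{
	atomic_fetch_sub_explicit(&tclassid_users, 1, memory_order_relaxed);
}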
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 3364cb9c67e0..fde7797b5806 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -220,7 +220,7 @@ void fib_nh_release(struct net *net, struct fib_nh *fib_nh)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
if (fib_nh->nh_tclassid)
- net->ipv4.fib_num_tclassid_users--;
+ atomic_dec(&net->ipv4.fib_num_tclassid_users);
#endif
fib_nh_common_release(&fib_nh->nh_common);
}
@@ -632,7 +632,7 @@ int fib_nh_init(struct net *net, struct fib_nh *nh,
#ifdef CONFIG_IP_ROUTE_CLASSID
nh->nh_tclassid = cfg->fc_flow;
if (nh->nh_tclassid)
- net->ipv4.fib_num_tclassid_users++;
+ atomic_inc(&net->ipv4.fib_num_tclassid_users);
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
nh->fib_nh_weight = nh_weight;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index f7fea3a7c5e6..62a67fdc344c 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -721,7 +721,7 @@ static struct request_sock *inet_reqsk_clone(struct request_sock *req,
sk_node_init(&nreq_sk->sk_node);
nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
nreq_sk->sk_rx_queue_mapping = req_sk->sk_rx_queue_mapping;
#endif
nreq_sk->sk_incoming_cpu = req_sk->sk_incoming_cpu;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index c8fa6e7f7d12..581b5b2d72a5 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -261,6 +261,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
r->idiag_state = sk->sk_state;
r->idiag_timer = 0;
r->idiag_retrans = 0;
+ r->idiag_expires = 0;
if (inet_diag_msg_attrs_fill(sk, skb, r, ext,
sk_user_ns(NETLINK_CB(cb->skb).sk),
@@ -314,9 +315,6 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
r->idiag_retrans = icsk->icsk_probes_out;
r->idiag_expires =
jiffies_delta_to_msecs(sk->sk_timer.expires - jiffies);
- } else {
- r->idiag_timer = 0;
- r->idiag_expires = 0;
}
if ((ext & (1 << (INET_DIAG_INFO - 1))) && handler->idiag_info_size) {
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index 9e8100728d46..5dbd4b5505eb 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -1899,15 +1899,36 @@ static void remove_nexthop(struct net *net, struct nexthop *nh,
/* if any FIB entries reference this nexthop, any dst entries
* need to be regenerated
*/
-static void nh_rt_cache_flush(struct net *net, struct nexthop *nh)
+static void nh_rt_cache_flush(struct net *net, struct nexthop *nh,
+ struct nexthop *replaced_nh)
{
struct fib6_info *f6i;
+ struct nh_group *nhg;
+ int i;
if (!list_empty(&nh->fi_list))
rt_cache_flush(net);
list_for_each_entry(f6i, &nh->f6i_list, nh_list)
ipv6_stub->fib6_update_sernum(net, f6i);
+
+ /* if an IPv6 group was replaced, we have to release all old
+ * dsts to make sure all refcounts are released
+ */
+ if (!replaced_nh->is_group)
+ return;
+
+ /* new dsts must use only the new nexthop group */
+ synchronize_net();
+
+ nhg = rtnl_dereference(replaced_nh->nh_grp);
+ for (i = 0; i < nhg->num_nh; i++) {
+ struct nh_grp_entry *nhge = &nhg->nh_entries[i];
+ struct nh_info *nhi = rtnl_dereference(nhge->nh->nh_info);
+
+ if (nhi->family == AF_INET6)
+ ipv6_stub->fib6_nh_release_dsts(&nhi->fib6_nh);
+ }
}
static int replace_nexthop_grp(struct net *net, struct nexthop *old,
@@ -2247,7 +2268,7 @@ static int replace_nexthop(struct net *net, struct nexthop *old,
err = replace_nexthop_single(net, old, new, extack);
if (!err) {
- nh_rt_cache_flush(net, old);
+ nh_rt_cache_flush(net, old, new);
__remove_nexthop(net, new, NULL);
nexthop_put(new);
@@ -2544,11 +2565,15 @@ static int nh_create_ipv6(struct net *net, struct nexthop *nh,
/* sets nh_dev if successful */
err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
extack);
- if (err)
+ if (err) {
+ /* IPv6 is not enabled, don't call fib6_nh_release */
+ if (err == -EAFNOSUPPORT)
+ goto out;
ipv6_stub->fib6_nh_release(fib6_nh);
- else
+ } else {
nh->nh_flags = fib6_nh->fib_nh_flags;
-
+ }
+out:
return err;
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index bbb3d39c69af..2bb28bfd83bf 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3012,8 +3012,7 @@ int tcp_disconnect(struct sock *sk, int flags)
icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
__sk_dst_reset(sk);
- dst_release(sk->sk_rx_dst);
- sk->sk_rx_dst = NULL;
+ dst_release(xchg((__force struct dst_entry **)&sk->sk_rx_dst, NULL));
tcp_saved_syn_free(tp);
tp->compressed_ack = 0;
tp->segs_in = 0;
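tcp_disconnect() now clears sk_rx_dst with a single xchg() and releases whatever was stored there, instead of a separate read and NULL assignment, so the pointer is taken over exactly once. The shape of that swap-then-release idiom, as a hedged C11 sketch rather than the kernel code:

#include <stdatomic.h>
#include <stdlib.h>

struct dst {
	atomic_int refcnt;
};

static void dst_put(struct dst *d)
{
	if (d && atomic_fetch_sub(&d->refcnt, 1) == 1)
		free(d);   /* last reference gone */
}

static void clear_cached_dst(_Atomic(struct dst *) *slot)
{
	/* atomically take ownership of the old value, then drop its reference */
	struct dst *old = atomic_exchange(slot, NULL);

	dst_put(old);
}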
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 5e9d9c51164c..e07837e23b3f 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -330,8 +330,6 @@ static void cubictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
return;
if (tcp_in_slow_start(tp)) {
- if (hystart && after(ack, ca->end_seq))
- bictcp_hystart_reset(sk);
acked = tcp_slow_start(tp, acked);
if (!acked)
return;
@@ -391,6 +389,9 @@ static void hystart_update(struct sock *sk, u32 delay)
struct bictcp *ca = inet_csk_ca(sk);
u32 threshold;
+ if (after(tp->snd_una, ca->end_seq))
+ bictcp_hystart_reset(sk);
+
if (hystart_detect & HYSTART_ACK_TRAIN) {
u32 now = bictcp_clock_us(sk);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 246ab7b5e857..0ce46849ec3d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5787,7 +5787,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
trace_tcp_probe(sk, skb);
tcp_mstamp_refresh(tp);
- if (unlikely(!sk->sk_rx_dst))
+ if (unlikely(!rcu_access_pointer(sk->sk_rx_dst)))
inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
/*
* Header prediction.
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 13d868c43284..084df223b5df 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1701,7 +1701,10 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
struct sock *rsk;
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
- struct dst_entry *dst = sk->sk_rx_dst;
+ struct dst_entry *dst;
+
+ dst = rcu_dereference_protected(sk->sk_rx_dst,
+ lockdep_sock_is_held(sk));
sock_rps_save_rxhash(sk, skb);
sk_mark_napi_id(sk, skb);
@@ -1709,8 +1712,8 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
if (sk->sk_rx_dst_ifindex != skb->skb_iif ||
!INDIRECT_CALL_1(dst->ops->check, ipv4_dst_check,
dst, 0)) {
+ RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
dst_release(dst);
- sk->sk_rx_dst = NULL;
}
}
tcp_rcv_established(sk, skb);
@@ -1786,7 +1789,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)
skb->sk = sk;
skb->destructor = sock_edemux;
if (sk_fullsock(sk)) {
- struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
+ struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);
if (dst)
dst = dst_check(dst, 0);
@@ -2201,7 +2204,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
struct dst_entry *dst = skb_dst(skb);
if (dst && dst_hold_safe(dst)) {
- sk->sk_rx_dst = dst;
+ rcu_assign_pointer(sk->sk_rx_dst, dst);
sk->sk_rx_dst_ifindex = skb->skb_iif;
}
}
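The tcp_ipv4.c hunks above (and the matching IPv6 and UDP ones further down) turn sk_rx_dst into an RCU-annotated pointer: the socket owner publishes with rcu_assign_pointer(), lockless early-demux readers fetch with rcu_dereference(), and teardown clears the pointer before dropping the reference. Userspace C has no RCU, so the sketch below only mirrors the publish/consume ordering with C11 release/acquire atomics; it is illustrative, not the kernel mechanism:

#include <stdatomic.h>
#include <stddef.h>

struct dst {
	int cookie;
};

static _Atomic(struct dst *) rx_dst;

static void publish_dst(struct dst *d)
{
	/* like rcu_assign_pointer(): initialise *d fully, then release-store */
	atomic_store_explicit(&rx_dst, d, memory_order_release);
}

static struct dst *read_dst(void)
{
	/* like rcu_dereference(): acquire-load before looking into *d */
	return atomic_load_explicit(&rx_dst, memory_order_acquire);
}

static struct dst *take_dst(void)
{
	/* like clearing the pointer under the socket lock and then releasing
	 * the value that was swapped out
	 */
	return atomic_exchange_explicit(&rx_dst, NULL, memory_order_acq_rel);
}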
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index cf913a66df17..7c2d3ac2363a 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -829,8 +829,8 @@ int tcp_child_process(struct sock *parent, struct sock *child,
int ret = 0;
int state = child->sk_state;
- /* record NAPI ID of child */
- sk_mark_napi_id(child, skb);
+ /* record sk_napi_id and sk_rx_queue_mapping of child. */
+ sk_mark_napi_id_set(child, skb);
tcp_segs_in(tcp_sk(child), skb);
if (!sock_owned_by_user(child)) {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 8bcecdd6aeda..0cd6b857e7ec 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -916,7 +916,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
kfree_skb(skb);
return -EINVAL;
}
- if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
+ if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
kfree_skb(skb);
return -EINVAL;
}
@@ -2250,7 +2250,7 @@ bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
struct dst_entry *old;
if (dst_hold_safe(dst)) {
- old = xchg(&sk->sk_rx_dst, dst);
+ old = xchg((__force struct dst_entry **)&sk->sk_rx_dst, dst);
dst_release(old);
return old != dst;
}
@@ -2440,7 +2440,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
struct dst_entry *dst = skb_dst(skb);
int ret;
- if (unlikely(sk->sk_rx_dst != dst))
+ if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
udp_sk_rx_dst_set(sk, dst);
ret = udp_unicast_rcv_skb(sk, skb, uh);
@@ -2599,7 +2599,7 @@ int udp_v4_early_demux(struct sk_buff *skb)
skb->sk = sk;
skb->destructor = sock_efree;
- dst = READ_ONCE(sk->sk_rx_dst);
+ dst = rcu_dereference(sk->sk_rx_dst);
if (dst)
dst = dst_check(dst, 0);
@@ -3075,7 +3075,7 @@ int udp4_seq_show(struct seq_file *seq, void *v)
{
seq_setwidth(seq, 127);
if (v == SEQ_START_TOKEN)
- seq_puts(seq, " sl local_address rem_address st tx_queue "
+ seq_puts(seq, " sl local_address rem_address st tx_queue "
"rx_queue tr tm->when retrnsmt uid timeout "
"inode ref pointer drops");
else {
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 0c4da163535a..dab4a047590b 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -1026,6 +1026,7 @@ static const struct ipv6_stub ipv6_stub_impl = {
.ip6_mtu_from_fib6 = ip6_mtu_from_fib6,
.fib6_nh_init = fib6_nh_init,
.fib6_nh_release = fib6_nh_release,
+ .fib6_nh_release_dsts = fib6_nh_release_dsts,
.fib6_update_sernum = fib6_update_sernum_stub,
.fib6_rt_update = fib6_rt_update,
.ip6_del_rt = ip6_del_rt,
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 40f3e4f9f33a..dcedfe29d9d9 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -267,6 +267,7 @@ INDIRECT_CALLABLE_SCOPE int fib6_rule_action(struct fib_rule *rule,
}
INDIRECT_CALLABLE_SCOPE bool fib6_rule_suppress(struct fib_rule *rule,
+ int flags,
struct fib_lookup_arg *arg)
{
struct fib6_result *res = arg->result;
@@ -294,8 +295,7 @@ INDIRECT_CALLABLE_SCOPE bool fib6_rule_suppress(struct fib_rule *rule,
return false;
suppress_route:
- if (!(arg->flags & FIB_LOOKUP_NOREF))
- ip6_rt_put(rt);
+ ip6_rt_put_flags(rt, flags);
return true;
}
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 1b9827ff8ccf..1cbd49d5788d 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -248,9 +248,9 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
* memcmp() alone below is sufficient, right?
*/
if ((first_word & htonl(0xF00FFFFF)) ||
- !ipv6_addr_equal(&iph->saddr, &iph2->saddr) ||
- !ipv6_addr_equal(&iph->daddr, &iph2->daddr) ||
- *(u16 *)&iph->nexthdr != *(u16 *)&iph2->nexthdr) {
+ !ipv6_addr_equal(&iph->saddr, &iph2->saddr) ||
+ !ipv6_addr_equal(&iph->daddr, &iph2->daddr) ||
+ *(u16 *)&iph->nexthdr != *(u16 *)&iph2->nexthdr) {
not_same_flow:
NAPI_GRO_CB(p)->same_flow = 0;
continue;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 2f044a49afa8..ff4e83e2a506 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -174,7 +174,7 @@ static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
/* Policy lookup after SNAT yielded a new policy */
if (skb_dst(skb)->xfrm) {
- IPCB(skb)->flags |= IPSKB_REROUTED;
+ IP6CB(skb)->flags |= IP6SKB_REROUTED;
return dst_output(net, sk, skb);
}
#endif
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 527e9ead7449..5e9474bc54fc 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -808,6 +808,8 @@ vti6_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data
struct net *net = dev_net(dev);
struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+ memset(&p1, 0, sizeof(p1));
+
switch (cmd) {
case SIOCGETTUNNEL:
if (dev == ip6n->fb_tnl_dev) {
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 60f1e4f5be5a..c51d5ce3711c 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1020,6 +1020,9 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
struct raw6_sock *rp = raw6_sk(sk);
int val;
+ if (optlen < sizeof(val))
+ return -EINVAL;
+
if (copy_from_sockptr(&val, optval, sizeof(val)))
return -EFAULT;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 3ae25b8ffbd6..42d60c76d30a 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3680,6 +3680,25 @@ void fib6_nh_release(struct fib6_nh *fib6_nh)
fib_nh_common_release(&fib6_nh->nh_common);
}
+void fib6_nh_release_dsts(struct fib6_nh *fib6_nh)
+{
+ int cpu;
+
+ if (!fib6_nh->rt6i_pcpu)
+ return;
+
+ for_each_possible_cpu(cpu) {
+ struct rt6_info *pcpu_rt, **ppcpu_rt;
+
+ ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
+ pcpu_rt = xchg(ppcpu_rt, NULL);
+ if (pcpu_rt) {
+ dst_dev_put(&pcpu_rt->dst);
+ dst_release(&pcpu_rt->dst);
+ }
+ }
+}
+
static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
gfp_t gfp_flags,
struct netlink_ext_ack *extack)
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index 3adc5d9211ad..d64855010948 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -161,6 +161,14 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb));
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+
+ /* the control block has been erased, so we have to set the
+ * iif once again.
+ * We read the receiving interface index directly from the
+ * skb->skb_iif as it is done in the IPv4 receiving path (i.e.:
+ * ip_rcv_core(...)).
+ */
+ IP6CB(skb)->iif = skb->skb_iif;
}
hdr->nexthdr = NEXTHDR_ROUTING;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 1b57ee36d668..8a3618a30632 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1933,7 +1933,6 @@ static int __net_init sit_init_net(struct net *net)
return 0;
err_reg_dev:
- ipip6_dev_free(sitn->fb_tunnel_dev);
free_netdev(sitn->fb_tunnel_dev);
err_alloc_dev:
return err;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 551fce49841d..680e6481b967 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -107,7 +107,7 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
if (dst && dst_hold_safe(dst)) {
const struct rt6_info *rt = (const struct rt6_info *)dst;
- sk->sk_rx_dst = dst;
+ rcu_assign_pointer(sk->sk_rx_dst, dst);
sk->sk_rx_dst_ifindex = skb->skb_iif;
sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
}
@@ -1505,7 +1505,10 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
- struct dst_entry *dst = sk->sk_rx_dst;
+ struct dst_entry *dst;
+
+ dst = rcu_dereference_protected(sk->sk_rx_dst,
+ lockdep_sock_is_held(sk));
sock_rps_save_rxhash(sk, skb);
sk_mark_napi_id(sk, skb);
@@ -1513,8 +1516,8 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
if (sk->sk_rx_dst_ifindex != skb->skb_iif ||
INDIRECT_CALL_1(dst->ops->check, ip6_dst_check,
dst, sk->sk_rx_dst_cookie) == NULL) {
+ RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
dst_release(dst);
- sk->sk_rx_dst = NULL;
}
}
@@ -1874,7 +1877,7 @@ INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb)
skb->sk = sk;
skb->destructor = sock_edemux;
if (sk_fullsock(sk)) {
- struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
+ struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);
if (dst)
dst = dst_check(dst, sk->sk_rx_dst_cookie);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index e43b31d25fb6..8cde9efd7919 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -956,7 +956,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
struct dst_entry *dst = skb_dst(skb);
int ret;
- if (unlikely(sk->sk_rx_dst != dst))
+ if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
udp6_sk_rx_dst_set(sk, dst);
if (!uh->check && !udp_sk(sk)->no_check6_rx) {
@@ -1070,7 +1070,7 @@ INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb)
skb->sk = sk;
skb->destructor = sock_efree;
- dst = READ_ONCE(sk->sk_rx_dst);
+ dst = rcu_dereference(sk->sk_rx_dst);
if (dst)
dst = dst_check(dst, sk->sk_rx_dst_cookie);
@@ -1204,7 +1204,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
kfree_skb(skb);
return -EINVAL;
}
- if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
+ if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
kfree_skb(skb);
return -EINVAL;
}
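Both UDP send paths now bound the GSO check with datalen, the payload being appended, rather than skb->len, which also counts transport and network headers and can spuriously reject a payload that is within the segment limit. A small stand-alone sketch of the corrected check; the constant is illustrative, not the kernel's value:

#include <stdbool.h>
#include <stddef.h>

#define MAX_SEGMENTS_SKETCH 128u   /* illustrative stand-in for UDP_MAX_SEGMENTS */

static bool gso_payload_ok(size_t datalen, size_t gso_size)
{
	/* the limit applies to payload bytes only; header bytes are excluded */
	return datalen <= gso_size * MAX_SEGMENTS_SKETCH;
}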
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 470ff0ce3dc7..7d2925bb966e 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -9,7 +9,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2007-2010, Intel Corporation
* Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
/**
@@ -191,7 +191,8 @@ static void ieee80211_add_addbaext(struct ieee80211_sub_if_data *sdata,
sband = ieee80211_get_sband(sdata);
if (!sband)
return;
- he_cap = ieee80211_get_he_iftype_cap(sband, sdata->vif.type);
+ he_cap = ieee80211_get_he_iftype_cap(sband,
+ ieee80211_vif_type_p2p(&sdata->vif));
if (!he_cap)
return;
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 430a58587538..74a878f213d3 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -9,7 +9,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2007-2010, Intel Corporation
* Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2020 Intel Corporation
+ * Copyright (C) 2018 - 2021 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -106,7 +106,7 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
mgmt->u.action.u.addba_req.start_seq_num =
cpu_to_le16(start_seq_num << 4);
- ieee80211_tx_skb(sdata, skb);
+ ieee80211_tx_skb_tid(sdata, skb, tid);
}
void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
@@ -213,6 +213,8 @@ ieee80211_agg_start_txq(struct sta_info *sta, int tid, bool enable)
struct ieee80211_txq *txq = sta->sta.txq[tid];
struct txq_info *txqi;
+ lockdep_assert_held(&sta->ampdu_mlme.mtx);
+
if (!txq)
return;
@@ -290,7 +292,6 @@ static void ieee80211_remove_tid_tx(struct sta_info *sta, int tid)
ieee80211_assign_tid_tx(sta, tid, NULL);
ieee80211_agg_splice_finish(sta->sdata, tid);
- ieee80211_agg_start_txq(sta, tid, false);
kfree_rcu(tid_tx, rcu_head);
}
@@ -480,8 +481,7 @@ static void ieee80211_send_addba_with_timeout(struct sta_info *sta,
/* send AddBA request */
ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
- tid_tx->dialog_token,
- sta->tid_seq[tid] >> 4,
+ tid_tx->dialog_token, tid_tx->ssn,
buf_size, tid_tx->timeout);
WARN_ON(test_and_set_bit(HT_AGG_STATE_SENT_ADDBA, &tid_tx->state));
@@ -523,6 +523,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
params.ssn = sta->tid_seq[tid] >> 4;
ret = drv_ampdu_action(local, sdata, &params);
+ tid_tx->ssn = params.ssn;
if (ret == IEEE80211_AMPDU_TX_START_DELAY_ADDBA) {
return;
} else if (ret == IEEE80211_AMPDU_TX_START_IMMEDIATE) {
@@ -889,6 +890,7 @@ void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
bool send_delba = false;
+ bool start_txq = false;
ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n",
sta->sta.addr, tid);
@@ -906,10 +908,14 @@ void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
send_delba = true;
ieee80211_remove_tid_tx(sta, tid);
+ start_txq = true;
unlock_sta:
spin_unlock_bh(&sta->lock);
+ if (start_txq)
+ ieee80211_agg_start_txq(sta, tid, false);
+
if (send_delba)
ieee80211_send_delba(sdata, sta->sta.addr, tid,
WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index bd3d3195097f..2d0dd69f9753 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1264,7 +1264,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
return 0;
error:
+ mutex_lock(&local->mtx);
ieee80211_vif_release_channel(sdata);
+ mutex_unlock(&local->mtx);
+
return err;
}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index cd3731cbf6c6..c336267f4599 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1219,8 +1219,11 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif);
- if (local->in_reconfig)
+ /* In reconfig don't transmit now, but mark for waking later */
+ if (local->in_reconfig) {
+ set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txq->flags);
return;
+ }
if (!check_sdata_in_driver(sdata))
return;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 54ab0e1ef6ca..37f7d975f3da 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2452,11 +2452,18 @@ static void ieee80211_sta_tx_wmm_ac_notify(struct ieee80211_sub_if_data *sdata,
u16 tx_time)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- u16 tid = ieee80211_get_tid(hdr);
- int ac = ieee80211_ac_from_tid(tid);
- struct ieee80211_sta_tx_tspec *tx_tspec = &ifmgd->tx_tspec[ac];
+ u16 tid;
+ int ac;
+ struct ieee80211_sta_tx_tspec *tx_tspec;
unsigned long now = jiffies;
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return;
+
+ tid = ieee80211_get_tid(hdr);
+ ac = ieee80211_ac_from_tid(tid);
+ tx_tspec = &ifmgd->tx_tspec[ac];
+
if (likely(!tx_tspec->admitted_time))
return;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 9541a4c30aca..0544563ede52 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2944,6 +2944,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
if (!fwd_skb)
goto out;
+ fwd_skb->dev = sdata->dev;
fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY);
info = IEEE80211_SKB_CB(fwd_skb);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 51b49f0d3ad4..537535a88990 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -644,13 +644,13 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
/* check if STA exists already */
if (sta_info_get_bss(sdata, sta->sta.addr)) {
err = -EEXIST;
- goto out_err;
+ goto out_cleanup;
}
sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL);
if (!sinfo) {
err = -ENOMEM;
- goto out_err;
+ goto out_cleanup;
}
local->num_sta++;
@@ -667,6 +667,15 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
list_add_tail_rcu(&sta->list, &local->sta_list);
+ /* update channel context before notifying the driver about state
+ * change, this enables driver using the updated channel context right away.
+ */
+ if (sta->sta_state >= IEEE80211_STA_ASSOC) {
+ ieee80211_recalc_min_chandef(sta->sdata);
+ if (!sta->sta.support_p2p_ps)
+ ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
+ }
+
/* notify driver */
err = sta_info_insert_drv_state(local, sdata, sta);
if (err)
@@ -674,12 +683,6 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
set_sta_flag(sta, WLAN_STA_INSERTED);
- if (sta->sta_state >= IEEE80211_STA_ASSOC) {
- ieee80211_recalc_min_chandef(sta->sdata);
- if (!sta->sta.support_p2p_ps)
- ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
- }
-
/* accept BA sessions now */
clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
@@ -706,8 +709,8 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
out_drop_sta:
local->num_sta--;
synchronize_net();
+ out_cleanup:
cleanup_single_sta(sta);
- out_err:
mutex_unlock(&local->sta_mtx);
kfree(sinfo);
rcu_read_lock();
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index ba2796782008..379fd367197f 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -176,6 +176,7 @@ struct sta_info;
* @failed_bar_ssn: ssn of the last failed BAR tx attempt
* @bar_pending: BAR needs to be re-sent
* @amsdu: support A-MSDU withing A-MDPU
+ * @ssn: starting sequence number of the session
*
* This structure's lifetime is managed by RCU, assignments to
* the array holding it must hold the aggregation mutex.
@@ -199,6 +200,7 @@ struct tid_ampdu_tx {
u8 stop_initiator;
bool tx_stop;
u16 buf_size;
+ u16 ssn;
u16 failed_bar_ssn;
bool bar_pending;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 278945e3e08a..86a54df3aabd 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1822,15 +1822,15 @@ static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
ieee80211_tx_result res = TX_CONTINUE;
+ if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
+ CALL_TXH(ieee80211_tx_h_rate_ctrl);
+
if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) {
__skb_queue_tail(&tx->skbs, tx->skb);
tx->skb = NULL;
goto txh_done;
}
- if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
- CALL_TXH(ieee80211_tx_h_rate_ctrl);
-
CALL_TXH(ieee80211_tx_h_michael_mic_add);
CALL_TXH(ieee80211_tx_h_sequence);
CALL_TXH(ieee80211_tx_h_fragment);
@@ -4191,11 +4191,11 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
ieee80211_aggr_check(sdata, sta, skb);
+ sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift);
+
if (sta) {
struct ieee80211_fast_tx *fast_tx;
- sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift);
-
fast_tx = rcu_dereference(sta->fast_tx);
if (fast_tx &&
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 43df2f0c5db9..0e4e1956bcea 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -943,7 +943,12 @@ static void ieee80211_parse_extension_element(u32 *crc,
struct ieee802_11_elems *elems)
{
const void *data = elem->data + 1;
- u8 len = elem->datalen - 1;
+ u8 len;
+
+ if (!elem->datalen)
+ return;
+
+ len = elem->datalen - 1;
switch (elem->data[0]) {
case WLAN_EID_EXT_HE_MU_EDCA:
@@ -2063,7 +2068,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
chandef.chan = chan;
skb = ieee80211_probereq_get(&local->hw, src, ssid, ssid_len,
- 100 + ie_len);
+ local->scan_ies_len + ie_len);
if (!skb)
return NULL;
@@ -2646,6 +2651,13 @@ int ieee80211_reconfig(struct ieee80211_local *local)
mutex_unlock(&local->sta_mtx);
}
+ /*
+ * If this is for hw restart things are still running.
+ * We may want to change that later, however.
+ */
+ if (local->open_count && (!suspended || reconfig_due_to_wowlan))
+ drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART);
+
if (local->in_reconfig) {
local->in_reconfig = false;
barrier();
@@ -2664,13 +2676,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
IEEE80211_QUEUE_STOP_REASON_SUSPEND,
false);
- /*
- * If this is for hw restart things are still running.
- * We may want to change that later, however.
- */
- if (local->open_count && (!suspended || reconfig_due_to_wowlan))
- drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART);
-
if (!suspended)
return 0;
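ieee80211_parse_extension_element() above now bails out on a zero-length element before computing elem->datalen - 1; with datalen being a u8, the old code would wrap to 255 and read past the element. A minimal sketch of that guard, with illustrative types:

#include <stdint.h>
#include <stddef.h>

static int parse_ext_element(const uint8_t *data, uint8_t datalen)
{
	uint8_t body_len;

	if (!datalen)            /* reject before datalen - 1 can wrap to 255 */
		return -1;

	body_len = datalen - 1;  /* first byte is the extension ID */
	(void)data;              /* body parsing elided in this sketch */
	return body_len;
}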
diff --git a/net/mctp/route.c b/net/mctp/route.c
index 46c44823edb7..cdf09c2a7007 100644
--- a/net/mctp/route.c
+++ b/net/mctp/route.c
@@ -952,7 +952,7 @@ static int mctp_route_add(struct mctp_dev *mdev, mctp_eid_t daddr_start,
}
static int mctp_route_remove(struct mctp_dev *mdev, mctp_eid_t daddr_start,
- unsigned int daddr_extent)
+ unsigned int daddr_extent, unsigned char type)
{
struct net *net = dev_net(mdev->dev);
struct mctp_route *rt, *tmp;
@@ -969,7 +969,8 @@ static int mctp_route_remove(struct mctp_dev *mdev, mctp_eid_t daddr_start,
list_for_each_entry_safe(rt, tmp, &net->mctp.routes, list) {
if (rt->dev == mdev &&
- rt->min == daddr_start && rt->max == daddr_end) {
+ rt->min == daddr_start && rt->max == daddr_end &&
+ rt->type == type) {
list_del_rcu(&rt->list);
/* TODO: immediate RTM_DELROUTE */
mctp_route_release(rt);
@@ -987,7 +988,7 @@ int mctp_route_add_local(struct mctp_dev *mdev, mctp_eid_t addr)
int mctp_route_remove_local(struct mctp_dev *mdev, mctp_eid_t addr)
{
- return mctp_route_remove(mdev, addr, 0);
+ return mctp_route_remove(mdev, addr, 0, RTN_LOCAL);
}
/* removes all entries for a given device */
@@ -1195,7 +1196,7 @@ static int mctp_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
if (rtm->rtm_type != RTN_UNICAST)
return -EINVAL;
- rc = mctp_route_remove(mdev, daddr_start, rtm->rtm_dst_len);
+ rc = mctp_route_remove(mdev, daddr_start, rtm->rtm_dst_len, RTN_UNICAST);
return rc;
}
diff --git a/net/mctp/test/utils.c b/net/mctp/test/utils.c
index cc6b8803aa9d..7b7918702592 100644
--- a/net/mctp/test/utils.c
+++ b/net/mctp/test/utils.c
@@ -12,7 +12,7 @@
static netdev_tx_t mctp_test_dev_tx(struct sk_buff *skb,
struct net_device *ndev)
{
- kfree(skb);
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
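The test transmit stub above must free the skb with kfree_skb() rather than kfree(): an sk_buff carries reference counts, a destructor and attached state, so only its dedicated free routine tears it down completely. The same rule in a hedged userspace form, with illustrative names:

/* Objects with their own teardown logic need their own free routine;
 * free()/kfree() on the container alone leaks what it references.
 */
#include <stdlib.h>

struct buffer {
	void *payload;                          /* separately allocated data */
	void (*destructor)(struct buffer *);    /* optional per-buffer cleanup */
};

static void buffer_free(struct buffer *b)   /* analogue of kfree_skb() */
{
	if (!b)
		return;
	if (b->destructor)
		b->destructor(b);
	free(b->payload);
	free(b);
}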
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index ffeb2df8be7a..0c7bde1c14a6 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -409,7 +409,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
goto err;
/* Find the output device */
- out_dev = rcu_dereference(nh->nh_dev);
+ out_dev = nh->nh_dev;
if (!mpls_output_possible(out_dev))
goto tx_err;
@@ -698,7 +698,7 @@ static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt,
(dev->addr_len != nh->nh_via_alen))
goto errout;
- RCU_INIT_POINTER(nh->nh_dev, dev);
+ nh->nh_dev = dev;
if (!(dev->flags & IFF_UP)) {
nh->nh_flags |= RTNH_F_DEAD;
@@ -1491,26 +1491,53 @@ static void mpls_dev_destroy_rcu(struct rcu_head *head)
kfree(mdev);
}
-static void mpls_ifdown(struct net_device *dev, int event)
+static int mpls_ifdown(struct net_device *dev, int event)
{
struct mpls_route __rcu **platform_label;
struct net *net = dev_net(dev);
- u8 alive, deleted;
unsigned index;
platform_label = rtnl_dereference(net->mpls.platform_label);
for (index = 0; index < net->mpls.platform_labels; index++) {
struct mpls_route *rt = rtnl_dereference(platform_label[index]);
+ bool nh_del = false;
+ u8 alive = 0;
if (!rt)
continue;
- alive = 0;
- deleted = 0;
+ if (event == NETDEV_UNREGISTER) {
+ u8 deleted = 0;
+
+ for_nexthops(rt) {
+ if (!nh->nh_dev || nh->nh_dev == dev)
+ deleted++;
+ if (nh->nh_dev == dev)
+ nh_del = true;
+ } endfor_nexthops(rt);
+
+ /* if there are no more nexthops, delete the route */
+ if (deleted == rt->rt_nhn) {
+ mpls_route_update(net, index, NULL, NULL);
+ continue;
+ }
+
+ if (nh_del) {
+ size_t size = sizeof(*rt) + rt->rt_nhn *
+ rt->rt_nh_size;
+ struct mpls_route *orig = rt;
+
+ rt = kmalloc(size, GFP_KERNEL);
+ if (!rt)
+ return -ENOMEM;
+ memcpy(rt, orig, size);
+ }
+ }
+
change_nexthops(rt) {
unsigned int nh_flags = nh->nh_flags;
- if (rtnl_dereference(nh->nh_dev) != dev)
+ if (nh->nh_dev != dev)
goto next;
switch (event) {
@@ -1523,23 +1550,22 @@ static void mpls_ifdown(struct net_device *dev, int event)
break;
}
if (event == NETDEV_UNREGISTER)
- RCU_INIT_POINTER(nh->nh_dev, NULL);
+ nh->nh_dev = NULL;
if (nh->nh_flags != nh_flags)
WRITE_ONCE(nh->nh_flags, nh_flags);
next:
if (!(nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)))
alive++;
- if (!rtnl_dereference(nh->nh_dev))
- deleted++;
} endfor_nexthops(rt);
WRITE_ONCE(rt->rt_nhn_alive, alive);
- /* if there are no more nexthops, delete the route */
- if (event == NETDEV_UNREGISTER && deleted == rt->rt_nhn)
- mpls_route_update(net, index, NULL, NULL);
+ if (nh_del)
+ mpls_route_update(net, index, rt, NULL);
}
+
+ return 0;
}
static void mpls_ifup(struct net_device *dev, unsigned int flags)
@@ -1559,14 +1585,12 @@ static void mpls_ifup(struct net_device *dev, unsigned int flags)
alive = 0;
change_nexthops(rt) {
unsigned int nh_flags = nh->nh_flags;
- struct net_device *nh_dev =
- rtnl_dereference(nh->nh_dev);
if (!(nh_flags & flags)) {
alive++;
continue;
}
- if (nh_dev != dev)
+ if (nh->nh_dev != dev)
continue;
alive++;
nh_flags &= ~flags;
@@ -1597,8 +1621,12 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
return NOTIFY_OK;
switch (event) {
+ int err;
+
case NETDEV_DOWN:
- mpls_ifdown(dev, event);
+ err = mpls_ifdown(dev, event);
+ if (err)
+ return notifier_from_errno(err);
break;
case NETDEV_UP:
flags = dev_get_flags(dev);
@@ -1609,13 +1637,18 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
break;
case NETDEV_CHANGE:
flags = dev_get_flags(dev);
- if (flags & (IFF_RUNNING | IFF_LOWER_UP))
+ if (flags & (IFF_RUNNING | IFF_LOWER_UP)) {
mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
- else
- mpls_ifdown(dev, event);
+ } else {
+ err = mpls_ifdown(dev, event);
+ if (err)
+ return notifier_from_errno(err);
+ }
break;
case NETDEV_UNREGISTER:
- mpls_ifdown(dev, event);
+ err = mpls_ifdown(dev, event);
+ if (err)
+ return notifier_from_errno(err);
mdev = mpls_dev_get(dev);
if (mdev) {
mpls_dev_sysctl_unregister(dev, mdev);
@@ -1626,8 +1659,6 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
case NETDEV_CHANGENAME:
mdev = mpls_dev_get(dev);
if (mdev) {
- int err;
-
mpls_dev_sysctl_unregister(dev, mdev);
err = mpls_dev_sysctl_register(dev, mdev);
if (err)
@@ -1994,7 +2025,7 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
nh->nh_via_alen))
goto nla_put_failure;
- dev = rtnl_dereference(nh->nh_dev);
+ dev = nh->nh_dev;
if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
goto nla_put_failure;
if (nh->nh_flags & RTNH_F_LINKDOWN)
@@ -2012,7 +2043,7 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
goto nla_put_failure;
for_nexthops(rt) {
- dev = rtnl_dereference(nh->nh_dev);
+ dev = nh->nh_dev;
if (!dev)
continue;
@@ -2123,18 +2154,14 @@ static int mpls_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
static bool mpls_rt_uses_dev(struct mpls_route *rt,
const struct net_device *dev)
{
- struct net_device *nh_dev;
-
if (rt->rt_nhn == 1) {
struct mpls_nh *nh = rt->rt_nh;
- nh_dev = rtnl_dereference(nh->nh_dev);
- if (dev == nh_dev)
+ if (nh->nh_dev == dev)
return true;
} else {
for_nexthops(rt) {
- nh_dev = rtnl_dereference(nh->nh_dev);
- if (nh_dev == dev)
+ if (nh->nh_dev == dev)
return true;
} endfor_nexthops(rt);
}
@@ -2222,7 +2249,7 @@ static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
size_t nhsize = 0;
for_nexthops(rt) {
- if (!rtnl_dereference(nh->nh_dev))
+ if (!nh->nh_dev)
continue;
nhsize += nla_total_size(sizeof(struct rtnexthop));
/* RTA_VIA */
@@ -2468,7 +2495,7 @@ static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
nh->nh_via_alen))
goto nla_put_failure;
- dev = rtnl_dereference(nh->nh_dev);
+ dev = nh->nh_dev;
if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
goto nla_put_failure;
@@ -2507,7 +2534,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
rt0 = mpls_rt_alloc(1, lo->addr_len, 0);
if (IS_ERR(rt0))
goto nort0;
- RCU_INIT_POINTER(rt0->rt_nh->nh_dev, lo);
+ rt0->rt_nh->nh_dev = lo;
rt0->rt_protocol = RTPROT_KERNEL;
rt0->rt_payload_type = MPT_IPV4;
rt0->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
@@ -2521,7 +2548,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
rt2 = mpls_rt_alloc(1, lo->addr_len, 0);
if (IS_ERR(rt2))
goto nort2;
- RCU_INIT_POINTER(rt2->rt_nh->nh_dev, lo);
+ rt2->rt_nh->nh_dev = lo;
rt2->rt_protocol = RTPROT_KERNEL;
rt2->rt_payload_type = MPT_IPV6;
rt2->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
diff --git a/net/mpls/internal.h b/net/mpls/internal.h
index 838cdfc10e47..893df00b77b6 100644
--- a/net/mpls/internal.h
+++ b/net/mpls/internal.h
@@ -87,7 +87,7 @@ enum mpls_payload_type {
};
struct mpls_nh { /* next hop label forwarding entry */
- struct net_device __rcu *nh_dev;
+ struct net_device *nh_dev;
/* nh_flags is accessed under RCU in the packet path; it is
* modified handling netdev events with rtnl lock held
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 7c3420afb1a0..fe98e4f475ba 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -422,28 +422,6 @@ bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
return false;
}
-/* MP_JOIN client subflow must wait for 4th ack before sending any data:
- * TCP can't schedule delack timer before the subflow is fully established.
- * MPTCP uses the delack timer to do 3rd ack retransmissions
- */
-static void schedule_3rdack_retransmission(struct sock *sk)
-{
- struct inet_connection_sock *icsk = inet_csk(sk);
- struct tcp_sock *tp = tcp_sk(sk);
- unsigned long timeout;
-
- /* reschedule with a timeout above RTT, as we must look only for drop */
- if (tp->srtt_us)
- timeout = tp->srtt_us << 1;
- else
- timeout = TCP_TIMEOUT_INIT;
-
- WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER);
- icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
- icsk->icsk_ack.timeout = timeout;
- sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
-}
-
static void clear_3rdack_retransmission(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
@@ -526,7 +504,15 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
*size = TCPOLEN_MPTCP_MPJ_ACK;
pr_debug("subflow=%p", subflow);
- schedule_3rdack_retransmission(sk);
+ /* we can use the full delegate action helper only from BH context
+ * If we are in process context - sk is flushing the backlog at
+ * socket lock release time - just set the appropriate flag, will
+ * be handled by the release callback
+ */
+ if (sock_owned_by_user(sk))
+ set_bit(MPTCP_DELEGATE_ACK, &subflow->delegated_status);
+ else
+ mptcp_subflow_delegate(subflow, MPTCP_DELEGATE_ACK);
return true;
}
return false;
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 7b96be1e9f14..f523051f5aef 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -700,6 +700,9 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
msk_owned_by_me(msk);
+ if (sk->sk_state == TCP_LISTEN)
+ return;
+
if (!rm_list->nr)
return;
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index b7e32e316738..54613f5b7521 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1524,7 +1524,7 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
int ret = 0;
prev_ssk = ssk;
- mptcp_flush_join_list(msk);
+ __mptcp_flush_join_list(msk);
ssk = mptcp_subflow_get_send(msk);
/* First check. If the ssk has changed since
@@ -1596,7 +1596,8 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
if (!xmit_ssk)
goto out;
if (xmit_ssk != ssk) {
- mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk));
+ mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk),
+ MPTCP_DELEGATE_SEND);
goto out;
}
@@ -2878,7 +2879,7 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
*/
if (WARN_ON_ONCE(!new_mptcp_sock)) {
tcp_sk(newsk)->is_mptcp = 0;
- return newsk;
+ goto out;
}
/* acquire the 2nd reference for the owning socket */
@@ -2890,6 +2891,8 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
}
+out:
+ newsk->sk_kern_sock = kern;
return newsk;
}
@@ -2943,7 +2946,7 @@ void __mptcp_check_push(struct sock *sk, struct sock *ssk)
if (xmit_ssk == ssk)
__mptcp_subflow_push_pending(sk, ssk);
else if (xmit_ssk)
- mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk));
+ mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk), MPTCP_DELEGATE_SEND);
} else {
set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
}
@@ -2993,18 +2996,50 @@ static void mptcp_release_cb(struct sock *sk)
__mptcp_update_rmem(sk);
}
+/* MP_JOIN client subflow must wait for 4th ack before sending any data:
+ * TCP can't schedule delack timer before the subflow is fully established.
+ * MPTCP uses the delack timer to do 3rd ack retransmissions
+ */
+static void schedule_3rdack_retransmission(struct sock *ssk)
+{
+ struct inet_connection_sock *icsk = inet_csk(ssk);
+ struct tcp_sock *tp = tcp_sk(ssk);
+ unsigned long timeout;
+
+ if (mptcp_subflow_ctx(ssk)->fully_established)
+ return;
+
+ /* reschedule with a timeout above RTT, as we must look only for drop */
+ if (tp->srtt_us)
+ timeout = usecs_to_jiffies(tp->srtt_us >> (3 - 1));
+ else
+ timeout = TCP_TIMEOUT_INIT;
+ timeout += jiffies;
+
+ WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER);
+ icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
+ icsk->icsk_ack.timeout = timeout;
+ sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout);
+}
+
void mptcp_subflow_process_delegated(struct sock *ssk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
struct sock *sk = subflow->conn;
- mptcp_data_lock(sk);
- if (!sock_owned_by_user(sk))
- __mptcp_subflow_push_pending(sk, ssk);
- else
- set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
- mptcp_data_unlock(sk);
- mptcp_subflow_delegated_done(subflow);
+ if (test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status)) {
+ mptcp_data_lock(sk);
+ if (!sock_owned_by_user(sk))
+ __mptcp_subflow_push_pending(sk, ssk);
+ else
+ set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
+ mptcp_data_unlock(sk);
+ mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_SEND);
+ }
+ if (test_bit(MPTCP_DELEGATE_ACK, &subflow->delegated_status)) {
+ schedule_3rdack_retransmission(ssk);
+ mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_ACK);
+ }
}
static int mptcp_hash(struct sock *sk)
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 67a61ac48b20..d87cc040352e 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -387,6 +387,7 @@ struct mptcp_delegated_action {
DECLARE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
#define MPTCP_DELEGATE_SEND 0
+#define MPTCP_DELEGATE_ACK 1
/* MPTCP subflow context */
struct mptcp_subflow_context {
@@ -492,23 +493,23 @@ static inline void mptcp_add_pending_subflow(struct mptcp_sock *msk,
void mptcp_subflow_process_delegated(struct sock *ssk);
-static inline void mptcp_subflow_delegate(struct mptcp_subflow_context *subflow)
+static inline void mptcp_subflow_delegate(struct mptcp_subflow_context *subflow, int action)
{
struct mptcp_delegated_action *delegated;
bool schedule;
+ /* the caller held the subflow bh socket lock */
+ lockdep_assert_in_softirq();
+
/* The implied barrier pairs with mptcp_subflow_delegated_done(), and
* ensures the below list check sees list updates done prior to status
* bit changes
*/
- if (!test_and_set_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status)) {
+ if (!test_and_set_bit(action, &subflow->delegated_status)) {
/* still on delegated list from previous scheduling */
if (!list_empty(&subflow->delegated_node))
return;
- /* the caller held the subflow bh socket lock */
- lockdep_assert_in_softirq();
-
delegated = this_cpu_ptr(&mptcp_delegated_actions);
schedule = list_empty(&delegated->head);
list_add_tail(&subflow->delegated_node, &delegated->head);
@@ -533,16 +534,16 @@ mptcp_subflow_delegated_next(struct mptcp_delegated_action *delegated)
static inline bool mptcp_subflow_has_delegated_action(const struct mptcp_subflow_context *subflow)
{
- return test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status);
+ return !!READ_ONCE(subflow->delegated_status);
}
-static inline void mptcp_subflow_delegated_done(struct mptcp_subflow_context *subflow)
+static inline void mptcp_subflow_delegated_done(struct mptcp_subflow_context *subflow, int action)
{
/* pairs with mptcp_subflow_delegate, ensures delegate_node is updated before
* touching the status bit
*/
smp_wmb();
- clear_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status);
+ clear_bit(action, &subflow->delegated_status);
}
int mptcp_is_enabled(const struct net *net);
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index 0f1e661c2032..f8efd478ac97 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -525,7 +525,6 @@ static bool mptcp_supported_sockopt(int level, int optname)
case TCP_NODELAY:
case TCP_THIN_LINEAR_TIMEOUTS:
case TCP_CONGESTION:
- case TCP_ULP:
case TCP_CORK:
case TCP_KEEPIDLE:
case TCP_KEEPINTVL:
diff --git a/net/ncsi/ncsi-cmd.c b/net/ncsi/ncsi-cmd.c
index ba9ae482141b..dda8b76b7798 100644
--- a/net/ncsi/ncsi-cmd.c
+++ b/net/ncsi/ncsi-cmd.c
@@ -18,6 +18,8 @@
#include "internal.h"
#include "ncsi-pkt.h"
+static const int padding_bytes = 26;
+
u32 ncsi_calculate_checksum(unsigned char *data, int len)
{
u32 checksum = 0;
@@ -213,12 +215,17 @@ static int ncsi_cmd_handler_oem(struct sk_buff *skb,
{
struct ncsi_cmd_oem_pkt *cmd;
unsigned int len;
+ int payload;
+ /* NC-SI spec DSP_0222_1.2.0, section 8.2.2.2
+ * requires payload to be padded with 0 to
+ * 32-bit boundary before the checksum field.
+ * Ensure the padding bytes are accounted for in
+ * skb allocation
+ */
+ payload = ALIGN(nca->payload, 4);
len = sizeof(struct ncsi_cmd_pkt_hdr) + 4;
- if (nca->payload < 26)
- len += 26;
- else
- len += nca->payload;
+ len += max(payload, padding_bytes);
cmd = skb_put_zero(skb, len);
memcpy(&cmd->mfr_id, nca->data, nca->payload);
@@ -272,6 +279,7 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca)
struct net_device *dev = nd->dev;
int hlen = LL_RESERVED_SPACE(dev);
int tlen = dev->needed_tailroom;
+ int payload;
int len = hlen + tlen;
struct sk_buff *skb;
struct ncsi_request *nr;
@@ -281,14 +289,14 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca)
return NULL;
/* NCSI command packet has 16-bytes header, payload, 4 bytes checksum.
+ * Payload needs padding so that the checksum field following payload is
+ * aligned to 32-bit boundary.
* The packet needs padding if its payload is less than 26 bytes to
* meet 64 bytes minimal ethernet frame length.
*/
len += sizeof(struct ncsi_cmd_pkt_hdr) + 4;
- if (nca->payload < 26)
- len += 26;
- else
- len += nca->payload;
+ payload = ALIGN(nca->payload, 4);
+ len += max(payload, padding_bytes);
/* Allocate skb */
skb = alloc_skb(len, GFP_ATOMIC);
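Both NCSI paths above now size the command with ALIGN(payload, 4) and the named 26-byte minimum, so the checksum that follows the payload stays on a 32-bit boundary and the frame still meets the 64-byte Ethernet minimum. The arithmetic as a small stand-alone sketch; the constants mirror the ones in the patch but this is not the kernel implementation:

#include <stddef.h>

#define ALIGN4(x)      (((x) + 3u) & ~3u)
#define MIN_PADDING    26u    /* keeps the frame at the Ethernet minimum */
#define CMD_HDR_LEN    16u    /* NCSI command packet header */
#define CHECKSUM_LEN   4u

static size_t ncsi_cmd_len(size_t payload)
{
	size_t padded = ALIGN4(payload);   /* checksum must be 32-bit aligned */

	if (padded < MIN_PADDING)
		padded = MIN_PADDING;
	return CMD_HDR_LEN + padded + CHECKSUM_LEN;
}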
diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c
index bb5f1650f11c..c189b4c8a182 100644
--- a/net/ncsi/ncsi-netlink.c
+++ b/net/ncsi/ncsi-netlink.c
@@ -112,7 +112,11 @@ static int ncsi_write_package_info(struct sk_buff *skb,
pnest = nla_nest_start_noflag(skb, NCSI_PKG_ATTR);
if (!pnest)
return -ENOMEM;
- nla_put_u32(skb, NCSI_PKG_ATTR_ID, np->id);
+ rc = nla_put_u32(skb, NCSI_PKG_ATTR_ID, np->id);
+ if (rc) {
+ nla_nest_cancel(skb, pnest);
+ return rc;
+ }
if ((0x1 << np->id) == ndp->package_whitelist)
nla_put_flag(skb, NCSI_PKG_ATTR_FORCED);
cnest = nla_nest_start_noflag(skb, NCSI_PKG_ATTR_CHANNEL_LIST);
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index e93c937a8bf0..51ad557a525b 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1919,7 +1919,6 @@ ip_vs_in_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state
struct ip_vs_proto_data *pd;
struct ip_vs_conn *cp;
int ret, pkts;
- int conn_reuse_mode;
struct sock *sk;
int af = state->pf;
@@ -1997,15 +1996,16 @@ ip_vs_in_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state
cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto,
ipvs, af, skb, &iph);
- conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
- if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
+ if (!iph.fragoffs && is_new_conn(skb, &iph) && cp) {
+ int conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
bool old_ct = false, resched = false;
if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
unlikely(!atomic_read(&cp->dest->weight))) {
resched = true;
old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
- } else if (is_new_conn_expected(cp, conn_reuse_mode)) {
+ } else if (conn_reuse_mode &&
+ is_new_conn_expected(cp, conn_reuse_mode)) {
old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
if (!atomic_read(&cp->n_control)) {
resched = true;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 770a63103c7a..4712a90a1820 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -684,7 +684,7 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
tstamp = nf_conn_tstamp_find(ct);
if (tstamp) {
- s32 timeout = ct->timeout - nfct_time_stamp;
+ s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp;
tstamp->stop = ktime_get_real_ns();
if (timeout < 0)
@@ -1036,7 +1036,7 @@ static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
}
/* We want the clashing entry to go away real soon: 1 second timeout. */
- loser_ct->timeout = nfct_time_stamp + HZ;
+ WRITE_ONCE(loser_ct->timeout, nfct_time_stamp + HZ);
/* IPS_NAT_CLASH removes the entry automatically on the first
* reply. Also prevents UDP tracker from moving the entry to
@@ -1560,7 +1560,7 @@ __nf_conntrack_alloc(struct net *net,
/* save hash for reusing when confirming */
*(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
ct->status = 0;
- ct->timeout = 0;
+ WRITE_ONCE(ct->timeout, 0);
write_pnet(&ct->ct_net, net);
memset(&ct->__nfct_init_offset, 0,
offsetof(struct nf_conn, proto) -
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index f1e5443fe7c7..ec4164c32d27 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1011,11 +1011,9 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
CTA_TUPLE_REPLY,
filter->family,
&filter->zone,
- filter->orig_flags);
- if (err < 0) {
- err = -EINVAL;
+ filter->reply_flags);
+ if (err < 0)
goto err_filter;
- }
}
return filter;
@@ -1197,8 +1195,6 @@ restart:
}
hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[cb->args[0]],
hnnode) {
- if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
- continue;
ct = nf_ct_tuplehash_to_ctrack(h);
if (nf_ct_is_expired(ct)) {
if (i < ARRAY_SIZE(nf_ct_evict) &&
@@ -1210,6 +1206,9 @@ restart:
if (!net_eq(net, nf_ct_net(ct)))
continue;
+ if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
+ continue;
+
if (cb->args[1]) {
if (ct != last)
continue;
@@ -2000,7 +1999,7 @@ static int ctnetlink_change_timeout(struct nf_conn *ct,
if (timeout > INT_MAX)
timeout = INT_MAX;
- ct->timeout = nfct_time_stamp + (u32)timeout;
+ WRITE_ONCE(ct->timeout, nfct_time_stamp + (u32)timeout);
if (test_bit(IPS_DYING_BIT, &ct->status))
return -ETIME;
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 87a7388b6c89..ed37bb9b4e58 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -201,8 +201,8 @@ static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
if (timeout < 0)
timeout = 0;
- if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
- ct->timeout = nfct_time_stamp + timeout;
+ if (nf_flow_timeout_delta(READ_ONCE(ct->timeout)) > (__s32)timeout)
+ WRITE_ONCE(ct->timeout, nfct_time_stamp + timeout);
}
static void flow_offload_fixup_ct_state(struct nf_conn *ct)
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index d6bf1b2cd541..b561e0a44a45 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -65,11 +65,11 @@ static void nf_flow_rule_lwt_match(struct nf_flow_match *match,
sizeof(struct in6_addr));
if (memcmp(&key->enc_ipv6.src, &in6addr_any,
sizeof(struct in6_addr)))
- memset(&key->enc_ipv6.src, 0xff,
+ memset(&mask->enc_ipv6.src, 0xff,
sizeof(struct in6_addr));
if (memcmp(&key->enc_ipv6.dst, &in6addr_any,
sizeof(struct in6_addr)))
- memset(&key->enc_ipv6.dst, 0xff,
+ memset(&mask->enc_ipv6.dst, 0xff,
sizeof(struct in6_addr));
enc_keys |= BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS);
key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index c0851fec11d4..c20772822637 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -4481,9 +4481,9 @@ struct nft_set_elem_catchall {
static void nft_set_catchall_destroy(const struct nft_ctx *ctx,
struct nft_set *set)
{
- struct nft_set_elem_catchall *catchall;
+ struct nft_set_elem_catchall *next, *catchall;
- list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
+ list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
list_del_rcu(&catchall->list);
nft_set_elem_destroy(set, catchall->elem, true);
kfree_rcu(catchall);
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 691ef4cffdd9..7f83f9697fc1 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -556,7 +556,8 @@ __build_packet_message(struct nfnl_log_net *log,
goto nla_put_failure;
if (indev && skb->dev &&
- skb->mac_header != skb->network_header) {
+ skb_mac_header_was_set(skb) &&
+ skb_mac_header_len(skb) != 0) {
struct nfulnl_msg_packet_hw phw;
int len;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 4acc4b8e9fe5..f0b9e21a2452 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -387,7 +387,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
struct net_device *indev;
struct net_device *outdev;
struct nf_conn *ct = NULL;
- enum ip_conntrack_info ctinfo;
+ enum ip_conntrack_info ctinfo = 0;
struct nfnl_ct_hook *nfnl_ct;
bool csum_verify;
char *secdata = NULL;
@@ -560,7 +560,8 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
goto nla_put_failure;
if (indev && entskb->dev &&
- skb_mac_header_was_set(entskb)) {
+ skb_mac_header_was_set(entskb) &&
+ skb_mac_header_len(entskb) != 0) {
struct nfqnl_msg_packet_hw phw;
int len;
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index af4ee874a067..dbe1f2e7dd9e 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -236,7 +236,7 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
if (!tcph)
- return;
+ goto err;
opt = (u8 *)tcph;
for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
@@ -251,16 +251,16 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
continue;
if (i + optl > tcphdr_len || priv->len + priv->offset > optl)
- return;
+ goto err;
if (skb_ensure_writable(pkt->skb,
nft_thoff(pkt) + i + priv->len))
- return;
+ goto err;
tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff,
&tcphdr_len);
if (!tcph)
- return;
+ goto err;
offset = i + priv->offset;
@@ -303,6 +303,9 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
return;
}
+ return;
+err:
+ regs->verdict.code = NFT_BREAK;
}
static void nft_exthdr_sctp_eval(const struct nft_expr *expr,
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index cbfe4e4a4ad7..bd689938a2e0 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -22,7 +22,6 @@
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
-#include <linux/ip.h>
#include <net/sctp/checksum.h>
static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
index e517663e0cd1..6f4116e72958 100644
--- a/net/netfilter/nft_set_pipapo_avx2.c
+++ b/net/netfilter/nft_set_pipapo_avx2.c
@@ -886,7 +886,7 @@ static int nft_pipapo_avx2_lookup_8b_6(unsigned long *map, unsigned long *fill,
NFT_PIPAPO_AVX2_BUCKET_LOAD8(4, lt, 4, pkt[4], bsize);
NFT_PIPAPO_AVX2_AND(5, 0, 1);
- NFT_PIPAPO_AVX2_BUCKET_LOAD8(6, lt, 6, pkt[5], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(6, lt, 5, pkt[5], bsize);
NFT_PIPAPO_AVX2_AND(7, 2, 3);
/* Stall */
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index 2f7cf5ecebf4..0f8bb0bf558f 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -85,9 +85,9 @@ static ssize_t idletimer_tg_show(struct device *dev,
mutex_unlock(&list_mutex);
if (time_after(expires, jiffies) || ktimespec.tv_sec > 0)
- return snprintf(buf, PAGE_SIZE, "%ld\n", time_diff);
+ return sysfs_emit(buf, "%ld\n", time_diff);
- return snprintf(buf, PAGE_SIZE, "0\n");
+ return sysfs_emit(buf, "0\n");
}
static void idletimer_tg_work(struct work_struct *work)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 4c575324a985..9eba2e648385 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1852,6 +1852,11 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
+ if (len == 0) {
+ pr_warn_once("Zero length message leads to an empty skb\n");
+ return -ENODATA;
+ }
+
err = scm_send(sock, msg, &scm, true);
if (err < 0)
return err;
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 334f63c9529e..f184b0db79d4 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -636,8 +636,10 @@ static int nfc_genl_dump_devices_done(struct netlink_callback *cb)
{
struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
- nfc_device_iter_exit(iter);
- kfree(iter);
+ if (iter) {
+ nfc_device_iter_exit(iter);
+ kfree(iter);
+ }
return 0;
}
@@ -1392,8 +1394,10 @@ static int nfc_genl_dump_ses_done(struct netlink_callback *cb)
{
struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
- nfc_device_iter_exit(iter);
- kfree(iter);
+ if (iter) {
+ nfc_device_iter_exit(iter);
+ kfree(iter);
+ }
return 0;
}
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 9713035b89e3..6d262d9aa10e 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -34,6 +34,7 @@
#include <net/mpls.h>
#include <net/ndisc.h>
#include <net/nsh.h>
+#include <net/netfilter/nf_conntrack_zones.h>
#include "conntrack.h"
#include "datapath.h"
@@ -860,6 +861,7 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
#endif
bool post_ct = false;
int res, err;
+ u16 zone = 0;
/* Extract metadata from packet. */
if (tun_info) {
@@ -898,6 +900,7 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
key->recirc_id = tc_ext ? tc_ext->chain : 0;
OVS_CB(skb)->mru = tc_ext ? tc_ext->mru : 0;
post_ct = tc_ext ? tc_ext->post_ct : false;
+ zone = post_ct ? tc_ext->zone : 0;
} else {
key->recirc_id = 0;
}
@@ -906,8 +909,11 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
#endif
err = key_extract(skb, key);
- if (!err)
+ if (!err) {
ovs_ct_fill_key(skb, key, post_ct); /* Must be after key_extract(). */
+ if (post_ct && !skb_get_nfct(skb))
+ key->ct_zone = zone;
+ }
return err;
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 46943a18a10d..76c2dca7f0a5 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -4492,9 +4492,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
}
out_free_pg_vec:
- bitmap_free(rx_owner_map);
- if (pg_vec)
+ if (pg_vec) {
+ bitmap_free(rx_owner_map);
free_pg_vec(pg_vec, order, req->tp_block_nr);
+ }
out:
return err;
}
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index a1525916885a..65d463ad8770 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -868,6 +868,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
err = pep_accept_conn(newsk, skb);
if (err) {
+ __sock_put(sk);
sock_put(newsk);
newsk = NULL;
goto drop;
@@ -946,6 +947,8 @@ static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
ret = -EBUSY;
else if (sk->sk_state == TCP_ESTABLISHED)
ret = -EISCONN;
+ else if (!pn->pn_sk.sobject)
+ ret = -EADDRNOTAVAIL;
else
ret = pep_sock_enable(sk, NULL, 0);
release_sock(sk);
diff --git a/net/rds/connection.c b/net/rds/connection.c
index a3bc4b54d491..b4cc699c5fad 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -253,6 +253,7 @@ static struct rds_connection *__rds_conn_create(struct net *net,
* should end up here, but if it
* does, reset/destroy the connection.
*/
+ kfree(conn->c_path);
kmem_cache_free(rds_conn_slab, conn);
conn = ERR_PTR(-EOPNOTSUPP);
goto out;
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index abf19c0e3ba0..5327d130c4b5 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -500,7 +500,7 @@ void rds_tcp_tune(struct socket *sock)
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
}
if (rtn->rcvbuf_size > 0) {
- sk->sk_sndbuf = rtn->rcvbuf_size;
+ sk->sk_rcvbuf = rtn->rcvbuf_size;
sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
}
release_sock(sk);
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index dbea0bfee48e..8120138dac01 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -135,16 +135,20 @@ struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle)
return bundle;
}
+static void rxrpc_free_bundle(struct rxrpc_bundle *bundle)
+{
+ rxrpc_put_peer(bundle->params.peer);
+ kfree(bundle);
+}
+
void rxrpc_put_bundle(struct rxrpc_bundle *bundle)
{
unsigned int d = bundle->debug_id;
unsigned int u = atomic_dec_return(&bundle->usage);
_debug("PUT B=%x %u", d, u);
- if (u == 0) {
- rxrpc_put_peer(bundle->params.peer);
- kfree(bundle);
- }
+ if (u == 0)
+ rxrpc_free_bundle(bundle);
}
/*
@@ -328,7 +332,7 @@ static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *c
return candidate;
found_bundle_free:
- kfree(candidate);
+ rxrpc_free_bundle(candidate);
found_bundle:
rxrpc_get_bundle(bundle);
spin_unlock(&local->client_bundles_lock);
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 68396d052052..0298fe2ad6d3 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -299,6 +299,12 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx,
return peer;
}
+static void rxrpc_free_peer(struct rxrpc_peer *peer)
+{
+ rxrpc_put_local(peer->local);
+ kfree_rcu(peer, rcu);
+}
+
/*
* Set up a new incoming peer. There shouldn't be any other matching peers
* since we've already done a search in the list from the non-reentrant context
@@ -365,7 +371,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
spin_unlock_bh(&rxnet->peer_hash_lock);
if (peer)
- kfree(candidate);
+ rxrpc_free_peer(candidate);
else
peer = candidate;
}
@@ -420,8 +426,7 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
list_del_init(&peer->keepalive_link);
spin_unlock_bh(&rxnet->peer_hash_lock);
- rxrpc_put_local(peer->local);
- kfree_rcu(peer, rcu);
+ rxrpc_free_peer(peer);
}
/*
@@ -457,8 +462,7 @@ void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
if (n == 0) {
hash_del_rcu(&peer->hash_link);
list_del_init(&peer->keepalive_link);
- rxrpc_put_local(peer->local);
- kfree_rcu(peer, rcu);
+ rxrpc_free_peer(peer);
}
}
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 90866ae45573..ab3591408419 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -690,10 +690,10 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
u8 family, u16 zone, bool *defrag)
{
enum ip_conntrack_info ctinfo;
- struct qdisc_skb_cb cb;
struct nf_conn *ct;
int err = 0;
bool frag;
+ u16 mru;
/* Previously seen (loopback)? Ignore. */
ct = nf_ct_get(skb, &ctinfo);
@@ -708,7 +708,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
return err;
skb_get(skb);
- cb = *qdisc_skb_cb(skb);
+ mru = tc_skb_cb(skb)->mru;
if (family == NFPROTO_IPV4) {
enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
@@ -722,7 +722,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
if (!err) {
*defrag = true;
- cb.mru = IPCB(skb)->frag_max_size;
+ mru = IPCB(skb)->frag_max_size;
}
} else { /* NFPROTO_IPV6 */
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
@@ -735,7 +735,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
if (!err) {
*defrag = true;
- cb.mru = IP6CB(skb)->frag_max_size;
+ mru = IP6CB(skb)->frag_max_size;
}
#else
err = -EOPNOTSUPP;
@@ -744,7 +744,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
}
if (err != -EINPROGRESS)
- *qdisc_skb_cb(skb) = cb;
+ tc_skb_cb(skb)->mru = mru;
skb_clear_hash(skb);
skb->ignore_df = 1;
return err;
@@ -963,7 +963,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
tcf_action_update_bstats(&c->common, skb);
if (clear) {
- qdisc_skb_cb(skb)->post_ct = false;
+ tc_skb_cb(skb)->post_ct = false;
ct = nf_ct_get(skb, &ctinfo);
if (ct) {
nf_conntrack_put(&ct->ct_general);
@@ -1048,7 +1048,8 @@ do_nat:
out_push:
skb_push_rcsum(skb, nh_ofs);
- qdisc_skb_cb(skb)->post_ct = true;
+ tc_skb_cb(skb)->post_ct = true;
+ tc_skb_cb(skb)->zone = p->zone;
out_clear:
if (defrag)
qdisc_skb_cb(skb)->pkt_len = skb->len;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 2ef8f5a6205a..35c74bdde848 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1617,12 +1617,15 @@ int tcf_classify(struct sk_buff *skb,
/* If we missed on some chain */
if (ret == TC_ACT_UNSPEC && last_executed_chain) {
+ struct tc_skb_cb *cb = tc_skb_cb(skb);
+
ext = tc_skb_ext_alloc(skb);
if (WARN_ON_ONCE(!ext))
return TC_ACT_SHOT;
ext->chain = last_executed_chain;
- ext->mru = qdisc_skb_cb(skb)->mru;
- ext->post_ct = qdisc_skb_cb(skb)->post_ct;
+ ext->mru = cb->mru;
+ ext->post_ct = cb->post_ct;
+ ext->zone = cb->zone;
}
return ret;
@@ -3687,6 +3690,7 @@ int tc_setup_flow_action(struct flow_action *flow_action,
entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
break;
default:
+ err = -EOPNOTSUPP;
goto err_out_locked;
}
} else if (is_tcf_skbedit_ptype(act)) {
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index aab13ba11767..ef54ed395874 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -19,6 +19,7 @@
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
#include <net/geneve.h>
@@ -309,7 +310,8 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res)
{
struct cls_fl_head *head = rcu_dereference_bh(tp->root);
- bool post_ct = qdisc_skb_cb(skb)->post_ct;
+ bool post_ct = tc_skb_cb(skb)->post_ct;
+ u16 zone = tc_skb_cb(skb)->zone;
struct fl_flow_key skb_key;
struct fl_flow_mask *mask;
struct cls_fl_filter *f;
@@ -327,7 +329,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
fl_ct_info_to_flower_map,
ARRAY_SIZE(fl_ct_info_to_flower_map),
- post_ct);
+ post_ct, zone);
skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
skb_flow_dissect(skb, &mask->dissector, &skb_key,
FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 3c2300d14468..857aaebd49f4 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -2736,7 +2736,7 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
q->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data),
GFP_KERNEL);
if (!q->tins)
- goto nomem;
+ return -ENOMEM;
for (i = 0; i < CAKE_MAX_TINS; i++) {
struct cake_tin_data *b = q->tins + i;
@@ -2766,10 +2766,6 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
q->min_netlen = ~0;
q->min_adjlen = ~0;
return 0;
-
-nomem:
- cake_destroy(sch);
- return -ENOMEM;
}
static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
index 0eae9ff5edf6..d73393493553 100644
--- a/net/sched/sch_ets.c
+++ b/net/sched/sch_ets.c
@@ -665,12 +665,14 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
q->classes[i].deficit = quanta[i];
}
}
+ for (i = q->nbands; i < oldbands; i++) {
+ if (i >= q->nstrict && q->classes[i].qdisc->q.qlen)
+ list_del(&q->classes[i].alist);
+ qdisc_tree_flush_backlog(q->classes[i].qdisc);
+ }
q->nstrict = nstrict;
memcpy(q->prio2band, priomap, sizeof(priomap));
- for (i = q->nbands; i < oldbands; i++)
- qdisc_tree_flush_backlog(q->classes[i].qdisc);
-
for (i = 0; i < q->nbands; i++)
q->classes[i].quantum = quanta[i];
diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
index 830f3559f727..d6aba6edd16e 100644
--- a/net/sched/sch_fq_pie.c
+++ b/net/sched/sch_fq_pie.c
@@ -531,6 +531,7 @@ static void fq_pie_destroy(struct Qdisc *sch)
struct fq_pie_sched_data *q = qdisc_priv(sch);
tcf_block_put(q->block);
+ q->p_params.tupdate = 0;
del_timer_sync(&q->adapt_timer);
kvfree(q->flows);
}
diff --git a/net/sched/sch_frag.c b/net/sched/sch_frag.c
index 8c06381391d6..5ded4c8672a6 100644
--- a/net/sched/sch_frag.c
+++ b/net/sched/sch_frag.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
#include <net/netlink.h>
#include <net/sch_generic.h>
+#include <net/pkt_sched.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/ip6_fib.h>
@@ -137,7 +138,7 @@ err:
int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
{
- u16 mru = qdisc_skb_cb(skb)->mru;
+ u16 mru = tc_skb_cb(skb)->mru;
int err;
if (mru && skb->len > mru + skb->dev->hard_header_len)
diff --git a/net/sctp/diag.c b/net/sctp/diag.c
index 760b367644c1..a7d623171501 100644
--- a/net/sctp/diag.c
+++ b/net/sctp/diag.c
@@ -290,9 +290,8 @@ out:
return err;
}
-static int sctp_sock_dump(struct sctp_transport *tsp, void *p)
+static int sctp_sock_dump(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
{
- struct sctp_endpoint *ep = tsp->asoc->ep;
struct sctp_comm_param *commp = p;
struct sock *sk = ep->base.sk;
struct sk_buff *skb = commp->skb;
@@ -302,6 +301,8 @@ static int sctp_sock_dump(struct sctp_transport *tsp, void *p)
int err = 0;
lock_sock(sk);
+ if (ep != tsp->asoc->ep)
+ goto release;
list_for_each_entry(assoc, &ep->asocs, asocs) {
if (cb->args[4] < cb->args[1])
goto next;
@@ -344,9 +345,8 @@ release:
return err;
}
-static int sctp_sock_filter(struct sctp_transport *tsp, void *p)
+static int sctp_sock_filter(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
{
- struct sctp_endpoint *ep = tsp->asoc->ep;
struct sctp_comm_param *commp = p;
struct sock *sk = ep->base.sk;
const struct inet_diag_req_v2 *r = commp->r;
@@ -505,8 +505,8 @@ skip:
if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
goto done;
- sctp_for_each_transport(sctp_sock_filter, sctp_sock_dump,
- net, &pos, &commp);
+ sctp_transport_traverse_process(sctp_sock_filter, sctp_sock_dump,
+ net, &pos, &commp);
cb->args[2] = pos;
done:
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 48c9c2c7602f..efffde7f2328 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -184,6 +184,18 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
}
/* Final destructor for endpoint. */
+static void sctp_endpoint_destroy_rcu(struct rcu_head *head)
+{
+ struct sctp_endpoint *ep = container_of(head, struct sctp_endpoint, rcu);
+ struct sock *sk = ep->base.sk;
+
+ sctp_sk(sk)->ep = NULL;
+ sock_put(sk);
+
+ kfree(ep);
+ SCTP_DBG_OBJCNT_DEC(ep);
+}
+
static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
{
struct sock *sk;
@@ -213,18 +225,13 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
if (sctp_sk(sk)->bind_hash)
sctp_put_port(sk);
- sctp_sk(sk)->ep = NULL;
- /* Give up our hold on the sock */
- sock_put(sk);
-
- kfree(ep);
- SCTP_DBG_OBJCNT_DEC(ep);
+ call_rcu(&ep->rcu, sctp_endpoint_destroy_rcu);
}
/* Hold a reference to an endpoint. */
-void sctp_endpoint_hold(struct sctp_endpoint *ep)
+int sctp_endpoint_hold(struct sctp_endpoint *ep)
{
- refcount_inc(&ep->base.refcnt);
+ return refcount_inc_not_zero(&ep->base.refcnt);
}
/* Release a reference to an endpoint and clean up if there are
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 33391254fa82..ad5028a07b18 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5338,11 +5338,12 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
}
EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
-int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
- int (*cb_done)(struct sctp_transport *, void *),
- struct net *net, int *pos, void *p) {
+int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
+ struct net *net, int *pos, void *p)
+{
struct rhashtable_iter hti;
struct sctp_transport *tsp;
+ struct sctp_endpoint *ep;
int ret;
again:
@@ -5351,26 +5352,32 @@ again:
tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
- ret = cb(tsp, p);
- if (ret)
- break;
+ ep = tsp->asoc->ep;
+ if (sctp_endpoint_hold(ep)) { /* asoc can be peeled off */
+ ret = cb(ep, tsp, p);
+ if (ret)
+ break;
+ sctp_endpoint_put(ep);
+ }
(*pos)++;
sctp_transport_put(tsp);
}
sctp_transport_walk_stop(&hti);
if (ret) {
- if (cb_done && !cb_done(tsp, p)) {
+ if (cb_done && !cb_done(ep, tsp, p)) {
(*pos)++;
+ sctp_endpoint_put(ep);
sctp_transport_put(tsp);
goto again;
}
+ sctp_endpoint_put(ep);
sctp_transport_put(tsp);
}
return ret;
}
-EXPORT_SYMBOL_GPL(sctp_for_each_transport);
+EXPORT_SYMBOL_GPL(sctp_transport_traverse_process);
/* 7.2.1 Association Status (SCTP_STATUS)
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index b61c802e3bf3..1c9289f56dc4 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -194,7 +194,9 @@ static int smc_release(struct socket *sock)
/* cleanup for a dangling non-blocking connect */
if (smc->connect_nonblock && sk->sk_state == SMC_INIT)
tcp_abort(smc->clcsock->sk, ECONNABORTED);
- flush_work(&smc->connect_work);
+
+ if (cancel_work_sync(&smc->connect_work))
+ sock_put(&smc->sk); /* sock_hold in smc_connect for passive closing */
if (sk->sk_state == SMC_LISTEN)
/* smc_close_non_accepted() is called and acquires
@@ -585,7 +587,7 @@ static void smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
* to clcsocket->wq during the fallback.
*/
spin_lock_irqsave(&smc_wait->lock, flags);
- spin_lock(&clc_wait->lock);
+ spin_lock_nested(&clc_wait->lock, SINGLE_DEPTH_NESTING);
list_splice_init(&smc_wait->head, &clc_wait->head);
spin_unlock(&clc_wait->lock);
spin_unlock_irqrestore(&smc_wait->lock, flags);
@@ -2134,8 +2136,10 @@ static int smc_listen(struct socket *sock, int backlog)
smc->clcsock->sk->sk_user_data =
(void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
rc = kernel_listen(smc->clcsock, backlog);
- if (rc)
+ if (rc) {
+ smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
goto out;
+ }
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
sk->sk_state = SMC_LISTEN;
@@ -2368,8 +2372,10 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
static int smc_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
+ bool do_shutdown = true;
struct smc_sock *smc;
int rc = -EINVAL;
+ int old_state;
int rc1 = 0;
smc = smc_sk(sk);
@@ -2396,7 +2402,11 @@ static int smc_shutdown(struct socket *sock, int how)
}
switch (how) {
case SHUT_RDWR: /* shutdown in both directions */
+ old_state = sk->sk_state;
rc = smc_close_active(smc);
+ if (old_state == SMC_ACTIVE &&
+ sk->sk_state == SMC_PEERCLOSEWAIT1)
+ do_shutdown = false;
break;
case SHUT_WR:
rc = smc_close_shutdown_write(smc);
@@ -2406,7 +2416,7 @@ static int smc_shutdown(struct socket *sock, int how)
/* nothing more to do because peer is not involved */
break;
}
- if (smc->clcsock)
+ if (do_shutdown && smc->clcsock)
rc1 = kernel_sock_shutdown(smc->clcsock, how);
/* map sock_shutdown_cmd constants to sk_shutdown value range */
sk->sk_shutdown |= how + 1;
diff --git a/net/smc/smc.h b/net/smc/smc.h
index f4286ca1f228..1a4fc1c6c4ab 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -180,6 +180,11 @@ struct smc_connection {
u16 tx_cdc_seq; /* sequence # for CDC send */
u16 tx_cdc_seq_fin; /* sequence # - tx completed */
spinlock_t send_lock; /* protect wr_sends */
+ atomic_t cdc_pend_tx_wr; /* number of pending tx CDC wqe
+ * - inc when post wqe,
+ * - dec on polled tx cqe
+ */
+ wait_queue_head_t cdc_pend_tx_wq; /* wakeup on no cdc_pend_tx_wr */
struct delayed_work tx_work; /* retry of smc_cdc_msg_send */
u32 tx_off; /* base offset in peer rmb */
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index 99acd337ba90..84c8a4374fdd 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -31,10 +31,6 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
struct smc_sock *smc;
int diff;
- if (!conn)
- /* already dismissed */
- return;
-
smc = container_of(conn, struct smc_sock, conn);
bh_lock_sock(&smc->sk);
if (!wc_status) {
@@ -51,6 +47,12 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
conn);
conn->tx_cdc_seq_fin = cdcpend->ctrl_seq;
}
+
+ if (atomic_dec_and_test(&conn->cdc_pend_tx_wr) &&
+ unlikely(wq_has_sleeper(&conn->cdc_pend_tx_wq)))
+ wake_up(&conn->cdc_pend_tx_wq);
+ WARN_ON(atomic_read(&conn->cdc_pend_tx_wr) < 0);
+
smc_tx_sndbuf_nonfull(smc);
bh_unlock_sock(&smc->sk);
}
@@ -107,6 +109,10 @@ int smc_cdc_msg_send(struct smc_connection *conn,
conn->tx_cdc_seq++;
conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, conn, &cfed);
+
+ atomic_inc(&conn->cdc_pend_tx_wr);
+ smp_mb__after_atomic(); /* Make sure cdc_pend_tx_wr added before post */
+
rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
if (!rc) {
smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn);
@@ -114,6 +120,7 @@ int smc_cdc_msg_send(struct smc_connection *conn,
} else {
conn->tx_cdc_seq--;
conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
+ atomic_dec(&conn->cdc_pend_tx_wr);
}
return rc;
@@ -136,7 +143,18 @@ int smcr_cdc_msg_send_validation(struct smc_connection *conn,
peer->token = htonl(local->token);
peer->prod_flags.failover_validation = 1;
+ /* We need to set pend->conn here to make sure smc_cdc_tx_handler()
+ * can handle it properly
+ */
+ smc_cdc_add_pending_send(conn, pend);
+
+ atomic_inc(&conn->cdc_pend_tx_wr);
+ smp_mb__after_atomic(); /* Make sure cdc_pend_tx_wr added before post */
+
rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
+ if (unlikely(rc))
+ atomic_dec(&conn->cdc_pend_tx_wr);
+
return rc;
}
@@ -193,31 +211,9 @@ int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
return rc;
}
-static bool smc_cdc_tx_filter(struct smc_wr_tx_pend_priv *tx_pend,
- unsigned long data)
+void smc_cdc_wait_pend_tx_wr(struct smc_connection *conn)
{
- struct smc_connection *conn = (struct smc_connection *)data;
- struct smc_cdc_tx_pend *cdc_pend =
- (struct smc_cdc_tx_pend *)tx_pend;
-
- return cdc_pend->conn == conn;
-}
-
-static void smc_cdc_tx_dismisser(struct smc_wr_tx_pend_priv *tx_pend)
-{
- struct smc_cdc_tx_pend *cdc_pend =
- (struct smc_cdc_tx_pend *)tx_pend;
-
- cdc_pend->conn = NULL;
-}
-
-void smc_cdc_tx_dismiss_slots(struct smc_connection *conn)
-{
- struct smc_link *link = conn->lnk;
-
- smc_wr_tx_dismiss_slots(link, SMC_CDC_MSG_TYPE,
- smc_cdc_tx_filter, smc_cdc_tx_dismisser,
- (unsigned long)conn);
+ wait_event(conn->cdc_pend_tx_wq, !atomic_read(&conn->cdc_pend_tx_wr));
}
/* Send a SMC-D CDC header.
diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h
index 0a0a89abd38b..696cc11f2303 100644
--- a/net/smc/smc_cdc.h
+++ b/net/smc/smc_cdc.h
@@ -291,7 +291,7 @@ int smc_cdc_get_free_slot(struct smc_connection *conn,
struct smc_wr_buf **wr_buf,
struct smc_rdma_wr **wr_rdma_buf,
struct smc_cdc_tx_pend **pend);
-void smc_cdc_tx_dismiss_slots(struct smc_connection *conn);
+void smc_cdc_wait_pend_tx_wr(struct smc_connection *conn);
int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf,
struct smc_cdc_tx_pend *pend);
int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn);
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index 0f9ffba07d26..292e4d904ab6 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -195,6 +195,7 @@ int smc_close_active(struct smc_sock *smc)
int old_state;
long timeout;
int rc = 0;
+ int rc1 = 0;
timeout = current->flags & PF_EXITING ?
0 : sock_flag(sk, SOCK_LINGER) ?
@@ -228,6 +229,15 @@ again:
/* send close request */
rc = smc_close_final(conn);
sk->sk_state = SMC_PEERCLOSEWAIT1;
+
+ /* actively shut down the clcsock before the peer closes it,
+ * to prevent the peer from entering TIME_WAIT state.
+ */
+ if (smc->clcsock && smc->clcsock->sk) {
+ rc1 = kernel_sock_shutdown(smc->clcsock,
+ SHUT_RDWR);
+ rc = rc ? rc : rc1;
+ }
} else {
/* peer event has changed the state */
goto again;
@@ -354,9 +364,9 @@ static void smc_close_passive_work(struct work_struct *work)
if (rxflags->peer_conn_abort) {
/* peer has not received all data */
smc_close_passive_abort_received(smc);
- release_sock(&smc->sk);
+ release_sock(sk);
cancel_delayed_work_sync(&conn->tx_work);
- lock_sock(&smc->sk);
+ lock_sock(sk);
goto wakeup;
}
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 25ebd30feecd..a6849362f4dd 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -625,18 +625,17 @@ int smcd_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb)
void smc_lgr_cleanup_early(struct smc_connection *conn)
{
struct smc_link_group *lgr = conn->lgr;
- struct list_head *lgr_list;
spinlock_t *lgr_lock;
if (!lgr)
return;
smc_conn_free(conn);
- lgr_list = smc_lgr_list_head(lgr, &lgr_lock);
+ smc_lgr_list_head(lgr, &lgr_lock);
spin_lock_bh(lgr_lock);
/* do not use this link group for new connections */
- if (!list_empty(lgr_list))
- list_del_init(lgr_list);
+ if (!list_empty(&lgr->list))
+ list_del_init(&lgr->list);
spin_unlock_bh(lgr_lock);
__smc_lgr_terminate(lgr, true);
}
@@ -648,7 +647,7 @@ static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr)
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
struct smc_link *lnk = &lgr->lnk[i];
- if (smc_link_usable(lnk))
+ if (smc_link_sendable(lnk))
lnk->state = SMC_LNK_INACTIVE;
}
wake_up_all(&lgr->llc_msg_waiter);
@@ -1128,7 +1127,7 @@ void smc_conn_free(struct smc_connection *conn)
smc_ism_unset_conn(conn);
tasklet_kill(&conn->rx_tsklet);
} else {
- smc_cdc_tx_dismiss_slots(conn);
+ smc_cdc_wait_pend_tx_wr(conn);
if (current_work() != &conn->abort_work)
cancel_work_sync(&conn->abort_work);
}
@@ -1205,7 +1204,7 @@ void smcr_link_clear(struct smc_link *lnk, bool log)
smc_llc_link_clear(lnk, log);
smcr_buf_unmap_lgr(lnk);
smcr_rtoken_clear_link(lnk);
- smc_ib_modify_qp_reset(lnk);
+ smc_ib_modify_qp_error(lnk);
smc_wr_free_link(lnk);
smc_ib_destroy_queue_pair(lnk);
smc_ib_dealloc_protection_domain(lnk);
@@ -1337,7 +1336,7 @@ static void smc_conn_kill(struct smc_connection *conn, bool soft)
else
tasklet_unlock_wait(&conn->rx_tsklet);
} else {
- smc_cdc_tx_dismiss_slots(conn);
+ smc_cdc_wait_pend_tx_wr(conn);
}
smc_lgr_unregister_conn(conn);
smc_close_active_abort(smc);
@@ -1460,11 +1459,16 @@ void smc_smcd_terminate_all(struct smcd_dev *smcd)
/* Called when an SMCR device is removed or the smc module is unloaded.
* If smcibdev is given, all SMCR link groups using this device are terminated.
* If smcibdev is NULL, all SMCR link groups are terminated.
+ *
+ * We must wait here for the QPs to be destroyed before we destroy the CQs,
+ * or we won't receive any CQEs and cdc_pend_tx_wr cannot reach 0, so the
+ * smc_sock cannot be released.
*/
void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
{
struct smc_link_group *lgr, *lg;
LIST_HEAD(lgr_free_list);
+ LIST_HEAD(lgr_linkdown_list);
int i;
spin_lock_bh(&smc_lgr_list.lock);
@@ -1476,7 +1480,7 @@ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
if (lgr->lnk[i].smcibdev == smcibdev)
- smcr_link_down_cond_sched(&lgr->lnk[i]);
+ list_move_tail(&lgr->list, &lgr_linkdown_list);
}
}
}
@@ -1488,6 +1492,16 @@ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
__smc_lgr_terminate(lgr, false);
}
+ list_for_each_entry_safe(lgr, lg, &lgr_linkdown_list, list) {
+ for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+ if (lgr->lnk[i].smcibdev == smcibdev) {
+ mutex_lock(&lgr->llc_conf_mutex);
+ smcr_link_down_cond(&lgr->lnk[i]);
+ mutex_unlock(&lgr->llc_conf_mutex);
+ }
+ }
+ }
+
if (smcibdev) {
if (atomic_read(&smcibdev->lnk_cnt))
wait_event(smcibdev->lnks_deleted,
@@ -1587,7 +1601,6 @@ static void smcr_link_down(struct smc_link *lnk)
if (!lgr || lnk->state == SMC_LNK_UNUSED || list_empty(&lgr->list))
return;
- smc_ib_modify_qp_reset(lnk);
to_lnk = smc_switch_conns(lgr, lnk, true);
if (!to_lnk) { /* no backup link available */
smcr_link_clear(lnk, true);
@@ -1672,14 +1685,26 @@ static void smc_link_down_work(struct work_struct *work)
mutex_unlock(&lgr->llc_conf_mutex);
}
-/* Determine vlan of internal TCP socket.
- * @vlan_id: address to store the determined vlan id into
- */
+static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev,
+ struct netdev_nested_priv *priv)
+{
+ unsigned short *vlan_id = (unsigned short *)priv->data;
+
+ if (is_vlan_dev(lower_dev)) {
+ *vlan_id = vlan_dev_vlan_id(lower_dev);
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Determine vlan of internal TCP socket. */
int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
{
struct dst_entry *dst = sk_dst_get(clcsock->sk);
+ struct netdev_nested_priv priv;
struct net_device *ndev;
- int i, nest_lvl, rc = 0;
+ int rc = 0;
ini->vlan_id = 0;
if (!dst) {
@@ -1697,20 +1722,9 @@ int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
goto out_rel;
}
+ priv.data = (void *)&ini->vlan_id;
rtnl_lock();
- nest_lvl = ndev->lower_level;
- for (i = 0; i < nest_lvl; i++) {
- struct list_head *lower = &ndev->adj_list.lower;
-
- if (list_empty(lower))
- break;
- lower = lower->next;
- ndev = (struct net_device *)netdev_lower_get_next(ndev, &lower);
- if (is_vlan_dev(ndev)) {
- ini->vlan_id = vlan_dev_vlan_id(ndev);
- break;
- }
- }
+ netdev_walk_all_lower_dev(ndev, smc_vlan_by_tcpsk_walk, &priv);
rtnl_unlock();
out_rel:
@@ -1824,6 +1838,7 @@ create:
conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
conn->urg_state = SMC_URG_READ;
+ init_waitqueue_head(&conn->cdc_pend_tx_wq);
INIT_WORK(&smc->conn.abort_work, smc_conn_abort_work);
if (ini->is_smcd) {
conn->rx_off = sizeof(struct smcd_cdc_msg);
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index 59cef3b830d8..d63b08274197 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -415,6 +415,12 @@ static inline bool smc_link_usable(struct smc_link *lnk)
return true;
}
+static inline bool smc_link_sendable(struct smc_link *lnk)
+{
+ return smc_link_usable(lnk) &&
+ lnk->qp_attr.cur_qp_state == IB_QPS_RTS;
+}
+
static inline bool smc_link_active(struct smc_link *lnk)
{
return lnk->state == SMC_LNK_ACTIVE;
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index d93055ec17ae..fe5d5399c4e8 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -109,12 +109,12 @@ int smc_ib_modify_qp_rts(struct smc_link *lnk)
IB_QP_MAX_QP_RD_ATOMIC);
}
-int smc_ib_modify_qp_reset(struct smc_link *lnk)
+int smc_ib_modify_qp_error(struct smc_link *lnk)
{
struct ib_qp_attr qp_attr;
memset(&qp_attr, 0, sizeof(qp_attr));
- qp_attr.qp_state = IB_QPS_RESET;
+ qp_attr.qp_state = IB_QPS_ERR;
return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
}
diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
index 07585937370e..bfa1c6bf6313 100644
--- a/net/smc/smc_ib.h
+++ b/net/smc/smc_ib.h
@@ -90,6 +90,7 @@ int smc_ib_create_queue_pair(struct smc_link *lnk);
int smc_ib_ready_link(struct smc_link *lnk);
int smc_ib_modify_qp_rts(struct smc_link *lnk);
int smc_ib_modify_qp_reset(struct smc_link *lnk);
+int smc_ib_modify_qp_error(struct smc_link *lnk);
long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev);
int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
struct smc_buf_desc *buf_slot, u8 link_idx);
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index b102680296b8..3e9fd8a3124c 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -1630,7 +1630,7 @@ void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord, u32 rsn)
delllc.reason = htonl(rsn);
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
- if (!smc_link_usable(&lgr->lnk[i]))
+ if (!smc_link_sendable(&lgr->lnk[i]))
continue;
if (!smc_llc_send_message_wait(&lgr->lnk[i], &delllc))
break;
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index 600ab5889227..c6cfdea8b71b 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -62,13 +62,9 @@ static inline bool smc_wr_is_tx_pend(struct smc_link *link)
}
/* wait till all pending tx work requests on the given link are completed */
-int smc_wr_tx_wait_no_pending_sends(struct smc_link *link)
+void smc_wr_tx_wait_no_pending_sends(struct smc_link *link)
{
- if (wait_event_timeout(link->wr_tx_wait, !smc_wr_is_tx_pend(link),
- SMC_WR_TX_WAIT_PENDING_TIME))
- return 0;
- else /* timeout */
- return -EPIPE;
+ wait_event(link->wr_tx_wait, !smc_wr_is_tx_pend(link));
}
static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id)
@@ -87,7 +83,6 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
struct smc_wr_tx_pend pnd_snd;
struct smc_link *link;
u32 pnd_snd_idx;
- int i;
link = wc->qp->qp_context;
@@ -128,14 +123,6 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
}
if (wc->status) {
- for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
- /* clear full struct smc_wr_tx_pend including .priv */
- memset(&link->wr_tx_pends[i], 0,
- sizeof(link->wr_tx_pends[i]));
- memset(&link->wr_tx_bufs[i], 0,
- sizeof(link->wr_tx_bufs[i]));
- clear_bit(i, link->wr_tx_mask);
- }
if (link->lgr->smc_version == SMC_V2) {
memset(link->wr_tx_v2_pend, 0,
sizeof(*link->wr_tx_v2_pend));
@@ -188,7 +175,7 @@ void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
{
*idx = link->wr_tx_cnt;
- if (!smc_link_usable(link))
+ if (!smc_link_sendable(link))
return -ENOLINK;
for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) {
if (!test_and_set_bit(*idx, link->wr_tx_mask))
@@ -231,7 +218,7 @@ int smc_wr_tx_get_free_slot(struct smc_link *link,
} else {
rc = wait_event_interruptible_timeout(
link->wr_tx_wait,
- !smc_link_usable(link) ||
+ !smc_link_sendable(link) ||
lgr->terminating ||
(smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY),
SMC_WR_TX_WAIT_FREE_SLOT_TIME);
@@ -358,18 +345,20 @@ int smc_wr_tx_send_wait(struct smc_link *link, struct smc_wr_tx_pend_priv *priv,
unsigned long timeout)
{
struct smc_wr_tx_pend *pend;
+ u32 pnd_idx;
int rc;
pend = container_of(priv, struct smc_wr_tx_pend, priv);
pend->compl_requested = 1;
- init_completion(&link->wr_tx_compl[pend->idx]);
+ pnd_idx = pend->idx;
+ init_completion(&link->wr_tx_compl[pnd_idx]);
rc = smc_wr_tx_send(link, priv);
if (rc)
return rc;
/* wait for completion by smc_wr_tx_process_cqe() */
rc = wait_for_completion_interruptible_timeout(
- &link->wr_tx_compl[pend->idx], timeout);
+ &link->wr_tx_compl[pnd_idx], timeout);
if (rc <= 0)
rc = -ENODATA;
if (rc > 0)
@@ -419,25 +408,6 @@ int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
return rc;
}
-void smc_wr_tx_dismiss_slots(struct smc_link *link, u8 wr_tx_hdr_type,
- smc_wr_tx_filter filter,
- smc_wr_tx_dismisser dismisser,
- unsigned long data)
-{
- struct smc_wr_tx_pend_priv *tx_pend;
- struct smc_wr_rx_hdr *wr_tx;
- int i;
-
- for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
- wr_tx = (struct smc_wr_rx_hdr *)&link->wr_tx_bufs[i];
- if (wr_tx->type != wr_tx_hdr_type)
- continue;
- tx_pend = &link->wr_tx_pends[i].priv;
- if (filter(tx_pend, data))
- dismisser(tx_pend);
- }
-}
-
/****************************** receive queue ********************************/
int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler)
@@ -673,10 +643,7 @@ void smc_wr_free_link(struct smc_link *lnk)
smc_wr_wakeup_reg_wait(lnk);
smc_wr_wakeup_tx_wait(lnk);
- if (smc_wr_tx_wait_no_pending_sends(lnk))
- memset(lnk->wr_tx_mask, 0,
- BITS_TO_LONGS(SMC_WR_BUF_CNT) *
- sizeof(*lnk->wr_tx_mask));
+ smc_wr_tx_wait_no_pending_sends(lnk);
wait_event(lnk->wr_reg_wait, (!atomic_read(&lnk->wr_reg_refcnt)));
wait_event(lnk->wr_tx_wait, (!atomic_read(&lnk->wr_tx_refcnt)));
diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h
index f353311e6f84..47512ccce5ef 100644
--- a/net/smc/smc_wr.h
+++ b/net/smc/smc_wr.h
@@ -22,7 +22,6 @@
#define SMC_WR_BUF_CNT 16 /* # of ctrl buffers per link */
#define SMC_WR_TX_WAIT_FREE_SLOT_TIME (10 * HZ)
-#define SMC_WR_TX_WAIT_PENDING_TIME (5 * HZ)
#define SMC_WR_TX_SIZE 44 /* actual size of wr_send data (<=SMC_WR_BUF_SIZE) */
@@ -62,7 +61,7 @@ static inline void smc_wr_tx_set_wr_id(atomic_long_t *wr_tx_id, long val)
static inline bool smc_wr_tx_link_hold(struct smc_link *link)
{
- if (!smc_link_usable(link))
+ if (!smc_link_sendable(link))
return false;
atomic_inc(&link->wr_tx_refcnt);
return true;
@@ -130,7 +129,7 @@ void smc_wr_tx_dismiss_slots(struct smc_link *lnk, u8 wr_rx_hdr_type,
smc_wr_tx_filter filter,
smc_wr_tx_dismisser dismisser,
unsigned long data);
-int smc_wr_tx_wait_no_pending_sends(struct smc_link *link);
+void smc_wr_tx_wait_no_pending_sends(struct smc_link *link);
int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler);
int smc_wr_rx_post_init(struct smc_link *link);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index ae48c9c84ee1..d8ee06a9650a 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1720,15 +1720,15 @@ static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port)
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-static struct lock_class_key xs_key[2];
-static struct lock_class_key xs_slock_key[2];
+static struct lock_class_key xs_key[3];
+static struct lock_class_key xs_slock_key[3];
static inline void xs_reclassify_socketu(struct socket *sock)
{
struct sock *sk = sock->sk;
sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
- &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]);
+ &xs_slock_key[0], "sk_lock-AF_LOCAL-RPC", &xs_key[0]);
}
static inline void xs_reclassify_socket4(struct socket *sock)
@@ -1736,7 +1736,7 @@ static inline void xs_reclassify_socket4(struct socket *sock)
struct sock *sk = sock->sk;
sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
- &xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]);
+ &xs_slock_key[1], "sk_lock-AF_INET-RPC", &xs_key[1]);
}
static inline void xs_reclassify_socket6(struct socket *sock)
@@ -1744,7 +1744,7 @@ static inline void xs_reclassify_socket6(struct socket *sock)
struct sock *sk = sock->sk;
sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
- &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
+ &xs_slock_key[2], "sk_lock-AF_INET6-RPC", &xs_key[2]);
}
static inline void xs_reclassify_socket(int family, struct socket *sock)
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
index b4d9419a015b..d293614d5fc6 100644
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -524,7 +524,7 @@ static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
return -EEXIST;
/* Allocate a new AEAD */
- tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
if (unlikely(!tmp))
return -ENOMEM;
@@ -1474,7 +1474,7 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
return -EEXIST;
/* Allocate crypto */
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = kzalloc(sizeof(*c), GFP_ATOMIC);
if (!c)
return -ENOMEM;
@@ -1488,7 +1488,7 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
}
/* Allocate statistic structure */
- c->stats = alloc_percpu(struct tipc_crypto_stats);
+ c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC);
if (!c->stats) {
if (c->wq)
destroy_workqueue(c->wq);
@@ -2461,7 +2461,7 @@ static void tipc_crypto_work_tx(struct work_struct *work)
}
/* Lets duplicate it first */
- skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_KERNEL);
+ skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_ATOMIC);
rcu_read_unlock();
/* Now, generate new key, initiate & distribute it */
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index acfba9f1ba72..6bc2879ba637 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -61,7 +61,7 @@ static DEFINE_MUTEX(tcpv6_prot_mutex);
static const struct proto *saved_tcpv4_prot;
static DEFINE_MUTEX(tcpv4_prot_mutex);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
-static struct proto_ops tls_sw_proto_ops;
+static struct proto_ops tls_proto_ops[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
const struct proto *base);
@@ -71,6 +71,8 @@ void update_sk_prot(struct sock *sk, struct tls_context *ctx)
WRITE_ONCE(sk->sk_prot,
&tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf]);
+ WRITE_ONCE(sk->sk_socket->ops,
+ &tls_proto_ops[ip_ver][ctx->tx_conf][ctx->rx_conf]);
}
int wait_on_pending_writer(struct sock *sk, long *timeo)
@@ -669,8 +671,6 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
if (tx) {
ctx->sk_write_space = sk->sk_write_space;
sk->sk_write_space = tls_write_space;
- } else {
- sk->sk_socket->ops = &tls_sw_proto_ops;
}
goto out;
@@ -728,6 +728,39 @@ struct tls_context *tls_ctx_create(struct sock *sk)
return ctx;
}
+static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
+ const struct proto_ops *base)
+{
+ ops[TLS_BASE][TLS_BASE] = *base;
+
+ ops[TLS_SW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
+ ops[TLS_SW ][TLS_BASE].sendpage_locked = tls_sw_sendpage_locked;
+
+ ops[TLS_BASE][TLS_SW ] = ops[TLS_BASE][TLS_BASE];
+ ops[TLS_BASE][TLS_SW ].splice_read = tls_sw_splice_read;
+
+ ops[TLS_SW ][TLS_SW ] = ops[TLS_SW ][TLS_BASE];
+ ops[TLS_SW ][TLS_SW ].splice_read = tls_sw_splice_read;
+
+#ifdef CONFIG_TLS_DEVICE
+ ops[TLS_HW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
+ ops[TLS_HW ][TLS_BASE].sendpage_locked = NULL;
+
+ ops[TLS_HW ][TLS_SW ] = ops[TLS_BASE][TLS_SW ];
+ ops[TLS_HW ][TLS_SW ].sendpage_locked = NULL;
+
+ ops[TLS_BASE][TLS_HW ] = ops[TLS_BASE][TLS_SW ];
+
+ ops[TLS_SW ][TLS_HW ] = ops[TLS_SW ][TLS_SW ];
+
+ ops[TLS_HW ][TLS_HW ] = ops[TLS_HW ][TLS_SW ];
+ ops[TLS_HW ][TLS_HW ].sendpage_locked = NULL;
+#endif
+#ifdef CONFIG_TLS_TOE
+ ops[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
+#endif
+}
+
static void tls_build_proto(struct sock *sk)
{
int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
@@ -739,6 +772,8 @@ static void tls_build_proto(struct sock *sk)
mutex_lock(&tcpv6_prot_mutex);
if (likely(prot != saved_tcpv6_prot)) {
build_protos(tls_prots[TLSV6], prot);
+ build_proto_ops(tls_proto_ops[TLSV6],
+ sk->sk_socket->ops);
smp_store_release(&saved_tcpv6_prot, prot);
}
mutex_unlock(&tcpv6_prot_mutex);
@@ -749,6 +784,8 @@ static void tls_build_proto(struct sock *sk)
mutex_lock(&tcpv4_prot_mutex);
if (likely(prot != saved_tcpv4_prot)) {
build_protos(tls_prots[TLSV4], prot);
+ build_proto_ops(tls_proto_ops[TLSV4],
+ sk->sk_socket->ops);
smp_store_release(&saved_tcpv4_prot, prot);
}
mutex_unlock(&tcpv4_prot_mutex);
@@ -959,10 +996,6 @@ static int __init tls_register(void)
if (err)
return err;
- tls_sw_proto_ops = inet_stream_ops;
- tls_sw_proto_ops.splice_read = tls_sw_splice_read;
- tls_sw_proto_ops.sendpage_locked = tls_sw_sendpage_locked;
-
tls_device_init();
tcp_register_ulp(&tcp_tls_ulp_ops);
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index d81564078557..dfe623a4e72f 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -521,7 +521,7 @@ static int tls_do_encryption(struct sock *sk,
memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
prot->iv_size + prot->salt_size);
- xor_iv_with_seq(prot, rec->iv_data, tls_ctx->tx.rec_seq);
+ xor_iv_with_seq(prot, rec->iv_data + iv_offset, tls_ctx->tx.rec_seq);
sge->offset += prot->prepend_size;
sge->length -= prot->prepend_size;
@@ -1499,7 +1499,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
else
memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
- xor_iv_with_seq(prot, iv, tls_ctx->rx.rec_seq);
+ xor_iv_with_seq(prot, iv + iv_offset, tls_ctx->rx.rec_seq);
/* Prepare AAD */
tls_make_aad(aad, rxm->full_len - prot->overhead_size +
@@ -2005,6 +2005,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
struct sock *sk = sock->sk;
struct sk_buff *skb;
ssize_t copied = 0;
+ bool from_queue;
int err = 0;
long timeo;
int chunk;
@@ -2014,25 +2015,28 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
- skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo, &err);
- if (!skb)
- goto splice_read_end;
-
- if (!ctx->decrypted) {
- err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
-
- /* splice does not support reading control messages */
- if (ctx->control != TLS_RECORD_TYPE_DATA) {
- err = -EINVAL;
+ from_queue = !skb_queue_empty(&ctx->rx_list);
+ if (from_queue) {
+ skb = __skb_dequeue(&ctx->rx_list);
+ } else {
+ skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo,
+ &err);
+ if (!skb)
goto splice_read_end;
- }
+ err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
if (err < 0) {
tls_err_abort(sk, -EBADMSG);
goto splice_read_end;
}
- ctx->decrypted = 1;
}
+
+ /* splice does not support reading control messages */
+ if (ctx->control != TLS_RECORD_TYPE_DATA) {
+ err = -EINVAL;
+ goto splice_read_end;
+ }
+
rxm = strp_msg(skb);
chunk = min_t(unsigned int, rxm->full_len, len);
@@ -2040,7 +2044,17 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
if (copied < 0)
goto splice_read_end;
- tls_sw_advance_skb(sk, skb, copied);
+ if (!from_queue) {
+ ctx->recv_pkt = NULL;
+ __strp_unpause(&ctx->strp);
+ }
+ if (chunk < rxm->full_len) {
+ __skb_queue_head(&ctx->rx_list, skb);
+ rxm->offset += len;
+ rxm->full_len -= len;
+ } else {
+ consume_skb(skb);
+ }
splice_read_end:
release_sock(sk);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 78e08e82c08c..b0bfc78e421c 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2882,9 +2882,6 @@ static int unix_shutdown(struct socket *sock, int mode)
unix_state_lock(sk);
sk->sk_shutdown |= mode;
- if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
- mode == SHUTDOWN_MASK)
- sk->sk_state = TCP_CLOSE;
other = unix_peer(sk);
if (other)
sock_hold(other);
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 59ee1be5a6dd..ec2c2afbf0d0 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -1299,7 +1299,8 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
space_available = virtio_transport_space_update(sk, pkt);
/* Update CID in case it has changed after a transport reset event */
- vsk->local_addr.svm_cid = dst.svm_cid;
+ if (vsk->local_addr.svm_cid != VMADDR_CID_ANY)
+ vsk->local_addr.svm_cid = dst.svm_cid;
if (space_available)
sk->sk_write_space(sk);
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index df87c7f3a049..f8f01a3e020b 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -133,6 +133,7 @@ static u32 reg_is_indoor_portid;
static void restore_regulatory_settings(bool reset_user, bool cached);
static void print_regdomain(const struct ieee80211_regdomain *rd);
+static void reg_process_hint(struct regulatory_request *reg_request);
static const struct ieee80211_regdomain *get_cfg80211_regdom(void)
{
@@ -1098,6 +1099,8 @@ int reg_reload_regdb(void)
const struct firmware *fw;
void *db;
int err;
+ const struct ieee80211_regdomain *current_regdomain;
+ struct regulatory_request *request;
err = request_firmware(&fw, "regulatory.db", &reg_pdev->dev);
if (err)
@@ -1118,8 +1121,26 @@ int reg_reload_regdb(void)
if (!IS_ERR_OR_NULL(regdb))
kfree(regdb);
regdb = db;
- rtnl_unlock();
+ /* reset regulatory domain */
+ current_regdomain = get_cfg80211_regdom();
+
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (!request) {
+ err = -ENOMEM;
+ goto out_unlock;
+ }
+
+ request->wiphy_idx = WIPHY_IDX_INVALID;
+ request->alpha2[0] = current_regdomain->alpha2[0];
+ request->alpha2[1] = current_regdomain->alpha2[1];
+ request->initiator = NL80211_REGDOM_SET_BY_CORE;
+ request->user_reg_hint_type = NL80211_USER_REG_HINT_USER;
+
+ reg_process_hint(request);
+
+out_unlock:
+ rtnl_unlock();
out:
release_firmware(fw);
return err;
@@ -2338,6 +2359,7 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
struct cfg80211_chan_def chandef = {};
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
enum nl80211_iftype iftype;
+ bool ret;
wdev_lock(wdev);
iftype = wdev->iftype;
@@ -2387,7 +2409,11 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
case NL80211_IFTYPE_ADHOC:
- return cfg80211_reg_can_beacon_relax(wiphy, &chandef, iftype);
+ wiphy_lock(wiphy);
+ ret = cfg80211_reg_can_beacon_relax(wiphy, &chandef, iftype);
+ wiphy_unlock(wiphy);
+
+ return ret;
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
return cfg80211_chandef_usable(wiphy, &chandef,
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index f16074eb53c7..7a466ea962c5 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -677,8 +677,6 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
struct xdp_sock *xs = xdp_sk(sk);
struct xsk_buff_pool *pool;
- sock_poll_wait(file, sock, wait);
-
if (unlikely(!xsk_is_bound(xs)))
return mask;
@@ -690,6 +688,8 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
else
/* Poll needs to drive Tx also in copy mode */
__xsk_sendmsg(sk);
+ } else {
+ sock_poll_wait(file, sock, wait);
}
if (xs->rx && !xskq_prod_is_empty(xs->rx))
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index bc4ad48ea4f0..fd39bb660ebc 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -83,6 +83,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
xskb = &pool->heads[i];
xskb->pool = pool;
xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
+ INIT_LIST_HEAD(&xskb->free_list_node);
if (pool->unaligned)
pool->free_heads[i] = xskb;
else
diff --git a/samples/ftrace/Makefile b/samples/ftrace/Makefile
index b9198e2eef28..faf8cdb79c5f 100644
--- a/samples/ftrace/Makefile
+++ b/samples/ftrace/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct.o
obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct-too.o
obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct-modify.o
obj-$(CONFIG_SAMPLE_FTRACE_DIRECT_MULTI) += ftrace-direct-multi.o
+obj-$(CONFIG_SAMPLE_FTRACE_DIRECT_MULTI) += ftrace-direct-multi-modify.o
CFLAGS_sample-trace-array.o := -I$(src)
obj-$(CONFIG_SAMPLE_TRACE_ARRAY) += sample-trace-array.o
diff --git a/samples/ftrace/ftrace-direct-multi-modify.c b/samples/ftrace/ftrace-direct-multi-modify.c
new file mode 100644
index 000000000000..91bc42a7adb9
--- /dev/null
+++ b/samples/ftrace/ftrace-direct-multi-modify.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/ftrace.h>
+#include <asm/asm-offsets.h>
+
+void my_direct_func1(unsigned long ip)
+{
+ trace_printk("my direct func1 ip %lx\n", ip);
+}
+
+void my_direct_func2(unsigned long ip)
+{
+ trace_printk("my direct func2 ip %lx\n", ip);
+}
+
+extern void my_tramp1(void *);
+extern void my_tramp2(void *);
+
+#ifdef CONFIG_X86_64
+
+asm (
+" .pushsection .text, \"ax\", @progbits\n"
+" .type my_tramp1, @function\n"
+" .globl my_tramp1\n"
+" my_tramp1:"
+" pushq %rbp\n"
+" movq %rsp, %rbp\n"
+" pushq %rdi\n"
+" movq 8(%rbp), %rdi\n"
+" call my_direct_func1\n"
+" popq %rdi\n"
+" leave\n"
+" ret\n"
+" .size my_tramp1, .-my_tramp1\n"
+" .type my_tramp2, @function\n"
+"\n"
+" .globl my_tramp2\n"
+" my_tramp2:"
+" pushq %rbp\n"
+" movq %rsp, %rbp\n"
+" pushq %rdi\n"
+" movq 8(%rbp), %rdi\n"
+" call my_direct_func2\n"
+" popq %rdi\n"
+" leave\n"
+" ret\n"
+" .size my_tramp2, .-my_tramp2\n"
+" .popsection\n"
+);
+
+#endif /* CONFIG_X86_64 */
+
+#ifdef CONFIG_S390
+
+asm (
+" .pushsection .text, \"ax\", @progbits\n"
+" .type my_tramp1, @function\n"
+" .globl my_tramp1\n"
+" my_tramp1:"
+" lgr %r1,%r15\n"
+" stmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+" stg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+" aghi %r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
+" stg %r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
+" lgr %r2,%r0\n"
+" brasl %r14,my_direct_func1\n"
+" aghi %r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
+" lmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+" lg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+" lgr %r1,%r0\n"
+" br %r1\n"
+" .size my_tramp1, .-my_tramp1\n"
+"\n"
+" .type my_tramp2, @function\n"
+" .globl my_tramp2\n"
+" my_tramp2:"
+" lgr %r1,%r15\n"
+" stmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+" stg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+" aghi %r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
+" stg %r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
+" lgr %r2,%r0\n"
+" brasl %r14,my_direct_func2\n"
+" aghi %r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
+" lmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+" lg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+" lgr %r1,%r0\n"
+" br %r1\n"
+" .size my_tramp2, .-my_tramp2\n"
+" .popsection\n"
+);
+
+#endif /* CONFIG_S390 */
+
+static unsigned long my_tramp = (unsigned long)my_tramp1;
+static unsigned long tramps[2] = {
+ (unsigned long)my_tramp1,
+ (unsigned long)my_tramp2,
+};
+
+static struct ftrace_ops direct;
+
+static int simple_thread(void *arg)
+{
+ static int t;
+ int ret = 0;
+
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(2 * HZ);
+
+ if (ret)
+ continue;
+ t ^= 1;
+ ret = modify_ftrace_direct_multi(&direct, tramps[t]);
+ if (!ret)
+ my_tramp = tramps[t];
+ WARN_ON_ONCE(ret);
+ }
+
+ return 0;
+}
+
+static struct task_struct *simple_tsk;
+
+static int __init ftrace_direct_multi_init(void)
+{
+ int ret;
+
+ ftrace_set_filter_ip(&direct, (unsigned long) wake_up_process, 0, 0);
+ ftrace_set_filter_ip(&direct, (unsigned long) schedule, 0, 0);
+
+ ret = register_ftrace_direct_multi(&direct, my_tramp);
+
+ if (!ret)
+ simple_tsk = kthread_run(simple_thread, NULL, "event-sample-fn");
+ return ret;
+}
+
+static void __exit ftrace_direct_multi_exit(void)
+{
+ kthread_stop(simple_tsk);
+ unregister_ftrace_direct_multi(&direct, my_tramp);
+}
+
+module_init(ftrace_direct_multi_init);
+module_exit(ftrace_direct_multi_exit);
+
+MODULE_AUTHOR("Jiri Olsa");
+MODULE_DESCRIPTION("Example use case of using modify_ftrace_direct_multi()");
+MODULE_LICENSE("GPL");
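The new sample boils down to three ftrace calls: install filters on one ftrace_ops, attach a direct trampoline for every filtered ip, and later retarget all of them at once. A condensed sketch of that sequence (error handling trimmed, trampoline and target symbols as in the sample above; this is illustrative, not a replacement for the module):

	static struct ftrace_ops ops;

	static int example_attach(void)
	{
		/* trace wake_up_process() and schedule() via a single ops */
		ftrace_set_filter_ip(&ops, (unsigned long)wake_up_process, 0, 0);
		ftrace_set_filter_ip(&ops, (unsigned long)schedule, 0, 0);
		return register_ftrace_direct_multi(&ops, (unsigned long)my_tramp1);
	}

	static int example_swap(void)
	{
		/* atomically retarget every filtered ip to the other trampoline */
		return modify_ftrace_direct_multi(&ops, (unsigned long)my_tramp2);
	}

	static void example_detach(void)
	{
		unregister_ftrace_direct_multi(&ops, (unsigned long)my_tramp2);
	}

In the sample, simple_thread() performs the swap every two seconds, so the trace output should alternate between the func1 and func2 messages.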
diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
index cc3625617a0e..c0d3bcb99138 100644
--- a/scripts/mod/devicetable-offsets.c
+++ b/scripts/mod/devicetable-offsets.c
@@ -259,5 +259,8 @@ int main(void)
DEVID_FIELD(dfl_device_id, type);
DEVID_FIELD(dfl_device_id, feature_id);
+ DEVID(ishtp_device_id);
+ DEVID_FIELD(ishtp_device_id, guid);
+
return 0;
}
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 49aba862073e..5258247d78ac 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -115,6 +115,17 @@ static inline void add_uuid(char *str, uuid_le uuid)
uuid.b[12], uuid.b[13], uuid.b[14], uuid.b[15]);
}
+static inline void add_guid(char *str, guid_t guid)
+{
+ int len = strlen(str);
+
+ sprintf(str + len, "%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X",
+ guid.b[3], guid.b[2], guid.b[1], guid.b[0],
+ guid.b[5], guid.b[4], guid.b[7], guid.b[6],
+ guid.b[8], guid.b[9], guid.b[10], guid.b[11],
+ guid.b[12], guid.b[13], guid.b[14], guid.b[15]);
+}
+
/**
* Check that sizeof(device_id type) are consistent with size of section
* in .o file. If in-consistent then userspace and kernel does not agree
@@ -1380,6 +1391,18 @@ static int do_mhi_entry(const char *filename, void *symval, char *alias)
return 1;
}
+/* Looks like: ishtp:{guid} */
+static int do_ishtp_entry(const char *filename, void *symval, char *alias)
+{
+ DEF_FIELD(symval, ishtp_device_id, guid);
+
+ strcpy(alias, ISHTP_MODULE_PREFIX "{");
+ add_guid(alias, guid);
+ strcat(alias, "}");
+
+ return 1;
+}
+
static int do_auxiliary_entry(const char *filename, void *symval, char *alias)
{
DEF_FIELD_ADDR(symval, auxiliary_device_id, name);
@@ -1499,6 +1522,7 @@ static const struct devtable devtable[] = {
{"auxiliary", SIZE_auxiliary_device_id, do_auxiliary_entry},
{"ssam", SIZE_ssam_device_id, do_ssam_entry},
{"dfl", SIZE_dfl_device_id, do_dfl_entry},
+ {"ishtp", SIZE_ishtp_device_id, do_ishtp_entry},
};
/* Create MODULE_ALIAS() statements.
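With the table entry above, modpost emits an alias of the form ishtp:{GUID} (via ISHTP_MODULE_PREFIX and add_guid()) for every entry in a driver's ishtp_device_id table. A hypothetical driver-side declaration, with a made-up GUID purely for illustration, would look like:

	static const struct ishtp_device_id example_ishtp_ids[] = {
		{ .guid = GUID_INIT(0x33AECD58, 0xB679, 0x4E54,
				    0x9B, 0xD9, 0xA0, 0x4D, 0x34, 0xF0, 0xC2, 0x26) },
		{ }
	};
	MODULE_DEVICE_TABLE(ishtp, example_ishtp_ids);
	/* resulting alias: "ishtp:{33AECD58-B679-4E54-9BD9-A04D34F0C226}" */

The byte order in the printed GUID follows add_guid() above: the first three groups are byte-swapped (little-endian storage printed big-endian), the rest are printed in storage order.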
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 7d631aaa0ae1..3ccb2c70add4 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -219,7 +219,7 @@ if ($arch eq "x86_64") {
} elsif ($arch eq "s390" && $bits == 64) {
if ($cc =~ /-DCC_USING_HOTPATCH/) {
- $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*brcl\\s*0,[0-9a-f]+ <([^\+]*)>\$";
+ $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*(brcl\\s*0,|jgnop\\s*)[0-9a-f]+ <([^\+]*)>\$";
$mcount_adjust = 0;
}
$alignment = 8;
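The regex change covers a disassembler difference: newer binutils prints the six-byte "brcl 0,..." hotpatch padding as the extended mnemonic "jgnop", so recordmcount must accept either spelling. Roughly, both of the following objdump lines now match the pattern, with older binutils producing

	40:	c0 04 00 00 00 00	brcl	0,40 <my_func>

and newer binutils producing

	40:	c0 04 00 00 00 00	jgnop	40 <my_func>

where my_func and the offsets are illustrative only.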
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 62d30c0a30c2..dde4ecc0cd18 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -611,10 +611,11 @@ static int bad_option(struct superblock_security_struct *sbsec, char flag,
return 0;
}
-static int parse_sid(struct super_block *sb, const char *s, u32 *sid)
+static int parse_sid(struct super_block *sb, const char *s, u32 *sid,
+ gfp_t gfp)
{
int rc = security_context_str_to_sid(&selinux_state, s,
- sid, GFP_KERNEL);
+ sid, gfp);
if (rc)
pr_warn("SELinux: security_context_str_to_sid"
"(%s) failed for (dev %s, type %s) errno=%d\n",
@@ -685,7 +686,8 @@ static int selinux_set_mnt_opts(struct super_block *sb,
*/
if (opts) {
if (opts->fscontext) {
- rc = parse_sid(sb, opts->fscontext, &fscontext_sid);
+ rc = parse_sid(sb, opts->fscontext, &fscontext_sid,
+ GFP_KERNEL);
if (rc)
goto out;
if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid,
@@ -694,7 +696,8 @@ static int selinux_set_mnt_opts(struct super_block *sb,
sbsec->flags |= FSCONTEXT_MNT;
}
if (opts->context) {
- rc = parse_sid(sb, opts->context, &context_sid);
+ rc = parse_sid(sb, opts->context, &context_sid,
+ GFP_KERNEL);
if (rc)
goto out;
if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid,
@@ -703,7 +706,8 @@ static int selinux_set_mnt_opts(struct super_block *sb,
sbsec->flags |= CONTEXT_MNT;
}
if (opts->rootcontext) {
- rc = parse_sid(sb, opts->rootcontext, &rootcontext_sid);
+ rc = parse_sid(sb, opts->rootcontext, &rootcontext_sid,
+ GFP_KERNEL);
if (rc)
goto out;
if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid,
@@ -712,7 +716,8 @@ static int selinux_set_mnt_opts(struct super_block *sb,
sbsec->flags |= ROOTCONTEXT_MNT;
}
if (opts->defcontext) {
- rc = parse_sid(sb, opts->defcontext, &defcontext_sid);
+ rc = parse_sid(sb, opts->defcontext, &defcontext_sid,
+ GFP_KERNEL);
if (rc)
goto out;
if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid,
@@ -2702,14 +2707,14 @@ static int selinux_sb_mnt_opts_compat(struct super_block *sb, void *mnt_opts)
return (sbsec->flags & SE_MNTMASK) ? 1 : 0;
if (opts->fscontext) {
- rc = parse_sid(sb, opts->fscontext, &sid);
+ rc = parse_sid(sb, opts->fscontext, &sid, GFP_NOWAIT);
if (rc)
return 1;
if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid, sid))
return 1;
}
if (opts->context) {
- rc = parse_sid(sb, opts->context, &sid);
+ rc = parse_sid(sb, opts->context, &sid, GFP_NOWAIT);
if (rc)
return 1;
if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid, sid))
@@ -2719,14 +2724,14 @@ static int selinux_sb_mnt_opts_compat(struct super_block *sb, void *mnt_opts)
struct inode_security_struct *root_isec;
root_isec = backing_inode_security(sb->s_root);
- rc = parse_sid(sb, opts->rootcontext, &sid);
+ rc = parse_sid(sb, opts->rootcontext, &sid, GFP_NOWAIT);
if (rc)
return 1;
if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid, sid))
return 1;
}
if (opts->defcontext) {
- rc = parse_sid(sb, opts->defcontext, &sid);
+ rc = parse_sid(sb, opts->defcontext, &sid, GFP_NOWAIT);
if (rc)
return 1;
if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid, sid))
@@ -2749,14 +2754,14 @@ static int selinux_sb_remount(struct super_block *sb, void *mnt_opts)
return 0;
if (opts->fscontext) {
- rc = parse_sid(sb, opts->fscontext, &sid);
+ rc = parse_sid(sb, opts->fscontext, &sid, GFP_KERNEL);
if (rc)
return rc;
if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid, sid))
goto out_bad_option;
}
if (opts->context) {
- rc = parse_sid(sb, opts->context, &sid);
+ rc = parse_sid(sb, opts->context, &sid, GFP_KERNEL);
if (rc)
return rc;
if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid, sid))
@@ -2765,14 +2770,14 @@ static int selinux_sb_remount(struct super_block *sb, void *mnt_opts)
if (opts->rootcontext) {
struct inode_security_struct *root_isec;
root_isec = backing_inode_security(sb->s_root);
- rc = parse_sid(sb, opts->rootcontext, &sid);
+ rc = parse_sid(sb, opts->rootcontext, &sid, GFP_KERNEL);
if (rc)
return rc;
if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid, sid))
goto out_bad_option;
}
if (opts->defcontext) {
- rc = parse_sid(sb, opts->defcontext, &sid);
+ rc = parse_sid(sb, opts->defcontext, &sid, GFP_KERNEL);
if (rc)
return rc;
if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid, sid))
@@ -5780,7 +5785,7 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb,
struct sk_security_struct *sksec;
struct common_audit_data ad;
struct lsm_network_audit net = {0,};
- u8 proto;
+ u8 proto = 0;
sk = skb_to_full_sk(skb);
if (sk == NULL)
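The extra gfp_t argument lets each caller pick an allocation mode that fits its context: the mount-setup and remount paths keep GFP_KERNEL, while selinux_sb_mnt_opts_compat(), which can be reached with a spinlock held, now passes GFP_NOWAIT so the SID lookup cannot sleep. The general shape of the pattern, shown with a hypothetical helper rather than the SELinux code itself, is:

	static int lookup_ctx(const char *s, u32 *out, gfp_t gfp)
	{
		char *copy = kstrdup(s, gfp);	/* honours GFP_NOWAIT: no sleeping */

		if (!copy)
			return -ENOMEM;
		/* ... parse copy and fill *out ... */
		kfree(copy);
		return 0;
	}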
diff --git a/security/selinux/ss/hashtab.c b/security/selinux/ss/hashtab.c
index 727c3b484bd3..0ae4e4e57a40 100644
--- a/security/selinux/ss/hashtab.c
+++ b/security/selinux/ss/hashtab.c
@@ -31,13 +31,20 @@ static u32 hashtab_compute_size(u32 nel)
int hashtab_init(struct hashtab *h, u32 nel_hint)
{
- h->size = hashtab_compute_size(nel_hint);
+ u32 size = hashtab_compute_size(nel_hint);
+
+ /* should already be zeroed, but better be safe */
h->nel = 0;
- if (!h->size)
- return 0;
+ h->size = 0;
+ h->htable = NULL;
- h->htable = kcalloc(h->size, sizeof(*h->htable), GFP_KERNEL);
- return h->htable ? 0 : -ENOMEM;
+ if (size) {
+ h->htable = kcalloc(size, sizeof(*h->htable), GFP_KERNEL);
+ if (!h->htable)
+ return -ENOMEM;
+ h->size = size;
+ }
+ return 0;
}
int __hashtab_insert(struct hashtab *h, struct hashtab_node **dst,
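The reordering matters because the old code set h->size before the allocation and left it set even when kcalloc() failed, so error-path cleanup or lookups that iterate up to h->size could dereference a NULL htable. Annotated, the removed sequence was:

	h->size = hashtab_compute_size(nel_hint);	/* non-zero for a non-zero hint */
	h->nel = 0;
	if (!h->size)
		return 0;
	h->htable = kcalloc(h->size, sizeof(*h->htable), GFP_KERNEL);
	return h->htable ? 0 : -ENOMEM;			/* size stays non-zero on failure */

The replacement only publishes the size once the table is actually allocated.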
diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
index 1da2e3722b12..6799b1122c9d 100644
--- a/security/tomoyo/util.c
+++ b/security/tomoyo/util.c
@@ -1051,10 +1051,11 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
return false;
if (!domain)
return true;
+ if (READ_ONCE(domain->flags[TOMOYO_DIF_QUOTA_WARNED]))
+ return false;
list_for_each_entry_rcu(ptr, &domain->acl_info_list, list,
srcu_read_lock_held(&tomoyo_ss)) {
u16 perm;
- u8 i;
if (ptr->is_deleted)
continue;
@@ -1065,23 +1066,23 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
*/
switch (ptr->type) {
case TOMOYO_TYPE_PATH_ACL:
- data_race(perm = container_of(ptr, struct tomoyo_path_acl, head)->perm);
+ perm = data_race(container_of(ptr, struct tomoyo_path_acl, head)->perm);
break;
case TOMOYO_TYPE_PATH2_ACL:
- data_race(perm = container_of(ptr, struct tomoyo_path2_acl, head)->perm);
+ perm = data_race(container_of(ptr, struct tomoyo_path2_acl, head)->perm);
break;
case TOMOYO_TYPE_PATH_NUMBER_ACL:
- data_race(perm = container_of(ptr, struct tomoyo_path_number_acl, head)
+ perm = data_race(container_of(ptr, struct tomoyo_path_number_acl, head)
->perm);
break;
case TOMOYO_TYPE_MKDEV_ACL:
- data_race(perm = container_of(ptr, struct tomoyo_mkdev_acl, head)->perm);
+ perm = data_race(container_of(ptr, struct tomoyo_mkdev_acl, head)->perm);
break;
case TOMOYO_TYPE_INET_ACL:
- data_race(perm = container_of(ptr, struct tomoyo_inet_acl, head)->perm);
+ perm = data_race(container_of(ptr, struct tomoyo_inet_acl, head)->perm);
break;
case TOMOYO_TYPE_UNIX_ACL:
- data_race(perm = container_of(ptr, struct tomoyo_unix_acl, head)->perm);
+ perm = data_race(container_of(ptr, struct tomoyo_unix_acl, head)->perm);
break;
case TOMOYO_TYPE_MANUAL_TASK_ACL:
perm = 0;
@@ -1089,21 +1090,17 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
default:
perm = 1;
}
- for (i = 0; i < 16; i++)
- if (perm & (1 << i))
- count++;
+ count += hweight16(perm);
}
if (count < tomoyo_profile(domain->ns, domain->profile)->
pref[TOMOYO_PREF_MAX_LEARNING_ENTRY])
return true;
- if (!domain->flags[TOMOYO_DIF_QUOTA_WARNED]) {
- domain->flags[TOMOYO_DIF_QUOTA_WARNED] = true;
- /* r->granted = false; */
- tomoyo_write_log(r, "%s", tomoyo_dif[TOMOYO_DIF_QUOTA_WARNED]);
+ WRITE_ONCE(domain->flags[TOMOYO_DIF_QUOTA_WARNED], true);
+ /* r->granted = false; */
+ tomoyo_write_log(r, "%s", tomoyo_dif[TOMOYO_DIF_QUOTA_WARNED]);
#ifndef CONFIG_SECURITY_TOMOYO_INSECURE_BUILTIN_SETTING
- pr_warn("WARNING: Domain '%s' has too many ACLs to hold. Stopped learning mode.\n",
- domain->domainname->name);
+ pr_warn("WARNING: Domain '%s' has too many ACLs to hold. Stopped learning mode.\n",
+ domain->domainname->name);
#endif
- }
return false;
}
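Two small correctness points are folded into this hunk: the data_race() annotation is narrowed to the lockless read of ->perm instead of also covering the assignment to the local, and the open-coded 16-bit popcount loop is exactly what hweight16() from linux/bitops.h provides. In isolation the pattern is (acl stands in for the container_of() result in the real code):

	u16 perm;

	perm = data_race(acl->perm);	/* annotate only the lockless read */
	count += hweight16(perm);	/* count the set permission bits */

The quota-warned flag is likewise switched to READ_ONCE()/WRITE_ONCE() pairs so the early-return check and the update are properly marked accesses.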
diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c
index 470dabc60aa0..edff063e088d 100644
--- a/sound/core/control_compat.c
+++ b/sound/core/control_compat.c
@@ -264,6 +264,7 @@ static int copy_ctl_value_to_user(void __user *userdata,
struct snd_ctl_elem_value *data,
int type, int count)
{
+ struct snd_ctl_elem_value32 __user *data32 = userdata;
int i, size;
if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
@@ -280,6 +281,8 @@ static int copy_ctl_value_to_user(void __user *userdata,
if (copy_to_user(valuep, data->value.bytes.data, size))
return -EFAULT;
}
+ if (copy_to_user(&data32->id, &data->id, sizeof(data32->id)))
+ return -EFAULT;
return 0;
}
diff --git a/sound/core/jack.c b/sound/core/jack.c
index 32350c6aba84..537df1e98f8a 100644
--- a/sound/core/jack.c
+++ b/sound/core/jack.c
@@ -509,6 +509,10 @@ int snd_jack_new(struct snd_card *card, const char *id, int type,
return -ENOMEM;
jack->id = kstrdup(id, GFP_KERNEL);
+ if (jack->id == NULL) {
+ kfree(jack);
+ return -ENOMEM;
+ }
/* don't creat input device for phantom jack */
if (!phantom_jack) {
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 82a818734a5f..20a0a4771b9a 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -147,7 +147,7 @@ snd_pcm_hw_param_value_min(const struct snd_pcm_hw_params *params,
*
* Return the maximum value for field PAR.
*/
-static unsigned int
+static int
snd_pcm_hw_param_value_max(const struct snd_pcm_hw_params *params,
snd_pcm_hw_param_t var, int *dir)
{
@@ -682,18 +682,24 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *oss_params,
struct snd_pcm_hw_params *slave_params)
{
- size_t s;
- size_t oss_buffer_size, oss_period_size, oss_periods;
- size_t min_period_size, max_period_size;
+ ssize_t s;
+ ssize_t oss_buffer_size;
+ ssize_t oss_period_size, oss_periods;
+ ssize_t min_period_size, max_period_size;
struct snd_pcm_runtime *runtime = substream->runtime;
size_t oss_frame_size;
oss_frame_size = snd_pcm_format_physical_width(params_format(oss_params)) *
params_channels(oss_params) / 8;
+ oss_buffer_size = snd_pcm_hw_param_value_max(slave_params,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+ NULL);
+ if (oss_buffer_size <= 0)
+ return -EINVAL;
oss_buffer_size = snd_pcm_plug_client_size(substream,
- snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, NULL)) * oss_frame_size;
- if (!oss_buffer_size)
+ oss_buffer_size * oss_frame_size);
+ if (oss_buffer_size <= 0)
return -EINVAL;
oss_buffer_size = rounddown_pow_of_two(oss_buffer_size);
if (atomic_read(&substream->mmap_count)) {
@@ -730,7 +736,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
min_period_size = snd_pcm_plug_client_size(substream,
snd_pcm_hw_param_value_min(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL));
- if (min_period_size) {
+ if (min_period_size > 0) {
min_period_size *= oss_frame_size;
min_period_size = roundup_pow_of_two(min_period_size);
if (oss_period_size < min_period_size)
@@ -739,7 +745,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
max_period_size = snd_pcm_plug_client_size(substream,
snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL));
- if (max_period_size) {
+ if (max_period_size > 0) {
max_period_size *= oss_frame_size;
max_period_size = rounddown_pow_of_two(max_period_size);
if (oss_period_size > max_period_size)
@@ -752,7 +758,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
oss_periods = substream->oss.setup.periods;
s = snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIODS, NULL);
- if (runtime->oss.maxfrags && s > runtime->oss.maxfrags)
+ if (s > 0 && runtime->oss.maxfrags && s > runtime->oss.maxfrags)
s = runtime->oss.maxfrags;
if (oss_periods > s)
oss_periods = s;
@@ -878,8 +884,15 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
err = -EINVAL;
goto failure;
}
- choose_rate(substream, sparams, runtime->oss.rate);
- snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_CHANNELS, runtime->oss.channels, NULL);
+
+ err = choose_rate(substream, sparams, runtime->oss.rate);
+ if (err < 0)
+ goto failure;
+ err = snd_pcm_hw_param_near(substream, sparams,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ runtime->oss.channels, NULL);
+ if (err < 0)
+ goto failure;
format = snd_pcm_oss_format_from(runtime->oss.format);
@@ -1956,7 +1969,7 @@ static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsign
if (runtime->oss.subdivision || runtime->oss.fragshift)
return -EINVAL;
fragshift = val & 0xffff;
- if (fragshift >= 31)
+ if (fragshift >= 25) /* should be large enough */
return -EINVAL;
runtime->oss.fragshift = fragshift;
runtime->oss.maxfrags = (val >> 16) & 0xffff;
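Two related hardenings here: snd_pcm_hw_param_value_max() can legitimately fail, so its result is now signed and every caller checks for a positive value before scaling it, and the user-supplied fragment shift is capped so the largest OSS fragment is 1 << 24 bytes (16 MiB), well away from overflowing the later size arithmetic. The caller-side shape is roughly:

	int frames = snd_pcm_hw_param_value_max(slave_params,
						SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
						NULL);
	if (frames <= 0)		/* zero or a negative error code */
		return -EINVAL;
	/* ... only then convert frames to bytes and round to a power of two ... */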
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 6f30231bdb88..befa9809ff00 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -447,6 +447,7 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file)
err = -ENOMEM;
goto __error;
}
+ rawmidi_file->user_pversion = 0;
init_waitqueue_entry(&wait, current);
add_wait_queue(&rmidi->open_wait, &wait);
while (1) {
diff --git a/sound/drivers/opl3/opl3_midi.c b/sound/drivers/opl3/opl3_midi.c
index e1b69c65c3c8..e2b7be67f0e3 100644
--- a/sound/drivers/opl3/opl3_midi.c
+++ b/sound/drivers/opl3/opl3_midi.c
@@ -397,7 +397,7 @@ void snd_opl3_note_on(void *p, int note, int vel, struct snd_midi_channel *chan)
}
if (instr_4op) {
vp2 = &opl3->voices[voice + 3];
- if (vp->state > 0) {
+ if (vp2->state > 0) {
opl3_reg = reg_side | (OPL3_REG_KEYON_BLOCK +
voice_offset + 3);
reg_val = vp->keyon_reg & ~OPL3_KEYON_BIT;
diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
index b9ac9e9e45a4..4208fa8a4db5 100644
--- a/sound/hda/intel-dsp-config.c
+++ b/sound/hda/intel-dsp-config.c
@@ -252,6 +252,11 @@ static const struct config_entry config_table[] = {
.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
.device = 0x02c8,
},
+ {
+ .flags = FLAG_SOF,
+ .device = 0x02c8,
+ .codec_hid = "ESSX8336",
+ },
/* Cometlake-H */
{
.flags = FLAG_SOF,
@@ -276,6 +281,11 @@ static const struct config_entry config_table[] = {
.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
.device = 0x06c8,
},
+ {
+ .flags = FLAG_SOF,
+ .device = 0x06c8,
+ .codec_hid = "ESSX8336",
+ },
#endif
/* Icelake */
@@ -299,6 +309,15 @@ static const struct config_entry config_table[] = {
},
#endif
+/* JasperLake */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_JASPERLAKE)
+ {
+ .flags = FLAG_SOF,
+ .device = 0x4dc8,
+ .codec_hid = "ESSX8336",
+ },
+#endif
+
/* Tigerlake */
#if IS_ENABLED(CONFIG_SND_SOC_SOF_TIGERLAKE)
{
diff --git a/sound/hda/intel-sdw-acpi.c b/sound/hda/intel-sdw-acpi.c
index c0123bc31c0d..b7758dbe2371 100644
--- a/sound/hda/intel-sdw-acpi.c
+++ b/sound/hda/intel-sdw-acpi.c
@@ -132,8 +132,6 @@ static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
return AE_NOT_FOUND;
}
- info->handle = handle;
-
/*
* On some Intel platforms, multiple children of the HDAS
* device can be found, but only one of them is the SoundWire
@@ -144,6 +142,9 @@ static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
if (FIELD_GET(GENMASK(31, 28), adr) != SDW_LINK_TYPE)
return AE_OK; /* keep going */
+ /* found the correct SoundWire controller */
+ info->handle = handle;
+
/* device found, stop namespace walk */
return AE_CTRL_TERMINATE;
}
@@ -164,8 +165,14 @@ int sdw_intel_acpi_scan(acpi_handle *parent_handle,
acpi_status status;
info->handle = NULL;
+ /*
+ * In the HDAS ACPI scope, 'SNDW' may be either the child of
+ * 'HDAS' or the grandchild of 'HDAS'. So let's go through
+ * the ACPI from 'HDAS' at max depth of 2 to find the 'SNDW'
+ * device.
+ */
status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
- parent_handle, 1,
+ parent_handle, 2,
sdw_intel_acpi_cb,
NULL, info, NULL);
if (ACPI_FAILURE(status) || info->handle == NULL)
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
index ea20236f35db..9a678b5cf285 100644
--- a/sound/pci/cmipci.c
+++ b/sound/pci/cmipci.c
@@ -3218,7 +3218,6 @@ static int snd_cmipci_probe(struct pci_dev *pci,
{
static int dev;
struct snd_card *card;
- struct cmipci *cm;
int err;
if (dev >= SNDRV_CARDS)
@@ -3229,10 +3228,9 @@ static int snd_cmipci_probe(struct pci_dev *pci,
}
err = snd_devm_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
- sizeof(*cm), &card);
+ sizeof(struct cmipci), &card);
if (err < 0)
return err;
- cm = card->private_data;
switch (pci->device) {
case PCI_DEVICE_ID_CMEDIA_CM8738:
diff --git a/sound/pci/ctxfi/ctamixer.c b/sound/pci/ctxfi/ctamixer.c
index da6e6350ceaf..d074727c3e21 100644
--- a/sound/pci/ctxfi/ctamixer.c
+++ b/sound/pci/ctxfi/ctamixer.c
@@ -23,16 +23,15 @@
#define BLANK_SLOT 4094
-static int amixer_master(struct rsc *rsc)
+static void amixer_master(struct rsc *rsc)
{
rsc->conj = 0;
- return rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0];
+ rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0];
}
-static int amixer_next_conj(struct rsc *rsc)
+static void amixer_next_conj(struct rsc *rsc)
{
rsc->conj++;
- return container_of(rsc, struct amixer, rsc)->idx[rsc->conj];
}
static int amixer_index(const struct rsc *rsc)
@@ -331,16 +330,15 @@ int amixer_mgr_destroy(struct amixer_mgr *amixer_mgr)
/* SUM resource management */
-static int sum_master(struct rsc *rsc)
+static void sum_master(struct rsc *rsc)
{
rsc->conj = 0;
- return rsc->idx = container_of(rsc, struct sum, rsc)->idx[0];
+ rsc->idx = container_of(rsc, struct sum, rsc)->idx[0];
}
-static int sum_next_conj(struct rsc *rsc)
+static void sum_next_conj(struct rsc *rsc)
{
rsc->conj++;
- return container_of(rsc, struct sum, rsc)->idx[rsc->conj];
}
static int sum_index(const struct rsc *rsc)
diff --git a/sound/pci/ctxfi/ctdaio.c b/sound/pci/ctxfi/ctdaio.c
index f589da045342..7fc720046ce2 100644
--- a/sound/pci/ctxfi/ctdaio.c
+++ b/sound/pci/ctxfi/ctdaio.c
@@ -51,12 +51,12 @@ static const struct daio_rsc_idx idx_20k2[NUM_DAIOTYP] = {
[SPDIFIO] = {.left = 0x05, .right = 0x85},
};
-static int daio_master(struct rsc *rsc)
+static void daio_master(struct rsc *rsc)
{
/* Actually, this is not the resource index of DAIO.
* For DAO, it is the input mapper index. And, for DAI,
* it is the output time-slot index. */
- return rsc->conj = rsc->idx;
+ rsc->conj = rsc->idx;
}
static int daio_index(const struct rsc *rsc)
@@ -64,19 +64,19 @@ static int daio_index(const struct rsc *rsc)
return rsc->conj;
}
-static int daio_out_next_conj(struct rsc *rsc)
+static void daio_out_next_conj(struct rsc *rsc)
{
- return rsc->conj += 2;
+ rsc->conj += 2;
}
-static int daio_in_next_conj_20k1(struct rsc *rsc)
+static void daio_in_next_conj_20k1(struct rsc *rsc)
{
- return rsc->conj += 0x200;
+ rsc->conj += 0x200;
}
-static int daio_in_next_conj_20k2(struct rsc *rsc)
+static void daio_in_next_conj_20k2(struct rsc *rsc)
{
- return rsc->conj += 0x100;
+ rsc->conj += 0x100;
}
static const struct rsc_ops daio_out_rsc_ops = {
diff --git a/sound/pci/ctxfi/ctresource.c b/sound/pci/ctxfi/ctresource.c
index 81ad26934518..be1d3e61309c 100644
--- a/sound/pci/ctxfi/ctresource.c
+++ b/sound/pci/ctxfi/ctresource.c
@@ -109,18 +109,17 @@ static int audio_ring_slot(const struct rsc *rsc)
return (rsc->conj << 4) + offset_in_audio_slot_block[rsc->type];
}
-static int rsc_next_conj(struct rsc *rsc)
+static void rsc_next_conj(struct rsc *rsc)
{
unsigned int i;
for (i = 0; (i < 8) && (!(rsc->msr & (0x1 << i))); )
i++;
rsc->conj += (AUDIO_SLOT_BLOCK_NUM >> i);
- return rsc->conj;
}
-static int rsc_master(struct rsc *rsc)
+static void rsc_master(struct rsc *rsc)
{
- return rsc->conj = rsc->idx;
+ rsc->conj = rsc->idx;
}
static const struct rsc_ops rsc_generic_ops = {
diff --git a/sound/pci/ctxfi/ctresource.h b/sound/pci/ctxfi/ctresource.h
index fdbfd808816d..58553bda44f4 100644
--- a/sound/pci/ctxfi/ctresource.h
+++ b/sound/pci/ctxfi/ctresource.h
@@ -39,8 +39,8 @@ struct rsc {
};
struct rsc_ops {
- int (*master)(struct rsc *rsc); /* Move to master resource */
- int (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */
+ void (*master)(struct rsc *rsc); /* Move to master resource */
+ void (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */
int (*index)(const struct rsc *rsc); /* Return the index of resource */
/* Return the output slot number */
int (*output_slot)(const struct rsc *rsc);
diff --git a/sound/pci/ctxfi/ctsrc.c b/sound/pci/ctxfi/ctsrc.c
index bd4697b44233..4a94b4708a77 100644
--- a/sound/pci/ctxfi/ctsrc.c
+++ b/sound/pci/ctxfi/ctsrc.c
@@ -590,16 +590,15 @@ int src_mgr_destroy(struct src_mgr *src_mgr)
/* SRCIMP resource manager operations */
-static int srcimp_master(struct rsc *rsc)
+static void srcimp_master(struct rsc *rsc)
{
rsc->conj = 0;
- return rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
+ rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
}
-static int srcimp_next_conj(struct rsc *rsc)
+static void srcimp_next_conj(struct rsc *rsc)
{
rsc->conj++;
- return container_of(rsc, struct srcimp, rsc)->idx[rsc->conj];
}
static int srcimp_index(const struct rsc *rsc)
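Across ctamixer.c, ctdaio.c, ctresource.c and ctsrc.c the master() and next_conj() callbacks become void. Their return values were never used by callers, and next_conj() computed that value by reading idx[rsc->conj] after the increment, which can land one element past the index array, so dropping the read removes an out-of-range access as well as dead code. Callers keep roughly this shape, taking the current position from index()/the slot helpers rather than from next_conj() (use_slot() is a hypothetical consumer):

	rsc->ops->master(rsc);				/* rewind to the master resource */
	for (i = 0; i < rsc->msr; i++) {
		use_slot(rsc->ops->output_slot(rsc));
		rsc->ops->next_conj(rsc);		/* advance; no return value needed */
	}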
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index fe51163f2d82..1b46b599a5cf 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -335,7 +335,10 @@ enum {
((pci)->device == 0x0c0c) || \
((pci)->device == 0x0d0c) || \
((pci)->device == 0x160c) || \
- ((pci)->device == 0x490d))
+ ((pci)->device == 0x490d) || \
+ ((pci)->device == 0x4f90) || \
+ ((pci)->device == 0x4f91) || \
+ ((pci)->device == 0x4f92))
#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
@@ -2473,6 +2476,13 @@ static const struct pci_device_id azx_ids[] = {
/* DG1 */
{ PCI_DEVICE(0x8086, 0x490d),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ /* DG2 */
+ { PCI_DEVICE(0x8086, 0x4f90),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ { PCI_DEVICE(0x8086, 0x4f91),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ { PCI_DEVICE(0x8086, 0x4f92),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
/* Alderlake-S */
{ PCI_DEVICE(0x8086, 0x7ad0),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index ea8ab8b43337..d22c96eb2f8f 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -438,6 +438,15 @@ int snd_hda_codec_set_pin_target(struct hda_codec *codec, hda_nid_t nid,
#define for_each_hda_codec_node(nid, codec) \
for ((nid) = (codec)->core.start_nid; (nid) < (codec)->core.end_nid; (nid)++)
+/* Set the codec power_state flag to indicate to allow unsol event handling;
+ * see hda_codec_unsol_event() in hda_bind.c. Calling this might confuse the
+ * state tracking, so use with care.
+ */
+static inline void snd_hda_codec_allow_unsol_events(struct hda_codec *codec)
+{
+ codec->core.dev.power.power_state = PMSG_ON;
+}
+
/*
* get widget capabilities
*/
diff --git a/sound/pci/hda/patch_cs8409.c b/sound/pci/hda/patch_cs8409.c
index 31ff11ab868e..039b9f2f8e94 100644
--- a/sound/pci/hda/patch_cs8409.c
+++ b/sound/pci/hda/patch_cs8409.c
@@ -750,6 +750,11 @@ static void cs42l42_resume(struct sub_codec *cs42l42)
if (cs42l42->full_scale_vol)
cs8409_i2c_write(cs42l42, 0x2001, 0x01);
+ /* we have to explicitly allow unsol event handling even during the
+ * resume phase so that the jack event is processed properly
+ */
+ snd_hda_codec_allow_unsol_events(cs42l42->codec);
+
cs42l42_enable_jack_detect(cs42l42);
}
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 65d2c5539919..ffcde7409d2a 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -2947,7 +2947,8 @@ static int parse_intel_hdmi(struct hda_codec *codec)
/* Intel Haswell and onwards; audio component with eld notifier */
static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
- const int *port_map, int port_num, int dev_num)
+ const int *port_map, int port_num, int dev_num,
+ bool send_silent_stream)
{
struct hdmi_spec *spec;
int err;
@@ -2980,7 +2981,7 @@ static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
* Enable silent stream feature, if it is enabled via
* module param or Kconfig option
*/
- if (enable_silent_stream)
+ if (send_silent_stream)
spec->send_silent_stream = true;
return parse_intel_hdmi(codec);
@@ -2988,12 +2989,18 @@ static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
static int patch_i915_hsw_hdmi(struct hda_codec *codec)
{
- return intel_hsw_common_init(codec, 0x08, NULL, 0, 3);
+ return intel_hsw_common_init(codec, 0x08, NULL, 0, 3,
+ enable_silent_stream);
}
static int patch_i915_glk_hdmi(struct hda_codec *codec)
{
- return intel_hsw_common_init(codec, 0x0b, NULL, 0, 3);
+ /*
+ * Silent stream calls audio component .get_power() from
+ * .pin_eld_notify(). On GLK this will deadlock in i915 due
+ * to the audio vs. CDCLK workaround.
+ */
+ return intel_hsw_common_init(codec, 0x0b, NULL, 0, 3, false);
}
static int patch_i915_icl_hdmi(struct hda_codec *codec)
@@ -3004,7 +3011,8 @@ static int patch_i915_icl_hdmi(struct hda_codec *codec)
*/
static const int map[] = {0x0, 0x4, 0x6, 0x8, 0xa, 0xb};
- return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 3);
+ return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 3,
+ enable_silent_stream);
}
static int patch_i915_tgl_hdmi(struct hda_codec *codec)
@@ -3016,7 +3024,8 @@ static int patch_i915_tgl_hdmi(struct hda_codec *codec)
static const int map[] = {0x4, 0x6, 0x8, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
int ret;
- ret = intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 4);
+ ret = intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 4,
+ enable_silent_stream);
if (!ret) {
struct hdmi_spec *spec = codec->spec;
@@ -4380,10 +4389,11 @@ HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI", patch_i915_icl_hdmi),
HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI", patch_i915_tgl_hdmi),
HDA_CODEC_ENTRY(0x80862814, "DG1 HDMI", patch_i915_tgl_hdmi),
HDA_CODEC_ENTRY(0x80862815, "Alderlake HDMI", patch_i915_tgl_hdmi),
-HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_tgl_hdmi),
HDA_CODEC_ENTRY(0x80862816, "Rocketlake HDMI", patch_i915_tgl_hdmi),
+HDA_CODEC_ENTRY(0x80862819, "DG2 HDMI", patch_i915_tgl_hdmi),
HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi),
HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI", patch_i915_icl_hdmi),
+HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_tgl_hdmi),
HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi),
HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_i915_byt_hdmi),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 2f1727faec69..28255e752c4a 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6503,22 +6503,64 @@ static void alc287_fixup_legion_15imhg05_speakers(struct hda_codec *codec,
/* for alc285_fixup_ideapad_s740_coef() */
#include "ideapad_s740_helper.c"
-static void alc256_fixup_tongfang_reset_persistent_settings(struct hda_codec *codec,
- const struct hda_fixup *fix,
- int action)
+static const struct coef_fw alc256_fixup_set_coef_defaults_coefs[] = {
+ WRITE_COEF(0x10, 0x0020), WRITE_COEF(0x24, 0x0000),
+ WRITE_COEF(0x26, 0x0000), WRITE_COEF(0x29, 0x3000),
+ WRITE_COEF(0x37, 0xfe05), WRITE_COEF(0x45, 0x5089),
+ {}
+};
+
+static void alc256_fixup_set_coef_defaults(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
{
/*
- * A certain other OS sets these coeffs to different values. On at least one TongFang
- * barebone these settings might survive even a cold reboot. So to restore a clean slate the
- * values are explicitly reset to default here. Without this, the external microphone is
- * always in a plugged-in state, while the internal microphone is always in an unplugged
- * state, breaking the ability to use the internal microphone.
- */
- alc_write_coef_idx(codec, 0x24, 0x0000);
- alc_write_coef_idx(codec, 0x26, 0x0000);
- alc_write_coef_idx(codec, 0x29, 0x3000);
- alc_write_coef_idx(codec, 0x37, 0xfe05);
- alc_write_coef_idx(codec, 0x45, 0x5089);
+ * A certain other OS sets these coeffs to different values. On at least
+ * one TongFang barebone these settings might survive even a cold
+ * reboot. So to restore a clean slate the values are explicitly reset
+ * to default here. Without this, the external microphone is always in a
+ * plugged-in state, while the internal microphone is always in an
+ * unplugged state, breaking the ability to use the internal microphone.
+ */
+ alc_process_coef_fw(codec, alc256_fixup_set_coef_defaults_coefs);
+}
+
+static const struct coef_fw alc233_fixup_no_audio_jack_coefs[] = {
+ WRITE_COEF(0x1a, 0x9003), WRITE_COEF(0x1b, 0x0e2b), WRITE_COEF(0x37, 0xfe06),
+ WRITE_COEF(0x38, 0x4981), WRITE_COEF(0x45, 0xd489), WRITE_COEF(0x46, 0x0074),
+ WRITE_COEF(0x49, 0x0149),
+ {}
+};
+
+static void alc233_fixup_no_audio_jack(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ /*
+ * The audio jack input and output is not detected on the ASRock NUC Box
+ * 1100 series when cold booting without this fix. Warm rebooting from a
+ * certain other OS makes the audio functional, as COEF settings are
+ * preserved in this case. This fix sets these altered COEF values as
+ * the default.
+ */
+ alc_process_coef_fw(codec, alc233_fixup_no_audio_jack_coefs);
+}
+
+static void alc256_fixup_mic_no_presence_and_resume(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ /*
+ * The Clevo NJ51CU comes either with the ALC293 or the ALC256 codec,
+ * but uses the 0x8686 subproduct id in both cases. The ALC256 codec
+ * needs an additional quirk for sound working after suspend and resume.
+ */
+ if (codec->core.vendor_id == 0x10ec0256) {
+ alc_update_coef_idx(codec, 0x10, 1<<9, 0);
+ snd_hda_codec_set_pincfg(codec, 0x19, 0x04a11120);
+ } else {
+ snd_hda_codec_set_pincfg(codec, 0x1a, 0x04a1113c);
+ }
}
enum {
@@ -6738,8 +6780,10 @@ enum {
ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE,
ALC287_FIXUP_YOGA7_14ITL_SPEAKERS,
ALC287_FIXUP_13S_GEN2_SPEAKERS,
- ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS,
+ ALC256_FIXUP_SET_COEF_DEFAULTS,
ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
+ ALC233_FIXUP_NO_AUDIO_JACK,
+ ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -8443,9 +8487,9 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC269_FIXUP_HEADSET_MODE,
},
- [ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS] = {
+ [ALC256_FIXUP_SET_COEF_DEFAULTS] = {
.type = HDA_FIXUP_FUNC,
- .v.func = alc256_fixup_tongfang_reset_persistent_settings,
+ .v.func = alc256_fixup_set_coef_defaults,
},
[ALC245_FIXUP_HP_GPIO_LED] = {
.type = HDA_FIXUP_FUNC,
@@ -8460,6 +8504,16 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC,
},
+ [ALC233_FIXUP_NO_AUDIO_JACK] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc233_fixup_no_audio_jack,
+ },
+ [ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc256_fixup_mic_no_presence_and_resume,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -8630,6 +8684,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
+ SND_PCI_QUIRK(0x103c, 0x860f, "HP ZBook 15 G6", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x861f, "HP Elite Dragonfly G1", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
@@ -8639,6 +8694,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8728, "HP EliteBook 840 G7", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8735, "HP ProBook 435 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
@@ -8674,6 +8730,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x103c, 0x88d0, "HP Pavilion 15-eh1xxx (mainboard 88D0)", ALC287_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -8798,7 +8855,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0x8562, "Clevo NH[57][0-9]RZ[Q]", ALC269_FIXUP_DMIC),
SND_PCI_QUIRK(0x1558, 0x8668, "Clevo NP50B[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8680, "Clevo NJ50LU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1558, 0x8686, "Clevo NH50[CZ]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x8686, "Clevo NH50[CZ]U", ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME),
SND_PCI_QUIRK(0x1558, 0x8a20, "Clevo NH55DCQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8a51, "Clevo NH70RCQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8d50, "Clevo NH55RCQ-M", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
@@ -8894,6 +8951,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+ SND_PCI_QUIRK(0x1849, 0x1233, "ASRock NUC Box 1100", ALC233_FIXUP_NO_AUDIO_JACK),
SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20),
SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI),
@@ -8901,7 +8959,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
- SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS),
+ SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_SET_COEF_DEFAULTS),
SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
@@ -9091,6 +9149,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"},
{.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"},
{.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"},
+ {.id = ALC285_FIXUP_HP_GPIO_AMP_INIT, .name = "alc285-hp-amp-init"},
{}
};
#define ALC225_STANDARD_PINS \
@@ -10203,6 +10262,27 @@ static void alc671_fixup_hp_headset_mic2(struct hda_codec *codec,
}
}
+static void alc897_hp_automute_hook(struct hda_codec *codec,
+ struct hda_jack_callback *jack)
+{
+ struct alc_spec *spec = codec->spec;
+ int vref;
+
+ snd_hda_gen_hp_automute(codec, jack);
+ vref = spec->gen.hp_jack_present ? (PIN_HP | AC_PINCTL_VREF_100) : PIN_HP;
+ snd_hda_codec_write(codec, 0x1b, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
+ vref);
+}
+
+static void alc897_fixup_lenovo_headset_mic(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct alc_spec *spec = codec->spec;
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ spec->gen.hp_automute_hook = alc897_hp_automute_hook;
+ }
+}
+
static const struct coef_fw alc668_coefs[] = {
WRITE_COEF(0x01, 0xbebe), WRITE_COEF(0x02, 0xaaaa), WRITE_COEF(0x03, 0x0),
WRITE_COEF(0x04, 0x0180), WRITE_COEF(0x06, 0x0), WRITE_COEF(0x07, 0x0f80),
@@ -10283,6 +10363,8 @@ enum {
ALC668_FIXUP_ASUS_NO_HEADSET_MIC,
ALC668_FIXUP_HEADSET_MIC,
ALC668_FIXUP_MIC_DET_COEF,
+ ALC897_FIXUP_LENOVO_HEADSET_MIC,
+ ALC897_FIXUP_HEADSET_MIC_PIN,
};
static const struct hda_fixup alc662_fixups[] = {
@@ -10689,6 +10771,19 @@ static const struct hda_fixup alc662_fixups[] = {
{}
},
},
+ [ALC897_FIXUP_LENOVO_HEADSET_MIC] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc897_fixup_lenovo_headset_mic,
+ },
+ [ALC897_FIXUP_HEADSET_MIC_PIN] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x1a, 0x03a11050 },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MIC
+ },
};
static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -10733,6 +10828,10 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
SND_PCI_QUIRK(0x14cd, 0x5003, "USI", ALC662_FIXUP_USI_HEADSET_MODE),
SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC662_FIXUP_LENOVO_MULTI_CODECS),
+ SND_PCI_QUIRK(0x17aa, 0x32ca, "Lenovo ThinkCentre M80", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x32f7, "Lenovo ThinkCentre M90", ALC897_FIXUP_HEADSET_MIC_PIN),
SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO),
diff --git a/sound/soc/amd/yc/pci-acp6x.c b/sound/soc/amd/yc/pci-acp6x.c
index 957eeb6fb8e3..7e9a9a9d8ddd 100644
--- a/sound/soc/amd/yc/pci-acp6x.c
+++ b/sound/soc/amd/yc/pci-acp6x.c
@@ -146,10 +146,11 @@ static int snd_acp6x_probe(struct pci_dev *pci,
{
struct acp6x_dev_data *adata;
struct platform_device_info pdevinfo[ACP6x_DEVS];
- int ret, index;
+ int index = 0;
int val = 0x00;
u32 addr;
unsigned int irqflags;
+ int ret;
irqflags = IRQF_SHARED;
/* Yellow Carp device check */
diff --git a/sound/soc/codecs/cs35l41-spi.c b/sound/soc/codecs/cs35l41-spi.c
index 90a921f726c3..3fa99741779a 100644
--- a/sound/soc/codecs/cs35l41-spi.c
+++ b/sound/soc/codecs/cs35l41-spi.c
@@ -42,34 +42,6 @@ static const struct spi_device_id cs35l41_id_spi[] = {
MODULE_DEVICE_TABLE(spi, cs35l41_id_spi);
-static void cs35l41_spi_otp_setup(struct cs35l41_private *cs35l41,
- bool is_pre_setup, unsigned int *freq)
-{
- struct spi_device *spi;
- u32 orig_spi_freq;
-
- spi = to_spi_device(cs35l41->dev);
-
- if (!spi) {
- dev_err(cs35l41->dev, "%s: No SPI device\n", __func__);
- return;
- }
-
- if (is_pre_setup) {
- orig_spi_freq = spi->max_speed_hz;
- if (orig_spi_freq > CS35L41_SPI_MAX_FREQ_OTP) {
- spi->max_speed_hz = CS35L41_SPI_MAX_FREQ_OTP;
- spi_setup(spi);
- }
- *freq = orig_spi_freq;
- } else {
- if (spi->max_speed_hz != *freq) {
- spi->max_speed_hz = *freq;
- spi_setup(spi);
- }
- }
-}
-
static int cs35l41_spi_probe(struct spi_device *spi)
{
const struct regmap_config *regmap_config = &cs35l41_regmap_spi;
@@ -81,6 +53,9 @@ static int cs35l41_spi_probe(struct spi_device *spi)
if (!cs35l41)
return -ENOMEM;
+ spi->max_speed_hz = CS35L41_SPI_MAX_FREQ;
+ spi_setup(spi);
+
spi_set_drvdata(spi, cs35l41);
cs35l41->regmap = devm_regmap_init_spi(spi, regmap_config);
if (IS_ERR(cs35l41->regmap)) {
@@ -91,7 +66,6 @@ static int cs35l41_spi_probe(struct spi_device *spi)
cs35l41->dev = &spi->dev;
cs35l41->irq = spi->irq;
- cs35l41->otp_setup = cs35l41_spi_otp_setup;
return cs35l41_probe(cs35l41, pdata);
}
diff --git a/sound/soc/codecs/cs35l41.c b/sound/soc/codecs/cs35l41.c
index 94ed21d7676f..9c4d481f7614 100644
--- a/sound/soc/codecs/cs35l41.c
+++ b/sound/soc/codecs/cs35l41.c
@@ -302,7 +302,6 @@ static int cs35l41_otp_unpack(void *data)
const struct cs35l41_otp_packed_element_t *otp_map;
struct cs35l41_private *cs35l41 = data;
int bit_offset, word_offset, ret, i;
- unsigned int orig_spi_freq;
unsigned int bit_sum = 8;
u32 otp_val, otp_id_reg;
u32 *otp_mem;
@@ -326,9 +325,6 @@ static int cs35l41_otp_unpack(void *data)
goto err_otp_unpack;
}
- if (cs35l41->otp_setup)
- cs35l41->otp_setup(cs35l41, true, &orig_spi_freq);
-
ret = regmap_bulk_read(cs35l41->regmap, CS35L41_OTP_MEM0, otp_mem,
CS35L41_OTP_SIZE_WORDS);
if (ret < 0) {
@@ -336,9 +332,6 @@ static int cs35l41_otp_unpack(void *data)
goto err_otp_unpack;
}
- if (cs35l41->otp_setup)
- cs35l41->otp_setup(cs35l41, false, &orig_spi_freq);
-
otp_map = otp_map_match->map;
bit_offset = otp_map_match->bit_offset;
@@ -612,6 +605,12 @@ static const struct snd_soc_dapm_widget cs35l41_dapm_widgets[] = {
SND_SOC_DAPM_AIF_OUT("ASPTX3", NULL, 0, CS35L41_SP_ENABLES, 2, 0),
SND_SOC_DAPM_AIF_OUT("ASPTX4", NULL, 0, CS35L41_SP_ENABLES, 3, 0),
+ SND_SOC_DAPM_SIGGEN("VSENSE"),
+ SND_SOC_DAPM_SIGGEN("ISENSE"),
+ SND_SOC_DAPM_SIGGEN("VP"),
+ SND_SOC_DAPM_SIGGEN("VBST"),
+ SND_SOC_DAPM_SIGGEN("TEMP"),
+
SND_SOC_DAPM_ADC("VMON ADC", NULL, CS35L41_PWR_CTRL2, 12, 0),
SND_SOC_DAPM_ADC("IMON ADC", NULL, CS35L41_PWR_CTRL2, 13, 0),
SND_SOC_DAPM_ADC("VPMON ADC", NULL, CS35L41_PWR_CTRL2, 8, 0),
@@ -623,12 +622,6 @@ static const struct snd_soc_dapm_widget cs35l41_dapm_widgets[] = {
cs35l41_main_amp_event,
SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_POST_PMU),
- SND_SOC_DAPM_INPUT("VP"),
- SND_SOC_DAPM_INPUT("VBST"),
- SND_SOC_DAPM_INPUT("ISENSE"),
- SND_SOC_DAPM_INPUT("VSENSE"),
- SND_SOC_DAPM_INPUT("TEMP"),
-
SND_SOC_DAPM_MUX("ASP TX1 Source", SND_SOC_NOPM, 0, 0, &asp_tx1_mux),
SND_SOC_DAPM_MUX("ASP TX2 Source", SND_SOC_NOPM, 0, 0, &asp_tx2_mux),
SND_SOC_DAPM_MUX("ASP TX3 Source", SND_SOC_NOPM, 0, 0, &asp_tx3_mux),
@@ -674,8 +667,8 @@ static const struct snd_soc_dapm_route cs35l41_audio_map[] = {
{"VMON ADC", NULL, "VSENSE"},
{"IMON ADC", NULL, "ISENSE"},
{"VPMON ADC", NULL, "VP"},
- {"TEMPMON ADC", NULL, "TEMP"},
{"VBSTMON ADC", NULL, "VBST"},
+ {"TEMPMON ADC", NULL, "TEMP"},
{"ASPRX1", NULL, "AMP Playback"},
{"ASPRX2", NULL, "AMP Playback"},
diff --git a/sound/soc/codecs/cs35l41.h b/sound/soc/codecs/cs35l41.h
index 6cffe8a55beb..48485b08a6f1 100644
--- a/sound/soc/codecs/cs35l41.h
+++ b/sound/soc/codecs/cs35l41.h
@@ -726,7 +726,7 @@
#define CS35L41_FS2_WINDOW_MASK 0x00FFF800
#define CS35L41_FS2_WINDOW_SHIFT 12
-#define CS35L41_SPI_MAX_FREQ_OTP 4000000
+#define CS35L41_SPI_MAX_FREQ 4000000
#define CS35L41_RX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
#define CS35L41_TX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
@@ -764,8 +764,6 @@ struct cs35l41_private {
int irq;
/* GPIO for /RST */
struct gpio_desc *reset_gpio;
- void (*otp_setup)(struct cs35l41_private *cs35l41, bool is_pre_setup,
- unsigned int *freq);
};
int cs35l41_probe(struct cs35l41_private *cs35l41,
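With the OTP-specific reclocking hook removed, the SPI bus is simply capped once at probe time to the same 4 MHz limit (the macro is renamed from CS35L41_SPI_MAX_FREQ_OTP to CS35L41_SPI_MAX_FREQ to match), so OTP reads no longer need the pre/post setup dance:

	spi->max_speed_hz = CS35L41_SPI_MAX_FREQ;	/* 4 MHz for all transfers */
	spi_setup(spi);

This trades a slightly lower bus rate for regular register access against a much simpler probe and unpack path.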
diff --git a/sound/soc/codecs/lpass-rx-macro.c b/sound/soc/codecs/lpass-rx-macro.c
index 2bed5cf229be..aec5127260fd 100644
--- a/sound/soc/codecs/lpass-rx-macro.c
+++ b/sound/soc/codecs/lpass-rx-macro.c
@@ -2188,7 +2188,7 @@ static int rx_macro_config_classh(struct snd_soc_component *component,
snd_soc_component_update_bits(component,
CDC_RX_CLSH_DECAY_CTRL,
CDC_RX_CLSH_DECAY_RATE_MASK, 0x0);
- snd_soc_component_update_bits(component,
+ snd_soc_component_write_field(component,
CDC_RX_RX1_RX_PATH_CFG0,
CDC_RX_RXn_CLSH_EN_MASK, 0x1);
break;
diff --git a/sound/soc/codecs/rk817_codec.c b/sound/soc/codecs/rk817_codec.c
index 943d7d933e81..03f24edfe4f6 100644
--- a/sound/soc/codecs/rk817_codec.c
+++ b/sound/soc/codecs/rk817_codec.c
@@ -539,3 +539,4 @@ module_platform_driver(rk817_codec_driver);
MODULE_DESCRIPTION("ASoC RK817 codec driver");
MODULE_AUTHOR("binyuan <kevan.lan@rock-chips.com>");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:rk817-codec");
diff --git a/sound/soc/codecs/rt1011.c b/sound/soc/codecs/rt1011.c
index 297af7ff824c..b62301a6281f 100644
--- a/sound/soc/codecs/rt1011.c
+++ b/sound/soc/codecs/rt1011.c
@@ -1311,13 +1311,54 @@ static int rt1011_r0_load_info(struct snd_kcontrol *kcontrol,
.put = rt1011_r0_load_mode_put \
}
-static const char * const rt1011_i2s_ref_texts[] = {
- "Left Channel", "Right Channel"
+static const char * const rt1011_i2s_ref[] = {
+ "None", "Left Channel", "Right Channel"
};
-static SOC_ENUM_SINGLE_DECL(rt1011_i2s_ref_enum,
- RT1011_TDM1_SET_1, 7,
- rt1011_i2s_ref_texts);
+static SOC_ENUM_SINGLE_DECL(rt1011_i2s_ref_enum, 0, 0,
+ rt1011_i2s_ref);
+
+static int rt1011_i2s_ref_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component =
+ snd_soc_kcontrol_component(kcontrol);
+ struct rt1011_priv *rt1011 =
+ snd_soc_component_get_drvdata(component);
+
+ rt1011->i2s_ref = ucontrol->value.enumerated.item[0];
+ switch (rt1011->i2s_ref) {
+ case RT1011_I2S_REF_LEFT_CH:
+ regmap_write(rt1011->regmap, RT1011_TDM_TOTAL_SET, 0x0240);
+ regmap_write(rt1011->regmap, RT1011_TDM1_SET_2, 0x8);
+ regmap_write(rt1011->regmap, RT1011_TDM1_SET_1, 0x1022);
+ regmap_write(rt1011->regmap, RT1011_ADCDAT_OUT_SOURCE, 0x4);
+ break;
+ case RT1011_I2S_REF_RIGHT_CH:
+ regmap_write(rt1011->regmap, RT1011_TDM_TOTAL_SET, 0x0240);
+ regmap_write(rt1011->regmap, RT1011_TDM1_SET_2, 0x8);
+ regmap_write(rt1011->regmap, RT1011_TDM1_SET_1, 0x10a2);
+ regmap_write(rt1011->regmap, RT1011_ADCDAT_OUT_SOURCE, 0x4);
+ break;
+ default:
+ dev_info(component->dev, "I2S Reference: Do nothing\n");
+ }
+
+ return 0;
+}
+
+static int rt1011_i2s_ref_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component =
+ snd_soc_kcontrol_component(kcontrol);
+ struct rt1011_priv *rt1011 =
+ snd_soc_component_get_drvdata(component);
+
+ ucontrol->value.enumerated.item[0] = rt1011->i2s_ref;
+
+ return 0;
+}
static const struct snd_kcontrol_new rt1011_snd_controls[] = {
/* I2S Data In Selection */
@@ -1358,7 +1399,8 @@ static const struct snd_kcontrol_new rt1011_snd_controls[] = {
SOC_SINGLE("R0 Temperature", RT1011_STP_INITIAL_RESISTANCE_TEMP,
2, 255, 0),
/* I2S Reference */
- SOC_ENUM("I2S Reference", rt1011_i2s_ref_enum),
+ SOC_ENUM_EXT("I2S Reference", rt1011_i2s_ref_enum,
+ rt1011_i2s_ref_get, rt1011_i2s_ref_put),
};
static int rt1011_is_sys_clk_from_pll(struct snd_soc_dapm_widget *source,
@@ -2017,6 +2059,7 @@ static int rt1011_probe(struct snd_soc_component *component)
schedule_work(&rt1011->cali_work);
+ rt1011->i2s_ref = 0;
rt1011->bq_drc_params = devm_kcalloc(component->dev,
RT1011_ADVMODE_NUM, sizeof(struct rt1011_bq_drc_params *),
GFP_KERNEL);
diff --git a/sound/soc/codecs/rt1011.h b/sound/soc/codecs/rt1011.h
index 68fadc15fa8c..4d6e7492d99c 100644
--- a/sound/soc/codecs/rt1011.h
+++ b/sound/soc/codecs/rt1011.h
@@ -654,6 +654,12 @@ enum {
RT1011_AIFS
};
+enum {
+ RT1011_I2S_REF_NONE,
+ RT1011_I2S_REF_LEFT_CH,
+ RT1011_I2S_REF_RIGHT_CH,
+};
+
/* BiQual & DRC related settings */
#define RT1011_BQ_DRC_NUM 128
struct rt1011_bq_drc_params {
@@ -692,6 +698,7 @@ struct rt1011_priv {
unsigned int r0_reg, cali_done;
unsigned int r0_calib, temperature_calib;
int recv_spk_mode;
+ int i2s_ref;
};
#endif /* end of _RT1011_H_ */
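The "I2S Reference" control stops being a thin wrapper around bit 7 of RT1011_TDM1_SET_1: it becomes a virtual enum (register and shift of 0) whose state lives in rt1011->i2s_ref, with the put handler programming the full TDM/ADCDAT register sequence for the selected channel and a new "None" choice as the default. The generic virtual-enum pattern, with hypothetical names, looks like:

	static const char * const example_texts[] = { "None", "Left", "Right" };
	static SOC_ENUM_SINGLE_DECL(example_enum, 0, 0, example_texts);

	static const struct snd_kcontrol_new example_controls[] = {
		/* get/put operate on driver state, not on a codec register */
		SOC_ENUM_EXT("Example Reference", example_enum,
			     example_get, example_put),
	};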
diff --git a/sound/soc/codecs/rt5682-i2c.c b/sound/soc/codecs/rt5682-i2c.c
index 983347b65127..20e0f90ea498 100644
--- a/sound/soc/codecs/rt5682-i2c.c
+++ b/sound/soc/codecs/rt5682-i2c.c
@@ -198,6 +198,7 @@ static int rt5682_i2c_probe(struct i2c_client *i2c,
}
mutex_init(&rt5682->calibrate_mutex);
+ mutex_init(&rt5682->jdet_mutex);
rt5682_calibrate(rt5682);
rt5682_apply_patch_list(rt5682, &i2c->dev);
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
index 78b4cb5fb6c8..b34a8542077d 100644
--- a/sound/soc/codecs/rt5682.c
+++ b/sound/soc/codecs/rt5682.c
@@ -48,6 +48,8 @@ static const struct reg_sequence patch_list[] = {
{RT5682_SAR_IL_CMD_6, 0x0110},
{RT5682_CHARGE_PUMP_1, 0x0210},
{RT5682_HP_LOGIC_CTRL_2, 0x0007},
+ {RT5682_SAR_IL_CMD_2, 0xac00},
+ {RT5682_CBJ_CTRL_7, 0x0104},
};
void rt5682_apply_patch_list(struct rt5682_priv *rt5682, struct device *dev)
@@ -927,6 +929,8 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
unsigned int val, count;
if (jack_insert) {
+ snd_soc_dapm_mutex_lock(dapm);
+
snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1,
RT5682_PWR_VREF2 | RT5682_PWR_MB,
RT5682_PWR_VREF2 | RT5682_PWR_MB);
@@ -940,6 +944,10 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
snd_soc_component_update_bits(component,
RT5682_HP_CHARGE_PUMP_1,
RT5682_OSW_L_MASK | RT5682_OSW_R_MASK, 0);
+ rt5682_enable_push_button_irq(component, false);
+ snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
+ RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_LOW);
+ usleep_range(55000, 60000);
snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_HIGH);
@@ -973,6 +981,8 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
snd_soc_component_update_bits(component, RT5682_MICBIAS_2,
RT5682_PWR_CLK25M_MASK | RT5682_PWR_CLK1M_MASK,
RT5682_PWR_CLK25M_PU | RT5682_PWR_CLK1M_PU);
+
+ snd_soc_dapm_mutex_unlock(dapm);
} else {
rt5682_enable_push_button_irq(component, false);
snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
@@ -1092,6 +1102,7 @@ void rt5682_jack_detect_handler(struct work_struct *work)
while (!rt5682->component->card->instantiated)
usleep_range(10000, 15000);
+ mutex_lock(&rt5682->jdet_mutex);
mutex_lock(&rt5682->calibrate_mutex);
val = snd_soc_component_read(rt5682->component, RT5682_AJD1_CTRL)
@@ -1165,6 +1176,7 @@ void rt5682_jack_detect_handler(struct work_struct *work)
}
mutex_unlock(&rt5682->calibrate_mutex);
+ mutex_unlock(&rt5682->jdet_mutex);
}
EXPORT_SYMBOL_GPL(rt5682_jack_detect_handler);
@@ -1514,6 +1526,7 @@ static int rt5682_hp_event(struct snd_soc_dapm_widget *w,
{
struct snd_soc_component *component =
snd_soc_dapm_to_component(w->dapm);
+ struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
@@ -1525,12 +1538,17 @@ static int rt5682_hp_event(struct snd_soc_dapm_widget *w,
RT5682_DEPOP_1, 0x60, 0x60);
snd_soc_component_update_bits(component,
RT5682_DAC_ADC_DIG_VOL1, 0x00c0, 0x0080);
+
+ mutex_lock(&rt5682->jdet_mutex);
+
snd_soc_component_update_bits(component, RT5682_HP_CTRL_2,
RT5682_HP_C2_DAC_L_EN | RT5682_HP_C2_DAC_R_EN,
RT5682_HP_C2_DAC_L_EN | RT5682_HP_C2_DAC_R_EN);
usleep_range(5000, 10000);
snd_soc_component_update_bits(component, RT5682_CHARGE_PUMP_1,
RT5682_CP_SW_SIZE_MASK, RT5682_CP_SW_SIZE_L);
+
+ mutex_unlock(&rt5682->jdet_mutex);
break;
case SND_SOC_DAPM_POST_PMD:
@@ -2844,6 +2862,8 @@ int rt5682_register_dai_clks(struct rt5682_priv *rt5682)
for (i = 0; i < RT5682_DAI_NUM_CLKS; ++i) {
struct clk_init_data init = { };
+ struct clk_parent_data parent_data;
+ const struct clk_hw *parent;
dai_clk_hw = &rt5682->dai_clks_hw[i];
@@ -2851,17 +2871,17 @@ int rt5682_register_dai_clks(struct rt5682_priv *rt5682)
case RT5682_DAI_WCLK_IDX:
/* Make MCLK the parent of WCLK */
if (rt5682->mclk) {
- init.parent_data = &(struct clk_parent_data){
+ parent_data = (struct clk_parent_data){
.fw_name = "mclk",
};
+ init.parent_data = &parent_data;
init.num_parents = 1;
}
break;
case RT5682_DAI_BCLK_IDX:
/* Make WCLK the parent of BCLK */
- init.parent_hws = &(const struct clk_hw *){
- &rt5682->dai_clks_hw[RT5682_DAI_WCLK_IDX]
- };
+ parent = &rt5682->dai_clks_hw[RT5682_DAI_WCLK_IDX];
+ init.parent_hws = &parent;
init.num_parents = 1;
break;
default:
@@ -2942,10 +2962,7 @@ static int rt5682_suspend(struct snd_soc_component *component)
cancel_delayed_work_sync(&rt5682->jack_detect_work);
cancel_delayed_work_sync(&rt5682->jd_check_work);
- if (rt5682->hs_jack && rt5682->jack_type == SND_JACK_HEADSET) {
- snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
- RT5682_MB1_PATH_MASK | RT5682_MB2_PATH_MASK,
- RT5682_CTRL_MB1_REG | RT5682_CTRL_MB2_REG);
+ if (rt5682->hs_jack && (rt5682->jack_type & SND_JACK_HEADSET) == SND_JACK_HEADSET) {
val = snd_soc_component_read(component,
RT5682_CBJ_CTRL_2) & RT5682_JACK_TYPE_MASK;
@@ -2967,10 +2984,17 @@ static int rt5682_suspend(struct snd_soc_component *component)
/* enter SAR ADC power saving mode */
snd_soc_component_update_bits(component, RT5682_SAR_IL_CMD_1,
RT5682_SAR_BUTT_DET_MASK | RT5682_SAR_BUTDET_MODE_MASK |
- RT5682_SAR_BUTDET_RST_MASK | RT5682_SAR_SEL_MB1_MB2_MASK, 0);
+ RT5682_SAR_SEL_MB1_MB2_MASK, 0);
+ usleep_range(5000, 6000);
+ snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
+ RT5682_MB1_PATH_MASK | RT5682_MB2_PATH_MASK,
+ RT5682_CTRL_MB1_REG | RT5682_CTRL_MB2_REG);
+ usleep_range(10000, 12000);
snd_soc_component_update_bits(component, RT5682_SAR_IL_CMD_1,
- RT5682_SAR_BUTT_DET_MASK | RT5682_SAR_BUTDET_MODE_MASK | RT5682_SAR_BUTDET_RST_MASK,
- RT5682_SAR_BUTT_DET_EN | RT5682_SAR_BUTDET_POW_SAV | RT5682_SAR_BUTDET_RST_NORMAL);
+ RT5682_SAR_BUTT_DET_MASK | RT5682_SAR_BUTDET_MODE_MASK,
+ RT5682_SAR_BUTT_DET_EN | RT5682_SAR_BUTDET_POW_SAV);
+ snd_soc_component_update_bits(component, RT5682_HP_CHARGE_PUMP_1,
+ RT5682_OSW_L_MASK | RT5682_OSW_R_MASK, 0);
}
regcache_cache_only(rt5682->regmap, true);
@@ -2988,10 +3012,11 @@ static int rt5682_resume(struct snd_soc_component *component)
regcache_cache_only(rt5682->regmap, false);
regcache_sync(rt5682->regmap);
- if (rt5682->hs_jack && rt5682->jack_type == SND_JACK_HEADSET) {
+ if (rt5682->hs_jack && (rt5682->jack_type & SND_JACK_HEADSET) == SND_JACK_HEADSET) {
snd_soc_component_update_bits(component, RT5682_SAR_IL_CMD_1,
RT5682_SAR_BUTDET_MODE_MASK | RT5682_SAR_SEL_MB1_MB2_MASK,
RT5682_SAR_BUTDET_POW_NORM | RT5682_SAR_SEL_MB1_MB2_AUTO);
+ usleep_range(5000, 6000);
snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
RT5682_MB1_PATH_MASK | RT5682_MB2_PATH_MASK,
RT5682_CTRL_MB1_FSM | RT5682_CTRL_MB2_FSM);
@@ -2999,8 +3024,9 @@ static int rt5682_resume(struct snd_soc_component *component)
RT5682_PWR_CBJ, RT5682_PWR_CBJ);
}
+ rt5682->jack_type = 0;
mod_delayed_work(system_power_efficient_wq,
- &rt5682->jack_detect_work, msecs_to_jiffies(250));
+ &rt5682->jack_detect_work, msecs_to_jiffies(0));
return 0;
}
diff --git a/sound/soc/codecs/rt5682.h b/sound/soc/codecs/rt5682.h
index d93829c35585..c917c76200ea 100644
--- a/sound/soc/codecs/rt5682.h
+++ b/sound/soc/codecs/rt5682.h
@@ -1463,6 +1463,7 @@ struct rt5682_priv {
int jack_type;
int irq_work_delay_time;
+ struct mutex jdet_mutex;
};
extern const char *rt5682_supply_names[RT5682_NUM_SUPPLIES];
diff --git a/sound/soc/codecs/rt5682s.c b/sound/soc/codecs/rt5682s.c
index 470957fcad6b..d49a4f68566d 100644
--- a/sound/soc/codecs/rt5682s.c
+++ b/sound/soc/codecs/rt5682s.c
@@ -2693,6 +2693,8 @@ static int rt5682s_register_dai_clks(struct snd_soc_component *component)
for (i = 0; i < RT5682S_DAI_NUM_CLKS; ++i) {
struct clk_init_data init = { };
+ struct clk_parent_data parent_data;
+ const struct clk_hw *parent;
dai_clk_hw = &rt5682s->dai_clks_hw[i];
@@ -2700,17 +2702,17 @@ static int rt5682s_register_dai_clks(struct snd_soc_component *component)
case RT5682S_DAI_WCLK_IDX:
/* Make MCLK the parent of WCLK */
if (rt5682s->mclk) {
- init.parent_data = &(struct clk_parent_data){
+ parent_data = (struct clk_parent_data){
.fw_name = "mclk",
};
+ init.parent_data = &parent_data;
init.num_parents = 1;
}
break;
case RT5682S_DAI_BCLK_IDX:
/* Make WCLK the parent of BCLK */
- init.parent_hws = &(const struct clk_hw *){
- &rt5682s->dai_clks_hw[RT5682S_DAI_WCLK_IDX]
- };
+ parent = &rt5682s->dai_clks_hw[RT5682S_DAI_WCLK_IDX];
+ init.parent_hws = &parent;
init.num_parents = 1;
break;
default:
diff --git a/sound/soc/codecs/rt9120.c b/sound/soc/codecs/rt9120.c
index f9574980a407..7aa1772a915f 100644
--- a/sound/soc/codecs/rt9120.c
+++ b/sound/soc/codecs/rt9120.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bits.h>
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
@@ -23,9 +24,11 @@
#define RT9120_REG_ERRRPT 0x10
#define RT9120_REG_MSVOL 0x20
#define RT9120_REG_SWRESET 0x40
+#define RT9120_REG_INTERCFG 0x63
#define RT9120_REG_INTERNAL0 0x65
#define RT9120_REG_INTERNAL1 0x69
#define RT9120_REG_UVPOPT 0x6C
+#define RT9120_REG_DIGCFG 0xF8
#define RT9120_VID_MASK GENMASK(15, 8)
#define RT9120_SWRST_MASK BIT(7)
@@ -46,8 +49,10 @@
#define RT9120_CFG_WORDLEN_24 24
#define RT9120_CFG_WORDLEN_32 32
#define RT9120_DVDD_UVSEL_MASK GENMASK(5, 4)
+#define RT9120_AUTOSYNC_MASK BIT(6)
-#define RT9120_VENDOR_ID 0x4200
+#define RT9120_VENDOR_ID 0x42
+#define RT9120S_VENDOR_ID 0x43
#define RT9120_RESET_WAITMS 20
#define RT9120_CHIPON_WAITMS 20
#define RT9120_AMPON_WAITMS 50
@@ -61,9 +66,16 @@
SNDRV_PCM_FMTBIT_S24_LE |\
SNDRV_PCM_FMTBIT_S32_LE)
+enum {
+ CHIP_IDX_RT9120 = 0,
+ CHIP_IDX_RT9120S,
+ CHIP_IDX_MAX
+};
+
struct rt9120_data {
struct device *dev;
struct regmap *regmap;
+ int chip_idx;
};
/* 11bit [min,max,step] = [-103.9375dB, 24dB, 0.0625dB] */
@@ -149,8 +161,12 @@ static int rt9120_codec_probe(struct snd_soc_component *comp)
snd_soc_component_init_regmap(comp, data->regmap);
/* Internal setting */
- snd_soc_component_write(comp, RT9120_REG_INTERNAL1, 0x03);
- snd_soc_component_write(comp, RT9120_REG_INTERNAL0, 0x69);
+ if (data->chip_idx == CHIP_IDX_RT9120S) {
+ snd_soc_component_write(comp, RT9120_REG_INTERCFG, 0xde);
+ snd_soc_component_write(comp, RT9120_REG_INTERNAL0, 0x66);
+ } else
+ snd_soc_component_write(comp, RT9120_REG_INTERNAL0, 0x04);
+
return 0;
}
@@ -201,8 +217,8 @@ static int rt9120_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_component *comp = dai->component;
- unsigned int param_width, param_slot_width;
- int width;
+ unsigned int param_width, param_slot_width, auto_sync;
+ int width, fs;
switch (width = params_width(param)) {
case 16:
@@ -240,6 +256,16 @@ static int rt9120_hw_params(struct snd_pcm_substream *substream,
snd_soc_component_update_bits(comp, RT9120_REG_I2SWL,
RT9120_AUDWL_MASK, param_slot_width);
+
+ fs = width * params_channels(param);
+ /* If fs is divisible by 48, disable auto sync */
+ if (fs % 48 == 0)
+ auto_sync = 0;
+ else
+ auto_sync = RT9120_AUTOSYNC_MASK;
+
+ snd_soc_component_update_bits(comp, RT9120_REG_DIGCFG,
+ RT9120_AUTOSYNC_MASK, auto_sync);
return 0;
}
@@ -279,9 +305,11 @@ static const struct regmap_range rt9120_rd_yes_ranges[] = {
regmap_reg_range(0x20, 0x27),
regmap_reg_range(0x30, 0x38),
regmap_reg_range(0x3A, 0x40),
+ regmap_reg_range(0x63, 0x63),
regmap_reg_range(0x65, 0x65),
regmap_reg_range(0x69, 0x69),
- regmap_reg_range(0x6C, 0x6C)
+ regmap_reg_range(0x6C, 0x6C),
+ regmap_reg_range(0xF8, 0xF8)
};
static const struct regmap_access_table rt9120_rd_table = {
@@ -297,9 +325,11 @@ static const struct regmap_range rt9120_wr_yes_ranges[] = {
regmap_reg_range(0x30, 0x38),
regmap_reg_range(0x3A, 0x3D),
regmap_reg_range(0x40, 0x40),
+ regmap_reg_range(0x63, 0x63),
regmap_reg_range(0x65, 0x65),
regmap_reg_range(0x69, 0x69),
- regmap_reg_range(0x6C, 0x6C)
+ regmap_reg_range(0x6C, 0x6C),
+ regmap_reg_range(0xF8, 0xF8)
};
static const struct regmap_access_table rt9120_wr_table = {
@@ -370,7 +400,7 @@ static int rt9120_reg_write(void *context, unsigned int reg, unsigned int val)
static const struct regmap_config rt9120_regmap_config = {
.reg_bits = 8,
.val_bits = 32,
- .max_register = RT9120_REG_UVPOPT,
+ .max_register = RT9120_REG_DIGCFG,
.reg_read = rt9120_reg_read,
.reg_write = rt9120_reg_write,
@@ -388,8 +418,16 @@ static int rt9120_check_vendor_info(struct rt9120_data *data)
if (ret)
return ret;
- if ((devid & RT9120_VID_MASK) != RT9120_VENDOR_ID) {
- dev_err(data->dev, "DEVID not correct [0x%04x]\n", devid);
+ devid = FIELD_GET(RT9120_VID_MASK, devid);
+ switch (devid) {
+ case RT9120_VENDOR_ID:
+ data->chip_idx = CHIP_IDX_RT9120;
+ break;
+ case RT9120S_VENDOR_ID:
+ data->chip_idx = CHIP_IDX_RT9120S;
+ break;
+ default:
+ dev_err(data->dev, "DEVID not correct [0x%0x]\n", devid);
return -ENODEV;
}
diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c
index 172e79cbe0da..6549e7fef3e3 100644
--- a/sound/soc/codecs/tas2770.c
+++ b/sound/soc/codecs/tas2770.c
@@ -291,11 +291,11 @@ static int tas2770_set_samplerate(struct tas2770_priv *tas2770, int samplerate)
ramp_rate_val = TAS2770_TDM_CFG_REG0_SMP_44_1KHZ |
TAS2770_TDM_CFG_REG0_31_88_2_96KHZ;
break;
- case 19200:
+ case 192000:
ramp_rate_val = TAS2770_TDM_CFG_REG0_SMP_48KHZ |
TAS2770_TDM_CFG_REG0_31_176_4_192KHZ;
break;
- case 17640:
+ case 176400:
ramp_rate_val = TAS2770_TDM_CFG_REG0_SMP_44_1KHZ |
TAS2770_TDM_CFG_REG0_31_176_4_192KHZ;
break;
diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c
index c496b359f2f4..e63c6b723d76 100644
--- a/sound/soc/codecs/wcd934x.c
+++ b/sound/soc/codecs/wcd934x.c
@@ -1896,9 +1896,8 @@ static int wcd934x_hw_params(struct snd_pcm_substream *substream,
}
wcd->dai[dai->id].sconfig.rate = params_rate(params);
- wcd934x_slim_set_hw_params(wcd, &wcd->dai[dai->id], substream->stream);
- return 0;
+ return wcd934x_slim_set_hw_params(wcd, &wcd->dai[dai->id], substream->stream);
}
static int wcd934x_hw_free(struct snd_pcm_substream *substream,
@@ -3257,6 +3256,9 @@ static int wcd934x_compander_set(struct snd_kcontrol *kc,
int value = ucontrol->value.integer.value[0];
int sel;
+ if (wcd->comp_enabled[comp] == value)
+ return 0;
+
wcd->comp_enabled[comp] = value;
sel = value ? WCD934X_HPH_GAIN_SRC_SEL_COMPANDER :
WCD934X_HPH_GAIN_SRC_SEL_REGISTER;
@@ -3280,10 +3282,10 @@ static int wcd934x_compander_set(struct snd_kcontrol *kc,
case COMPANDER_8:
break;
default:
- break;
+ return 0;
}
- return 0;
+ return 1;
}
static int wcd934x_rx_hph_mode_get(struct snd_kcontrol *kc,
@@ -3327,6 +3329,31 @@ static int slim_rx_mux_get(struct snd_kcontrol *kc,
return 0;
}
+static int slim_rx_mux_to_dai_id(int mux)
+{
+ int aif_id;
+
+ switch (mux) {
+ case 1:
+ aif_id = AIF1_PB;
+ break;
+ case 2:
+ aif_id = AIF2_PB;
+ break;
+ case 3:
+ aif_id = AIF3_PB;
+ break;
+ case 4:
+ aif_id = AIF4_PB;
+ break;
+ default:
+ aif_id = -1;
+ break;
+ }
+
+ return aif_id;
+}
+
static int slim_rx_mux_put(struct snd_kcontrol *kc,
struct snd_ctl_elem_value *ucontrol)
{
@@ -3334,43 +3361,59 @@ static int slim_rx_mux_put(struct snd_kcontrol *kc,
struct wcd934x_codec *wcd = dev_get_drvdata(w->dapm->dev);
struct soc_enum *e = (struct soc_enum *)kc->private_value;
struct snd_soc_dapm_update *update = NULL;
+ struct wcd934x_slim_ch *ch, *c;
u32 port_id = w->shift;
+ bool found = false;
+ int mux_idx;
+ int prev_mux_idx = wcd->rx_port_value[port_id];
+ int aif_id;
- if (wcd->rx_port_value[port_id] == ucontrol->value.enumerated.item[0])
- return 0;
+ mux_idx = ucontrol->value.enumerated.item[0];
- wcd->rx_port_value[port_id] = ucontrol->value.enumerated.item[0];
+ if (mux_idx == prev_mux_idx)
+ return 0;
- switch (wcd->rx_port_value[port_id]) {
+ switch (mux_idx) {
case 0:
- list_del_init(&wcd->rx_chs[port_id].list);
- break;
- case 1:
- list_add_tail(&wcd->rx_chs[port_id].list,
- &wcd->dai[AIF1_PB].slim_ch_list);
- break;
- case 2:
- list_add_tail(&wcd->rx_chs[port_id].list,
- &wcd->dai[AIF2_PB].slim_ch_list);
- break;
- case 3:
- list_add_tail(&wcd->rx_chs[port_id].list,
- &wcd->dai[AIF3_PB].slim_ch_list);
+ aif_id = slim_rx_mux_to_dai_id(prev_mux_idx);
+ if (aif_id < 0)
+ return 0;
+
+ list_for_each_entry_safe(ch, c, &wcd->dai[aif_id].slim_ch_list, list) {
+ if (ch->port == port_id + WCD934X_RX_START) {
+ found = true;
+ list_del_init(&ch->list);
+ break;
+ }
+ }
+ if (!found)
+ return 0;
+
break;
- case 4:
- list_add_tail(&wcd->rx_chs[port_id].list,
- &wcd->dai[AIF4_PB].slim_ch_list);
+ case 1 ... 4:
+ aif_id = slim_rx_mux_to_dai_id(mux_idx);
+ if (aif_id < 0)
+ return 0;
+
+ if (list_empty(&wcd->rx_chs[port_id].list)) {
+ list_add_tail(&wcd->rx_chs[port_id].list,
+ &wcd->dai[aif_id].slim_ch_list);
+ } else {
+ dev_err(wcd->dev ,"SLIM_RX%d PORT is busy\n", port_id);
+ return 0;
+ }
break;
+
default:
- dev_err(wcd->dev, "Unknown AIF %d\n",
- wcd->rx_port_value[port_id]);
+ dev_err(wcd->dev, "Unknown AIF %d\n", mux_idx);
goto err;
}
+ wcd->rx_port_value[port_id] = mux_idx;
snd_soc_dapm_mux_update_power(w->dapm, kc, wcd->rx_port_value[port_id],
e, update);
- return 0;
+ return 1;
err:
return -EINVAL;
}
@@ -3816,6 +3859,7 @@ static int slim_tx_mixer_put(struct snd_kcontrol *kc,
struct soc_mixer_control *mixer =
(struct soc_mixer_control *)kc->private_value;
int enable = ucontrol->value.integer.value[0];
+ struct wcd934x_slim_ch *ch, *c;
int dai_id = widget->shift;
int port_id = mixer->shift;
@@ -3823,17 +3867,32 @@ static int slim_tx_mixer_put(struct snd_kcontrol *kc,
if (enable == wcd->tx_port_value[port_id])
return 0;
- wcd->tx_port_value[port_id] = enable;
-
- if (enable)
- list_add_tail(&wcd->tx_chs[port_id].list,
- &wcd->dai[dai_id].slim_ch_list);
- else
- list_del_init(&wcd->tx_chs[port_id].list);
+ if (enable) {
+ if (list_empty(&wcd->tx_chs[port_id].list)) {
+ list_add_tail(&wcd->tx_chs[port_id].list,
+ &wcd->dai[dai_id].slim_ch_list);
+ } else {
+ dev_err(wcd->dev ,"SLIM_TX%d PORT is busy\n", port_id);
+ return 0;
+ }
+ } else {
+ bool found = false;
+
+ list_for_each_entry_safe(ch, c, &wcd->dai[dai_id].slim_ch_list, list) {
+ if (ch->port == port_id) {
+ found = true;
+ list_del_init(&wcd->tx_chs[port_id].list);
+ break;
+ }
+ }
+ if (!found)
+ return 0;
+ }
+ wcd->tx_port_value[port_id] = enable;
snd_soc_dapm_mixer_update_power(widget->dapm, kc, enable, update);
- return 0;
+ return 1;
}
static const struct snd_kcontrol_new aif1_slim_cap_mixer[] = {
diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c
index 52de7d14b139..67151c7770c6 100644
--- a/sound/soc/codecs/wcd938x.c
+++ b/sound/soc/codecs/wcd938x.c
@@ -1174,6 +1174,9 @@ static bool wcd938x_readonly_register(struct device *dev, unsigned int reg)
case WCD938X_DIGITAL_INTR_STATUS_0:
case WCD938X_DIGITAL_INTR_STATUS_1:
case WCD938X_DIGITAL_INTR_STATUS_2:
+ case WCD938X_DIGITAL_INTR_CLEAR_0:
+ case WCD938X_DIGITAL_INTR_CLEAR_1:
+ case WCD938X_DIGITAL_INTR_CLEAR_2:
case WCD938X_DIGITAL_SWR_HM_TEST_0:
case WCD938X_DIGITAL_SWR_HM_TEST_1:
case WCD938X_DIGITAL_EFUSE_T_DATA_0:
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index d4f0d72cbcc8..6cb01a8e08fb 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -617,8 +617,9 @@ static int wm_adsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl)
switch (cs_dsp->fw_ver) {
case 0:
case 1:
- snprintf(name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, "%s %s %x",
- cs_dsp->name, region_name, cs_ctl->alg_region.alg);
+ ret = scnprintf(name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN,
+ "%s %s %x", cs_dsp->name, region_name,
+ cs_ctl->alg_region.alg);
break;
case 2:
ret = scnprintf(name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN,
diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
index 2da4a5fa7a18..564b78f3cdd0 100644
--- a/sound/soc/codecs/wsa881x.c
+++ b/sound/soc/codecs/wsa881x.c
@@ -772,7 +772,8 @@ static int wsa881x_put_pa_gain(struct snd_kcontrol *kc,
usleep_range(1000, 1010);
}
- return 0;
+
+ return 1;
}
static int wsa881x_get_port(struct snd_kcontrol *kcontrol,
@@ -816,15 +817,22 @@ static int wsa881x_set_port(struct snd_kcontrol *kcontrol,
(struct soc_mixer_control *)kcontrol->private_value;
int portidx = mixer->reg;
- if (ucontrol->value.integer.value[0])
+ if (ucontrol->value.integer.value[0]) {
+ if (data->port_enable[portidx])
+ return 0;
+
data->port_enable[portidx] = true;
- else
+ } else {
+ if (!data->port_enable[portidx])
+ return 0;
+
data->port_enable[portidx] = false;
+ }
if (portidx == WSA881X_PORT_BOOST) /* Boost Switch */
wsa881x_boost_ctrl(comp, data->port_enable[portidx]);
- return 0;
+ return 1;
}
static const char * const smart_boost_lvl_text[] = {
diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
index f10496206cee..77219c3f8766 100644
--- a/sound/soc/intel/boards/sof_sdw.c
+++ b/sound/soc/intel/boards/sof_sdw.c
@@ -248,6 +248,75 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
SOF_BT_OFFLOAD_SSP(2) |
SOF_SSP_BT_OFFLOAD_PRESENT),
},
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0AF3"),
+ },
+ /* No Jack */
+ .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+ SOF_SDW_FOUR_SPK),
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B00")
+ },
+ .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+ RT711_JD2 |
+ SOF_SDW_FOUR_SPK),
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B01")
+ },
+ .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+ RT711_JD2 |
+ SOF_SDW_FOUR_SPK),
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B11")
+ },
+ .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+ RT711_JD2 |
+ SOF_SDW_FOUR_SPK),
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B12")
+ },
+ .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+ RT711_JD2 |
+ SOF_SDW_FOUR_SPK),
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B13"),
+ },
+ /* No Jack */
+ .driver_data = (void *)SOF_SDW_TGL_HDMI,
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B29"),
+ },
+ .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+ RT711_JD2 |
+ SOF_SDW_FOUR_SPK),
+ },
{}
};
diff --git a/sound/soc/intel/common/soc-acpi-intel-adl-match.c b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
index 06f503452aa5..b61a778a9d26 100644
--- a/sound/soc/intel/common/soc-acpi-intel-adl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
@@ -74,6 +74,15 @@ static const struct snd_soc_acpi_adr_device rt711_sdca_0_adr[] = {
}
};
+static const struct snd_soc_acpi_adr_device rt711_sdca_2_adr[] = {
+ {
+ .adr = 0x000230025D071101ull,
+ .num_endpoints = 1,
+ .endpoints = &single_endpoint,
+ .name_prefix = "rt711"
+ }
+};
+
static const struct snd_soc_acpi_adr_device rt1316_1_group1_adr[] = {
{
.adr = 0x000131025D131601ull, /* unique ID is set for some reason */
@@ -101,6 +110,24 @@ static const struct snd_soc_acpi_adr_device rt1316_3_group1_adr[] = {
}
};
+static const struct snd_soc_acpi_adr_device rt1316_0_group2_adr[] = {
+ {
+ .adr = 0x000031025D131601ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_l_endpoint,
+ .name_prefix = "rt1316-1"
+ }
+};
+
+static const struct snd_soc_acpi_adr_device rt1316_1_group2_adr[] = {
+ {
+ .adr = 0x000130025D131601ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_r_endpoint,
+ .name_prefix = "rt1316-2"
+ }
+};
+
static const struct snd_soc_acpi_adr_device rt1316_2_single_adr[] = {
{
.adr = 0x000230025D131601ull,
@@ -209,6 +236,63 @@ static const struct snd_soc_acpi_link_adr adl_sdca_3_in_1[] = {
{}
};
+static const struct snd_soc_acpi_link_adr adl_sdw_rt711_link2_rt1316_link01_rt714_link3[] = {
+ {
+ .mask = BIT(2),
+ .num_adr = ARRAY_SIZE(rt711_sdca_2_adr),
+ .adr_d = rt711_sdca_2_adr,
+ },
+ {
+ .mask = BIT(0),
+ .num_adr = ARRAY_SIZE(rt1316_0_group2_adr),
+ .adr_d = rt1316_0_group2_adr,
+ },
+ {
+ .mask = BIT(1),
+ .num_adr = ARRAY_SIZE(rt1316_1_group2_adr),
+ .adr_d = rt1316_1_group2_adr,
+ },
+ {
+ .mask = BIT(3),
+ .num_adr = ARRAY_SIZE(rt714_3_adr),
+ .adr_d = rt714_3_adr,
+ },
+ {}
+};
+
+static const struct snd_soc_acpi_link_adr adl_sdw_rt1316_link12_rt714_link0[] = {
+ {
+ .mask = BIT(1),
+ .num_adr = ARRAY_SIZE(rt1316_1_group1_adr),
+ .adr_d = rt1316_1_group1_adr,
+ },
+ {
+ .mask = BIT(2),
+ .num_adr = ARRAY_SIZE(rt1316_2_group1_adr),
+ .adr_d = rt1316_2_group1_adr,
+ },
+ {
+ .mask = BIT(0),
+ .num_adr = ARRAY_SIZE(rt714_0_adr),
+ .adr_d = rt714_0_adr,
+ },
+ {}
+};
+
+static const struct snd_soc_acpi_link_adr adl_sdw_rt1316_link2_rt714_link3[] = {
+ {
+ .mask = BIT(2),
+ .num_adr = ARRAY_SIZE(rt1316_2_single_adr),
+ .adr_d = rt1316_2_single_adr,
+ },
+ {
+ .mask = BIT(3),
+ .num_adr = ARRAY_SIZE(rt714_3_adr),
+ .adr_d = rt714_3_adr,
+ },
+ {}
+};
+
static const struct snd_soc_acpi_link_adr adl_sdw_rt1316_link2_rt714_link0[] = {
{
.mask = BIT(2),
@@ -340,6 +424,27 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_sdw_machines[] = {
.sof_tplg_filename = "sof-adl-rt711-l0-rt1316-l13-rt714-l2.tplg",
},
{
+ .link_mask = 0xF, /* 4 active links required */
+ .links = adl_sdw_rt711_link2_rt1316_link01_rt714_link3,
+ .drv_name = "sof_sdw",
+ .sof_fw_filename = "sof-adl.ri",
+ .sof_tplg_filename = "sof-adl-rt711-l2-rt1316-l01-rt714-l3.tplg",
+ },
+ {
+ .link_mask = 0xC, /* rt1316 on link2 & rt714 on link3 */
+ .links = adl_sdw_rt1316_link2_rt714_link3,
+ .drv_name = "sof_sdw",
+ .sof_fw_filename = "sof-adl.ri",
+ .sof_tplg_filename = "sof-adl-rt1316-l2-mono-rt714-l3.tplg",
+ },
+ {
+ .link_mask = 0x7, /* rt714 on link0 & two rt1316s on link1 and link2 */
+ .links = adl_sdw_rt1316_link12_rt714_link0,
+ .drv_name = "sof_sdw",
+ .sof_fw_filename = "sof-adl.ri",
+ .sof_tplg_filename = "sof-adl-rt1316-l12-rt714-l0.tplg",
+ },
+ {
.link_mask = 0x5, /* 2 active links required */
.links = adl_sdw_rt1316_link2_rt714_link0,
.drv_name = "sof_sdw",
diff --git a/sound/soc/intel/common/soc-acpi-intel-cml-match.c b/sound/soc/intel/common/soc-acpi-intel-cml-match.c
index b4eb0c97edf1..4eebc79d4b48 100644
--- a/sound/soc/intel/common/soc-acpi-intel-cml-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-cml-match.c
@@ -81,6 +81,12 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cml_machines[] = {
.sof_fw_filename = "sof-cml.ri",
.sof_tplg_filename = "sof-cml-da7219-max98390.tplg",
},
+ {
+ .id = "ESSX8336",
+ .drv_name = "sof-essx8336",
+ .sof_fw_filename = "sof-cml.ri",
+ .sof_tplg_filename = "sof-cml-es8336.tplg",
+ },
{},
};
EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_cml_machines);
diff --git a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
index 6350390414d4..31494930433f 100644
--- a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
+++ b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
@@ -1054,6 +1054,7 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
int irq_id;
struct mtk_base_afe *afe;
struct mt8173_afe_private *afe_priv;
+ struct snd_soc_component *comp_pcm, *comp_hdmi;
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(33));
if (ret)
@@ -1142,23 +1143,55 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
if (ret)
goto err_pm_disable;
- ret = devm_snd_soc_register_component(&pdev->dev,
- &mt8173_afe_pcm_dai_component,
- mt8173_afe_pcm_dais,
- ARRAY_SIZE(mt8173_afe_pcm_dais));
+ comp_pcm = devm_kzalloc(&pdev->dev, sizeof(*comp_pcm), GFP_KERNEL);
+ if (!comp_pcm) {
+ ret = -ENOMEM;
+ goto err_pm_disable;
+ }
+
+ ret = snd_soc_component_initialize(comp_pcm,
+ &mt8173_afe_pcm_dai_component,
+ &pdev->dev);
if (ret)
goto err_pm_disable;
- ret = devm_snd_soc_register_component(&pdev->dev,
- &mt8173_afe_hdmi_dai_component,
- mt8173_afe_hdmi_dais,
- ARRAY_SIZE(mt8173_afe_hdmi_dais));
+#ifdef CONFIG_DEBUG_FS
+ comp_pcm->debugfs_prefix = "pcm";
+#endif
+
+ ret = snd_soc_add_component(comp_pcm,
+ mt8173_afe_pcm_dais,
+ ARRAY_SIZE(mt8173_afe_pcm_dais));
+ if (ret)
+ goto err_pm_disable;
+
+ comp_hdmi = devm_kzalloc(&pdev->dev, sizeof(*comp_hdmi), GFP_KERNEL);
+ if (!comp_hdmi) {
+ ret = -ENOMEM;
+ goto err_pm_disable;
+ }
+
+ ret = snd_soc_component_initialize(comp_hdmi,
+ &mt8173_afe_hdmi_dai_component,
+ &pdev->dev);
if (ret)
goto err_pm_disable;
+#ifdef CONFIG_DEBUG_FS
+ comp_hdmi->debugfs_prefix = "hdmi";
+#endif
+
+ ret = snd_soc_add_component(comp_hdmi,
+ mt8173_afe_hdmi_dais,
+ ARRAY_SIZE(mt8173_afe_hdmi_dais));
+ if (ret)
+ goto err_cleanup_components;
+
dev_info(&pdev->dev, "MT8173 AFE driver initialized.\n");
return 0;
+err_cleanup_components:
+ snd_soc_unregister_component(&pdev->dev);
err_pm_disable:
pm_runtime_disable(&pdev->dev);
return ret;
@@ -1166,6 +1199,8 @@ err_pm_disable:
static int mt8173_afe_pcm_dev_remove(struct platform_device *pdev)
{
+ snd_soc_unregister_component(&pdev->dev);
+
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
mt8173_afe_runtime_suspend(&pdev->dev);
diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650.c b/sound/soc/mediatek/mt8173/mt8173-rt5650.c
index c28ebf891cb0..2cbf679f5c74 100644
--- a/sound/soc/mediatek/mt8173/mt8173-rt5650.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650.c
@@ -30,15 +30,15 @@ static struct mt8173_rt5650_platform_data mt8173_rt5650_priv = {
};
static const struct snd_soc_dapm_widget mt8173_rt5650_widgets[] = {
- SND_SOC_DAPM_SPK("Speaker", NULL),
+ SND_SOC_DAPM_SPK("Ext Spk", NULL),
SND_SOC_DAPM_MIC("Int Mic", NULL),
SND_SOC_DAPM_HP("Headphone", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
};
static const struct snd_soc_dapm_route mt8173_rt5650_routes[] = {
- {"Speaker", NULL, "SPOL"},
- {"Speaker", NULL, "SPOR"},
+ {"Ext Spk", NULL, "SPOL"},
+ {"Ext Spk", NULL, "SPOR"},
{"DMIC L1", NULL, "Int Mic"},
{"DMIC R1", NULL, "Int Mic"},
{"Headphone", NULL, "HPOL"},
@@ -48,7 +48,7 @@ static const struct snd_soc_dapm_route mt8173_rt5650_routes[] = {
};
static const struct snd_kcontrol_new mt8173_rt5650_controls[] = {
- SOC_DAPM_PIN_SWITCH("Speaker"),
+ SOC_DAPM_PIN_SWITCH("Ext Spk"),
SOC_DAPM_PIN_SWITCH("Int Mic"),
SOC_DAPM_PIN_SWITCH("Headphone"),
SOC_DAPM_PIN_SWITCH("Headset Mic"),
diff --git a/sound/soc/meson/aiu-encoder-i2s.c b/sound/soc/meson/aiu-encoder-i2s.c
index 932224552146..67729de41a73 100644
--- a/sound/soc/meson/aiu-encoder-i2s.c
+++ b/sound/soc/meson/aiu-encoder-i2s.c
@@ -18,7 +18,6 @@
#define AIU_RST_SOFT_I2S_FAST BIT(0)
#define AIU_I2S_DAC_CFG_MSB_FIRST BIT(2)
-#define AIU_I2S_MISC_HOLD_EN BIT(2)
#define AIU_CLK_CTRL_I2S_DIV_EN BIT(0)
#define AIU_CLK_CTRL_I2S_DIV GENMASK(3, 2)
#define AIU_CLK_CTRL_AOCLK_INVERT BIT(6)
@@ -36,37 +35,6 @@ static void aiu_encoder_i2s_divider_enable(struct snd_soc_component *component,
enable ? AIU_CLK_CTRL_I2S_DIV_EN : 0);
}
-static void aiu_encoder_i2s_hold(struct snd_soc_component *component,
- bool enable)
-{
- snd_soc_component_update_bits(component, AIU_I2S_MISC,
- AIU_I2S_MISC_HOLD_EN,
- enable ? AIU_I2S_MISC_HOLD_EN : 0);
-}
-
-static int aiu_encoder_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_component *component = dai->component;
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_RESUME:
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- aiu_encoder_i2s_hold(component, false);
- return 0;
-
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- aiu_encoder_i2s_hold(component, true);
- return 0;
-
- default:
- return -EINVAL;
- }
-}
-
static int aiu_encoder_i2s_setup_desc(struct snd_soc_component *component,
struct snd_pcm_hw_params *params)
{
@@ -353,7 +321,6 @@ static void aiu_encoder_i2s_shutdown(struct snd_pcm_substream *substream,
}
const struct snd_soc_dai_ops aiu_encoder_i2s_dai_ops = {
- .trigger = aiu_encoder_i2s_trigger,
.hw_params = aiu_encoder_i2s_hw_params,
.hw_free = aiu_encoder_i2s_hw_free,
.set_fmt = aiu_encoder_i2s_set_fmt,
diff --git a/sound/soc/meson/aiu-fifo-i2s.c b/sound/soc/meson/aiu-fifo-i2s.c
index 2388a2d0b3a6..57e6e7160d2f 100644
--- a/sound/soc/meson/aiu-fifo-i2s.c
+++ b/sound/soc/meson/aiu-fifo-i2s.c
@@ -20,6 +20,8 @@
#define AIU_MEM_I2S_CONTROL_MODE_16BIT BIT(6)
#define AIU_MEM_I2S_BUF_CNTL_INIT BIT(0)
#define AIU_RST_SOFT_I2S_FAST BIT(0)
+#define AIU_I2S_MISC_HOLD_EN BIT(2)
+#define AIU_I2S_MISC_FORCE_LEFT_RIGHT BIT(4)
#define AIU_FIFO_I2S_BLOCK 256
@@ -90,6 +92,10 @@ static int aiu_fifo_i2s_hw_params(struct snd_pcm_substream *substream,
unsigned int val;
int ret;
+ snd_soc_component_update_bits(component, AIU_I2S_MISC,
+ AIU_I2S_MISC_HOLD_EN,
+ AIU_I2S_MISC_HOLD_EN);
+
ret = aiu_fifo_hw_params(substream, params, dai);
if (ret)
return ret;
@@ -117,6 +123,19 @@ static int aiu_fifo_i2s_hw_params(struct snd_pcm_substream *substream,
snd_soc_component_update_bits(component, AIU_MEM_I2S_MASKS,
AIU_MEM_I2S_MASKS_IRQ_BLOCK, val);
+ /*
+ * Most (all?) supported SoCs have this bit set by default. The vendor
+ * driver however sets it manually (depending on the version either
+ * while un-setting AIU_I2S_MISC_HOLD_EN or right before that). Follow
+ * the same approach for consistency with the vendor driver.
+ */
+ snd_soc_component_update_bits(component, AIU_I2S_MISC,
+ AIU_I2S_MISC_FORCE_LEFT_RIGHT,
+ AIU_I2S_MISC_FORCE_LEFT_RIGHT);
+
+ snd_soc_component_update_bits(component, AIU_I2S_MISC,
+ AIU_I2S_MISC_HOLD_EN, 0);
+
return 0;
}
diff --git a/sound/soc/meson/aiu-fifo.c b/sound/soc/meson/aiu-fifo.c
index 4ad23267cace..d67ff4cdabd5 100644
--- a/sound/soc/meson/aiu-fifo.c
+++ b/sound/soc/meson/aiu-fifo.c
@@ -5,6 +5,7 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dai.h>
@@ -179,6 +180,11 @@ int aiu_fifo_pcm_new(struct snd_soc_pcm_runtime *rtd,
struct snd_card *card = rtd->card->snd_card;
struct aiu_fifo *fifo = dai->playback_dma_data;
size_t size = fifo->pcm->buffer_bytes_max;
+ int ret;
+
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
snd_pcm_set_managed_buffer_all(rtd->pcm, SNDRV_DMA_TYPE_DEV,
card->dev, size, size);
diff --git a/sound/soc/qcom/qdsp6/audioreach.h b/sound/soc/qcom/qdsp6/audioreach.h
index 4f693a2660b5..3ee8bfcd0121 100644
--- a/sound/soc/qcom/qdsp6/audioreach.h
+++ b/sound/soc/qcom/qdsp6/audioreach.h
@@ -550,6 +550,10 @@ struct audio_hw_clk_cfg {
uint32_t clock_root;
} __packed;
+struct audio_hw_clk_rel_cfg {
+ uint32_t clock_id;
+} __packed;
+
#define PARAM_ID_HW_EP_POWER_MODE_CFG 0x8001176
#define AR_HW_EP_POWER_MODE_0 0 /* default */
#define AR_HW_EP_POWER_MODE_1 1 /* XO Shutdown allowed */
diff --git a/sound/soc/qcom/qdsp6/q6adm.c b/sound/soc/qcom/qdsp6/q6adm.c
index 3d831b635524..72c5719f1d25 100644
--- a/sound/soc/qcom/qdsp6/q6adm.c
+++ b/sound/soc/qcom/qdsp6/q6adm.c
@@ -390,7 +390,7 @@ struct q6copp *q6adm_open(struct device *dev, int port_id, int path, int rate,
int ret = 0;
if (port_id < 0) {
- dev_err(dev, "Invalid port_id 0x%x\n", port_id);
+ dev_err(dev, "Invalid port_id %d\n", port_id);
return ERR_PTR(-EINVAL);
}
@@ -508,7 +508,7 @@ int q6adm_matrix_map(struct device *dev, int path,
int port_idx = payload_map.port_id[i];
if (port_idx < 0) {
- dev_err(dev, "Invalid port_id 0x%x\n",
+ dev_err(dev, "Invalid port_id %d\n",
payload_map.port_id[i]);
kfree(pkt);
return -EINVAL;
diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c
index 46f365528d50..b74b67720ef4 100644
--- a/sound/soc/qcom/qdsp6/q6asm-dai.c
+++ b/sound/soc/qcom/qdsp6/q6asm-dai.c
@@ -269,9 +269,7 @@ static int q6asm_dai_prepare(struct snd_soc_component *component,
if (ret < 0) {
dev_err(dev, "%s: q6asm_open_write failed\n", __func__);
- q6asm_audio_client_free(prtd->audio_client);
- prtd->audio_client = NULL;
- return -ENOMEM;
+ goto open_err;
}
prtd->session_id = q6asm_get_session_id(prtd->audio_client);
@@ -279,7 +277,7 @@ static int q6asm_dai_prepare(struct snd_soc_component *component,
prtd->session_id, substream->stream);
if (ret) {
dev_err(dev, "%s: stream reg failed ret:%d\n", __func__, ret);
- return ret;
+ goto routing_err;
}
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
@@ -301,10 +299,19 @@ static int q6asm_dai_prepare(struct snd_soc_component *component,
}
if (ret < 0)
dev_info(dev, "%s: CMD Format block failed\n", __func__);
+ else
+ prtd->state = Q6ASM_STREAM_RUNNING;
- prtd->state = Q6ASM_STREAM_RUNNING;
+ return ret;
- return 0;
+routing_err:
+ q6asm_cmd(prtd->audio_client, prtd->stream_id, CMD_CLOSE);
+open_err:
+ q6asm_unmap_memory_regions(substream->stream, prtd->audio_client);
+ q6asm_audio_client_free(prtd->audio_client);
+ prtd->audio_client = NULL;
+
+ return ret;
}
static int q6asm_dai_trigger(struct snd_soc_component *component,
diff --git a/sound/soc/qcom/qdsp6/q6prm.c b/sound/soc/qcom/qdsp6/q6prm.c
index 82c40f2d4e1d..cda33ded29be 100644
--- a/sound/soc/qcom/qdsp6/q6prm.c
+++ b/sound/soc/qcom/qdsp6/q6prm.c
@@ -42,6 +42,12 @@ struct prm_cmd_request_rsc {
struct audio_hw_clk_cfg clock_id;
} __packed;
+struct prm_cmd_release_rsc {
+ struct apm_module_param_data param_data;
+ uint32_t num_clk_id;
+ struct audio_hw_clk_rel_cfg clock_id;
+} __packed;
+
static int q6prm_send_cmd_sync(struct q6prm *prm, struct gpr_pkt *pkt, uint32_t rsp_opcode)
{
return audioreach_send_cmd_sync(prm->dev, prm->gdev, &prm->result, &prm->lock,
@@ -102,8 +108,8 @@ int q6prm_unvote_lpass_core_hw(struct device *dev, uint32_t hw_block_id, uint32_
}
EXPORT_SYMBOL_GPL(q6prm_unvote_lpass_core_hw);
-int q6prm_set_lpass_clock(struct device *dev, int clk_id, int clk_attr, int clk_root,
- unsigned int freq)
+static int q6prm_request_lpass_clock(struct device *dev, int clk_id, int clk_attr, int clk_root,
+ unsigned int freq)
{
struct q6prm *prm = dev_get_drvdata(dev->parent);
struct apm_module_param_data *param_data;
@@ -138,6 +144,49 @@ int q6prm_set_lpass_clock(struct device *dev, int clk_id, int clk_attr, int clk_
return rc;
}
+
+static int q6prm_release_lpass_clock(struct device *dev, int clk_id, int clk_attr, int clk_root,
+ unsigned int freq)
+{
+ struct q6prm *prm = dev_get_drvdata(dev->parent);
+ struct apm_module_param_data *param_data;
+ struct prm_cmd_release_rsc *rel;
+ gpr_device_t *gdev = prm->gdev;
+ struct gpr_pkt *pkt;
+ int rc;
+
+ pkt = audioreach_alloc_cmd_pkt(sizeof(*rel), PRM_CMD_RELEASE_HW_RSC, 0, gdev->svc.id,
+ GPR_PRM_MODULE_IID);
+ if (IS_ERR(pkt))
+ return PTR_ERR(pkt);
+
+ rel = (void *)pkt + GPR_HDR_SIZE + APM_CMD_HDR_SIZE;
+
+ param_data = &rel->param_data;
+
+ param_data->module_instance_id = GPR_PRM_MODULE_IID;
+ param_data->error_code = 0;
+ param_data->param_id = PARAM_ID_RSC_AUDIO_HW_CLK;
+ param_data->param_size = sizeof(*rel) - APM_MODULE_PARAM_DATA_SIZE;
+
+ rel->num_clk_id = 1;
+ rel->clock_id.clock_id = clk_id;
+
+ rc = q6prm_send_cmd_sync(prm, pkt, PRM_CMD_RSP_RELEASE_HW_RSC);
+
+ kfree(pkt);
+
+ return rc;
+}
+
+int q6prm_set_lpass_clock(struct device *dev, int clk_id, int clk_attr, int clk_root,
+ unsigned int freq)
+{
+ if (freq)
+ return q6prm_request_lpass_clock(dev, clk_id, clk_attr, clk_root, freq);
+
+ return q6prm_release_lpass_clock(dev, clk_id, clk_attr, clk_root, freq);
+}
EXPORT_SYMBOL_GPL(q6prm_set_lpass_clock);
static int prm_callback(struct gpr_resp_pkt *data, void *priv, int op)
diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
index 3390ebef9549..928fd23e2c27 100644
--- a/sound/soc/qcom/qdsp6/q6routing.c
+++ b/sound/soc/qcom/qdsp6/q6routing.c
@@ -372,6 +372,12 @@ int q6routing_stream_open(int fedai_id, int perf_mode,
}
session = &routing_data->sessions[stream_id - 1];
+ if (session->port_id < 0) {
+ dev_err(routing_data->dev, "Routing not setup for MultiMedia%d Session\n",
+ session->fedai_id);
+ return -EINVAL;
+ }
+
pdata = &routing_data->port_data[session->port_id];
mutex_lock(&routing_data->lock);
@@ -492,9 +498,15 @@ static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol,
struct session_data *session = &data->sessions[session_id];
if (ucontrol->value.integer.value[0]) {
+ if (session->port_id == be_id)
+ return 0;
+
session->port_id = be_id;
snd_soc_dapm_mixer_update_power(dapm, kcontrol, 1, update);
} else {
+ if (session->port_id == -1 || session->port_id != be_id)
+ return 0;
+
session->port_id = -1;
snd_soc_dapm_mixer_update_power(dapm, kcontrol, 0, update);
}
diff --git a/sound/soc/rockchip/rockchip_i2s_tdm.c b/sound/soc/rockchip/rockchip_i2s_tdm.c
index 17b9b287853a..5f9cb5c4c7f0 100644
--- a/sound/soc/rockchip/rockchip_i2s_tdm.c
+++ b/sound/soc/rockchip/rockchip_i2s_tdm.c
@@ -95,6 +95,7 @@ struct rk_i2s_tdm_dev {
spinlock_t lock; /* xfer lock */
bool has_playback;
bool has_capture;
+ struct snd_soc_dai_driver *dai;
};
static int to_ch_num(unsigned int val)
@@ -1310,19 +1311,14 @@ static const struct of_device_id rockchip_i2s_tdm_match[] = {
{},
};
-static struct snd_soc_dai_driver i2s_tdm_dai = {
+static const struct snd_soc_dai_driver i2s_tdm_dai = {
.probe = rockchip_i2s_tdm_dai_probe,
- .playback = {
- .stream_name = "Playback",
- },
- .capture = {
- .stream_name = "Capture",
- },
.ops = &rockchip_i2s_tdm_dai_ops,
};
-static void rockchip_i2s_tdm_init_dai(struct rk_i2s_tdm_dev *i2s_tdm)
+static int rockchip_i2s_tdm_init_dai(struct rk_i2s_tdm_dev *i2s_tdm)
{
+ struct snd_soc_dai_driver *dai;
struct property *dma_names;
const char *dma_name;
u64 formats = (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE |
@@ -1337,19 +1333,33 @@ static void rockchip_i2s_tdm_init_dai(struct rk_i2s_tdm_dev *i2s_tdm)
i2s_tdm->has_capture = true;
}
+ dai = devm_kmemdup(i2s_tdm->dev, &i2s_tdm_dai,
+ sizeof(*dai), GFP_KERNEL);
+ if (!dai)
+ return -ENOMEM;
+
if (i2s_tdm->has_playback) {
- i2s_tdm_dai.playback.channels_min = 2;
- i2s_tdm_dai.playback.channels_max = 8;
- i2s_tdm_dai.playback.rates = SNDRV_PCM_RATE_8000_192000;
- i2s_tdm_dai.playback.formats = formats;
+ dai->playback.stream_name = "Playback";
+ dai->playback.channels_min = 2;
+ dai->playback.channels_max = 8;
+ dai->playback.rates = SNDRV_PCM_RATE_8000_192000;
+ dai->playback.formats = formats;
}
if (i2s_tdm->has_capture) {
- i2s_tdm_dai.capture.channels_min = 2;
- i2s_tdm_dai.capture.channels_max = 8;
- i2s_tdm_dai.capture.rates = SNDRV_PCM_RATE_8000_192000;
- i2s_tdm_dai.capture.formats = formats;
+ dai->capture.stream_name = "Capture";
+ dai->capture.channels_min = 2;
+ dai->capture.channels_max = 8;
+ dai->capture.rates = SNDRV_PCM_RATE_8000_192000;
+ dai->capture.formats = formats;
}
+
+ if (i2s_tdm->clk_trcm != TRCM_TXRX)
+ dai->symmetric_rate = 1;
+
+ i2s_tdm->dai = dai;
+
+ return 0;
}
static int rockchip_i2s_tdm_path_check(struct rk_i2s_tdm_dev *i2s_tdm,
@@ -1541,8 +1551,6 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev)
spin_lock_init(&i2s_tdm->lock);
i2s_tdm->soc_data = (struct rk_i2s_soc_data *)of_id->data;
- rockchip_i2s_tdm_init_dai(i2s_tdm);
-
i2s_tdm->frame_width = 64;
i2s_tdm->clk_trcm = TRCM_TXRX;
@@ -1555,8 +1563,10 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev)
}
i2s_tdm->clk_trcm = TRCM_RX;
}
- if (i2s_tdm->clk_trcm != TRCM_TXRX)
- i2s_tdm_dai.symmetric_rate = 1;
+
+ ret = rockchip_i2s_tdm_init_dai(i2s_tdm);
+ if (ret)
+ return ret;
i2s_tdm->grf = syscon_regmap_lookup_by_phandle(node, "rockchip,grf");
if (IS_ERR(i2s_tdm->grf))
@@ -1678,7 +1688,7 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev)
ret = devm_snd_soc_register_component(&pdev->dev,
&rockchip_i2s_tdm_component,
- &i2s_tdm_dai, 1);
+ i2s_tdm->dai, 1);
if (ret) {
dev_err(&pdev->dev, "Could not register DAI\n");
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index 16c6e0265749..03e0d4eca781 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -102,7 +102,7 @@ static int rsnd_dmaen_stop(struct rsnd_mod *mod,
struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
if (dmaen->chan)
- dmaengine_terminate_sync(dmaen->chan);
+ dmaengine_terminate_async(dmaen->chan);
return 0;
}
diff --git a/sound/soc/soc-acpi.c b/sound/soc/soc-acpi.c
index 2ae99b49d3f5..cbd7ea48837b 100644
--- a/sound/soc/soc-acpi.c
+++ b/sound/soc/soc-acpi.c
@@ -20,8 +20,10 @@ static bool snd_soc_acpi_id_present(struct snd_soc_acpi_mach *machine)
if (comp_ids) {
for (i = 0; i < comp_ids->num_codecs; i++) {
- if (acpi_dev_present(comp_ids->codecs[i], NULL, -1))
+ if (acpi_dev_present(comp_ids->codecs[i], NULL, -1)) {
+ strscpy(machine->id, comp_ids->codecs[i], ACPI_ID_LEN);
return true;
+ }
}
}
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 2892b0aba151..b06c5682445c 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2559,8 +2559,13 @@ static struct snd_soc_dapm_widget *dapm_find_widget(
return NULL;
}
-static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
- const char *pin, int status)
+/*
+ * set the DAPM pin status:
+ * returns 1 when the value has been updated, 0 when unchanged, or a negative
+ * error code; called from kcontrol put callback
+ */
+static int __snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
+ const char *pin, int status)
{
struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true);
int ret = 0;
@@ -2586,6 +2591,18 @@ static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
return ret;
}
+/*
+ * similar to __snd_soc_dapm_set_pin(), but returns 0 when successful;
+ * called from several API functions below
+ */
+static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
+ const char *pin, int status)
+{
+ int ret = __snd_soc_dapm_set_pin(dapm, pin, status);
+
+ return ret < 0 ? ret : 0;
+}
+
/**
* snd_soc_dapm_sync_unlocked - scan and power dapm paths
* @dapm: DAPM context
@@ -3589,10 +3606,10 @@ int snd_soc_dapm_put_pin_switch(struct snd_kcontrol *kcontrol,
const char *pin = (const char *)kcontrol->private_value;
int ret;
- if (ucontrol->value.integer.value[0])
- ret = snd_soc_dapm_enable_pin(&card->dapm, pin);
- else
- ret = snd_soc_dapm_disable_pin(&card->dapm, pin);
+ mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
+ ret = __snd_soc_dapm_set_pin(&card->dapm, pin,
+ !!ucontrol->value.integer.value[0]);
+ mutex_unlock(&card->dapm_mutex);
snd_soc_dapm_sync(&card->dapm);
return ret;
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 557e22c5254c..f5b9e66ac3b8 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -2700,6 +2700,7 @@ EXPORT_SYMBOL_GPL(snd_soc_tplg_component_load);
/* remove dynamic controls from the component driver */
int snd_soc_tplg_component_remove(struct snd_soc_component *comp)
{
+ struct snd_card *card = comp->card->snd_card;
struct snd_soc_dobj *dobj, *next_dobj;
int pass = SOC_TPLG_PASS_END;
@@ -2707,6 +2708,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp)
while (pass >= SOC_TPLG_PASS_START) {
/* remove mixer controls */
+ down_write(&card->controls_rwsem);
list_for_each_entry_safe(dobj, next_dobj, &comp->dobj_list,
list) {
@@ -2745,6 +2747,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp)
break;
}
}
+ up_write(&card->controls_rwsem);
pass--;
}
diff --git a/sound/soc/sof/Kconfig b/sound/soc/sof/Kconfig
index 6bb4db87af03..041c54639c4d 100644
--- a/sound/soc/sof/Kconfig
+++ b/sound/soc/sof/Kconfig
@@ -47,7 +47,7 @@ config SND_SOC_SOF_OF
Say Y if you need this option. If unsure select "N".
config SND_SOC_SOF_COMPRESS
- tristate
+ bool
select SND_SOC_COMPRESS
config SND_SOC_SOF_DEBUG_PROBES
diff --git a/sound/soc/sof/control.c b/sound/soc/sof/control.c
index 58bb89af4de1..bb1dfe4f6d40 100644
--- a/sound/soc/sof/control.c
+++ b/sound/soc/sof/control.c
@@ -69,7 +69,7 @@ static void snd_sof_refresh_control(struct snd_sof_control *scontrol)
{
struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
struct snd_soc_component *scomp = scontrol->scomp;
- enum sof_ipc_ctrl_type ctrl_type;
+ u32 ipc_cmd;
int ret;
if (!scontrol->comp_data_dirty)
@@ -79,9 +79,9 @@ static void snd_sof_refresh_control(struct snd_sof_control *scontrol)
return;
if (scontrol->cmd == SOF_CTRL_CMD_BINARY)
- ctrl_type = SOF_IPC_COMP_GET_DATA;
+ ipc_cmd = SOF_IPC_COMP_GET_DATA;
else
- ctrl_type = SOF_IPC_COMP_GET_VALUE;
+ ipc_cmd = SOF_IPC_COMP_GET_VALUE;
/* set the ABI header values */
cdata->data->magic = SOF_ABI_MAGIC;
@@ -89,7 +89,7 @@ static void snd_sof_refresh_control(struct snd_sof_control *scontrol)
/* refresh the component data from DSP */
scontrol->comp_data_dirty = false;
- ret = snd_sof_ipc_set_get_comp_data(scontrol, ctrl_type,
+ ret = snd_sof_ipc_set_get_comp_data(scontrol, ipc_cmd,
SOF_CTRL_TYPE_VALUE_CHAN_GET,
scontrol->cmd, false);
if (ret < 0) {
diff --git a/sound/soc/sof/intel/hda-bus.c b/sound/soc/sof/intel/hda-bus.c
index 30025d3c16b6..0862ff8b6627 100644
--- a/sound/soc/sof/intel/hda-bus.c
+++ b/sound/soc/sof/intel/hda-bus.c
@@ -10,6 +10,8 @@
#include <linux/io.h>
#include <sound/hdaudio.h>
#include <sound/hda_i915.h>
+#include <sound/hda_codec.h>
+#include <sound/hda_register.h>
#include "../sof-priv.h"
#include "hda.h"
@@ -21,6 +23,18 @@
#endif
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+static void update_codec_wake_enable(struct hdac_bus *bus, unsigned int addr, bool link_power)
+{
+ unsigned int mask = snd_hdac_chip_readw(bus, WAKEEN);
+
+ if (link_power)
+ mask &= ~BIT(addr);
+ else
+ mask |= BIT(addr);
+
+ snd_hdac_chip_updatew(bus, WAKEEN, STATESTS_INT_MASK, mask);
+}
+
static void sof_hda_bus_link_power(struct hdac_device *codec, bool enable)
{
struct hdac_bus *bus = codec->bus;
@@ -41,6 +55,9 @@ static void sof_hda_bus_link_power(struct hdac_device *codec, bool enable)
*/
if (codec->addr == HDA_IDISP_ADDR && !enable)
snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
+
+ /* WAKEEN needs to be set for disabled links */
+ update_codec_wake_enable(bus, codec->addr, enable);
}
static const struct hdac_bus_ops bus_core_ops = {
diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
index 6744318de612..13cd96e6724a 100644
--- a/sound/soc/sof/intel/hda-codec.c
+++ b/sound/soc/sof/intel/hda-codec.c
@@ -22,6 +22,7 @@
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
#define IDISP_VID_INTEL 0x80860000
+#define CODEC_PROBE_RETRIES 3
/* load the legacy HDA codec driver */
static int request_codec_module(struct hda_codec *codec)
@@ -121,12 +122,15 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address,
u32 hda_cmd = (address << 28) | (AC_NODE_ROOT << 20) |
(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
u32 resp = -1;
- int ret;
+ int ret, retry = 0;
+
+ do {
+ mutex_lock(&hbus->core.cmd_mutex);
+ snd_hdac_bus_send_cmd(&hbus->core, hda_cmd);
+ snd_hdac_bus_get_response(&hbus->core, address, &resp);
+ mutex_unlock(&hbus->core.cmd_mutex);
+ } while (resp == -1 && retry++ < CODEC_PROBE_RETRIES);
- mutex_lock(&hbus->core.cmd_mutex);
- snd_hdac_bus_send_cmd(&hbus->core, hda_cmd);
- snd_hdac_bus_get_response(&hbus->core, address, &resp);
- mutex_unlock(&hbus->core.cmd_mutex);
if (resp == -1)
return -EIO;
dev_dbg(sdev->dev, "HDA codec #%d probed OK: response: %x\n",
diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c
index 058baca2cd0e..287dc0eb6686 100644
--- a/sound/soc/sof/intel/hda-dsp.c
+++ b/sound/soc/sof/intel/hda-dsp.c
@@ -622,8 +622,7 @@ static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
hda_dsp_ipc_int_disable(sdev);
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
- if (runtime_suspend)
- hda_codec_jack_wake_enable(sdev, true);
+ hda_codec_jack_wake_enable(sdev, runtime_suspend);
/* power down all hda link */
snd_hdac_ext_bus_link_power_down_all(bus);
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index 883d78dd01b5..2c0d4d06ab36 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -58,6 +58,13 @@ int hda_ctrl_dai_widget_setup(struct snd_soc_dapm_widget *w)
return -EINVAL;
}
+ /* DAI already configured, reset it before reconfiguring it */
+ if (sof_dai->configured) {
+ ret = hda_ctrl_dai_widget_free(w);
+ if (ret < 0)
+ return ret;
+ }
+
config = &sof_dai->dai_config[sof_dai->current_config];
/*
@@ -810,6 +817,20 @@ skip_soundwire:
return 0;
}
+static void hda_check_for_state_change(struct snd_sof_dev *sdev)
+{
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ unsigned int codec_mask;
+
+ codec_mask = snd_hdac_chip_readw(bus, STATESTS);
+ if (codec_mask) {
+ hda_codec_jack_check(sdev);
+ snd_hdac_chip_writew(bus, STATESTS, codec_mask);
+ }
+#endif
+}
+
static irqreturn_t hda_dsp_interrupt_handler(int irq, void *context)
{
struct snd_sof_dev *sdev = context;
@@ -851,6 +872,8 @@ static irqreturn_t hda_dsp_interrupt_thread(int irq, void *context)
if (hda_sdw_check_wakeen_irq(sdev))
hda_sdw_process_wakeen(sdev);
+ hda_check_for_state_change(sdev);
+
/* enable GIE interrupt */
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
SOF_HDA_INTCTL,
diff --git a/sound/soc/sof/intel/pci-tgl.c b/sound/soc/sof/intel/pci-tgl.c
index f2ea34df9741..fd46210f1730 100644
--- a/sound/soc/sof/intel/pci-tgl.c
+++ b/sound/soc/sof/intel/pci-tgl.c
@@ -112,8 +112,12 @@ static const struct pci_device_id sof_pci_ids[] = {
.driver_data = (unsigned long)&adls_desc},
{ PCI_DEVICE(0x8086, 0x51c8), /* ADL-P */
.driver_data = (unsigned long)&adl_desc},
+ { PCI_DEVICE(0x8086, 0x51cd), /* ADL-P */
+ .driver_data = (unsigned long)&adl_desc},
{ PCI_DEVICE(0x8086, 0x51cc), /* ADL-M */
.driver_data = (unsigned long)&adl_desc},
+ { PCI_DEVICE(0x8086, 0x54c8), /* ADL-N */
+ .driver_data = (unsigned long)&adl_desc},
{ 0, }
};
MODULE_DEVICE_TABLE(pci, sof_pci_ids);
diff --git a/sound/soc/stm/stm32_i2s.c b/sound/soc/stm/stm32_i2s.c
index 6254bacad6eb..717f45a83445 100644
--- a/sound/soc/stm/stm32_i2s.c
+++ b/sound/soc/stm/stm32_i2s.c
@@ -700,7 +700,7 @@ static int stm32_i2s_configure_clock(struct snd_soc_dai *cpu_dai,
if (ret < 0)
return ret;
- nb_bits = frame_len * ((cgfr & I2S_CGFR_CHLEN) + 1);
+ nb_bits = frame_len * (FIELD_GET(I2S_CGFR_CHLEN, cgfr) + 1);
ret = stm32_i2s_calc_clk_div(i2s, i2s_clock_rate,
(nb_bits * rate));
if (ret)
diff --git a/sound/soc/tegra/tegra186_dspk.c b/sound/soc/tegra/tegra186_dspk.c
index 8ee9a77bd83d..a74c980ee775 100644
--- a/sound/soc/tegra/tegra186_dspk.c
+++ b/sound/soc/tegra/tegra186_dspk.c
@@ -26,51 +26,162 @@ static const struct reg_default tegra186_dspk_reg_defaults[] = {
{ TEGRA186_DSPK_CODEC_CTRL, 0x03000000 },
};
-static int tegra186_dspk_get_control(struct snd_kcontrol *kcontrol,
+static int tegra186_dspk_get_fifo_th(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
- if (strstr(kcontrol->id.name, "FIFO Threshold"))
- ucontrol->value.integer.value[0] = dspk->rx_fifo_th;
- else if (strstr(kcontrol->id.name, "OSR Value"))
- ucontrol->value.integer.value[0] = dspk->osr_val;
- else if (strstr(kcontrol->id.name, "LR Polarity Select"))
- ucontrol->value.integer.value[0] = dspk->lrsel;
- else if (strstr(kcontrol->id.name, "Channel Select"))
- ucontrol->value.integer.value[0] = dspk->ch_sel;
- else if (strstr(kcontrol->id.name, "Mono To Stereo"))
- ucontrol->value.integer.value[0] = dspk->mono_to_stereo;
- else if (strstr(kcontrol->id.name, "Stereo To Mono"))
- ucontrol->value.integer.value[0] = dspk->stereo_to_mono;
+ ucontrol->value.integer.value[0] = dspk->rx_fifo_th;
return 0;
}
-static int tegra186_dspk_put_control(struct snd_kcontrol *kcontrol,
+static int tegra186_dspk_put_fifo_th(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
- int val = ucontrol->value.integer.value[0];
-
- if (strstr(kcontrol->id.name, "FIFO Threshold"))
- dspk->rx_fifo_th = val;
- else if (strstr(kcontrol->id.name, "OSR Value"))
- dspk->osr_val = val;
- else if (strstr(kcontrol->id.name, "LR Polarity Select"))
- dspk->lrsel = val;
- else if (strstr(kcontrol->id.name, "Channel Select"))
- dspk->ch_sel = val;
- else if (strstr(kcontrol->id.name, "Mono To Stereo"))
- dspk->mono_to_stereo = val;
- else if (strstr(kcontrol->id.name, "Stereo To Mono"))
- dspk->stereo_to_mono = val;
+ int value = ucontrol->value.integer.value[0];
+
+ if (value == dspk->rx_fifo_th)
+ return 0;
+
+ dspk->rx_fifo_th = value;
+
+ return 1;
+}
+
+static int tegra186_dspk_get_osr_val(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+ struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+
+ ucontrol->value.enumerated.item[0] = dspk->osr_val;
return 0;
}
+static int tegra186_dspk_put_osr_val(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+ struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == dspk->osr_val)
+ return 0;
+
+ dspk->osr_val = value;
+
+ return 1;
+}
+
+static int tegra186_dspk_get_pol_sel(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+ struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+
+ ucontrol->value.enumerated.item[0] = dspk->lrsel;
+
+ return 0;
+}
+
+static int tegra186_dspk_put_pol_sel(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+ struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == dspk->lrsel)
+ return 0;
+
+ dspk->lrsel = value;
+
+ return 1;
+}
+
+static int tegra186_dspk_get_ch_sel(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+ struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+
+ ucontrol->value.enumerated.item[0] = dspk->ch_sel;
+
+ return 0;
+}
+
+static int tegra186_dspk_put_ch_sel(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+ struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == dspk->ch_sel)
+ return 0;
+
+ dspk->ch_sel = value;
+
+ return 1;
+}
+
+static int tegra186_dspk_get_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+ struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+
+ ucontrol->value.enumerated.item[0] = dspk->mono_to_stereo;
+
+ return 0;
+}
+
+static int tegra186_dspk_put_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+ struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == dspk->mono_to_stereo)
+ return 0;
+
+ dspk->mono_to_stereo = value;
+
+ return 1;
+}
+
+static int tegra186_dspk_get_stereo_to_mono(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+ struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+
+ ucontrol->value.enumerated.item[0] = dspk->stereo_to_mono;
+
+ return 0;
+}
+
+static int tegra186_dspk_put_stereo_to_mono(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+ struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == dspk->stereo_to_mono)
+ return 0;
+
+ dspk->stereo_to_mono = value;
+
+ return 1;
+}
+
static int __maybe_unused tegra186_dspk_runtime_suspend(struct device *dev)
{
struct tegra186_dspk *dspk = dev_get_drvdata(dev);
@@ -279,17 +390,19 @@ static const struct soc_enum tegra186_dspk_lrsel_enum =
static const struct snd_kcontrol_new tegrat186_dspk_controls[] = {
SOC_SINGLE_EXT("FIFO Threshold", SND_SOC_NOPM, 0,
TEGRA186_DSPK_RX_FIFO_DEPTH - 1, 0,
- tegra186_dspk_get_control, tegra186_dspk_put_control),
+ tegra186_dspk_get_fifo_th, tegra186_dspk_put_fifo_th),
SOC_ENUM_EXT("OSR Value", tegra186_dspk_osr_enum,
- tegra186_dspk_get_control, tegra186_dspk_put_control),
+ tegra186_dspk_get_osr_val, tegra186_dspk_put_osr_val),
SOC_ENUM_EXT("LR Polarity Select", tegra186_dspk_lrsel_enum,
- tegra186_dspk_get_control, tegra186_dspk_put_control),
+ tegra186_dspk_get_pol_sel, tegra186_dspk_put_pol_sel),
SOC_ENUM_EXT("Channel Select", tegra186_dspk_ch_sel_enum,
- tegra186_dspk_get_control, tegra186_dspk_put_control),
+ tegra186_dspk_get_ch_sel, tegra186_dspk_put_ch_sel),
SOC_ENUM_EXT("Mono To Stereo", tegra186_dspk_mono_conv_enum,
- tegra186_dspk_get_control, tegra186_dspk_put_control),
+ tegra186_dspk_get_mono_to_stereo,
+ tegra186_dspk_put_mono_to_stereo),
SOC_ENUM_EXT("Stereo To Mono", tegra186_dspk_stereo_conv_enum,
- tegra186_dspk_get_control, tegra186_dspk_put_control),
+ tegra186_dspk_get_stereo_to_mono,
+ tegra186_dspk_put_stereo_to_mono),
};
static const struct snd_soc_component_driver tegra186_dspk_cmpnt = {
diff --git a/sound/soc/tegra/tegra210_admaif.c b/sound/soc/tegra/tegra210_admaif.c
index bcccdf3ddc52..1a2e868a6220 100644
--- a/sound/soc/tegra/tegra210_admaif.c
+++ b/sound/soc/tegra/tegra210_admaif.c
@@ -424,46 +424,122 @@ static const struct snd_soc_dai_ops tegra_admaif_dai_ops = {
.trigger = tegra_admaif_trigger,
};
-static int tegra_admaif_get_control(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static int tegra210_admaif_pget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
+ struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+
+ ucontrol->value.enumerated.item[0] =
+ admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg];
+
+ return 0;
+}
+
+static int tegra210_admaif_pput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
+ struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg])
+ return 0;
+
+ admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg] = value;
+
+ return 1;
+}
+
+static int tegra210_admaif_cget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
+ struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+
+ ucontrol->value.enumerated.item[0] =
+ admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg];
+
+ return 0;
+}
+
+static int tegra210_admaif_cput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg])
+ return 0;
+
+ admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg] = value;
+
+ return 1;
+}
+
+static int tegra210_admaif_pget_stereo_to_mono(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
- long *uctl_val = &ucontrol->value.integer.value[0];
+ struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
- if (strstr(kcontrol->id.name, "Playback Mono To Stereo"))
- *uctl_val = admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg];
- else if (strstr(kcontrol->id.name, "Capture Mono To Stereo"))
- *uctl_val = admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg];
- else if (strstr(kcontrol->id.name, "Playback Stereo To Mono"))
- *uctl_val = admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg];
- else if (strstr(kcontrol->id.name, "Capture Stereo To Mono"))
- *uctl_val = admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg];
+ ucontrol->value.enumerated.item[0] =
+ admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg];
return 0;
}
-static int tegra_admaif_put_control(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static int tegra210_admaif_pput_stereo_to_mono(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg])
+ return 0;
+
+ admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg] = value;
+
+ return 1;
+}
+
+static int tegra210_admaif_cget_stereo_to_mono(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
- int value = ucontrol->value.integer.value[0];
+ struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
- if (strstr(kcontrol->id.name, "Playback Mono To Stereo"))
- admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg] = value;
- else if (strstr(kcontrol->id.name, "Capture Mono To Stereo"))
- admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg] = value;
- else if (strstr(kcontrol->id.name, "Playback Stereo To Mono"))
- admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg] = value;
- else if (strstr(kcontrol->id.name, "Capture Stereo To Mono"))
- admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg] = value;
+ ucontrol->value.enumerated.item[0] =
+ admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg];
return 0;
}
+static int tegra210_admaif_cput_stereo_to_mono(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
+ struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg])
+ return 0;
+
+ admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg] = value;
+
+ return 1;
+}
+
static int tegra_admaif_dai_probe(struct snd_soc_dai *dai)
{
struct tegra_admaif *admaif = snd_soc_dai_get_drvdata(dai);
@@ -559,17 +635,21 @@ static const char * const tegra_admaif_mono_conv_text[] = {
}
#define TEGRA_ADMAIF_CIF_CTRL(reg) \
- NV_SOC_ENUM_EXT("ADMAIF" #reg " Playback Mono To Stereo", reg - 1,\
- tegra_admaif_get_control, tegra_admaif_put_control, \
+ NV_SOC_ENUM_EXT("ADMAIF" #reg " Playback Mono To Stereo", reg - 1, \
+ tegra210_admaif_pget_mono_to_stereo, \
+ tegra210_admaif_pput_mono_to_stereo, \
tegra_admaif_mono_conv_text), \
- NV_SOC_ENUM_EXT("ADMAIF" #reg " Playback Stereo To Mono", reg - 1,\
- tegra_admaif_get_control, tegra_admaif_put_control, \
+ NV_SOC_ENUM_EXT("ADMAIF" #reg " Playback Stereo To Mono", reg - 1, \
+ tegra210_admaif_pget_stereo_to_mono, \
+ tegra210_admaif_pput_stereo_to_mono, \
tegra_admaif_stereo_conv_text), \
- NV_SOC_ENUM_EXT("ADMAIF" #reg " Capture Mono To Stereo", reg - 1, \
- tegra_admaif_get_control, tegra_admaif_put_control, \
+ NV_SOC_ENUM_EXT("ADMAIF" #reg " Capture Mono To Stereo", reg - 1, \
+ tegra210_admaif_cget_mono_to_stereo, \
+ tegra210_admaif_cput_mono_to_stereo, \
tegra_admaif_mono_conv_text), \
- NV_SOC_ENUM_EXT("ADMAIF" #reg " Capture Stereo To Mono", reg - 1, \
- tegra_admaif_get_control, tegra_admaif_put_control, \
+ NV_SOC_ENUM_EXT("ADMAIF" #reg " Capture Stereo To Mono", reg - 1, \
+ tegra210_admaif_cget_stereo_to_mono, \
+ tegra210_admaif_cput_stereo_to_mono, \
tegra_admaif_stereo_conv_text)
static struct snd_kcontrol_new tegra210_admaif_controls[] = {
diff --git a/sound/soc/tegra/tegra210_adx.c b/sound/soc/tegra/tegra210_adx.c
index d7c7849c2f92..3785cade2d9a 100644
--- a/sound/soc/tegra/tegra210_adx.c
+++ b/sound/soc/tegra/tegra210_adx.c
@@ -193,6 +193,9 @@ static int tegra210_adx_put_byte_map(struct snd_kcontrol *kcontrol,
struct soc_mixer_control *mc =
 (struct soc_mixer_control *)kcontrol->private_value;
+ if (value == bytes_map[mc->reg])
+ return 0;
+
if (value >= 0 && value <= 255) {
/* update byte map and enable slot */
bytes_map[mc->reg] = value;
@@ -511,8 +514,8 @@ static int tegra210_adx_platform_remove(struct platform_device *pdev)
static const struct dev_pm_ops tegra210_adx_pm_ops = {
SET_RUNTIME_PM_OPS(tegra210_adx_runtime_suspend,
tegra210_adx_runtime_resume, NULL)
- SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static struct platform_driver tegra210_adx_driver = {
diff --git a/sound/soc/tegra/tegra210_ahub.c b/sound/soc/tegra/tegra210_ahub.c
index a1989eae2b52..388b815443c7 100644
--- a/sound/soc/tegra/tegra210_ahub.c
+++ b/sound/soc/tegra/tegra210_ahub.c
@@ -62,6 +62,7 @@ static int tegra_ahub_put_value_enum(struct snd_kcontrol *kctl,
unsigned int *item = uctl->value.enumerated.item;
unsigned int value = e->values[item[0]];
unsigned int i, bit_pos, reg_idx = 0, reg_val = 0;
+ int change = 0;
if (item[0] >= e->items)
return -EINVAL;
@@ -86,12 +87,14 @@ static int tegra_ahub_put_value_enum(struct snd_kcontrol *kctl,
/* Update widget power if state has changed */
if (snd_soc_component_test_bits(cmpnt, update[i].reg,
- update[i].mask, update[i].val))
- snd_soc_dapm_mux_update_power(dapm, kctl, item[0], e,
- &update[i]);
+ update[i].mask,
+ update[i].val))
+ change |= snd_soc_dapm_mux_update_power(dapm, kctl,
+ item[0], e,
+ &update[i]);
}
- return 0;
+ return change;
}
static struct snd_soc_dai_driver tegra210_ahub_dais[] = {
diff --git a/sound/soc/tegra/tegra210_amx.c b/sound/soc/tegra/tegra210_amx.c
index af9bddfc3120..d064cc67fea6 100644
--- a/sound/soc/tegra/tegra210_amx.c
+++ b/sound/soc/tegra/tegra210_amx.c
@@ -222,6 +222,9 @@ static int tegra210_amx_put_byte_map(struct snd_kcontrol *kcontrol,
int reg = mc->reg;
int value = ucontrol->value.integer.value[0];
+ if (value == bytes_map[reg])
+ return 0;
+
if (value >= 0 && value <= 255) {
/* Update byte map and enable slot */
bytes_map[reg] = value;
@@ -580,8 +583,8 @@ static int tegra210_amx_platform_remove(struct platform_device *pdev)
static const struct dev_pm_ops tegra210_amx_pm_ops = {
SET_RUNTIME_PM_OPS(tegra210_amx_runtime_suspend,
tegra210_amx_runtime_resume, NULL)
- SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static struct platform_driver tegra210_amx_driver = {
diff --git a/sound/soc/tegra/tegra210_dmic.c b/sound/soc/tegra/tegra210_dmic.c
index b096478cd2ef..db95794530f4 100644
--- a/sound/soc/tegra/tegra210_dmic.c
+++ b/sound/soc/tegra/tegra210_dmic.c
@@ -156,51 +156,162 @@ static int tegra210_dmic_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int tegra210_dmic_get_control(struct snd_kcontrol *kcontrol,
+static int tegra210_dmic_get_boost_gain(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+
+ ucontrol->value.integer.value[0] = dmic->boost_gain;
+
+ return 0;
+}
+
+static int tegra210_dmic_put_boost_gain(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+ int value = ucontrol->value.integer.value[0];
+
+ if (value == dmic->boost_gain)
+ return 0;
+
+ dmic->boost_gain = value;
+
+ return 1;
+}
+
+static int tegra210_dmic_get_ch_select(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+
+ ucontrol->value.enumerated.item[0] = dmic->ch_select;
+
+ return 0;
+}
+
+static int tegra210_dmic_put_ch_select(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == dmic->ch_select)
+ return 0;
+
+ dmic->ch_select = value;
+
+ return 1;
+}
+
+static int tegra210_dmic_get_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+
+ ucontrol->value.enumerated.item[0] = dmic->mono_to_stereo;
+
+ return 0;
+}
+
+static int tegra210_dmic_put_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == dmic->mono_to_stereo)
+ return 0;
+
+ dmic->mono_to_stereo = value;
+
+ return 1;
+}
+
+static int tegra210_dmic_get_stereo_to_mono(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+
+ ucontrol->value.enumerated.item[0] = dmic->stereo_to_mono;
+
+ return 0;
+}
+
+static int tegra210_dmic_put_stereo_to_mono(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == dmic->stereo_to_mono)
+ return 0;
+
+ dmic->stereo_to_mono = value;
+
+ return 1;
+}
+
+static int tegra210_dmic_get_osr_val(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
- if (strstr(kcontrol->id.name, "Boost Gain Volume"))
- ucontrol->value.integer.value[0] = dmic->boost_gain;
- else if (strstr(kcontrol->id.name, "Channel Select"))
- ucontrol->value.integer.value[0] = dmic->ch_select;
- else if (strstr(kcontrol->id.name, "Mono To Stereo"))
- ucontrol->value.integer.value[0] = dmic->mono_to_stereo;
- else if (strstr(kcontrol->id.name, "Stereo To Mono"))
- ucontrol->value.integer.value[0] = dmic->stereo_to_mono;
- else if (strstr(kcontrol->id.name, "OSR Value"))
- ucontrol->value.integer.value[0] = dmic->osr_val;
- else if (strstr(kcontrol->id.name, "LR Polarity Select"))
- ucontrol->value.integer.value[0] = dmic->lrsel;
+ ucontrol->value.enumerated.item[0] = dmic->osr_val;
return 0;
}
-static int tegra210_dmic_put_control(struct snd_kcontrol *kcontrol,
+static int tegra210_dmic_put_osr_val(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
- int value = ucontrol->value.integer.value[0];
+ unsigned int value = ucontrol->value.enumerated.item[0];
- if (strstr(kcontrol->id.name, "Boost Gain Volume"))
- dmic->boost_gain = value;
- else if (strstr(kcontrol->id.name, "Channel Select"))
- dmic->ch_select = ucontrol->value.integer.value[0];
- else if (strstr(kcontrol->id.name, "Mono To Stereo"))
- dmic->mono_to_stereo = value;
- else if (strstr(kcontrol->id.name, "Stereo To Mono"))
- dmic->stereo_to_mono = value;
- else if (strstr(kcontrol->id.name, "OSR Value"))
- dmic->osr_val = value;
- else if (strstr(kcontrol->id.name, "LR Polarity Select"))
- dmic->lrsel = value;
+ if (value == dmic->osr_val)
+ return 0;
+
+ dmic->osr_val = value;
+
+ return 1;
+}
+
+static int tegra210_dmic_get_pol_sel(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+
+ ucontrol->value.enumerated.item[0] = dmic->lrsel;
return 0;
}
+static int tegra210_dmic_put_pol_sel(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == dmic->lrsel)
+ return 0;
+
+ dmic->lrsel = value;
+
+ return 1;
+}
+
static const struct snd_soc_dai_ops tegra210_dmic_dai_ops = {
.hw_params = tegra210_dmic_hw_params,
};
@@ -287,19 +398,22 @@ static const struct soc_enum tegra210_dmic_lrsel_enum =
static const struct snd_kcontrol_new tegra210_dmic_controls[] = {
SOC_SINGLE_EXT("Boost Gain Volume", 0, 0, MAX_BOOST_GAIN, 0,
- tegra210_dmic_get_control, tegra210_dmic_put_control),
+ tegra210_dmic_get_boost_gain,
+ tegra210_dmic_put_boost_gain),
SOC_ENUM_EXT("Channel Select", tegra210_dmic_ch_enum,
- tegra210_dmic_get_control, tegra210_dmic_put_control),
+ tegra210_dmic_get_ch_select, tegra210_dmic_put_ch_select),
SOC_ENUM_EXT("Mono To Stereo",
- tegra210_dmic_mono_conv_enum, tegra210_dmic_get_control,
- tegra210_dmic_put_control),
+ tegra210_dmic_mono_conv_enum,
+ tegra210_dmic_get_mono_to_stereo,
+ tegra210_dmic_put_mono_to_stereo),
SOC_ENUM_EXT("Stereo To Mono",
- tegra210_dmic_stereo_conv_enum, tegra210_dmic_get_control,
- tegra210_dmic_put_control),
+ tegra210_dmic_stereo_conv_enum,
+ tegra210_dmic_get_stereo_to_mono,
+ tegra210_dmic_put_stereo_to_mono),
SOC_ENUM_EXT("OSR Value", tegra210_dmic_osr_enum,
- tegra210_dmic_get_control, tegra210_dmic_put_control),
+ tegra210_dmic_get_osr_val, tegra210_dmic_put_osr_val),
SOC_ENUM_EXT("LR Polarity Select", tegra210_dmic_lrsel_enum,
- tegra210_dmic_get_control, tegra210_dmic_put_control),
+ tegra210_dmic_get_pol_sel, tegra210_dmic_put_pol_sel),
};
static const struct snd_soc_component_driver tegra210_dmic_compnt = {
diff --git a/sound/soc/tegra/tegra210_i2s.c b/sound/soc/tegra/tegra210_i2s.c
index 45f31ccb49d8..9552bbb939dd 100644
--- a/sound/soc/tegra/tegra210_i2s.c
+++ b/sound/soc/tegra/tegra210_i2s.c
@@ -302,85 +302,235 @@ static int tegra210_i2s_set_tdm_slot(struct snd_soc_dai *dai,
return 0;
}
-static int tegra210_i2s_set_dai_bclk_ratio(struct snd_soc_dai *dai,
- unsigned int ratio)
+static int tegra210_i2s_get_loopback(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
- struct tegra210_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+ struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
- i2s->bclk_ratio = ratio;
+ ucontrol->value.integer.value[0] = i2s->loopback;
return 0;
}
-static int tegra210_i2s_get_control(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static int tegra210_i2s_put_loopback(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+ int value = ucontrol->value.integer.value[0];
+
+ if (value == i2s->loopback)
+ return 0;
+
+ i2s->loopback = value;
+
+ regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL, I2S_CTRL_LPBK_MASK,
+ i2s->loopback << I2S_CTRL_LPBK_SHIFT);
+
+ return 1;
+}
+
+static int tegra210_i2s_get_fsync_width(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
- long *uctl_val = &ucontrol->value.integer.value[0];
-
- if (strstr(kcontrol->id.name, "Loopback"))
- *uctl_val = i2s->loopback;
- else if (strstr(kcontrol->id.name, "FSYNC Width"))
- *uctl_val = i2s->fsync_width;
- else if (strstr(kcontrol->id.name, "Capture Stereo To Mono"))
- *uctl_val = i2s->stereo_to_mono[I2S_TX_PATH];
- else if (strstr(kcontrol->id.name, "Capture Mono To Stereo"))
- *uctl_val = i2s->mono_to_stereo[I2S_TX_PATH];
- else if (strstr(kcontrol->id.name, "Playback Stereo To Mono"))
- *uctl_val = i2s->stereo_to_mono[I2S_RX_PATH];
- else if (strstr(kcontrol->id.name, "Playback Mono To Stereo"))
- *uctl_val = i2s->mono_to_stereo[I2S_RX_PATH];
- else if (strstr(kcontrol->id.name, "Playback FIFO Threshold"))
- *uctl_val = i2s->rx_fifo_th;
- else if (strstr(kcontrol->id.name, "BCLK Ratio"))
- *uctl_val = i2s->bclk_ratio;
+
+ ucontrol->value.integer.value[0] = i2s->fsync_width;
return 0;
}
-static int tegra210_i2s_put_control(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static int tegra210_i2s_put_fsync_width(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
int value = ucontrol->value.integer.value[0];
- if (strstr(kcontrol->id.name, "Loopback")) {
- i2s->loopback = value;
+ if (value == i2s->fsync_width)
+ return 0;
- regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL,
- I2S_CTRL_LPBK_MASK,
- i2s->loopback << I2S_CTRL_LPBK_SHIFT);
+ i2s->fsync_width = value;
- } else if (strstr(kcontrol->id.name, "FSYNC Width")) {
- /*
- * Frame sync width is used only for FSYNC modes and not
- * applicable for LRCK modes. Reset value for this field is "0",
- * which means the width is one bit clock wide.
- * The width requirement may depend on the codec and in such
- * cases mixer control is used to update custom values. A value
- * of "N" here means, width is "N + 1" bit clock wide.
- */
- i2s->fsync_width = value;
-
- regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL,
- I2S_CTRL_FSYNC_WIDTH_MASK,
- i2s->fsync_width << I2S_FSYNC_WIDTH_SHIFT);
-
- } else if (strstr(kcontrol->id.name, "Capture Stereo To Mono")) {
- i2s->stereo_to_mono[I2S_TX_PATH] = value;
- } else if (strstr(kcontrol->id.name, "Capture Mono To Stereo")) {
- i2s->mono_to_stereo[I2S_TX_PATH] = value;
- } else if (strstr(kcontrol->id.name, "Playback Stereo To Mono")) {
- i2s->stereo_to_mono[I2S_RX_PATH] = value;
- } else if (strstr(kcontrol->id.name, "Playback Mono To Stereo")) {
- i2s->mono_to_stereo[I2S_RX_PATH] = value;
- } else if (strstr(kcontrol->id.name, "Playback FIFO Threshold")) {
- i2s->rx_fifo_th = value;
- } else if (strstr(kcontrol->id.name, "BCLK Ratio")) {
- i2s->bclk_ratio = value;
- }
+ /*
+ * Frame sync width is used only for FSYNC modes and not
+ * applicable for LRCK modes. Reset value for this field is "0",
+ * which means the width is one bit clock wide.
+ * The width requirement may depend on the codec and in such
+ * cases mixer control is used to update custom values. A value
+ * of "N" here means, width is "N + 1" bit clock wide.
+ */
+ regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL,
+ I2S_CTRL_FSYNC_WIDTH_MASK,
+ i2s->fsync_width << I2S_FSYNC_WIDTH_SHIFT);
+
+ return 1;
+}
+
+static int tegra210_i2s_cget_stereo_to_mono(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+ ucontrol->value.enumerated.item[0] = i2s->stereo_to_mono[I2S_TX_PATH];
+
+ return 0;
+}
+
+static int tegra210_i2s_cput_stereo_to_mono(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == i2s->stereo_to_mono[I2S_TX_PATH])
+ return 0;
+
+ i2s->stereo_to_mono[I2S_TX_PATH] = value;
+
+ return 1;
+}
+
+static int tegra210_i2s_cget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+ ucontrol->value.enumerated.item[0] = i2s->mono_to_stereo[I2S_TX_PATH];
+
+ return 0;
+}
+
+static int tegra210_i2s_cput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == i2s->mono_to_stereo[I2S_TX_PATH])
+ return 0;
+
+ i2s->mono_to_stereo[I2S_TX_PATH] = value;
+
+ return 1;
+}
+
+static int tegra210_i2s_pget_stereo_to_mono(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+ ucontrol->value.enumerated.item[0] = i2s->stereo_to_mono[I2S_RX_PATH];
+
+ return 0;
+}
+
+static int tegra210_i2s_pput_stereo_to_mono(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == i2s->stereo_to_mono[I2S_RX_PATH])
+ return 0;
+
+ i2s->stereo_to_mono[I2S_RX_PATH] = value;
+
+ return 1;
+}
+
+static int tegra210_i2s_pget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+ ucontrol->value.enumerated.item[0] = i2s->mono_to_stereo[I2S_RX_PATH];
+
+ return 0;
+}
+
+static int tegra210_i2s_pput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == i2s->mono_to_stereo[I2S_RX_PATH])
+ return 0;
+
+ i2s->mono_to_stereo[I2S_RX_PATH] = value;
+
+ return 1;
+}
+
+static int tegra210_i2s_pget_fifo_th(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+ ucontrol->value.integer.value[0] = i2s->rx_fifo_th;
+
+ return 0;
+}
+
+static int tegra210_i2s_pput_fifo_th(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+ int value = ucontrol->value.integer.value[0];
+
+ if (value == i2s->rx_fifo_th)
+ return 0;
+
+ i2s->rx_fifo_th = value;
+
+ return 1;
+}
+
+static int tegra210_i2s_get_bclk_ratio(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+ ucontrol->value.integer.value[0] = i2s->bclk_ratio;
+
+ return 0;
+}
+
+static int tegra210_i2s_put_bclk_ratio(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+ int value = ucontrol->value.integer.value[0];
+
+ if (value == i2s->bclk_ratio)
+ return 0;
+
+ i2s->bclk_ratio = value;
+
+ return 1;
+}
+
+static int tegra210_i2s_set_dai_bclk_ratio(struct snd_soc_dai *dai,
+ unsigned int ratio)
+{
+ struct tegra210_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+
+ i2s->bclk_ratio = ratio;
return 0;
}
@@ -598,22 +748,28 @@ static const struct soc_enum tegra210_i2s_stereo_conv_enum =
tegra210_i2s_stereo_conv_text);
static const struct snd_kcontrol_new tegra210_i2s_controls[] = {
- SOC_SINGLE_EXT("Loopback", 0, 0, 1, 0, tegra210_i2s_get_control,
- tegra210_i2s_put_control),
- SOC_SINGLE_EXT("FSYNC Width", 0, 0, 255, 0, tegra210_i2s_get_control,
- tegra210_i2s_put_control),
+ SOC_SINGLE_EXT("Loopback", 0, 0, 1, 0, tegra210_i2s_get_loopback,
+ tegra210_i2s_put_loopback),
+ SOC_SINGLE_EXT("FSYNC Width", 0, 0, 255, 0,
+ tegra210_i2s_get_fsync_width,
+ tegra210_i2s_put_fsync_width),
SOC_ENUM_EXT("Capture Stereo To Mono", tegra210_i2s_stereo_conv_enum,
- tegra210_i2s_get_control, tegra210_i2s_put_control),
+ tegra210_i2s_cget_stereo_to_mono,
+ tegra210_i2s_cput_stereo_to_mono),
SOC_ENUM_EXT("Capture Mono To Stereo", tegra210_i2s_mono_conv_enum,
- tegra210_i2s_get_control, tegra210_i2s_put_control),
+ tegra210_i2s_cget_mono_to_stereo,
+ tegra210_i2s_cput_mono_to_stereo),
SOC_ENUM_EXT("Playback Stereo To Mono", tegra210_i2s_stereo_conv_enum,
- tegra210_i2s_get_control, tegra210_i2s_put_control),
+ tegra210_i2s_pget_stereo_to_mono,
+ tegra210_i2s_pput_stereo_to_mono),
SOC_ENUM_EXT("Playback Mono To Stereo", tegra210_i2s_mono_conv_enum,
- tegra210_i2s_get_control, tegra210_i2s_put_control),
+ tegra210_i2s_pget_mono_to_stereo,
+ tegra210_i2s_pput_mono_to_stereo),
SOC_SINGLE_EXT("Playback FIFO Threshold", 0, 0, I2S_RX_FIFO_DEPTH - 1,
- 0, tegra210_i2s_get_control, tegra210_i2s_put_control),
- SOC_SINGLE_EXT("BCLK Ratio", 0, 0, INT_MAX, 0, tegra210_i2s_get_control,
- tegra210_i2s_put_control),
+ 0, tegra210_i2s_pget_fifo_th, tegra210_i2s_pput_fifo_th),
+ SOC_SINGLE_EXT("BCLK Ratio", 0, 0, INT_MAX, 0,
+ tegra210_i2s_get_bclk_ratio,
+ tegra210_i2s_put_bclk_ratio),
};
static const struct snd_soc_dapm_widget tegra210_i2s_widgets[] = {
diff --git a/sound/soc/tegra/tegra210_mixer.c b/sound/soc/tegra/tegra210_mixer.c
index 55e61776c565..16e679a95658 100644
--- a/sound/soc/tegra/tegra210_mixer.c
+++ b/sound/soc/tegra/tegra210_mixer.c
@@ -192,24 +192,24 @@ static int tegra210_mixer_get_gain(struct snd_kcontrol *kcontrol,
return 0;
}
-static int tegra210_mixer_put_gain(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static int tegra210_mixer_apply_gain(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol,
+ bool instant_gain)
{
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_mixer *mixer = snd_soc_component_get_drvdata(cmpnt);
unsigned int reg = mc->reg, id;
- bool instant_gain = false;
int err;
- if (strstr(kcontrol->id.name, "Instant Gain Volume"))
- instant_gain = true;
-
/* Save gain value for specific MIXER input */
id = (reg - TEGRA210_MIXER_GAIN_CFG_RAM_ADDR_0) /
TEGRA210_MIXER_GAIN_CFG_RAM_ADDR_STRIDE;
+ if (mixer->gain_value[id] == ucontrol->value.integer.value[0])
+ return 0;
+
mixer->gain_value[id] = ucontrol->value.integer.value[0];
err = tegra210_mixer_configure_gain(cmpnt, id, instant_gain);
@@ -221,6 +221,18 @@ static int tegra210_mixer_put_gain(struct snd_kcontrol *kcontrol,
return 1;
}
+static int tegra210_mixer_put_gain(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ return tegra210_mixer_apply_gain(kcontrol, ucontrol, false);
+}
+
+static int tegra210_mixer_put_instant_gain(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ return tegra210_mixer_apply_gain(kcontrol, ucontrol, true);
+}
+
static int tegra210_mixer_set_audio_cif(struct tegra210_mixer *mixer,
struct snd_pcm_hw_params *params,
unsigned int reg,
@@ -388,7 +400,7 @@ ADDER_CTRL_DECL(adder5, TEGRA210_MIXER_TX5_ADDER_CONFIG);
SOC_SINGLE_EXT("RX" #id " Instant Gain Volume", \
MIXER_GAIN_CFG_RAM_ADDR((id) - 1), 0, \
0x20000, 0, tegra210_mixer_get_gain, \
- tegra210_mixer_put_gain),
+ tegra210_mixer_put_instant_gain),
/* Volume controls for all MIXER inputs */
static const struct snd_kcontrol_new tegra210_mixer_gain_ctls[] = {
@@ -654,8 +666,8 @@ static int tegra210_mixer_platform_remove(struct platform_device *pdev)
static const struct dev_pm_ops tegra210_mixer_pm_ops = {
SET_RUNTIME_PM_OPS(tegra210_mixer_runtime_suspend,
tegra210_mixer_runtime_resume, NULL)
- SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static struct platform_driver tegra210_mixer_driver = {
diff --git a/sound/soc/tegra/tegra210_mvc.c b/sound/soc/tegra/tegra210_mvc.c
index 7b9c7006e419..acf59328dcb6 100644
--- a/sound/soc/tegra/tegra210_mvc.c
+++ b/sound/soc/tegra/tegra210_mvc.c
@@ -136,7 +136,7 @@ static int tegra210_mvc_put_mute(struct snd_kcontrol *kcontrol,
struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_mvc *mvc = snd_soc_component_get_drvdata(cmpnt);
unsigned int value;
- u8 mute_mask;
+ u8 new_mask, old_mask;
int err;
pm_runtime_get_sync(cmpnt->dev);
@@ -148,15 +148,23 @@ static int tegra210_mvc_put_mute(struct snd_kcontrol *kcontrol,
if (err < 0)
goto end;
- mute_mask = ucontrol->value.integer.value[0];
+ regmap_read(mvc->regmap, TEGRA210_MVC_CTRL, &value);
+
+ old_mask = (value >> TEGRA210_MVC_MUTE_SHIFT) & TEGRA210_MUTE_MASK_EN;
+ new_mask = ucontrol->value.integer.value[0];
+
+ if (new_mask == old_mask) {
+ err = 0;
+ goto end;
+ }
err = regmap_update_bits(mvc->regmap, mc->reg,
TEGRA210_MVC_MUTE_MASK,
- mute_mask << TEGRA210_MVC_MUTE_SHIFT);
+ new_mask << TEGRA210_MVC_MUTE_SHIFT);
if (err < 0)
goto end;
- return 1;
+ err = 1;
end:
pm_runtime_put(cmpnt->dev);
@@ -195,7 +203,7 @@ static int tegra210_mvc_put_vol(struct snd_kcontrol *kcontrol,
unsigned int reg = mc->reg;
unsigned int value;
u8 chan;
- int err;
+ int err, old_volume;
pm_runtime_get_sync(cmpnt->dev);
@@ -207,10 +215,16 @@ static int tegra210_mvc_put_vol(struct snd_kcontrol *kcontrol,
goto end;
chan = (reg - TEGRA210_MVC_TARGET_VOL) / REG_SIZE;
+ old_volume = mvc->volume[chan];
tegra210_mvc_conv_vol(mvc, chan,
ucontrol->value.integer.value[0]);
+ if (mvc->volume[chan] == old_volume) {
+ err = 0;
+ goto end;
+ }
+
/* Configure init volume same as target volume */
regmap_write(mvc->regmap,
TEGRA210_MVC_REG_OFFSET(TEGRA210_MVC_INIT_VOL, chan),
@@ -222,7 +236,7 @@ static int tegra210_mvc_put_vol(struct snd_kcontrol *kcontrol,
TEGRA210_MVC_VOLUME_SWITCH_MASK,
TEGRA210_MVC_VOLUME_SWITCH_TRIGGER);
- return 1;
+ err = 1;
end:
pm_runtime_put(cmpnt->dev);
@@ -275,7 +289,7 @@ static int tegra210_mvc_get_curve_type(struct snd_kcontrol *kcontrol,
struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_mvc *mvc = snd_soc_component_get_drvdata(cmpnt);
- ucontrol->value.integer.value[0] = mvc->curve_type;
+ ucontrol->value.enumerated.item[0] = mvc->curve_type;
return 0;
}
@@ -285,7 +299,7 @@ static int tegra210_mvc_put_curve_type(struct snd_kcontrol *kcontrol,
{
struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_mvc *mvc = snd_soc_component_get_drvdata(cmpnt);
- int value;
+ unsigned int value;
regmap_read(mvc->regmap, TEGRA210_MVC_ENABLE, &value);
if (value & TEGRA210_MVC_EN) {
@@ -294,10 +308,10 @@ static int tegra210_mvc_put_curve_type(struct snd_kcontrol *kcontrol,
return -EINVAL;
}
- if (mvc->curve_type == ucontrol->value.integer.value[0])
+ if (mvc->curve_type == ucontrol->value.enumerated.item[0])
return 0;
- mvc->curve_type = ucontrol->value.integer.value[0];
+ mvc->curve_type = ucontrol->value.enumerated.item[0];
tegra210_mvc_reset_vol_settings(mvc, cmpnt->dev);
@@ -625,8 +639,8 @@ static int tegra210_mvc_platform_remove(struct platform_device *pdev)
static const struct dev_pm_ops tegra210_mvc_pm_ops = {
SET_RUNTIME_PM_OPS(tegra210_mvc_runtime_suspend,
tegra210_mvc_runtime_resume, NULL)
- SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static struct platform_driver tegra210_mvc_driver = {
diff --git a/sound/soc/tegra/tegra210_sfc.c b/sound/soc/tegra/tegra210_sfc.c
index dc477ee1b82c..368f077e7bee 100644
--- a/sound/soc/tegra/tegra210_sfc.c
+++ b/sound/soc/tegra/tegra210_sfc.c
@@ -3244,46 +3244,107 @@ static int tegra210_sfc_init(struct snd_soc_dapm_widget *w,
return tegra210_sfc_write_coeff_ram(cmpnt);
}
-static int tegra210_sfc_get_control(struct snd_kcontrol *kcontrol,
+static int tegra210_sfc_iget_stereo_to_mono(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
- if (strstr(kcontrol->id.name, "Input Stereo To Mono"))
- ucontrol->value.integer.value[0] =
- sfc->stereo_to_mono[SFC_RX_PATH];
- else if (strstr(kcontrol->id.name, "Input Mono To Stereo"))
- ucontrol->value.integer.value[0] =
- sfc->mono_to_stereo[SFC_RX_PATH];
- else if (strstr(kcontrol->id.name, "Output Stereo To Mono"))
- ucontrol->value.integer.value[0] =
- sfc->stereo_to_mono[SFC_TX_PATH];
- else if (strstr(kcontrol->id.name, "Output Mono To Stereo"))
- ucontrol->value.integer.value[0] =
- sfc->mono_to_stereo[SFC_TX_PATH];
+ ucontrol->value.enumerated.item[0] = sfc->stereo_to_mono[SFC_RX_PATH];
return 0;
}
-static int tegra210_sfc_put_control(struct snd_kcontrol *kcontrol,
+static int tegra210_sfc_iput_stereo_to_mono(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
- int value = ucontrol->value.integer.value[0];
-
- if (strstr(kcontrol->id.name, "Input Stereo To Mono"))
- sfc->stereo_to_mono[SFC_RX_PATH] = value;
- else if (strstr(kcontrol->id.name, "Input Mono To Stereo"))
- sfc->mono_to_stereo[SFC_RX_PATH] = value;
- else if (strstr(kcontrol->id.name, "Output Stereo To Mono"))
- sfc->stereo_to_mono[SFC_TX_PATH] = value;
- else if (strstr(kcontrol->id.name, "Output Mono To Stereo"))
- sfc->mono_to_stereo[SFC_TX_PATH] = value;
- else
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == sfc->stereo_to_mono[SFC_RX_PATH])
+ return 0;
+
+ sfc->stereo_to_mono[SFC_RX_PATH] = value;
+
+ return 1;
+}
+
+static int tegra210_sfc_iget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+
+ ucontrol->value.enumerated.item[0] = sfc->mono_to_stereo[SFC_RX_PATH];
+
+ return 0;
+}
+
+static int tegra210_sfc_iput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == sfc->mono_to_stereo[SFC_RX_PATH])
return 0;
+ sfc->mono_to_stereo[SFC_RX_PATH] = value;
+
+ return 1;
+}
+
+static int tegra210_sfc_oget_stereo_to_mono(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+
+ ucontrol->value.enumerated.item[0] = sfc->stereo_to_mono[SFC_TX_PATH];
+
+ return 0;
+}
+
+static int tegra210_sfc_oput_stereo_to_mono(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == sfc->stereo_to_mono[SFC_TX_PATH])
+ return 0;
+
+ sfc->stereo_to_mono[SFC_TX_PATH] = value;
+
+ return 1;
+}
+
+static int tegra210_sfc_oget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+
+ ucontrol->value.enumerated.item[0] = sfc->mono_to_stereo[SFC_TX_PATH];
+
+ return 0;
+}
+
+static int tegra210_sfc_oput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == sfc->mono_to_stereo[SFC_TX_PATH])
+ return 0;
+
+ sfc->mono_to_stereo[SFC_TX_PATH] = value;
+
return 1;
}
@@ -3384,13 +3445,17 @@ static const struct soc_enum tegra210_sfc_mono_conv_enum =
static const struct snd_kcontrol_new tegra210_sfc_controls[] = {
SOC_ENUM_EXT("Input Stereo To Mono", tegra210_sfc_stereo_conv_enum,
- tegra210_sfc_get_control, tegra210_sfc_put_control),
+ tegra210_sfc_iget_stereo_to_mono,
+ tegra210_sfc_iput_stereo_to_mono),
SOC_ENUM_EXT("Input Mono To Stereo", tegra210_sfc_mono_conv_enum,
- tegra210_sfc_get_control, tegra210_sfc_put_control),
+ tegra210_sfc_iget_mono_to_stereo,
+ tegra210_sfc_iput_mono_to_stereo),
SOC_ENUM_EXT("Output Stereo To Mono", tegra210_sfc_stereo_conv_enum,
- tegra210_sfc_get_control, tegra210_sfc_put_control),
+ tegra210_sfc_oget_stereo_to_mono,
+ tegra210_sfc_oput_stereo_to_mono),
SOC_ENUM_EXT("Output Mono To Stereo", tegra210_sfc_mono_conv_enum,
- tegra210_sfc_get_control, tegra210_sfc_put_control),
+ tegra210_sfc_oget_mono_to_stereo,
+ tegra210_sfc_oput_mono_to_stereo),
};
static const struct snd_soc_component_driver tegra210_sfc_cmpnt = {
@@ -3529,8 +3594,8 @@ static int tegra210_sfc_platform_remove(struct platform_device *pdev)
static const struct dev_pm_ops tegra210_sfc_pm_ops = {
SET_RUNTIME_PM_OPS(tegra210_sfc_runtime_suspend,
tegra210_sfc_runtime_resume, NULL)
- SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static struct platform_driver tegra210_sfc_driver = {
diff --git a/sound/soc/tegra/tegra_asoc_machine.c b/sound/soc/tegra/tegra_asoc_machine.c
index b95438c3dbf7..a73404879aa1 100644
--- a/sound/soc/tegra/tegra_asoc_machine.c
+++ b/sound/soc/tegra/tegra_asoc_machine.c
@@ -116,16 +116,24 @@ static const struct snd_kcontrol_new tegra_machine_controls[] = {
SOC_DAPM_PIN_SWITCH("Headset Mic"),
SOC_DAPM_PIN_SWITCH("Internal Mic 1"),
SOC_DAPM_PIN_SWITCH("Internal Mic 2"),
+ SOC_DAPM_PIN_SWITCH("Headphones"),
+ SOC_DAPM_PIN_SWITCH("Mic Jack"),
};
int tegra_asoc_machine_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_card *card = rtd->card;
struct tegra_machine *machine = snd_soc_card_get_drvdata(card);
+ const char *jack_name;
int err;
if (machine->gpiod_hp_det && machine->asoc->add_hp_jack) {
- err = snd_soc_card_jack_new(card, "Headphones Jack",
+ if (machine->asoc->hp_jack_name)
+ jack_name = machine->asoc->hp_jack_name;
+ else
+ jack_name = "Headphones Jack";
+
+ err = snd_soc_card_jack_new(card, jack_name,
SND_JACK_HEADPHONE,
&tegra_machine_hp_jack,
tegra_machine_hp_jack_pins,
@@ -658,6 +666,7 @@ static struct snd_soc_card snd_soc_tegra_max98090 = {
static const struct tegra_asoc_data tegra_max98090_data = {
.mclk_rate = tegra_machine_mclk_rate_12mhz,
.card = &snd_soc_tegra_max98090,
+ .hp_jack_name = "Headphones",
.add_common_dapm_widgets = true,
.add_common_controls = true,
.add_common_snd_ops = true,
diff --git a/sound/soc/tegra/tegra_asoc_machine.h b/sound/soc/tegra/tegra_asoc_machine.h
index d6a8d1320551..6f795d7dff7c 100644
--- a/sound/soc/tegra/tegra_asoc_machine.h
+++ b/sound/soc/tegra/tegra_asoc_machine.h
@@ -14,6 +14,7 @@ struct snd_soc_pcm_runtime;
struct tegra_asoc_data {
unsigned int (*mclk_rate)(unsigned int srate);
const char *codec_dev_name;
+ const char *hp_jack_name;
struct snd_soc_card *card;
unsigned int mclk_id;
bool hp_jack_gpio_active_low;
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index d489c1de3bae..823b6b8de942 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -3016,11 +3016,11 @@ static const struct snd_djm_ctl snd_djm_ctls_750mk2[] = {
static const struct snd_djm_device snd_djm_devices[] = {
- SND_DJM_DEVICE(250mk2),
- SND_DJM_DEVICE(750),
- SND_DJM_DEVICE(750mk2),
- SND_DJM_DEVICE(850),
- SND_DJM_DEVICE(900nxs2)
+ [SND_DJM_250MK2_IDX] = SND_DJM_DEVICE(250mk2),
+ [SND_DJM_750_IDX] = SND_DJM_DEVICE(750),
+ [SND_DJM_850_IDX] = SND_DJM_DEVICE(850),
+ [SND_DJM_900NXS2_IDX] = SND_DJM_DEVICE(900nxs2),
+ [SND_DJM_750MK2_IDX] = SND_DJM_DEVICE(750mk2),
};
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 95ec8eec1bb0..cec6e91afea2 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -581,6 +581,12 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream)
return 0;
}
+/* free-wheeling mode? (e.g. dmix) */
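+/* (a stop_threshold above buffer_size means the stream keeps running across underruns) */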
+static int in_free_wheeling_mode(struct snd_pcm_runtime *runtime)
+{
+ return runtime->stop_threshold > runtime->buffer_size;
+}
+
/* check whether early start is needed for playback stream */
static int lowlatency_playback_available(struct snd_pcm_runtime *runtime,
struct snd_usb_substream *subs)
@@ -592,8 +598,7 @@ static int lowlatency_playback_available(struct snd_pcm_runtime *runtime,
/* disabled via module option? */
if (!chip->lowlatency)
return false;
- /* free-wheeling mode? (e.g. dmix) */
- if (runtime->stop_threshold > runtime->buffer_size)
+ if (in_free_wheeling_mode(runtime))
return false;
/* implicit feedback mode has own operation mode */
if (snd_usb_endpoint_implicit_feedback_sink(subs->data_endpoint))
@@ -635,7 +640,8 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
runtime->delay = 0;
subs->lowlatency_playback = lowlatency_playback_available(runtime, subs);
- if (!subs->lowlatency_playback)
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
+ !subs->lowlatency_playback)
ret = start_endpoints(subs);
unlock:
@@ -1552,6 +1558,8 @@ static int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substrea
subs);
if (subs->lowlatency_playback &&
cmd == SNDRV_PCM_TRIGGER_START) {
+ if (in_free_wheeling_mode(substream->runtime))
+ subs->lowlatency_playback = false;
err = start_endpoints(subs);
if (err < 0) {
snd_usb_endpoint_set_callback(subs->data_endpoint,
diff --git a/sound/xen/xen_snd_front.c b/sound/xen/xen_snd_front.c
index 2cb0a19be2b8..4041748c12e5 100644
--- a/sound/xen/xen_snd_front.c
+++ b/sound/xen/xen_snd_front.c
@@ -358,6 +358,7 @@ static struct xenbus_driver xen_driver = {
.probe = xen_drv_probe,
.remove = xen_drv_remove,
.otherend_changed = sndback_changed,
+ .not_essential = true,
};
static int __init xen_drv_init(void)
diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
index a59cb0ee609c..73409e27be01 100644
--- a/tools/bpf/resolve_btfids/main.c
+++ b/tools/bpf/resolve_btfids/main.c
@@ -83,6 +83,7 @@ struct btf_id {
int cnt;
};
int addr_cnt;
+ bool is_set;
Elf64_Addr addr[ADDR_CNT];
};
@@ -451,8 +452,10 @@ static int symbols_collect(struct object *obj)
* in symbol's size, together with 'cnt' field hence
* that - 1.
*/
- if (id)
+ if (id) {
id->cnt = sym.st_size / sizeof(int) - 1;
+ id->is_set = true;
+ }
} else {
pr_err("FAILED unsupported prefix %s\n", prefix);
return -1;
@@ -568,9 +571,8 @@ static int id_patch(struct object *obj, struct btf_id *id)
int *ptr = data->d_buf;
int i;
- if (!id->id) {
+ if (!id->id && !id->is_set)
pr_err("WARN: resolve_btfids: unresolved symbol %s\n", id->name);
- }
for (i = 0; i < id->addr_cnt; i++) {
unsigned long addr = id->addr[i];
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 45a9a59828c3..ae61f464043a 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -48,7 +48,6 @@ FEATURE_TESTS_BASIC := \
numa_num_possible_cpus \
libperl \
libpython \
- libpython-version \
libslang \
libslang-include-subdir \
libtraceevent \
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 0a3244ad9673..1480910c792e 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -32,7 +32,6 @@ FILES= \
test-numa_num_possible_cpus.bin \
test-libperl.bin \
test-libpython.bin \
- test-libpython-version.bin \
test-libslang.bin \
test-libslang-include-subdir.bin \
test-libtraceevent.bin \
@@ -227,9 +226,6 @@ $(OUTPUT)test-libperl.bin:
$(OUTPUT)test-libpython.bin:
$(BUILD) $(FLAGS_PYTHON_EMBED)
-$(OUTPUT)test-libpython-version.bin:
- $(BUILD)
-
$(OUTPUT)test-libbfd.bin:
$(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
index 0b243ce842be..5ffafb967b6e 100644
--- a/tools/build/feature/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -14,10 +14,6 @@
# include "test-libpython.c"
#undef main
-#define main main_test_libpython_version
-# include "test-libpython-version.c"
-#undef main
-
#define main main_test_libperl
# include "test-libperl.c"
#undef main
@@ -177,7 +173,6 @@
int main(int argc, char *argv[])
{
main_test_libpython();
- main_test_libpython_version();
main_test_libperl();
main_test_hello();
main_test_libelf();
diff --git a/tools/build/feature/test-libpython-version.c b/tools/build/feature/test-libpython-version.c
deleted file mode 100644
index 47714b942d4d..000000000000
--- a/tools/build/feature/test-libpython-version.c
+++ /dev/null
@@ -1,11 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <Python.h>
-
-#if PY_VERSION_HEX >= 0x03000000
- #error
-#endif
-
-int main(void)
-{
- return 0;
-}
diff --git a/tools/include/linux/debug_locks.h b/tools/include/linux/debug_locks.h
deleted file mode 100644
index 72d595ce764a..000000000000
--- a/tools/include/linux/debug_locks.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_DEBUG_LOCKS_H_
-#define _LIBLOCKDEP_DEBUG_LOCKS_H_
-
-#include <stddef.h>
-#include <linux/compiler.h>
-#include <asm/bug.h>
-
-#define DEBUG_LOCKS_WARN_ON(x) WARN_ON(x)
-
-extern bool debug_locks;
-extern bool debug_locks_silent;
-
-#endif
diff --git a/tools/include/linux/hardirq.h b/tools/include/linux/hardirq.h
deleted file mode 100644
index b25580b6a9be..000000000000
--- a/tools/include/linux/hardirq.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_LINUX_HARDIRQ_H_
-#define _LIBLOCKDEP_LINUX_HARDIRQ_H_
-
-#define SOFTIRQ_BITS 0UL
-#define HARDIRQ_BITS 0UL
-#define SOFTIRQ_SHIFT 0UL
-#define HARDIRQ_SHIFT 0UL
-#define hardirq_count() 0UL
-#define softirq_count() 0UL
-
-#endif
diff --git a/tools/include/linux/irqflags.h b/tools/include/linux/irqflags.h
deleted file mode 100644
index 501262aee8ff..000000000000
--- a/tools/include/linux/irqflags.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_
-#define _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_
-
-# define lockdep_hardirq_context() 0
-# define lockdep_softirq_context(p) 0
-# define lockdep_hardirqs_enabled() 0
-# define lockdep_softirqs_enabled(p) 0
-# define lockdep_hardirq_enter() do { } while (0)
-# define lockdep_hardirq_exit() do { } while (0)
-# define lockdep_softirq_enter() do { } while (0)
-# define lockdep_softirq_exit() do { } while (0)
-# define INIT_TRACE_IRQFLAGS
-
-# define stop_critical_timings() do { } while (0)
-# define start_critical_timings() do { } while (0)
-
-#define raw_local_irq_disable() do { } while (0)
-#define raw_local_irq_enable() do { } while (0)
-#define raw_local_irq_save(flags) ((flags) = 0)
-#define raw_local_irq_restore(flags) ((void)(flags))
-#define raw_local_save_flags(flags) ((flags) = 0)
-#define raw_irqs_disabled_flags(flags) ((void)(flags))
-#define raw_irqs_disabled() 0
-#define raw_safe_halt()
-
-#define local_irq_enable() do { } while (0)
-#define local_irq_disable() do { } while (0)
-#define local_irq_save(flags) ((flags) = 0)
-#define local_irq_restore(flags) ((void)(flags))
-#define local_save_flags(flags) ((flags) = 0)
-#define irqs_disabled() (1)
-#define irqs_disabled_flags(flags) ((void)(flags), 0)
-#define safe_halt() do { } while (0)
-
-#define trace_lock_release(x, y)
-#define trace_lock_acquire(a, b, c, d, e, f, g)
-
-#endif
diff --git a/tools/include/linux/kernel.h b/tools/include/linux/kernel.h
index a7e54a08fb54..3e8df500cfbd 100644
--- a/tools/include/linux/kernel.h
+++ b/tools/include/linux/kernel.h
@@ -7,6 +7,7 @@
#include <assert.h>
#include <linux/build_bug.h>
#include <linux/compiler.h>
+#include <linux/math.h>
#include <endian.h>
#include <byteswap.h>
@@ -14,8 +15,6 @@
#define UINT_MAX (~0U)
#endif
-#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
-
#define PERF_ALIGN(x, a) __PERF_ALIGN_MASK(x, (typeof(x))(a)-1)
#define __PERF_ALIGN_MASK(x, mask) (((x)+(mask))&~(mask))
@@ -52,15 +51,6 @@
_min1 < _min2 ? _min1 : _min2; })
#endif
-#ifndef roundup
-#define roundup(x, y) ( \
-{ \
- const typeof(y) __y = y; \
- (((x) + (__y - 1)) / __y) * __y; \
-} \
-)
-#endif
-
#ifndef BUG_ON
#ifdef NDEBUG
#define BUG_ON(cond) do { if (cond) {} } while (0)
@@ -104,16 +94,6 @@ int scnprintf_pad(char * buf, size_t size, const char * fmt, ...);
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
-/*
- * This looks more complex than it should be. But we need to
- * get the type for the ~ right in round_down (it needs to be
- * as wide as the result!), and we want to evaluate the macro
- * arguments just once each.
- */
-#define __round_mask(x, y) ((__typeof__(x))((y)-1))
-#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
-#define round_down(x, y) ((x) & ~__round_mask(x, y))
-
#define current_gfp_context(k) 0
#define synchronize_rcu()
diff --git a/tools/include/linux/lockdep.h b/tools/include/linux/lockdep.h
deleted file mode 100644
index e56997288f2b..000000000000
--- a/tools/include/linux/lockdep.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_LOCKDEP_H_
-#define _LIBLOCKDEP_LOCKDEP_H_
-
-#include <sys/prctl.h>
-#include <sys/syscall.h>
-#include <string.h>
-#include <limits.h>
-#include <linux/utsname.h>
-#include <linux/compiler.h>
-#include <linux/export.h>
-#include <linux/kern_levels.h>
-#include <linux/err.h>
-#include <linux/rcu.h>
-#include <linux/list.h>
-#include <linux/hardirq.h>
-#include <unistd.h>
-
-#define MAX_LOCK_DEPTH 63UL
-
-#define asmlinkage
-#define __visible
-
-#include "../../../include/linux/lockdep.h"
-
-struct task_struct {
- u64 curr_chain_key;
- int lockdep_depth;
- unsigned int lockdep_recursion;
- struct held_lock held_locks[MAX_LOCK_DEPTH];
- gfp_t lockdep_reclaim_gfp;
- int pid;
- int state;
- char comm[17];
-};
-
-#define TASK_RUNNING 0
-
-extern struct task_struct *__curr(void);
-
-#define current (__curr())
-
-static inline int debug_locks_off(void)
-{
- return 1;
-}
-
-#define task_pid_nr(tsk) ((tsk)->pid)
-
-#define KSYM_NAME_LEN 128
-#define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__)
-#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__)
-#define pr_warn pr_err
-#define pr_cont pr_err
-
-#define list_del_rcu list_del
-
-#define atomic_t unsigned long
-#define atomic_inc(x) ((*(x))++)
-
-#define print_tainted() ""
-#define static_obj(x) 1
-
-#define debug_show_all_locks()
-extern void debug_check_no_locks_held(void);
-
-static __used bool __is_kernel_percpu_address(unsigned long addr, void *can_addr)
-{
- return false;
-}
-
-#endif
diff --git a/tools/include/linux/math.h b/tools/include/linux/math.h
new file mode 100644
index 000000000000..4e7af99ec9eb
--- /dev/null
+++ b/tools/include/linux/math.h
@@ -0,0 +1,25 @@
+#ifndef _TOOLS_MATH_H
+#define _TOOLS_MATH_H
+
+/*
+ * This looks more complex than it should be. But we need to
+ * get the type for the ~ right in round_down (it needs to be
+ * as wide as the result!), and we want to evaluate the macro
+ * arguments just once each.
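+ * For example, round_up(13, 8) evaluates to 16 and round_down(13, 8) to 8.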
+ */
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+#define round_down(x, y) ((x) & ~__round_mask(x, y))
+
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+
+#ifndef roundup
+#define roundup(x, y) ( \
+{ \
+ const typeof(y) __y = y; \
+ (((x) + (__y - 1)) / __y) * __y; \
+} \
+)
+#endif
+
+#endif
diff --git a/tools/include/linux/proc_fs.h b/tools/include/linux/proc_fs.h
deleted file mode 100644
index 8b3b03b64fda..000000000000
--- a/tools/include/linux/proc_fs.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef _TOOLS_INCLUDE_LINUX_PROC_FS_H
-#define _TOOLS_INCLUDE_LINUX_PROC_FS_H
-
-#endif /* _TOOLS_INCLUDE_LINUX_PROC_FS_H */
diff --git a/tools/include/linux/spinlock.h b/tools/include/linux/spinlock.h
index c934572d935c..622266b197d0 100644
--- a/tools/include/linux/spinlock.h
+++ b/tools/include/linux/spinlock.h
@@ -37,6 +37,4 @@ static inline bool arch_spin_is_locked(arch_spinlock_t *mutex)
return true;
}
-#include <linux/lockdep.h>
-
#endif
diff --git a/tools/include/linux/stacktrace.h b/tools/include/linux/stacktrace.h
deleted file mode 100644
index ae343ac35bfa..000000000000
--- a/tools/include/linux/stacktrace.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_LINUX_STACKTRACE_H_
-#define _LIBLOCKDEP_LINUX_STACKTRACE_H_
-
-#include <execinfo.h>
-
-struct stack_trace {
- unsigned int nr_entries, max_entries;
- unsigned long *entries;
- int skip;
-};
-
-static inline void print_stack_trace(struct stack_trace *trace, int spaces)
-{
- backtrace_symbols_fd((void **)trace->entries, trace->nr_entries, 1);
-}
-
-#define save_stack_trace(trace) \
- ((trace)->nr_entries = \
- backtrace((void **)(trace)->entries, (trace)->max_entries))
-
-static inline int dump_stack(void)
-{
- void *array[64];
- size_t size;
-
- size = backtrace(array, 64);
- backtrace_symbols_fd(array, size, 1);
-
- return 0;
-}
-
-#endif
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
index b3610fdd1fee..eebd3894fe89 100644
--- a/tools/include/uapi/linux/if_link.h
+++ b/tools/include/uapi/linux/if_link.h
@@ -7,24 +7,23 @@
/* This struct should be in sync with struct rtnl_link_stats64 */
struct rtnl_link_stats {
- __u32 rx_packets; /* total packets received */
- __u32 tx_packets; /* total packets transmitted */
- __u32 rx_bytes; /* total bytes received */
- __u32 tx_bytes; /* total bytes transmitted */
- __u32 rx_errors; /* bad packets received */
- __u32 tx_errors; /* packet transmit problems */
- __u32 rx_dropped; /* no space in linux buffers */
- __u32 tx_dropped; /* no space available in linux */
- __u32 multicast; /* multicast packets received */
+ __u32 rx_packets;
+ __u32 tx_packets;
+ __u32 rx_bytes;
+ __u32 tx_bytes;
+ __u32 rx_errors;
+ __u32 tx_errors;
+ __u32 rx_dropped;
+ __u32 tx_dropped;
+ __u32 multicast;
__u32 collisions;
-
/* detailed rx_errors: */
__u32 rx_length_errors;
- __u32 rx_over_errors; /* receiver ring buff overflow */
- __u32 rx_crc_errors; /* recved pkt with crc error */
- __u32 rx_frame_errors; /* recv'd frame alignment error */
- __u32 rx_fifo_errors; /* recv'r fifo overrun */
- __u32 rx_missed_errors; /* receiver missed packet */
+ __u32 rx_over_errors;
+ __u32 rx_crc_errors;
+ __u32 rx_frame_errors;
+ __u32 rx_fifo_errors;
+ __u32 rx_missed_errors;
/* detailed tx_errors */
__u32 tx_aborted_errors;
@@ -37,29 +36,201 @@ struct rtnl_link_stats {
__u32 rx_compressed;
__u32 tx_compressed;
- __u32 rx_nohandler; /* dropped, no handler found */
+ __u32 rx_nohandler;
};
-/* The main device statistics structure */
+/**
+ * struct rtnl_link_stats64 - The main device statistics structure.
+ *
+ * @rx_packets: Number of good packets received by the interface.
+ * For hardware interfaces counts all good packets received from the device
+ * by the host, including packets which host had to drop at various stages
+ * of processing (even in the driver).
+ *
+ * @tx_packets: Number of packets successfully transmitted.
+ * For hardware interfaces counts packets which host was able to successfully
+ * hand over to the device, which does not necessarily mean that packets
+ * had been successfully transmitted out of the device, only that device
+ * acknowledged it copied them out of host memory.
+ *
+ * @rx_bytes: Number of good received bytes, corresponding to @rx_packets.
+ *
+ * For IEEE 802.3 devices should count the length of Ethernet Frames
+ * excluding the FCS.
+ *
+ * @tx_bytes: Number of good transmitted bytes, corresponding to @tx_packets.
+ *
+ * For IEEE 802.3 devices should count the length of Ethernet Frames
+ * excluding the FCS.
+ *
+ * @rx_errors: Total number of bad packets received on this network device.
+ * This counter must include events counted by @rx_length_errors,
+ * @rx_crc_errors, @rx_frame_errors and other errors not otherwise
+ * counted.
+ *
+ * @tx_errors: Total number of transmit problems.
+ * This counter must include events counted by @tx_aborted_errors,
+ * @tx_carrier_errors, @tx_fifo_errors, @tx_heartbeat_errors,
+ * @tx_window_errors and other errors not otherwise counted.
+ *
+ * @rx_dropped: Number of packets received but not processed,
+ * e.g. due to lack of resources or unsupported protocol.
+ * For hardware interfaces this counter may include packets discarded
+ * due to L2 address filtering but should not include packets dropped
+ * by the device due to buffer exhaustion which are counted separately in
+ * @rx_missed_errors (since procfs folds those two counters together).
+ *
+ * @tx_dropped: Number of packets dropped on their way to transmission,
+ * e.g. due to lack of resources.
+ *
+ * @multicast: Multicast packets received.
+ * For hardware interfaces this statistic is commonly calculated
+ * at the device level (unlike @rx_packets) and therefore may include
+ * packets which did not reach the host.
+ *
+ * For IEEE 802.3 devices this counter may be equivalent to:
+ *
+ * - 30.3.1.1.21 aMulticastFramesReceivedOK
+ *
+ * @collisions: Number of collisions during packet transmissions.
+ *
+ * @rx_length_errors: Number of packets dropped due to invalid length.
+ * Part of aggregate "frame" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter should be equivalent to a sum
+ * of the following attributes:
+ *
+ * - 30.3.1.1.23 aInRangeLengthErrors
+ * - 30.3.1.1.24 aOutOfRangeLengthField
+ * - 30.3.1.1.25 aFrameTooLongErrors
+ *
+ * @rx_over_errors: Receiver FIFO overflow event counter.
+ *
+ * Historically the count of overflow events. Such events may be
+ * reported in the receive descriptors or via interrupts, and may
+ * not correspond one-to-one with dropped packets.
+ *
+ * The recommended interpretation for high speed interfaces is the
+ * number of packets dropped because they did not fit into buffers
+ * provided by the host, e.g. packets larger than the MTU, or packets
+ * for which the next buffer in the ring was not available for a
+ * scatter transfer.
+ *
+ * Part of aggregate "frame" errors in `/proc/net/dev`.
+ *
+ * This statistic was historically used interchangeably with
+ * @rx_fifo_errors.
+ *
+ * This statistic corresponds to hardware events and is not commonly used
+ * on software devices.
+ *
+ * @rx_crc_errors: Number of packets received with a CRC error.
+ * Part of aggregate "frame" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter must be equivalent to:
+ *
+ * - 30.3.1.1.6 aFrameCheckSequenceErrors
+ *
+ * @rx_frame_errors: Receiver frame alignment errors.
+ * Part of aggregate "frame" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter should be equivalent to:
+ *
+ * - 30.3.1.1.7 aAlignmentErrors
+ *
+ * @rx_fifo_errors: Receiver FIFO error counter.
+ *
+ * Historically the count of overflow events. Those events may be
+ * reported in the receive descriptors or via interrupts, and may
+ * not correspond one-to-one with dropped packets.
+ *
+ * This statistic was used interchangeably with @rx_over_errors.
+ * Not recommended for use in drivers for high speed interfaces.
+ *
+ * This statistic is used on software devices, e.g. to count software
+ * packet queue overflow (CAN) or sequencing errors (GRE).
+ *
+ * @rx_missed_errors: Count of packets missed by the host.
+ * Folded into the "drop" counter in `/proc/net/dev`.
+ *
+ * Counts number of packets dropped by the device due to lack
+ * of buffer space. This usually indicates that the host interface
+ * is slower than the network interface, or host is not keeping up
+ * with the receive packet rate.
+ *
+ * This statistic corresponds to hardware events and is not used
+ * on software devices.
+ *
+ * @tx_aborted_errors:
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ * For IEEE 802.3 devices capable of half-duplex operation this counter
+ * must be equivalent to:
+ *
+ * - 30.3.1.1.11 aFramesAbortedDueToXSColls
+ *
+ * High speed interfaces may use this counter as a general device
+ * discard counter.
+ *
+ * @tx_carrier_errors: Number of frame transmission errors due to loss
+ * of carrier during transmission.
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter must be equivalent to:
+ *
+ * - 30.3.1.1.13 aCarrierSenseErrors
+ *
+ * @tx_fifo_errors: Number of frame transmission errors due to device
+ * FIFO underrun / underflow. This condition occurs when the device
+ * begins transmission of a frame but is unable to deliver the
+ * entire frame to the transmitter in time for transmission.
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *
+ * @tx_heartbeat_errors: Number of Heartbeat / SQE Test errors for
+ * old half-duplex Ethernet.
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices possibly equivalent to:
+ *
+ * - 30.3.2.1.4 aSQETestErrors
+ *
+ * @tx_window_errors: Number of frame transmission errors due
+ * to late collisions (for Ethernet - after the first 64B of transmission).
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter must be equivalent to:
+ *
+ * - 30.3.1.1.10 aLateCollisions
+ *
+ * @rx_compressed: Number of correctly received compressed packets.
+ * This counter is only meaningful for interfaces which support
+ * packet compression (e.g. CSLIP, PPP).
+ *
+ * @tx_compressed: Number of transmitted compressed packets.
+ * This counter is only meaningful for interfaces which support
+ * packet compression (e.g. CSLIP, PPP).
+ *
+ * @rx_nohandler: Number of packets received on the interface
+ * but dropped by the networking stack because the device is
+ * not designated to receive packets (e.g. backup link in a bond).
+ */
struct rtnl_link_stats64 {
- __u64 rx_packets; /* total packets received */
- __u64 tx_packets; /* total packets transmitted */
- __u64 rx_bytes; /* total bytes received */
- __u64 tx_bytes; /* total bytes transmitted */
- __u64 rx_errors; /* bad packets received */
- __u64 tx_errors; /* packet transmit problems */
- __u64 rx_dropped; /* no space in linux buffers */
- __u64 tx_dropped; /* no space available in linux */
- __u64 multicast; /* multicast packets received */
+ __u64 rx_packets;
+ __u64 tx_packets;
+ __u64 rx_bytes;
+ __u64 tx_bytes;
+ __u64 rx_errors;
+ __u64 tx_errors;
+ __u64 rx_dropped;
+ __u64 tx_dropped;
+ __u64 multicast;
__u64 collisions;
/* detailed rx_errors: */
__u64 rx_length_errors;
- __u64 rx_over_errors; /* receiver ring buff overflow */
- __u64 rx_crc_errors; /* recved pkt with crc error */
- __u64 rx_frame_errors; /* recv'd frame alignment error */
- __u64 rx_fifo_errors; /* recv'r fifo overrun */
- __u64 rx_missed_errors; /* receiver missed packet */
+ __u64 rx_over_errors;
+ __u64 rx_crc_errors;
+ __u64 rx_frame_errors;
+ __u64 rx_fifo_errors;
+ __u64 rx_missed_errors;
/* detailed tx_errors */
__u64 tx_aborted_errors;
@@ -71,8 +242,7 @@ struct rtnl_link_stats64 {
/* for cslip etc */
__u64 rx_compressed;
__u64 tx_compressed;
-
- __u64 rx_nohandler; /* dropped, no handler found */
+ __u64 rx_nohandler;
};
/* The struct should be in sync with struct ifmap */
@@ -170,12 +340,29 @@ enum {
IFLA_PROP_LIST,
IFLA_ALT_IFNAME, /* Alternative ifname */
IFLA_PERM_ADDRESS,
+ IFLA_PROTO_DOWN_REASON,
+
+ /* device (sysfs) name as parent, used instead
+ * of IFLA_LINK where there's no parent netdev
+ */
+ IFLA_PARENT_DEV_NAME,
+ IFLA_PARENT_DEV_BUS_NAME,
+
__IFLA_MAX
};
#define IFLA_MAX (__IFLA_MAX - 1)
+enum {
+ IFLA_PROTO_DOWN_REASON_UNSPEC,
+ IFLA_PROTO_DOWN_REASON_MASK, /* u32, mask for reason bits */
+ IFLA_PROTO_DOWN_REASON_VALUE, /* u32, reason bit value */
+
+ __IFLA_PROTO_DOWN_REASON_CNT,
+ IFLA_PROTO_DOWN_REASON_MAX = __IFLA_PROTO_DOWN_REASON_CNT - 1
+};
+
/* backwards compatibility for userspace */
#ifndef __KERNEL__
#define IFLA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifinfomsg))))
@@ -293,6 +480,7 @@ enum {
IFLA_BR_MCAST_MLD_VERSION,
IFLA_BR_VLAN_STATS_PER_PORT,
IFLA_BR_MULTI_BOOLOPT,
+ IFLA_BR_MCAST_QUERIER_STATE,
__IFLA_BR_MAX,
};
@@ -346,6 +534,8 @@ enum {
IFLA_BRPORT_BACKUP_PORT,
IFLA_BRPORT_MRP_RING_OPEN,
IFLA_BRPORT_MRP_IN_OPEN,
+ IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT,
+ IFLA_BRPORT_MCAST_EHT_HOSTS_CNT,
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -433,6 +623,7 @@ enum macvlan_macaddr_mode {
};
#define MACVLAN_FLAG_NOPROMISC 1
+#define MACVLAN_FLAG_NODST 2 /* skip dst macvlan if matching src macvlan */
/* VRF section */
enum {
@@ -597,6 +788,18 @@ enum ifla_geneve_df {
GENEVE_DF_MAX = __GENEVE_DF_END - 1,
};
+/* Bareudp section */
+enum {
+ IFLA_BAREUDP_UNSPEC,
+ IFLA_BAREUDP_PORT,
+ IFLA_BAREUDP_ETHERTYPE,
+ IFLA_BAREUDP_SRCPORT_MIN,
+ IFLA_BAREUDP_MULTIPROTO_MODE,
+ __IFLA_BAREUDP_MAX
+};
+
+#define IFLA_BAREUDP_MAX (__IFLA_BAREUDP_MAX - 1)
+
/* PPP section */
enum {
IFLA_PPP_UNSPEC,
@@ -899,7 +1102,14 @@ enum {
#define IFLA_IPOIB_MAX (__IFLA_IPOIB_MAX - 1)
-/* HSR section */
+/* HSR/PRP section, both use the same interface */
+
+/* Different redundancy protocols for hsr device */
+enum {
+ HSR_PROTOCOL_HSR,
+ HSR_PROTOCOL_PRP,
+ HSR_PROTOCOL_MAX,
+};
enum {
IFLA_HSR_UNSPEC,
@@ -909,6 +1119,9 @@ enum {
IFLA_HSR_SUPERVISION_ADDR, /* Supervision frame multicast addr */
IFLA_HSR_SEQ_NR,
IFLA_HSR_VERSION, /* HSR version */
+ IFLA_HSR_PROTOCOL, /* Indicate different protocol than
+ * HSR. For example PRP.
+ */
__IFLA_HSR_MAX,
};
@@ -1033,6 +1246,8 @@ enum {
#define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1)
#define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2)
#define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3)
+#define RMNET_FLAGS_INGRESS_MAP_CKSUMV5 (1U << 4)
+#define RMNET_FLAGS_EGRESS_MAP_CKSUMV5 (1U << 5)
enum {
IFLA_RMNET_UNSPEC,
@@ -1048,4 +1263,14 @@ struct ifla_rmnet_flags {
__u32 mask;
};
+/* MCTP section */
+
+enum {
+ IFLA_MCTP_UNSPEC,
+ IFLA_MCTP_NET,
+ __IFLA_MCTP_MAX,
+};
+
+#define IFLA_MCTP_MAX (__IFLA_MCTP_MAX - 1)
+
#endif /* _UAPI_LINUX_IF_LINK_H */
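
The kernel-doc added above spells out per-counter semantics, including which counters procfs folds together. As a hedged illustration (not part of the patch), the same counters can be read individually from sysfs, which avoids that folding; the interface name "eth0" and the helper below are placeholders:

	#include <stdio.h>

	/* Illustrative only: reads one file under
	 * /sys/class/net/<iface>/statistics/.
	 */
	static unsigned long long read_stat(const char *iface, const char *name)
	{
		char path[256];
		unsigned long long val = 0;
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/class/net/%s/statistics/%s", iface, name);
		f = fopen(path, "r");
		if (!f)
			return 0;
		if (fscanf(f, "%llu", &val) != 1)
			val = 0;
		fclose(f);
		return val;
	}

	int main(void)
	{
		/* rx_missed_errors stays separate here, unlike in /proc/net/dev. */
		printf("rx_missed_errors=%llu rx_fifo_errors=%llu\n",
		       read_stat("eth0", "rx_missed_errors"),
		       read_stat("eth0", "rx_fifo_errors"));
		return 0;
	}
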
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index 81a4c543ff7e..4b384c907027 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -375,6 +375,7 @@ static int read_symbols(struct elf *elf)
return -1;
}
memset(sym, 0, sizeof(*sym));
+ INIT_LIST_HEAD(&sym->pv_target);
sym->alias = sym;
sym->idx = i;
diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c
index c90c7084e45a..bdf699f6552b 100644
--- a/tools/objtool/objtool.c
+++ b/tools/objtool/objtool.c
@@ -153,6 +153,10 @@ void objtool_pv_add(struct objtool_file *f, int idx, struct symbol *func)
!strcmp(func->name, "_paravirt_ident_64"))
return;
+ /* already added this function */
+ if (!list_empty(&func->pv_target))
+ return;
+
list_add(&func->pv_target, &f->pv_ops[idx].targets);
f->pv_ops[idx].clean = false;
}
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index afd144725a0b..3df74cf5651a 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -271,8 +271,6 @@ endif
FEATURE_CHECK_CFLAGS-libpython := $(PYTHON_EMBED_CCOPTS)
FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS)
-FEATURE_CHECK_CFLAGS-libpython-version := $(PYTHON_EMBED_CCOPTS)
-FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS)
FEATURE_CHECK_LDFLAGS-libaio = -lrt
diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
index 7bef917cc84e..15109af9d075 100644
--- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
@@ -528,3 +528,4 @@
446 common landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
index df5261e5cfe1..ed9c5c2eafad 100644
--- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
@@ -451,3 +451,4 @@
446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv sys_futex_waitv
diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c
index fa0ff4ce2b74..488f6e6ba1a5 100644
--- a/tools/perf/bench/sched-messaging.c
+++ b/tools/perf/bench/sched-messaging.c
@@ -223,8 +223,6 @@ static unsigned int group(pthread_t *pth,
snd_ctx->out_fds[i] = fds[1];
if (!thread_mode)
close(fds[0]);
-
- free(ctx);
}
/* Now we have all the fds, fork the senders */
@@ -241,8 +239,6 @@ static unsigned int group(pthread_t *pth,
for (i = 0; i < num_fds; i++)
close(snd_ctx->out_fds[i]);
- free(snd_ctx);
-
/* Return number of children to reap */
return num_fds * 2;
}
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index bc5259db5fd9..409b721666cb 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -755,12 +755,16 @@ static int parse_vm_time_correlation(const struct option *opt, const char *str,
return inject->itrace_synth_opts.vm_tm_corr_args ? 0 : -ENOMEM;
}
+static int output_fd(struct perf_inject *inject)
+{
+ return inject->in_place_update ? -1 : perf_data__fd(&inject->output);
+}
+
static int __cmd_inject(struct perf_inject *inject)
{
int ret = -EINVAL;
struct perf_session *session = inject->session;
- struct perf_data *data_out = &inject->output;
- int fd = inject->in_place_update ? -1 : perf_data__fd(data_out);
+ int fd = output_fd(inject);
u64 output_data_offset;
signal(SIGINT, sig_handler);
@@ -820,7 +824,7 @@ static int __cmd_inject(struct perf_inject *inject)
inject->tool.ordered_events = true;
inject->tool.ordering_requires_timestamps = true;
/* Allow space in the header for new attributes */
- output_data_offset = 4096;
+ output_data_offset = roundup(8192 + session->header.data_offset, 4096);
if (inject->strip)
strip_init(inject);
}
@@ -1015,7 +1019,7 @@ int cmd_inject(int argc, const char **argv)
}
inject.session = __perf_session__new(&data, repipe,
- perf_data__fd(&inject.output),
+ output_fd(&inject),
&inject.tool);
if (IS_ERR(inject.session)) {
ret = PTR_ERR(inject.session);
@@ -1078,7 +1082,8 @@ out_delete:
zstd_fini(&(inject.session->zstd_data));
perf_session__delete(inject.session);
out_close_output:
- perf_data__close(&inject.output);
+ if (!inject.in_place_update)
+ perf_data__close(&inject.output);
free(inject.itrace_synth_opts.vm_tm_corr_args);
return ret;
}
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 9434367af166..c82b033e8942 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -2473,7 +2473,7 @@ static int process_switch_event(struct perf_tool *tool,
if (perf_event__process_switch(tool, event, sample, machine) < 0)
return -1;
- if (scripting_ops && scripting_ops->process_switch)
+ if (scripting_ops && scripting_ops->process_switch && !filter_cpu(sample))
scripting_ops->process_switch(event, sample, machine);
if (!script->show_switch_events)
diff --git a/tools/perf/scripts/python/intel-pt-events.py b/tools/perf/scripts/python/intel-pt-events.py
index 1d3a189a9a54..66452a8ec358 100644
--- a/tools/perf/scripts/python/intel-pt-events.py
+++ b/tools/perf/scripts/python/intel-pt-events.py
@@ -32,8 +32,7 @@ try:
except:
broken_pipe_exception = IOError
-glb_switch_str = None
-glb_switch_printed = True
+glb_switch_str = {}
glb_insn = False
glb_disassembler = None
glb_src = False
@@ -70,6 +69,7 @@ def trace_begin():
ap = argparse.ArgumentParser(usage = "", add_help = False)
ap.add_argument("--insn-trace", action='store_true')
ap.add_argument("--src-trace", action='store_true')
+ ap.add_argument("--all-switch-events", action='store_true')
global glb_args
global glb_insn
global glb_src
@@ -256,10 +256,6 @@ def print_srccode(comm, param_dict, sample, symbol, dso, with_insn):
print(start_str, src_str)
def do_process_event(param_dict):
- global glb_switch_printed
- if not glb_switch_printed:
- print(glb_switch_str)
- glb_switch_printed = True
event_attr = param_dict["attr"]
sample = param_dict["sample"]
raw_buf = param_dict["raw_buf"]
@@ -274,6 +270,11 @@ def do_process_event(param_dict):
dso = get_optional(param_dict, "dso")
symbol = get_optional(param_dict, "symbol")
+ cpu = sample["cpu"]
+ if cpu in glb_switch_str:
+ print(glb_switch_str[cpu])
+ del glb_switch_str[cpu]
+
if name[0:12] == "instructions":
if glb_src:
print_srccode(comm, param_dict, sample, symbol, dso, True)
@@ -336,8 +337,6 @@ def auxtrace_error(typ, code, cpu, pid, tid, ip, ts, msg, cpumode, *x):
sys.exit(1)
def context_switch(ts, cpu, pid, tid, np_pid, np_tid, machine_pid, out, out_preempt, *x):
- global glb_switch_printed
- global glb_switch_str
if out:
out_str = "Switch out "
else:
@@ -350,6 +349,10 @@ def context_switch(ts, cpu, pid, tid, np_pid, np_tid, machine_pid, out, out_pree
machine_str = ""
else:
machine_str = "machine PID %d" % machine_pid
- glb_switch_str = "%16s %5d/%-5d [%03u] %9u.%09u %5d/%-5d %s %s" % \
+ switch_str = "%16s %5d/%-5d [%03u] %9u.%09u %5d/%-5d %s %s" % \
(out_str, pid, tid, cpu, ts / 1000000000, ts %1000000000, np_pid, np_tid, machine_str, preempt_str)
- glb_switch_printed = False
+ if glb_args.all_switch_events:
+ print(switch_str);
+ else:
+ global glb_switch_str
+ glb_switch_str[cpu] = switch_str
diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c
index c895de481fe1..d54c5371c6a6 100644
--- a/tools/perf/tests/expr.c
+++ b/tools/perf/tests/expr.c
@@ -169,7 +169,9 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u
TEST_ASSERT_VAL("#num_dies", expr__parse(&num_dies, ctx, "#num_dies") == 0);
TEST_ASSERT_VAL("#num_cores >= #num_dies", num_cores >= num_dies);
TEST_ASSERT_VAL("#num_packages", expr__parse(&num_packages, ctx, "#num_packages") == 0);
- TEST_ASSERT_VAL("#num_dies >= #num_packages", num_dies >= num_packages);
+
+ if (num_dies) // Some platforms do not have CPU die support, for example s390
+ TEST_ASSERT_VAL("#num_dies >= #num_packages", num_dies >= num_packages);
/*
* Source count returns the number of events aggregating in a leader
diff --git a/tools/perf/tests/parse-metric.c b/tools/perf/tests/parse-metric.c
index 574b7e4efd3a..07b6f4ec024f 100644
--- a/tools/perf/tests/parse-metric.c
+++ b/tools/perf/tests/parse-metric.c
@@ -109,6 +109,7 @@ static void load_runtime_stat(struct runtime_stat *st, struct evlist *evlist,
struct evsel *evsel;
u64 count;
+ perf_stat__reset_shadow_stats();
evlist__for_each_entry(evlist, evsel) {
count = find_value(evsel->name, vals);
perf_stat__update_shadow_stats(evsel, count, 0, st);
diff --git a/tools/perf/ui/tui/setup.c b/tools/perf/ui/tui/setup.c
index e9bfe856a5de..b1be59b4e2a4 100644
--- a/tools/perf/ui/tui/setup.c
+++ b/tools/perf/ui/tui/setup.c
@@ -170,9 +170,11 @@ void ui__exit(bool wait_for_ok)
"Press any key...", 0);
SLtt_set_cursor_visibility(1);
- SLsmg_refresh();
- SLsmg_reset_smg();
+ if (!pthread_mutex_trylock(&ui__lock)) {
+ SLsmg_refresh();
+ SLsmg_reset_smg();
+ pthread_mutex_unlock(&ui__lock);
+ }
SLang_reset_tty();
-
perf_error__unregister(&perf_tui_eops);
}
diff --git a/tools/perf/util/bpf_skel/bperf.h b/tools/perf/util/bpf_skel/bperf.h
deleted file mode 100644
index 186a5551ddb9..000000000000
--- a/tools/perf/util/bpf_skel/bperf.h
+++ /dev/null
@@ -1,14 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-// Copyright (c) 2021 Facebook
-
-#ifndef __BPERF_STAT_H
-#define __BPERF_STAT_H
-
-typedef struct {
- __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
- __uint(key_size, sizeof(__u32));
- __uint(value_size, sizeof(struct bpf_perf_event_value));
- __uint(max_entries, 1);
-} reading_map;
-
-#endif /* __BPERF_STAT_H */
diff --git a/tools/perf/util/bpf_skel/bperf_follower.bpf.c b/tools/perf/util/bpf_skel/bperf_follower.bpf.c
index b8fa3cb2da23..f193998530d4 100644
--- a/tools/perf/util/bpf_skel/bperf_follower.bpf.c
+++ b/tools/perf/util/bpf_skel/bperf_follower.bpf.c
@@ -1,14 +1,23 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2021 Facebook
-#include <linux/bpf.h>
-#include <linux/perf_event.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#include "bperf.h"
#include "bperf_u.h"
-reading_map diff_readings SEC(".maps");
-reading_map accum_readings SEC(".maps");
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(struct bpf_perf_event_value));
+ __uint(max_entries, 1);
+} diff_readings SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(struct bpf_perf_event_value));
+ __uint(max_entries, 1);
+} accum_readings SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
diff --git a/tools/perf/util/bpf_skel/bperf_leader.bpf.c b/tools/perf/util/bpf_skel/bperf_leader.bpf.c
index 4f70d1459e86..e2a2d4cd7779 100644
--- a/tools/perf/util/bpf_skel/bperf_leader.bpf.c
+++ b/tools/perf/util/bpf_skel/bperf_leader.bpf.c
@@ -1,10 +1,8 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2021 Facebook
-#include <linux/bpf.h>
-#include <linux/perf_event.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#include "bperf.h"
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
@@ -13,8 +11,19 @@ struct {
__uint(map_flags, BPF_F_PRESERVE_ELEMS);
} events SEC(".maps");
-reading_map prev_readings SEC(".maps");
-reading_map diff_readings SEC(".maps");
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(struct bpf_perf_event_value));
+ __uint(max_entries, 1);
+} prev_readings SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(struct bpf_perf_event_value));
+ __uint(max_entries, 1);
+} diff_readings SEC(".maps");
SEC("raw_tp/sched_switch")
int BPF_PROG(on_switch)
diff --git a/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c b/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c
index ab12b4c4ece2..97037d3b3d9f 100644
--- a/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c
+++ b/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2020 Facebook
-#include <linux/bpf.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 95ffed66369c..c59331eea1d9 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -44,13 +44,16 @@ struct perf_event_attr;
/* perf sample has 16 bits size limit */
#define PERF_SAMPLE_MAX_SIZE (1 << 16)
+/* number of registers is bounded by the number of bits in regs_dump::mask (64) */
+#define PERF_SAMPLE_REGS_CACHE_SIZE (8 * sizeof(u64))
+
struct regs_dump {
u64 abi;
u64 mask;
u64 *regs;
/* Cached values/mask filled by first register access. */
- u64 cache_regs[PERF_REGS_MAX];
+ u64 cache_regs[PERF_SAMPLE_REGS_CACHE_SIZE];
u64 cache_mask;
};
diff --git a/tools/perf/util/expr.c b/tools/perf/util/expr.c
index 1d532b9fed29..666b59baeb70 100644
--- a/tools/perf/util/expr.c
+++ b/tools/perf/util/expr.c
@@ -12,6 +12,7 @@
#include "expr-bison.h"
#include "expr-flex.h"
#include "smt.h"
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <ctype.h>
@@ -65,7 +66,12 @@ static bool key_equal(const void *key1, const void *key2,
struct hashmap *ids__new(void)
{
- return hashmap__new(key_hash, key_equal, NULL);
+ struct hashmap *hash;
+
+ hash = hashmap__new(key_hash, key_equal, NULL);
+ if (IS_ERR(hash))
+ return NULL;
+ return hash;
}
void ids__free(struct hashmap *ids)
@@ -299,6 +305,10 @@ struct expr_parse_ctx *expr__ctx_new(void)
return NULL;
ctx->ids = hashmap__new(key_hash, key_equal, NULL);
+ if (IS_ERR(ctx->ids)) {
+ free(ctx);
+ return NULL;
+ }
ctx->runtime = 0;
return ctx;
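
The two hunks above exist because hashmap__new() signals failure with an error-encoded pointer rather than NULL, so a plain NULL check lets the error escape. A self-contained sketch of that convention follows; the alloc_table() helper is hypothetical and <linux/err.h> refers to the tools/include copy:

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <linux/err.h>	/* ERR_PTR, IS_ERR, PTR_ERR */

	/* Hypothetical allocator following the ERR_PTR convention. */
	static void *alloc_table(size_t n)
	{
		void *p = calloc(n, 1);

		return p ? p : ERR_PTR(-ENOMEM);	/* failure is not NULL */
	}

	int main(void)
	{
		void *tbl = alloc_table(64);

		if (IS_ERR(tbl)) {	/* the check the fix adds, not "!tbl" */
			fprintf(stderr, "alloc failed: %ld\n", PTR_ERR(tbl));
			return 1;
		}
		free(tbl);
		return 0;
	}
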
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 79cce216727e..e3c1a532d059 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -2321,6 +2321,7 @@ out:
#define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
{\
+ free(ff->ph->env.__feat_env); \
ff->ph->env.__feat_env = do_read_string(ff); \
return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
}
@@ -4124,6 +4125,7 @@ int perf_event__process_feature(struct perf_session *session,
struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event;
int type = fe->header.type;
u64 feat = fe->feat_id;
+ int ret = 0;
if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
pr_warning("invalid record type %d in pipe-mode\n", type);
@@ -4141,11 +4143,13 @@ int perf_event__process_feature(struct perf_session *session,
ff.size = event->header.size - sizeof(*fe);
ff.ph = &session->header;
- if (feat_ops[feat].process(&ff, NULL))
- return -1;
+ if (feat_ops[feat].process(&ff, NULL)) {
+ ret = -1;
+ goto out;
+ }
if (!feat_ops[feat].print || !tool->show_feat_hdr)
- return 0;
+ goto out;
if (!feat_ops[feat].full_only ||
tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
@@ -4154,8 +4158,9 @@ int perf_event__process_feature(struct perf_session *session,
fprintf(stdout, "# %s info available, use -I to display\n",
feat_ops[feat].name);
}
-
- return 0;
+out:
+ free_event_desc(ff.events);
+ return ret;
}
size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 5f83937bf8f3..0e013c2d9eb4 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -1205,61 +1205,69 @@ out_no_progress:
static bool intel_pt_fup_event(struct intel_pt_decoder *decoder)
{
+ enum intel_pt_sample_type type = decoder->state.type;
bool ret = false;
+ decoder->state.type &= ~INTEL_PT_BRANCH;
+
if (decoder->set_fup_tx_flags) {
decoder->set_fup_tx_flags = false;
decoder->tx_flags = decoder->fup_tx_flags;
- decoder->state.type = INTEL_PT_TRANSACTION;
+ decoder->state.type |= INTEL_PT_TRANSACTION;
if (decoder->fup_tx_flags & INTEL_PT_ABORT_TX)
decoder->state.type |= INTEL_PT_BRANCH;
- decoder->state.from_ip = decoder->ip;
- decoder->state.to_ip = 0;
decoder->state.flags = decoder->fup_tx_flags;
- return true;
+ ret = true;
}
if (decoder->set_fup_ptw) {
decoder->set_fup_ptw = false;
- decoder->state.type = INTEL_PT_PTW;
+ decoder->state.type |= INTEL_PT_PTW;
decoder->state.flags |= INTEL_PT_FUP_IP;
- decoder->state.from_ip = decoder->ip;
- decoder->state.to_ip = 0;
decoder->state.ptw_payload = decoder->fup_ptw_payload;
- return true;
+ ret = true;
}
if (decoder->set_fup_mwait) {
decoder->set_fup_mwait = false;
- decoder->state.type = INTEL_PT_MWAIT_OP;
- decoder->state.from_ip = decoder->ip;
- decoder->state.to_ip = 0;
+ decoder->state.type |= INTEL_PT_MWAIT_OP;
decoder->state.mwait_payload = decoder->fup_mwait_payload;
ret = true;
}
if (decoder->set_fup_pwre) {
decoder->set_fup_pwre = false;
decoder->state.type |= INTEL_PT_PWR_ENTRY;
- decoder->state.type &= ~INTEL_PT_BRANCH;
- decoder->state.from_ip = decoder->ip;
- decoder->state.to_ip = 0;
decoder->state.pwre_payload = decoder->fup_pwre_payload;
ret = true;
}
if (decoder->set_fup_exstop) {
decoder->set_fup_exstop = false;
decoder->state.type |= INTEL_PT_EX_STOP;
- decoder->state.type &= ~INTEL_PT_BRANCH;
decoder->state.flags |= INTEL_PT_FUP_IP;
- decoder->state.from_ip = decoder->ip;
- decoder->state.to_ip = 0;
ret = true;
}
if (decoder->set_fup_bep) {
decoder->set_fup_bep = false;
decoder->state.type |= INTEL_PT_BLK_ITEMS;
- decoder->state.type &= ~INTEL_PT_BRANCH;
+ ret = true;
+ }
+ if (decoder->overflow) {
+ decoder->overflow = false;
+ if (!ret && !decoder->pge) {
+ if (decoder->hop) {
+ decoder->state.type = 0;
+ decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
+ }
+ decoder->pge = true;
+ decoder->state.type |= INTEL_PT_BRANCH | INTEL_PT_TRACE_BEGIN;
+ decoder->state.from_ip = 0;
+ decoder->state.to_ip = decoder->ip;
+ return true;
+ }
+ }
+ if (ret) {
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
- ret = true;
+ } else {
+ decoder->state.type = type;
}
return ret;
}
@@ -1608,7 +1616,16 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
intel_pt_clear_tx_flags(decoder);
intel_pt_set_nr(decoder);
decoder->timestamp_insn_cnt = 0;
- decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
+ decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+ decoder->state.from_ip = decoder->ip;
+ decoder->ip = 0;
+ decoder->pge = false;
+ decoder->set_fup_tx_flags = false;
+ decoder->set_fup_ptw = false;
+ decoder->set_fup_mwait = false;
+ decoder->set_fup_pwre = false;
+ decoder->set_fup_exstop = false;
+ decoder->set_fup_bep = false;
decoder->overflow = true;
return -EOVERFLOW;
}
@@ -2666,6 +2683,8 @@ static int intel_pt_scan_for_psb(struct intel_pt_decoder *decoder);
/* Hop mode: Ignore TNT, do not walk code, but get ip from FUPs and TIPs */
static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, int *err)
{
+ *err = 0;
+
/* Leap from PSB to PSB, getting ip from FUP within PSB+ */
if (decoder->leap && !decoder->in_psb && decoder->packet.type != INTEL_PT_PSB) {
*err = intel_pt_scan_for_psb(decoder);
@@ -2678,6 +2697,7 @@ static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, in
return HOP_IGNORE;
case INTEL_PT_TIP_PGD:
+ decoder->pge = false;
if (!decoder->packet.count) {
intel_pt_set_nr(decoder);
return HOP_IGNORE;
@@ -2705,18 +2725,21 @@ static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, in
if (!decoder->packet.count)
return HOP_IGNORE;
intel_pt_set_ip(decoder);
- if (intel_pt_fup_event(decoder))
- return HOP_RETURN;
- if (!decoder->branch_enable)
+ if (decoder->set_fup_mwait || decoder->set_fup_pwre)
+ *no_tip = true;
+ if (!decoder->branch_enable || !decoder->pge)
*no_tip = true;
if (*no_tip) {
decoder->state.type = INTEL_PT_INSTRUCTION;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
+ intel_pt_fup_event(decoder);
return HOP_RETURN;
}
+ intel_pt_fup_event(decoder);
+ decoder->state.type |= INTEL_PT_INSTRUCTION | INTEL_PT_BRANCH;
*err = intel_pt_walk_fup_tip(decoder);
- if (!*err)
+ if (!*err && decoder->state.to_ip)
decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
return HOP_RETURN;
@@ -2897,7 +2920,7 @@ static bool intel_pt_psb_with_fup(struct intel_pt_decoder *decoder, int *err)
{
struct intel_pt_psb_info data = { .fup = false };
- if (!decoder->branch_enable || !decoder->pge)
+ if (!decoder->branch_enable)
return false;
intel_pt_pkt_lookahead(decoder, intel_pt_psb_lookahead_cb, &data);
@@ -2924,6 +2947,7 @@ static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
if (err)
return err;
next:
+ err = 0;
if (decoder->cyc_threshold) {
if (decoder->sample_cyc && last_packet_type != INTEL_PT_CYC)
decoder->sample_cyc = false;
@@ -2962,6 +2986,7 @@ next:
case INTEL_PT_TIP_PGE: {
decoder->pge = true;
+ decoder->overflow = false;
intel_pt_mtc_cyc_cnt_pge(decoder);
intel_pt_set_nr(decoder);
if (decoder->packet.count == 0) {
@@ -2999,7 +3024,7 @@ next:
break;
}
intel_pt_set_last_ip(decoder);
- if (!decoder->branch_enable) {
+ if (!decoder->branch_enable || !decoder->pge) {
decoder->ip = decoder->last_ip;
if (intel_pt_fup_event(decoder))
return 0;
@@ -3467,10 +3492,10 @@ static int intel_pt_sync_ip(struct intel_pt_decoder *decoder)
decoder->set_fup_pwre = false;
decoder->set_fup_exstop = false;
decoder->set_fup_bep = false;
+ decoder->overflow = false;
if (!decoder->branch_enable) {
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
- decoder->overflow = false;
decoder->state.type = 0; /* Do not have a sample */
return 0;
}
@@ -3485,7 +3510,6 @@ static int intel_pt_sync_ip(struct intel_pt_decoder *decoder)
decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
else
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
- decoder->overflow = false;
decoder->state.from_ip = 0;
decoder->state.to_ip = decoder->ip;
@@ -3607,7 +3631,7 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder)
}
decoder->have_last_ip = true;
- decoder->pkt_state = INTEL_PT_STATE_NO_IP;
+ decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
err = intel_pt_walk_psb(decoder);
if (err)
@@ -3704,7 +3728,8 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
if (err) {
decoder->state.err = intel_pt_ext_err(err);
- decoder->state.from_ip = decoder->ip;
+ if (err != -EOVERFLOW)
+ decoder->state.from_ip = decoder->ip;
intel_pt_update_sample_time(decoder);
decoder->sample_tot_cyc_cnt = decoder->tot_cyc_cnt;
intel_pt_set_nr(decoder);
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 556a893508da..e8613cbda331 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -2565,6 +2565,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
ptq->sync_switch = false;
intel_pt_next_tid(pt, ptq);
}
+ ptq->timestamp = state->est_timestamp;
if (pt->synth_opts.errors) {
err = intel_ptq_synth_error(ptq, state);
if (err)
@@ -3624,6 +3625,7 @@ static int intel_pt_parse_vm_tm_corr_arg(struct intel_pt *pt, char **args)
*args = p;
return 0;
}
+ p += 1;
while (1) {
vmcs = strtoull(p, &p, 0);
if (errno)
diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c
index 5ee47ae1509c..06a7461ba864 100644
--- a/tools/perf/util/perf_regs.c
+++ b/tools/perf/util/perf_regs.c
@@ -25,6 +25,9 @@ int perf_reg_value(u64 *valp, struct regs_dump *regs, int id)
int i, idx = 0;
u64 mask = regs->mask;
+ if ((u64)id >= PERF_SAMPLE_REGS_CACHE_SIZE)
+ return -EINVAL;
+
if (regs->cache_mask & (1ULL << id))
goto out;
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 6ae58406f4fc..8dfbba15aeb8 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -1659,6 +1659,21 @@ bool is_pmu_core(const char *name)
return !strcmp(name, "cpu") || is_arm_pmu_core(name);
}
+static bool pmu_alias_is_duplicate(struct sevent *alias_a,
+ struct sevent *alias_b)
+{
+ /* Different names -> never duplicates */
+ if (strcmp(alias_a->name, alias_b->name))
+ return false;
+
+ /* Don't remove duplicates for hybrid PMUs */
+ if (perf_pmu__is_hybrid(alias_a->pmu) &&
+ perf_pmu__is_hybrid(alias_b->pmu))
+ return false;
+
+ return true;
+}
+
void print_pmu_events(const char *event_glob, bool name_only, bool quiet_flag,
bool long_desc, bool details_flag, bool deprecated,
const char *pmu_name)
@@ -1744,12 +1759,8 @@ void print_pmu_events(const char *event_glob, bool name_only, bool quiet_flag,
qsort(aliases, len, sizeof(struct sevent), cmp_sevent);
for (j = 0; j < len; j++) {
/* Skip duplicates */
- if (j > 0 && !strcmp(aliases[j].name, aliases[j - 1].name)) {
- if (!aliases[j].pmu || !aliases[j - 1].pmu ||
- !strcmp(aliases[j].pmu, aliases[j - 1].pmu)) {
- continue;
- }
- }
+ if (j > 0 && pmu_alias_is_duplicate(&aliases[j], &aliases[j - 1]))
+ continue;
if (name_only) {
printf("%s ", aliases[j].name);
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 563a9ba8954f..7f782a31bda3 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -461,7 +461,7 @@ get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
struct tep_event *tp_format;
tp_format = trace_event__tp_format_id(evsel->core.attr.config);
- if (!tp_format)
+ if (IS_ERR_OR_NULL(tp_format))
return NULL;
evsel->tp_format = tp_format;
diff --git a/tools/perf/util/smt.c b/tools/perf/util/smt.c
index 20bacd5972ad..34f1b1b1176c 100644
--- a/tools/perf/util/smt.c
+++ b/tools/perf/util/smt.c
@@ -15,7 +15,7 @@ int smt_on(void)
if (cached)
return cached_result;
- if (sysfs__read_int("devices/system/cpu/smt/active", &cached_result) > 0)
+ if (sysfs__read_int("devices/system/cpu/smt/active", &cached_result) >= 0)
goto done;
ncpu = sysconf(_SC_NPROCESSORS_CONF);
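
For context, a small user-space sketch (not part of the patch) of what the corrected check consults: the sysfs SMT knob, whose successful read the tools fs helper reports as 0 (negative errno on failure), which is why ">= 0" rather than "> 0" must gate the cached path before falling back to scanning the topology. The stdio code below is only a stand-in for sysfs__read_int():

	#include <stdio.h>

	int main(void)
	{
		int active = -1;
		FILE *f = fopen("/sys/devices/system/cpu/smt/active", "r");

		/* A successful read may legitimately yield 0 (SMT off). */
		if (f && fscanf(f, "%d", &active) == 1)
			printf("smt/active = %d\n", active);
		if (f)
			fclose(f);
		return 0;
	}
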
diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config
index 331f6d30f472..cd7106876a5f 100644
--- a/tools/power/acpi/Makefile.config
+++ b/tools/power/acpi/Makefile.config
@@ -69,6 +69,7 @@ KERNEL_INCLUDE := $(OUTPUT)include
ACPICA_INCLUDE := $(srctree)/../../../drivers/acpi/acpica
CFLAGS += -D_LINUX -I$(KERNEL_INCLUDE) -I$(ACPICA_INCLUDE)
CFLAGS += $(WARNINGS)
+MKDIR = mkdir
ifeq ($(strip $(V)),false)
QUIET=@
diff --git a/tools/power/acpi/Makefile.rules b/tools/power/acpi/Makefile.rules
index 2a6c170b57cd..1d7616f5d0ae 100644
--- a/tools/power/acpi/Makefile.rules
+++ b/tools/power/acpi/Makefile.rules
@@ -21,6 +21,7 @@ $(KERNEL_INCLUDE):
$(objdir)%.o: %.c $(KERNEL_INCLUDE)
$(ECHO) " CC " $(subst $(OUTPUT),,$@)
+ $(QUIET) $(MKDIR) -p $(objdir) 2>/dev/null
$(QUIET) $(CC) -c $(CFLAGS) -o $@ $<
all: $(OUTPUT)$(TOOL)
diff --git a/tools/testing/radix-tree/linux/lockdep.h b/tools/testing/radix-tree/linux/lockdep.h
index 565fccdfe6e9..016cff473cfc 100644
--- a/tools/testing/radix-tree/linux/lockdep.h
+++ b/tools/testing/radix-tree/linux/lockdep.h
@@ -1,5 +1,8 @@
#ifndef _LINUX_LOCKDEP_H
#define _LINUX_LOCKDEP_H
+
+#include <linux/spinlock.h>
+
struct lock_class_key {
unsigned int a;
};
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index 5d52ea2768df..df3b292a8ffe 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -33,6 +33,22 @@ noinline int bpf_testmod_loop_test(int n)
return sum;
}
+__weak noinline struct file *bpf_testmod_return_ptr(int arg)
+{
+ static struct file f = {};
+
+ switch (arg) {
+ case 1: return (void *)EINVAL; /* user addr */
+ case 2: return (void *)0xcafe4a11; /* user addr */
+ case 3: return (void *)-EINVAL; /* canonical, but invalid */
+ case 4: return (void *)(1ull << 60); /* non-canonical and invalid */
+ case 5: return (void *)~(1ull << 30); /* trigger extable */
+ case 6: return &f; /* valid addr */
+ case 7: return (void *)((long)&f | 1); /* kernel tricks */
+ default: return NULL;
+ }
+}
+
noinline ssize_t
bpf_testmod_test_read(struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr,
@@ -43,6 +59,10 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
.off = off,
.len = len,
};
+ int i = 1;
+
+ while (bpf_testmod_return_ptr(i))
+ i++;
/* This is always true. Use the check to make sure the compiler
* doesn't remove bpf_testmod_loop_test.
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c b/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c
index 762f6a9da8b5..664ffc0364f4 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c
@@ -90,7 +90,7 @@ static void print_err_line(void)
static void test_conn(void)
{
- int listen_fd = -1, cli_fd = -1, err;
+ int listen_fd = -1, cli_fd = -1, srv_fd = -1, err;
socklen_t addrlen = sizeof(srv_sa6);
int srv_port;
@@ -112,6 +112,10 @@ static void test_conn(void)
if (CHECK_FAIL(cli_fd == -1))
goto done;
+ srv_fd = accept(listen_fd, NULL, NULL);
+ if (CHECK_FAIL(srv_fd == -1))
+ goto done;
+
if (CHECK(skel->bss->listen_tp_sport != srv_port ||
skel->bss->req_sk_sport != srv_port,
"Unexpected sk src port",
@@ -134,11 +138,13 @@ done:
close(listen_fd);
if (cli_fd != -1)
close(cli_fd);
+ if (srv_fd != -1)
+ close(srv_fd);
}
static void test_syncookie(void)
{
- int listen_fd = -1, cli_fd = -1, err;
+ int listen_fd = -1, cli_fd = -1, srv_fd = -1, err;
socklen_t addrlen = sizeof(srv_sa6);
int srv_port;
@@ -161,6 +167,10 @@ static void test_syncookie(void)
if (CHECK_FAIL(cli_fd == -1))
goto done;
+ srv_fd = accept(listen_fd, NULL, NULL);
+ if (CHECK_FAIL(srv_fd == -1))
+ goto done;
+
if (CHECK(skel->bss->listen_tp_sport != srv_port,
"Unexpected tp src port",
"listen_tp_sport:%u expected:%u\n",
@@ -188,6 +198,8 @@ done:
close(listen_fd);
if (cli_fd != -1)
close(cli_fd);
+ if (srv_fd != -1)
+ close(srv_fd);
}
struct test {
diff --git a/tools/testing/selftests/bpf/progs/test_module_attach.c b/tools/testing/selftests/bpf/progs/test_module_attach.c
index b36857093f71..50ce16d02da7 100644
--- a/tools/testing/selftests/bpf/progs/test_module_attach.c
+++ b/tools/testing/selftests/bpf/progs/test_module_attach.c
@@ -87,6 +87,18 @@ int BPF_PROG(handle_fexit,
return 0;
}
+SEC("fexit/bpf_testmod_return_ptr")
+int BPF_PROG(handle_fexit_ret, int arg, struct file *ret)
+{
+ long buf = 0;
+
+ bpf_probe_read_kernel(&buf, 8, ret);
+ bpf_probe_read_kernel(&buf, 8, (char *)ret + 256);
+ *(volatile long long *)ret;
+ *(volatile int *)&ret->f_mode;
+ return 0;
+}
+
__u32 fmod_ret_read_sz = 0;
SEC("fmod_ret/bpf_testmod_test_read")
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 465ef3f112c0..d3bf83d5c6cf 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -54,7 +54,7 @@
#define MAX_INSNS BPF_MAXINSNS
#define MAX_TEST_INSNS 1000000
#define MAX_FIXUPS 8
-#define MAX_NR_MAPS 21
+#define MAX_NR_MAPS 22
#define MAX_TEST_RUNS 8
#define POINTER_VALUE 0xcafe4all
#define TEST_DATA_LEN 64
diff --git a/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c b/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c
index c22dc83a41fd..b39665f33524 100644
--- a/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c
+++ b/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c
@@ -138,6 +138,8 @@
BPF_EXIT_INSN(),
},
.result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 leaks addr into mem",
},
{
"Dest pointer in r0 - succeed",
@@ -156,4 +158,88 @@
BPF_EXIT_INSN(),
},
.result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 leaks addr into mem",
+},
+{
+ "Dest pointer in r0 - succeed, check 2",
+ .insns = {
+ /* r0 = &val */
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+ /* val = r0; */
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ /* r5 = &val */
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+ /* r0 = atomic_cmpxchg(&val, r0, r5); */
+ BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
+ /* r1 = *r0 */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
+ /* exit(0); */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 leaks addr into mem",
+},
+{
+ "Dest pointer in r0 - succeed, check 3",
+ .insns = {
+ /* r0 = &val */
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+ /* val = r0; */
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ /* r5 = &val */
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+ /* r0 = atomic_cmpxchg(&val, r0, r5); */
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
+ /* exit(0); */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid size of register fill",
+ .errstr_unpriv = "R0 leaks addr into mem",
+},
+{
+ "Dest pointer in r0 - succeed, check 4",
+ .insns = {
+ /* r0 = &val */
+ BPF_MOV32_REG(BPF_REG_0, BPF_REG_10),
+ /* val = r0; */
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+ /* r5 = &val */
+ BPF_MOV32_REG(BPF_REG_5, BPF_REG_10),
+ /* r0 = atomic_cmpxchg(&val, r0, r5); */
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
+ /* r1 = *r10 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -8),
+ /* exit(0); */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R10 partial copy of pointer",
+},
+{
+ "Dest pointer in r0 - succeed, check 5",
+ .insns = {
+ /* r0 = &val */
+ BPF_MOV32_REG(BPF_REG_0, BPF_REG_10),
+ /* val = r0; */
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+ /* r5 = &val */
+ BPF_MOV32_REG(BPF_REG_5, BPF_REG_10),
+ /* r0 = atomic_cmpxchg(&val, r0, r5); */
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
+ /* r1 = *r0 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -8),
+ /* exit(0); */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 invalid mem access",
+ .errstr_unpriv = "R10 partial copy of pointer",
},
diff --git a/tools/testing/selftests/bpf/verifier/atomic_fetch.c b/tools/testing/selftests/bpf/verifier/atomic_fetch.c
index 3bc9ff7a860b..5bf03fb4fa2b 100644
--- a/tools/testing/selftests/bpf/verifier/atomic_fetch.c
+++ b/tools/testing/selftests/bpf/verifier/atomic_fetch.c
@@ -1,3 +1,97 @@
+{
+ "atomic dw/fetch and address leakage of (map ptr & -1) via stack slot",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_1, -1),
+ BPF_LD_MAP_FD(BPF_REG_8, 0),
+ BPF_LD_MAP_FD(BPF_REG_9, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
+ BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_2, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 2, 4 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "leaking pointer from stack off -8",
+},
+{
+ "atomic dw/fetch and address leakage of (map ptr & -1) via returned value",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_1, -1),
+ BPF_LD_MAP_FD(BPF_REG_8, 0),
+ BPF_LD_MAP_FD(BPF_REG_9, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
+ BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 2, 4 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "leaking pointer from stack off -8",
+},
+{
+ "atomic w/fetch and address leakage of (map ptr & -1) via stack slot",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_1, -1),
+ BPF_LD_MAP_FD(BPF_REG_8, 0),
+ BPF_LD_MAP_FD(BPF_REG_9, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
+ BPF_ATOMIC_OP(BPF_W, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_2, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 2, 4 },
+ .result = REJECT,
+ .errstr = "invalid size of register fill",
+},
+{
+ "atomic w/fetch and address leakage of (map ptr & -1) via returned value",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_1, -1),
+ BPF_LD_MAP_FD(BPF_REG_8, 0),
+ BPF_LD_MAP_FD(BPF_REG_9, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
+ BPF_ATOMIC_OP(BPF_W, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 2, 4 },
+ .result = REJECT,
+ .errstr = "invalid size of register fill",
+},
#define __ATOMIC_FETCH_OP_TEST(src_reg, dst_reg, operand1, op, operand2, expect) \
{ \
"atomic fetch " #op ", src=" #dst_reg " dst=" #dst_reg, \
diff --git a/tools/testing/selftests/bpf/verifier/search_pruning.c b/tools/testing/selftests/bpf/verifier/search_pruning.c
index 7e50cb80873a..682519769fe3 100644
--- a/tools/testing/selftests/bpf/verifier/search_pruning.c
+++ b/tools/testing/selftests/bpf/verifier/search_pruning.c
@@ -133,6 +133,77 @@
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
+ "precision tracking for u32 spill/fill",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
+ BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+ BPF_MOV32_IMM(BPF_REG_6, 32),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_MOV32_IMM(BPF_REG_6, 4),
+ /* Additional insns to introduce a pruning point. */
+ BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ /* u32 spill/fill */
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_10, -8),
+ /* out-of-bound map value access for r6=32 */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 15 },
+ .result = REJECT,
+ .errstr = "R0 min value is outside of the allowed memory range",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "precision tracking for u32 spills, u64 fill",
+ .insns = {
+ BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV32_IMM(BPF_REG_7, 0xffffffff),
+ /* Additional insns to introduce a pruning point. */
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_ALU32_IMM(BPF_DIV, BPF_REG_3, 0),
+ /* u32 spills, u64 fill */
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, -8),
+ /* if r8 != X goto pc+1 r8 known in fallthrough branch */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_8, 0xffffffff, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ /* if r8 == X goto pc+1 condition always true on first
+ * traversal, so starts backtracking to mark r8 as requiring
+ * precision. r7 marked as needing precision. r6 not marked
+ * since it's not tracked.
+ */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 0xffffffff, 1),
+ /* fails if r8 correctly marked unknown after fill. */
+ BPF_ALU32_IMM(BPF_DIV, BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "div by zero",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
"allocated_stack",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
diff --git a/tools/testing/selftests/bpf/verifier/spill_fill.c b/tools/testing/selftests/bpf/verifier/spill_fill.c
index 7ab3de108761..6c907144311f 100644
--- a/tools/testing/selftests/bpf/verifier/spill_fill.c
+++ b/tools/testing/selftests/bpf/verifier/spill_fill.c
@@ -176,6 +176,38 @@
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
+ "Spill u32 const scalars. Refill as u64. Offset to skb->data",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ /* r6 = 0 */
+ BPF_MOV32_IMM(BPF_REG_6, 0),
+ /* r7 = 20 */
+ BPF_MOV32_IMM(BPF_REG_7, 20),
+ /* *(u32 *)(r10 -4) = r6 */
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4),
+ /* *(u32 *)(r10 -8) = r7 */
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8),
+ /* r4 = *(u64 *)(r10 -8) */
+ BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8),
+ /* r0 = r2 */
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
+ /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv20 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
"Spill a u32 const scalar. Refill as u16 from fp-6. Offset to skb->data",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
index 2debba4e8a3a..4d347bc53aa2 100644
--- a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
+++ b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
@@ -1078,6 +1078,29 @@
.errstr_unpriv = "R0 pointer -= pointer prohibited",
},
{
+ "map access: trying to leak tained dst reg",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV32_IMM(BPF_REG_1, 0xFFFFFFFF),
+ BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 4 },
+ .result = REJECT,
+ .errstr = "math between map_value pointer and 4294967295 is not allowed",
+},
+{
"32bit pkt_ptr -= scalar",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
diff --git a/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c b/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c
index bfb97383e6b5..b4ec228eb95d 100644
--- a/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c
+++ b/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c
@@ -35,7 +35,7 @@
.prog_type = BPF_PROG_TYPE_XDP,
},
{
- "XDP pkt read, pkt_data' > pkt_end, good access",
+ "XDP pkt read, pkt_data' > pkt_end, corner case, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
@@ -88,6 +88,41 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
+ "XDP pkt read, pkt_data' > pkt_end, corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' > pkt_end, corner case -1, bad access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
"XDP pkt read, pkt_end > pkt_data', good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
@@ -106,16 +141,16 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_end > pkt_data', bad access 1",
+ "XDP pkt read, pkt_end > pkt_data', corner case -1, bad access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct xdp_md, data_end)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
@@ -143,6 +178,42 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
+ "XDP pkt read, pkt_end > pkt_data', corner case, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end > pkt_data', corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
"XDP pkt read, pkt_data' < pkt_end, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
@@ -161,16 +232,16 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_data' < pkt_end, bad access 1",
+ "XDP pkt read, pkt_data' < pkt_end, corner case -1, bad access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct xdp_md, data_end)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
@@ -198,7 +269,43 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_end < pkt_data', good access",
+ "XDP pkt read, pkt_data' < pkt_end, corner case, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' < pkt_end, corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end < pkt_data', corner case, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
@@ -251,6 +358,41 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
+ "XDP pkt read, pkt_end < pkt_data', corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end < pkt_data', corner case -1, bad access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
"XDP pkt read, pkt_data' >= pkt_end, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
@@ -268,15 +410,15 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
+ "XDP pkt read, pkt_data' >= pkt_end, corner case -1, bad access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct xdp_md, data_end)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
@@ -304,7 +446,41 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_end >= pkt_data', good access",
+ "XDP pkt read, pkt_data' >= pkt_end, corner case, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' >= pkt_end, corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end >= pkt_data', corner case, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
@@ -359,7 +535,44 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_data' <= pkt_end, good access",
+ "XDP pkt read, pkt_end >= pkt_data', corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end >= pkt_data', corner case -1, bad access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' <= pkt_end, corner case, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
@@ -414,6 +627,43 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
+ "XDP pkt read, pkt_data' <= pkt_end, corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' <= pkt_end, corner case -1, bad access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
"XDP pkt read, pkt_end <= pkt_data', good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
@@ -431,15 +681,15 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_end <= pkt_data', bad access 1",
+ "XDP pkt read, pkt_end <= pkt_data', corner case -1, bad access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct xdp_md, data_end)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
@@ -467,7 +717,41 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_meta' > pkt_data, good access",
+ "XDP pkt read, pkt_end <= pkt_data', corner case, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end <= pkt_data', corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' > pkt_data, corner case, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
@@ -520,6 +804,41 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
+ "XDP pkt read, pkt_meta' > pkt_data, corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' > pkt_data, corner case -1, bad access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
"XDP pkt read, pkt_data > pkt_meta', good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -538,16 +857,16 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_data > pkt_meta', bad access 1",
+ "XDP pkt read, pkt_data > pkt_meta', corner case -1, bad access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
@@ -575,6 +894,42 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
+ "XDP pkt read, pkt_data > pkt_meta', corner case, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data > pkt_meta', corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
"XDP pkt read, pkt_meta' < pkt_data, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -593,16 +948,16 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
+ "XDP pkt read, pkt_meta' < pkt_data, corner case -1, bad access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
@@ -630,7 +985,43 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_data < pkt_meta', good access",
+ "XDP pkt read, pkt_meta' < pkt_data, corner case, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' < pkt_data, corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data < pkt_meta', corner case, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
@@ -683,6 +1074,41 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
+ "XDP pkt read, pkt_data < pkt_meta', corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data < pkt_meta', corner case -1, bad access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
"XDP pkt read, pkt_meta' >= pkt_data, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -700,15 +1126,15 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
+ "XDP pkt read, pkt_meta' >= pkt_data, corner case -1, bad access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
@@ -736,7 +1162,41 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_data >= pkt_meta', good access",
+ "XDP pkt read, pkt_meta' >= pkt_data, corner case, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' >= pkt_data, corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data >= pkt_meta', corner case, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
@@ -791,7 +1251,44 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_meta' <= pkt_data, good access",
+ "XDP pkt read, pkt_data >= pkt_meta', corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data >= pkt_meta', corner case -1, bad access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' <= pkt_data, corner case, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
@@ -846,6 +1343,43 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
+ "XDP pkt read, pkt_meta' <= pkt_data, corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' <= pkt_data, corner case -1, bad access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
"XDP pkt read, pkt_data <= pkt_meta', good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -863,15 +1397,15 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
+ "XDP pkt read, pkt_data <= pkt_meta', corner case -1, bad access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
@@ -898,3 +1432,37 @@
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
+{
+ "XDP pkt read, pkt_data <= pkt_meta', corner case, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data <= pkt_meta', corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
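The corner-case tests added above all follow one pattern: "corner case" probes the largest offset the comparison proves safe, "+1" advances the pointer one byte further and reads back through a negative offset, and "-1" shortens the checked range so the 8-byte load overruns and must be rejected. Below is a minimal sketch of the "corner case +1" shape written as restricted C for a hypothetical XDP program; the program name, section name, and drop/pass policy are illustrative and not part of the patch.

/* Sketch of the "pkt_data' > pkt_end, corner case +1" pattern in restricted C. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int corner_case_plus_one(struct xdp_md *ctx)
{
	unsigned char *data = (void *)(long)ctx->data;
	unsigned char *data_end = (void *)(long)ctx->data_end;
	unsigned char *p = data + 9;
	unsigned long val;

	if (p > data_end)		/* branch taken: packet too short */
		return XDP_PASS;
	/* Fall-through path: data + 9 <= data_end, so the 8-byte load at
	 * p - 9 covers [data, data + 8), strictly inside the packet, and
	 * the verifier accepts it. */
	val = *(unsigned long *)(p - 9);
	return val ? XDP_PASS : XDP_DROP;
}

char _license[] SEC("license") = "GPL";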
diff --git a/tools/testing/selftests/damon/.gitignore b/tools/testing/selftests/damon/.gitignore
new file mode 100644
index 000000000000..c6c2965a6607
--- /dev/null
+++ b/tools/testing/selftests/damon/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+huge_count_read_write
diff --git a/tools/testing/selftests/damon/Makefile b/tools/testing/selftests/damon/Makefile
index 8a3f2cd9fec0..937d36ae9a69 100644
--- a/tools/testing/selftests/damon/Makefile
+++ b/tools/testing/selftests/damon/Makefile
@@ -1,7 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for damon selftests
-TEST_FILES = _chk_dependency.sh
-TEST_PROGS = debugfs_attrs.sh
+TEST_GEN_FILES += huge_count_read_write
+
+TEST_FILES = _chk_dependency.sh _debugfs_common.sh
+TEST_PROGS = debugfs_attrs.sh debugfs_schemes.sh debugfs_target_ids.sh
+TEST_PROGS += debugfs_empty_targets.sh debugfs_huge_count_read_write.sh
include ../lib.mk
diff --git a/tools/testing/selftests/damon/_debugfs_common.sh b/tools/testing/selftests/damon/_debugfs_common.sh
new file mode 100644
index 000000000000..48989d4813ae
--- /dev/null
+++ b/tools/testing/selftests/damon/_debugfs_common.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+test_write_result() {
+ file=$1
+ content=$2
+ orig_content=$3
+ expect_reason=$4
+ expected=$5
+
+ echo "$content" > "$file"
+ if [ $? -ne "$expected" ]
+ then
+ echo "writing $content to $file doesn't return $expected"
+ echo "expected because: $expect_reason"
+ echo "$orig_content" > "$file"
+ exit 1
+ fi
+}
+
+test_write_succ() {
+ test_write_result "$1" "$2" "$3" "$4" 0
+}
+
+test_write_fail() {
+ test_write_result "$1" "$2" "$3" "$4" 1
+}
+
+test_content() {
+ file=$1
+ orig_content=$2
+ expected=$3
+ expect_reason=$4
+
+ content=$(cat "$file")
+ if [ "$content" != "$expected" ]
+ then
+ echo "reading $file expected $expected but $content"
+ echo "expected because: $expect_reason"
+ echo "$orig_content" > "$file"
+ exit 1
+ fi
+}
+
+source ./_chk_dependency.sh
+
+damon_onoff="$DBGFS/monitor_on"
+if [ $(cat "$damon_onoff") = "on" ]
+then
+ echo "monitoring is on"
+ exit $ksft_skip
+fi
diff --git a/tools/testing/selftests/damon/debugfs_attrs.sh b/tools/testing/selftests/damon/debugfs_attrs.sh
index 196b6640bf37..902e312bca89 100644
--- a/tools/testing/selftests/damon/debugfs_attrs.sh
+++ b/tools/testing/selftests/damon/debugfs_attrs.sh
@@ -1,48 +1,7 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
-test_write_result() {
- file=$1
- content=$2
- orig_content=$3
- expect_reason=$4
- expected=$5
-
- echo "$content" > "$file"
- if [ $? -ne "$expected" ]
- then
- echo "writing $content to $file doesn't return $expected"
- echo "expected because: $expect_reason"
- echo "$orig_content" > "$file"
- exit 1
- fi
-}
-
-test_write_succ() {
- test_write_result "$1" "$2" "$3" "$4" 0
-}
-
-test_write_fail() {
- test_write_result "$1" "$2" "$3" "$4" 1
-}
-
-test_content() {
- file=$1
- orig_content=$2
- expected=$3
- expect_reason=$4
-
- content=$(cat "$file")
- if [ "$content" != "$expected" ]
- then
- echo "reading $file expected $expected but $content"
- echo "expected because: $expect_reason"
- echo "$orig_content" > "$file"
- exit 1
- fi
-}
-
-source ./_chk_dependency.sh
+source _debugfs_common.sh
# Test attrs file
# ===============
@@ -56,33 +15,3 @@ test_write_fail "$file" "1 2 3 5 4" "$orig_content" \
"min_nr_regions > max_nr_regions"
test_content "$file" "$orig_content" "1 2 3 4 5" "successfully written"
echo "$orig_content" > "$file"
-
-# Test schemes file
-# =================
-
-file="$DBGFS/schemes"
-orig_content=$(cat "$file")
-
-test_write_succ "$file" "1 2 3 4 5 6 4 0 0 0 1 2 3 1 100 3 2 1" \
- "$orig_content" "valid input"
-test_write_fail "$file" "1 2
-3 4 5 6 3 0 0 0 1 2 3 1 100 3 2 1" "$orig_content" "multi lines"
-test_write_succ "$file" "" "$orig_content" "disabling"
-echo "$orig_content" > "$file"
-
-# Test target_ids file
-# ====================
-
-file="$DBGFS/target_ids"
-orig_content=$(cat "$file")
-
-test_write_succ "$file" "1 2 3 4" "$orig_content" "valid input"
-test_write_succ "$file" "1 2 abc 4" "$orig_content" "still valid input"
-test_content "$file" "$orig_content" "1 2" "non-integer was there"
-test_write_succ "$file" "abc 2 3" "$orig_content" "the file allows wrong input"
-test_content "$file" "$orig_content" "" "wrong input written"
-test_write_succ "$file" "" "$orig_content" "empty input"
-test_content "$file" "$orig_content" "" "empty input written"
-echo "$orig_content" > "$file"
-
-echo "PASS"
diff --git a/tools/testing/selftests/damon/debugfs_empty_targets.sh b/tools/testing/selftests/damon/debugfs_empty_targets.sh
new file mode 100644
index 000000000000..87aff8083822
--- /dev/null
+++ b/tools/testing/selftests/damon/debugfs_empty_targets.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source _debugfs_common.sh
+
+# Test empty targets case
+# =======================
+
+orig_target_ids=$(cat "$DBGFS/target_ids")
+echo "" > "$DBGFS/target_ids"
+orig_monitor_on=$(cat "$DBGFS/monitor_on")
+test_write_fail "$DBGFS/monitor_on" "on" "orig_monitor_on" "empty target ids"
+echo "$orig_target_ids" > "$DBGFS/target_ids"
diff --git a/tools/testing/selftests/damon/debugfs_huge_count_read_write.sh b/tools/testing/selftests/damon/debugfs_huge_count_read_write.sh
new file mode 100644
index 000000000000..922cadac2950
--- /dev/null
+++ b/tools/testing/selftests/damon/debugfs_huge_count_read_write.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source _debugfs_common.sh
+
+# Test huge count read write
+# ==========================
+
+dmesg -C
+
+for file in "$DBGFS/"*
+do
+ ./huge_count_read_write "$file"
+done
+
+if dmesg | grep -q WARNING
+then
+ dmesg
+ exit 1
+else
+ exit 0
+fi
diff --git a/tools/testing/selftests/damon/debugfs_schemes.sh b/tools/testing/selftests/damon/debugfs_schemes.sh
new file mode 100644
index 000000000000..5b39ab44731c
--- /dev/null
+++ b/tools/testing/selftests/damon/debugfs_schemes.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source _debugfs_common.sh
+
+# Test schemes file
+# =================
+
+file="$DBGFS/schemes"
+orig_content=$(cat "$file")
+
+test_write_succ "$file" "1 2 3 4 5 6 4 0 0 0 1 2 3 1 100 3 2 1" \
+ "$orig_content" "valid input"
+test_write_fail "$file" "1 2
+3 4 5 6 3 0 0 0 1 2 3 1 100 3 2 1" "$orig_content" "multi lines"
+test_write_succ "$file" "" "$orig_content" "disabling"
+test_write_fail "$file" "2 1 2 1 10 1 3 10 1 1 1 1 1 1 1 1 2 3" \
+ "$orig_content" "wrong condition ranges"
+echo "$orig_content" > "$file"
diff --git a/tools/testing/selftests/damon/debugfs_target_ids.sh b/tools/testing/selftests/damon/debugfs_target_ids.sh
new file mode 100644
index 000000000000..49aeabdb0aae
--- /dev/null
+++ b/tools/testing/selftests/damon/debugfs_target_ids.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source _debugfs_common.sh
+
+# Test target_ids file
+# ====================
+
+file="$DBGFS/target_ids"
+orig_content=$(cat "$file")
+
+test_write_succ "$file" "1 2 3 4" "$orig_content" "valid input"
+test_write_succ "$file" "1 2 abc 4" "$orig_content" "still valid input"
+test_content "$file" "$orig_content" "1 2" "non-integer was there"
+test_write_succ "$file" "abc 2 3" "$orig_content" "the file allows wrong input"
+test_content "$file" "$orig_content" "" "wrong input written"
+test_write_succ "$file" "" "$orig_content" "empty input"
+test_content "$file" "$orig_content" "" "empty input written"
+echo "$orig_content" > "$file"
diff --git a/tools/testing/selftests/damon/huge_count_read_write.c b/tools/testing/selftests/damon/huge_count_read_write.c
new file mode 100644
index 000000000000..ad7a6b4cf338
--- /dev/null
+++ b/tools/testing/selftests/damon/huge_count_read_write.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: SeongJae Park <sj@kernel.org>
+ */
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdio.h>
+
+void write_read_with_huge_count(char *file)
+{
+ int filedesc = open(file, O_RDWR);
+ char buf[25];
+ int ret;
+
+ printf("%s %s\n", __func__, file);
+ if (filedesc < 0) {
+ fprintf(stderr, "failed opening %s\n", file);
+ exit(1);
+ }
+
+ write(filedesc, "", 0xfffffffful);
+ perror("after write: ");
+ ret = read(filedesc, buf, 0xfffffffful);
+ perror("after read: ");
+ close(filedesc);
+}
+
+int main(int argc, char *argv[])
+{
+ if (argc != 2) {
+ fprintf(stderr, "Usage: %s <file>\n", argv[0]);
+ exit(1);
+ }
+ write_read_with_huge_count(argv[1]);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles_occ.sh b/tools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles_occ.sh
index b513f64d9092..026a126f584d 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles_occ.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles_occ.sh
@@ -72,6 +72,35 @@ rif_mac_profile_replacement_test()
ip link set $h1.10 address $h1_10_mac
}
+rif_mac_profile_consolidation_test()
+{
+ local count=$1; shift
+ local h1_20_mac
+
+ RET=0
+
+ if [[ $count -eq 1 ]]; then
+ return
+ fi
+
+ h1_20_mac=$(mac_get $h1.20)
+
+ # Set the MAC of $h1.20 to that of $h1.10 and confirm that they are
+ # using the same MAC profile.
+ ip link set $h1.20 address 00:11:11:11:11:11
+ check_err $?
+
+ occ=$(devlink -j resource show $DEVLINK_DEV \
+ | jq '.[][][] | select(.name=="rif_mac_profiles") |.["occ"]')
+
+ [[ $occ -eq $((count - 1)) ]]
+ check_err $? "MAC profile occupancy did not decrease"
+
+ log_test "RIF MAC profile consolidation"
+
+ ip link set $h1.20 address $h1_20_mac
+}
+
rif_mac_profile_shared_replacement_test()
{
local count=$1; shift
@@ -104,6 +133,7 @@ rif_mac_profile_edit_test()
create_max_rif_mac_profiles $count
rif_mac_profile_replacement_test
+ rif_mac_profile_consolidation_test $count
rif_mac_profile_shared_replacement_test $count
}
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 3763105029fb..3cb5ac5da087 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -30,10 +30,12 @@
/x86_64/svm_int_ctl_test
/x86_64/sync_regs_test
/x86_64/tsc_msrs_test
+/x86_64/userspace_io_test
/x86_64/userspace_msr_exit_test
/x86_64/vmx_apic_access_test
/x86_64/vmx_close_while_nested_test
/x86_64/vmx_dirty_log_test
+/x86_64/vmx_invalid_nested_guest_state
/x86_64/vmx_preemption_timer_test
/x86_64/vmx_set_nested_state_test
/x86_64/vmx_tsc_adjust_test
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index c4e34717826a..17342b575e85 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -59,10 +59,12 @@ TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
TEST_GEN_PROGS_x86_64 += x86_64/svm_int_ctl_test
TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
+TEST_GEN_PROGS_x86_64 += x86_64/userspace_io_test
TEST_GEN_PROGS_x86_64 += x86_64/userspace_msr_exit_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_apic_access_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test
+TEST_GEN_PROGS_x86_64 += x86_64/vmx_invalid_nested_guest_state
TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_nested_tsc_scaling_test
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index 6a1a37f30494..2d62edc49d67 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -321,6 +321,7 @@ bool vm_is_unrestricted_guest(struct kvm_vm *vm);
unsigned int vm_get_page_size(struct kvm_vm *vm);
unsigned int vm_get_page_shift(struct kvm_vm *vm);
+unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
uint64_t vm_get_max_gfn(struct kvm_vm *vm);
int vm_get_fd(struct kvm_vm *vm);
diff --git a/tools/testing/selftests/kvm/kvm_create_max_vcpus.c b/tools/testing/selftests/kvm/kvm_create_max_vcpus.c
index f968dfd4ee88..aed9dc3ca1e9 100644
--- a/tools/testing/selftests/kvm/kvm_create_max_vcpus.c
+++ b/tools/testing/selftests/kvm/kvm_create_max_vcpus.c
@@ -12,6 +12,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <sys/resource.h>
#include "test_util.h"
@@ -40,11 +41,40 @@ int main(int argc, char *argv[])
{
int kvm_max_vcpu_id = kvm_check_cap(KVM_CAP_MAX_VCPU_ID);
int kvm_max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
+ /*
+ * Number of file descriptors required, KVM_CAP_MAX_VCPUS for vCPU fds +
+ * an arbitrary number for everything else.
+ */
+ int nr_fds_wanted = kvm_max_vcpus + 100;
+ struct rlimit rl;
pr_info("KVM_CAP_MAX_VCPU_ID: %d\n", kvm_max_vcpu_id);
pr_info("KVM_CAP_MAX_VCPUS: %d\n", kvm_max_vcpus);
/*
+ * Check that we're allowed to open nr_fds_wanted file descriptors and
+ * try raising the limits if needed.
+ */
+ TEST_ASSERT(!getrlimit(RLIMIT_NOFILE, &rl), "getrlimit() failed!");
+
+ if (rl.rlim_cur < nr_fds_wanted) {
+ rl.rlim_cur = nr_fds_wanted;
+ if (rl.rlim_max < nr_fds_wanted) {
+ int old_rlim_max = rl.rlim_max;
+ rl.rlim_max = nr_fds_wanted;
+
+ int r = setrlimit(RLIMIT_NOFILE, &rl);
+ if (r < 0) {
+ printf("RLIMIT_NOFILE hard limit is too low (%d, wanted %d)\n",
+ old_rlim_max, nr_fds_wanted);
+ exit(KSFT_SKIP);
+ }
+ } else {
+ TEST_ASSERT(!setrlimit(RLIMIT_NOFILE, &rl), "setrlimit() failed!");
+ }
+ }
+
+ /*
* Upstream KVM prior to 4.8 does not support KVM_CAP_MAX_VCPU_ID.
* Userspace is supposed to use KVM_CAP_MAX_VCPUS as the maximum ID
* in this case.
diff --git a/tools/testing/selftests/kvm/kvm_page_table_test.c b/tools/testing/selftests/kvm/kvm_page_table_test.c
index 3836322add00..ba1fdc3dcf4a 100644
--- a/tools/testing/selftests/kvm/kvm_page_table_test.c
+++ b/tools/testing/selftests/kvm/kvm_page_table_test.c
@@ -280,7 +280,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
#ifdef __s390x__
alignment = max(0x100000, alignment);
#endif
- guest_test_phys_mem = align_down(guest_test_virt_mem, alignment);
+ guest_test_phys_mem = align_down(guest_test_phys_mem, alignment);
/* Set up the shared data structure test_args */
test_args.vm = vm;
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 8f2e0bb1ef96..53d2b5d04b82 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -302,7 +302,7 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
(1ULL << (vm->va_bits - 1)) >> vm->page_shift);
/* Limit physical addresses to PA-bits. */
- vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
+ vm->max_gfn = vm_compute_max_gfn(vm);
/* Allocate and setup memory for guest. */
vm->vpages_mapped = sparsebit_alloc();
@@ -2328,6 +2328,11 @@ unsigned int vm_get_page_shift(struct kvm_vm *vm)
return vm->page_shift;
}
+unsigned long __attribute__((weak)) vm_compute_max_gfn(struct kvm_vm *vm)
+{
+ return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
+}
+
uint64_t vm_get_max_gfn(struct kvm_vm *vm)
{
return vm->max_gfn;
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index 82c39db91369..eef7b34756d5 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -1431,3 +1431,71 @@ struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpui
return cpuid;
}
+
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx 0x69746e65
+
+static inline unsigned x86_family(unsigned int eax)
+{
+ unsigned int x86;
+
+ x86 = (eax >> 8) & 0xf;
+
+ if (x86 == 0xf)
+ x86 += (eax >> 20) & 0xff;
+
+ return x86;
+}
+
+unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
+{
+ const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */
+ unsigned long ht_gfn, max_gfn, max_pfn;
+ uint32_t eax, ebx, ecx, edx, max_ext_leaf;
+
+ max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1;
+
+ /* Avoid reserved HyperTransport region on AMD processors. */
+ eax = ecx = 0;
+ cpuid(&eax, &ebx, &ecx, &edx);
+ if (ebx != X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx ||
+ ecx != X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx ||
+ edx != X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
+ return max_gfn;
+
+ /* On parts with <40 physical address bits, the area is fully hidden */
+ if (vm->pa_bits < 40)
+ return max_gfn;
+
+ /* Before family 17h, the HyperTransport area is just below 1T. */
+ ht_gfn = (1 << 28) - num_ht_pages;
+ eax = 1;
+ cpuid(&eax, &ebx, &ecx, &edx);
+ if (x86_family(eax) < 0x17)
+ goto done;
+
+ /*
+ * Otherwise it's at the top of the physical address space, possibly
+ * reduced due to SME by bits 11:6 of CPUID[0x8000001f].EBX. Use
+ * the old conservative value if MAXPHYADDR is not enumerated.
+ */
+ eax = 0x80000000;
+ cpuid(&eax, &ebx, &ecx, &edx);
+ max_ext_leaf = eax;
+ if (max_ext_leaf < 0x80000008)
+ goto done;
+
+ eax = 0x80000008;
+ cpuid(&eax, &ebx, &ecx, &edx);
+ max_pfn = (1ULL << ((eax & 0xff) - vm->page_shift)) - 1;
+ if (max_ext_leaf >= 0x8000001f) {
+ eax = 0x8000001f;
+ cpuid(&eax, &ebx, &ecx, &edx);
+ max_pfn >>= (ebx >> 6) & 0x3f;
+ }
+
+ ht_gfn = max_pfn - num_ht_pages;
+done:
+ return min(max_gfn, ht_gfn - 1);
+}
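As a quick sanity check of the arithmetic in vm_compute_max_gfn(), here is a worked example with assumed values (4 KiB pages, 48 physical address bits, family >= 0x17, no SME reduction); the numbers are illustrative only, not taken from the patch.

/* Worked example of the vm_compute_max_gfn() arithmetic, standalone. */
#include <stdio.h>

int main(void)
{
	unsigned int page_shift = 12, pa_bits = 48;
	unsigned long long num_ht_pages = 12ULL << (30 - page_shift);	/* 12 GiB of pages */
	unsigned long long max_gfn = (1ULL << (pa_bits - page_shift)) - 1;
	unsigned long long max_pfn = (1ULL << (pa_bits - page_shift)) - 1;
	unsigned long long ht_gfn = max_pfn - num_ht_pages;
	unsigned long long res = max_gfn < ht_gfn - 1 ? max_gfn : ht_gfn - 1;

	/* prints 0x300000 0xfffffffff 0xfffcfffff 0xfffcffffe */
	printf("%#llx %#llx %#llx %#llx\n", num_ht_pages, max_gfn, ht_gfn, res);
	return 0;
}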
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
index 91d88aaa9899..672915ce73d8 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_features.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
@@ -165,10 +165,10 @@ static void hv_set_cpuid(struct kvm_vm *vm, struct kvm_cpuid2 *cpuid,
vcpu_set_cpuid(vm, VCPU_ID, cpuid);
}
-static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr,
- struct kvm_cpuid2 *best)
+static void guest_test_msrs_access(void)
{
struct kvm_run *run;
+ struct kvm_vm *vm;
struct ucall uc;
int stage = 0, r;
struct kvm_cpuid_entry2 feat = {
@@ -180,11 +180,34 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr,
struct kvm_cpuid_entry2 dbg = {
.function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES
};
- struct kvm_enable_cap cap = {0};
-
- run = vcpu_state(vm, VCPU_ID);
+ struct kvm_cpuid2 *best;
+ vm_vaddr_t msr_gva;
+ struct kvm_enable_cap cap = {
+ .cap = KVM_CAP_HYPERV_ENFORCE_CPUID,
+ .args = {1}
+ };
+ struct msr_data *msr;
while (true) {
+ vm = vm_create_default(VCPU_ID, 0, guest_msr);
+
+ msr_gva = vm_vaddr_alloc_page(vm);
+ memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
+ msr = addr_gva2hva(vm, msr_gva);
+
+ vcpu_args_set(vm, VCPU_ID, 1, msr_gva);
+ vcpu_enable_cap(vm, VCPU_ID, &cap);
+
+ vcpu_set_hv_cpuid(vm, VCPU_ID);
+
+ best = kvm_get_supported_hv_cpuid();
+
+ vm_init_descriptor_tables(vm);
+ vcpu_init_descriptor_tables(vm, VCPU_ID);
+ vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
+
+ run = vcpu_state(vm, VCPU_ID);
+
switch (stage) {
case 0:
/*
@@ -315,6 +338,7 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr,
* capability enabled and guest visible CPUID bit unset.
*/
cap.cap = KVM_CAP_HYPERV_SYNIC2;
+ cap.args[0] = 0;
vcpu_enable_cap(vm, VCPU_ID, &cap);
break;
case 22:
@@ -461,9 +485,9 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr,
switch (get_ucall(vm, VCPU_ID, &uc)) {
case UCALL_SYNC:
- TEST_ASSERT(uc.args[1] == stage,
- "Unexpected stage: %ld (%d expected)\n",
- uc.args[1], stage);
+ TEST_ASSERT(uc.args[1] == 0,
+ "Unexpected stage: %ld (0 expected)\n",
+ uc.args[1]);
break;
case UCALL_ABORT:
TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
@@ -474,13 +498,14 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr,
}
stage++;
+ kvm_vm_free(vm);
}
}
-static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall,
- void *input, void *output, struct kvm_cpuid2 *best)
+static void guest_test_hcalls_access(void)
{
struct kvm_run *run;
+ struct kvm_vm *vm;
struct ucall uc;
int stage = 0, r;
struct kvm_cpuid_entry2 feat = {
@@ -493,10 +518,38 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall
struct kvm_cpuid_entry2 dbg = {
.function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES
};
-
- run = vcpu_state(vm, VCPU_ID);
+ struct kvm_enable_cap cap = {
+ .cap = KVM_CAP_HYPERV_ENFORCE_CPUID,
+ .args = {1}
+ };
+ vm_vaddr_t hcall_page, hcall_params;
+ struct hcall_data *hcall;
+ struct kvm_cpuid2 *best;
while (true) {
+ vm = vm_create_default(VCPU_ID, 0, guest_hcall);
+
+ vm_init_descriptor_tables(vm);
+ vcpu_init_descriptor_tables(vm, VCPU_ID);
+ vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
+
+ /* Hypercall input/output */
+ hcall_page = vm_vaddr_alloc_pages(vm, 2);
+ hcall = addr_gva2hva(vm, hcall_page);
+ memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());
+
+ hcall_params = vm_vaddr_alloc_page(vm);
+ memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());
+
+ vcpu_args_set(vm, VCPU_ID, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
+ vcpu_enable_cap(vm, VCPU_ID, &cap);
+
+ vcpu_set_hv_cpuid(vm, VCPU_ID);
+
+ best = kvm_get_supported_hv_cpuid();
+
+ run = vcpu_state(vm, VCPU_ID);
+
switch (stage) {
case 0:
hcall->control = 0xdeadbeef;
@@ -606,9 +659,9 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall
switch (get_ucall(vm, VCPU_ID, &uc)) {
case UCALL_SYNC:
- TEST_ASSERT(uc.args[1] == stage,
- "Unexpected stage: %ld (%d expected)\n",
- uc.args[1], stage);
+ TEST_ASSERT(uc.args[1] == 0,
+ "Unexpected stage: %ld (0 expected)\n",
+ uc.args[1]);
break;
case UCALL_ABORT:
TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
@@ -619,66 +672,15 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall
}
stage++;
+ kvm_vm_free(vm);
}
}
int main(void)
{
- struct kvm_cpuid2 *best;
- struct kvm_vm *vm;
- vm_vaddr_t msr_gva, hcall_page, hcall_params;
- struct kvm_enable_cap cap = {
- .cap = KVM_CAP_HYPERV_ENFORCE_CPUID,
- .args = {1}
- };
-
- /* Test MSRs */
- vm = vm_create_default(VCPU_ID, 0, guest_msr);
-
- msr_gva = vm_vaddr_alloc_page(vm);
- memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
- vcpu_args_set(vm, VCPU_ID, 1, msr_gva);
- vcpu_enable_cap(vm, VCPU_ID, &cap);
-
- vcpu_set_hv_cpuid(vm, VCPU_ID);
-
- best = kvm_get_supported_hv_cpuid();
-
- vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, VCPU_ID);
- vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
-
pr_info("Testing access to Hyper-V specific MSRs\n");
- guest_test_msrs_access(vm, addr_gva2hva(vm, msr_gva),
- best);
- kvm_vm_free(vm);
-
- /* Test hypercalls */
- vm = vm_create_default(VCPU_ID, 0, guest_hcall);
-
- vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, VCPU_ID);
- vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
-
- /* Hypercall input/output */
- hcall_page = vm_vaddr_alloc_pages(vm, 2);
- memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());
-
- hcall_params = vm_vaddr_alloc_page(vm);
- memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());
-
- vcpu_args_set(vm, VCPU_ID, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
- vcpu_enable_cap(vm, VCPU_ID, &cap);
-
- vcpu_set_hv_cpuid(vm, VCPU_ID);
-
- best = kvm_get_supported_hv_cpuid();
+ guest_test_msrs_access();
pr_info("Testing access to Hyper-V hypercalls\n");
- guest_test_hcalls_access(vm, addr_gva2hva(vm, hcall_params),
- addr_gva2hva(vm, hcall_page),
- addr_gva2hva(vm, hcall_page) + getpagesize(),
- best);
-
- kvm_vm_free(vm);
+ guest_test_hcalls_access();
}
diff --git a/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c b/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c
index 5ba325cd64bf..29b18d565cf4 100644
--- a/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c
+++ b/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c
@@ -54,12 +54,15 @@ static struct kvm_vm *sev_vm_create(bool es)
return vm;
}
-static struct kvm_vm *__vm_create(void)
+static struct kvm_vm *aux_vm_create(bool with_vcpus)
{
struct kvm_vm *vm;
int i;
vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
+ if (!with_vcpus)
+ return vm;
+
for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
vm_vcpu_add(vm, i);
@@ -89,11 +92,11 @@ static void test_sev_migrate_from(bool es)
{
struct kvm_vm *src_vm;
struct kvm_vm *dst_vms[NR_MIGRATE_TEST_VMS];
- int i;
+ int i, ret;
src_vm = sev_vm_create(es);
for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i)
- dst_vms[i] = __vm_create();
+ dst_vms[i] = aux_vm_create(true);
/* Initial migration from the src to the first dst. */
sev_migrate_from(dst_vms[0]->fd, src_vm->fd);
@@ -102,7 +105,10 @@ static void test_sev_migrate_from(bool es)
sev_migrate_from(dst_vms[i]->fd, dst_vms[i - 1]->fd);
/* Migrate the guest back to the original VM. */
- sev_migrate_from(src_vm->fd, dst_vms[NR_MIGRATE_TEST_VMS - 1]->fd);
+ ret = __sev_migrate_from(src_vm->fd, dst_vms[NR_MIGRATE_TEST_VMS - 1]->fd);
+ TEST_ASSERT(ret == -1 && errno == EIO,
+ "VM that was migrated from should be dead. ret %d, errno: %d\n", ret,
+ errno);
kvm_vm_free(src_vm);
for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i)
@@ -146,6 +152,8 @@ static void test_sev_migrate_locking(void)
for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
pthread_join(pt[i], NULL);
+ for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
+ kvm_vm_free(input[i].vm);
}
static void test_sev_migrate_parameters(void)
@@ -157,12 +165,11 @@ static void test_sev_migrate_parameters(void)
sev_vm = sev_vm_create(/* es= */ false);
sev_es_vm = sev_vm_create(/* es= */ true);
vm_no_vcpu = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
- vm_no_sev = __vm_create();
+ vm_no_sev = aux_vm_create(true);
sev_es_vm_no_vmsa = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
sev_ioctl(sev_es_vm_no_vmsa->fd, KVM_SEV_ES_INIT, NULL);
vm_vcpu_add(sev_es_vm_no_vmsa, 1);
-
ret = __sev_migrate_from(sev_vm->fd, sev_es_vm->fd);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
@@ -191,13 +198,151 @@ static void test_sev_migrate_parameters(void)
TEST_ASSERT(ret == -1 && errno == EINVAL,
"Migrations require SEV enabled. ret %d, errno: %d\n", ret,
errno);
+
+ kvm_vm_free(sev_vm);
+ kvm_vm_free(sev_es_vm);
+ kvm_vm_free(sev_es_vm_no_vmsa);
+ kvm_vm_free(vm_no_vcpu);
+ kvm_vm_free(vm_no_sev);
+}
+
+static int __sev_mirror_create(int dst_fd, int src_fd)
+{
+ struct kvm_enable_cap cap = {
+ .cap = KVM_CAP_VM_COPY_ENC_CONTEXT_FROM,
+ .args = { src_fd }
+ };
+
+ return ioctl(dst_fd, KVM_ENABLE_CAP, &cap);
+}
+
+
+static void sev_mirror_create(int dst_fd, int src_fd)
+{
+ int ret;
+
+ ret = __sev_mirror_create(dst_fd, src_fd);
+ TEST_ASSERT(!ret, "Copying context failed, ret: %d, errno: %d\n", ret, errno);
+}
+
+static void test_sev_mirror(bool es)
+{
+ struct kvm_vm *src_vm, *dst_vm;
+ struct kvm_sev_launch_start start = {
+ .policy = es ? SEV_POLICY_ES : 0
+ };
+ int i;
+
+ src_vm = sev_vm_create(es);
+ dst_vm = aux_vm_create(false);
+
+ sev_mirror_create(dst_vm->fd, src_vm->fd);
+
+ /* Check that we can complete creation of the mirror VM. */
+ for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
+ vm_vcpu_add(dst_vm, i);
+ sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_START, &start);
+ if (es)
+ sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
+
+ kvm_vm_free(src_vm);
+ kvm_vm_free(dst_vm);
+}
+
+static void test_sev_mirror_parameters(void)
+{
+ struct kvm_vm *sev_vm, *sev_es_vm, *vm_no_vcpu, *vm_with_vcpu;
+ int ret;
+
+ sev_vm = sev_vm_create(/* es= */ false);
+ sev_es_vm = sev_vm_create(/* es= */ true);
+ vm_with_vcpu = aux_vm_create(true);
+ vm_no_vcpu = aux_vm_create(false);
+
+ ret = __sev_mirror_create(sev_vm->fd, sev_vm->fd);
+ TEST_ASSERT(
+ ret == -1 && errno == EINVAL,
+ "Should not be able copy context to self. ret: %d, errno: %d\n",
+ ret, errno);
+
+ ret = __sev_mirror_create(sev_vm->fd, sev_es_vm->fd);
+ TEST_ASSERT(
+ ret == -1 && errno == EINVAL,
+ "Should not be able copy context to SEV enabled VM. ret: %d, errno: %d\n",
+ ret, errno);
+
+ ret = __sev_mirror_create(sev_es_vm->fd, sev_vm->fd);
+ TEST_ASSERT(
+ ret == -1 && errno == EINVAL,
+ "Should not be able copy context to SEV-ES enabled VM. ret: %d, errno: %d\n",
+ ret, errno);
+
+ ret = __sev_mirror_create(vm_no_vcpu->fd, vm_with_vcpu->fd);
+ TEST_ASSERT(ret == -1 && errno == EINVAL,
+ "Copy context requires SEV enabled. ret %d, errno: %d\n", ret,
+ errno);
+
+ ret = __sev_mirror_create(vm_with_vcpu->fd, sev_vm->fd);
+ TEST_ASSERT(
+ ret == -1 && errno == EINVAL,
+ "SEV copy context requires no vCPUS on the destination. ret: %d, errno: %d\n",
+ ret, errno);
+
+ kvm_vm_free(sev_vm);
+ kvm_vm_free(sev_es_vm);
+ kvm_vm_free(vm_with_vcpu);
+ kvm_vm_free(vm_no_vcpu);
+}
+
+static void test_sev_move_copy(void)
+{
+ struct kvm_vm *dst_vm, *sev_vm, *mirror_vm, *dst_mirror_vm;
+ int ret;
+
+ sev_vm = sev_vm_create(/* es= */ false);
+ dst_vm = aux_vm_create(true);
+ mirror_vm = aux_vm_create(false);
+ dst_mirror_vm = aux_vm_create(false);
+
+ sev_mirror_create(mirror_vm->fd, sev_vm->fd);
+ ret = __sev_migrate_from(dst_vm->fd, sev_vm->fd);
+ TEST_ASSERT(ret == -1 && errno == EBUSY,
+ "Cannot migrate VM that has mirrors. ret %d, errno: %d\n", ret,
+ errno);
+
+ /* The mirror itself can be migrated. */
+ sev_migrate_from(dst_mirror_vm->fd, mirror_vm->fd);
+ ret = __sev_migrate_from(dst_vm->fd, sev_vm->fd);
+ TEST_ASSERT(ret == -1 && errno == EBUSY,
+ "Cannot migrate VM that has mirrors. ret %d, errno: %d\n", ret,
+ errno);
+
+ /*
+ * mirror_vm is not a mirror anymore, dst_mirror_vm is. Thus,
+ * the owner can be copied as soon as dst_mirror_vm is gone.
+ */
+ kvm_vm_free(dst_mirror_vm);
+ sev_migrate_from(dst_vm->fd, sev_vm->fd);
+
+ kvm_vm_free(mirror_vm);
+ kvm_vm_free(dst_vm);
+ kvm_vm_free(sev_vm);
}
int main(int argc, char *argv[])
{
- test_sev_migrate_from(/* es= */ false);
- test_sev_migrate_from(/* es= */ true);
- test_sev_migrate_locking();
- test_sev_migrate_parameters();
+ if (kvm_check_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM)) {
+ test_sev_migrate_from(/* es= */ false);
+ test_sev_migrate_from(/* es= */ true);
+ test_sev_migrate_locking();
+ test_sev_migrate_parameters();
+ if (kvm_check_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM))
+ test_sev_move_copy();
+ }
+ if (kvm_check_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM)) {
+ test_sev_mirror(/* es= */ false);
+ test_sev_mirror(/* es= */ true);
+ test_sev_mirror_parameters();
+ }
return 0;
}
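The mirror-VM coverage above wraps KVM's encryption-context copy in small test helpers. As a standalone illustration only (not part of the patch), a hedged userspace sketch of the same flow follows; the function name, the KVM_CHECK_EXTENSION probe and the error handling are assumptions, while the ioctl numbers and the struct come from the KVM UAPI already used by the test.

/*
 * Hypothetical sketch: copy an SEV encryption context from src_vm_fd to
 * dst_vm_fd, mirroring what __sev_mirror_create() does above.  Per the
 * parameter tests, the destination VM must not have any vCPUs yet.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int copy_enc_context(int kvm_fd, int dst_vm_fd, int src_vm_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_VM_COPY_ENC_CONTEXT_FROM,
		.args = { src_vm_fd },
	};

	/* Probe /dev/kvm for the capability before relying on it. */
	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_VM_COPY_ENC_CONTEXT_FROM) <= 0)
		return -1;

	/* Same KVM_ENABLE_CAP call the selftest issues on the destination VM. */
	return ioctl(dst_vm_fd, KVM_ENABLE_CAP, &cap);
}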
diff --git a/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c b/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c
index df04f56ce859..30a81038df46 100644
--- a/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c
+++ b/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c
@@ -75,7 +75,7 @@ static void l1_guest_code(struct svm_test_data *svm)
vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
/* No intercepts for real and virtual interrupts */
- vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR | INTERCEPT_VINTR);
+ vmcb->control.intercept &= ~(BIT(INTERCEPT_INTR) | BIT(INTERCEPT_VINTR));
/* Make a virtual interrupt VINTR_IRQ_NUMBER pending */
vmcb->control.int_ctl |= V_IRQ_MASK | (0x1 << V_INTR_PRIO_SHIFT);
diff --git a/tools/testing/selftests/kvm/x86_64/userspace_io_test.c b/tools/testing/selftests/kvm/x86_64/userspace_io_test.c
new file mode 100644
index 000000000000..e4bef2e05686
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/userspace_io_test.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+
+#include "kvm_util.h"
+#include "processor.h"
+
+#define VCPU_ID 1
+
+static void guest_ins_port80(uint8_t *buffer, unsigned int count)
+{
+ unsigned long end;
+
+ if (count == 2)
+ end = (unsigned long)buffer + 1;
+ else
+ end = (unsigned long)buffer + 8192;
+
+ asm volatile("cld; rep; insb" : "+D"(buffer), "+c"(count) : "d"(0x80) : "memory");
+ GUEST_ASSERT_1(count == 0, count);
+ GUEST_ASSERT_2((unsigned long)buffer == end, buffer, end);
+}
+
+static void guest_code(void)
+{
+ uint8_t buffer[8192];
+ int i;
+
+ /*
+ * Special case tests. main() will adjust RCX 2 => 1 and 3 => 8192 to
+ * test that KVM doesn't explode when userspace modifies the "count" on
+ * a userspace I/O exit. KVM isn't required to play nice with the I/O
+ * itself as KVM doesn't support manipulating the count, it just needs
+ * to not explode or overflow a buffer.
+ */
+ guest_ins_port80(buffer, 2);
+ guest_ins_port80(buffer, 3);
+
+ /* Verify KVM fills the buffer correctly when not stuffing RCX. */
+ memset(buffer, 0, sizeof(buffer));
+ guest_ins_port80(buffer, 8192);
+ for (i = 0; i < 8192; i++)
+ GUEST_ASSERT_2(buffer[i] == 0xaa, i, buffer[i]);
+
+ GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_regs regs;
+ struct kvm_run *run;
+ struct kvm_vm *vm;
+ struct ucall uc;
+ int rc;
+
+ /* Tell stdout not to buffer its content */
+ setbuf(stdout, NULL);
+
+ /* Create VM */
+ vm = vm_create_default(VCPU_ID, 0, guest_code);
+ run = vcpu_state(vm, VCPU_ID);
+
+ memset(&regs, 0, sizeof(regs));
+
+ while (1) {
+ rc = _vcpu_run(vm, VCPU_ID);
+
+ TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Unexpected exit reason: %u (%s),\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+
+ if (get_ucall(vm, VCPU_ID, &uc))
+ break;
+
+ TEST_ASSERT(run->io.port == 0x80,
+ "Expected I/O at port 0x80, got port 0x%x\n", run->io.port);
+
+ /*
+ * Modify the rep string count in RCX: 2 => 1 and 3 => 8192.
+ * Note, this abuses KVM's batching of rep string I/O to avoid
+ * getting stuck in an infinite loop. That behavior isn't in
+ * scope from a testing perspective as it's not ABI in any way,
+ * i.e. it really is abusing internal KVM knowledge.
+ */
+ vcpu_regs_get(vm, VCPU_ID, &regs);
+ if (regs.rcx == 2)
+ regs.rcx = 1;
+ if (regs.rcx == 3)
+ regs.rcx = 8192;
+ memset((void *)run + run->io.data_offset, 0xaa, 4096);
+ vcpu_regs_set(vm, VCPU_ID, &regs);
+ }
+
+ switch (uc.cmd) {
+ case UCALL_DONE:
+ break;
+ case UCALL_ABORT:
+ TEST_FAIL("%s at %s:%ld : argN+1 = 0x%lx, argN+2 = 0x%lx",
+ (const char *)uc.args[0], __FILE__, uc.args[1],
+ uc.args[2], uc.args[3]);
+ default:
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
+ }
+
+ kvm_vm_free(vm);
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c b/tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c
new file mode 100644
index 000000000000..489fbed4ca6f
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "vmx.h"
+
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "kselftest.h"
+
+#define VCPU_ID 0
+#define ARBITRARY_IO_PORT 0x2000
+
+static struct kvm_vm *vm;
+
+static void l2_guest_code(void)
+{
+ /*
+ * Generate an exit to L0 userspace, i.e. main(), via I/O to an
+ * arbitrary port.
+ */
+ asm volatile("inb %%dx, %%al"
+ : : [port] "d" (ARBITRARY_IO_PORT) : "rax");
+}
+
+static void l1_guest_code(struct vmx_pages *vmx_pages)
+{
+#define L2_GUEST_STACK_SIZE 64
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+
+ GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+ GUEST_ASSERT(load_vmcs(vmx_pages));
+
+ /* Prepare the VMCS for L2 execution. */
+ prepare_vmcs(vmx_pages, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+ /*
+ * L2 must be run without unrestricted guest, verify that the selftests
+ * library hasn't enabled it. Because KVM selftests jump directly to
+ * 64-bit mode, unrestricted guest support isn't required.
+ */
+ GUEST_ASSERT(!(vmreadz(CPU_BASED_VM_EXEC_CONTROL) & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) ||
+ !(vmreadz(SECONDARY_VM_EXEC_CONTROL) & SECONDARY_EXEC_UNRESTRICTED_GUEST));
+
+ GUEST_ASSERT(!vmlaunch());
+
+ /* L2 should triple fault after main() stuffs invalid guest state. */
+ GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_TRIPLE_FAULT);
+ GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+ vm_vaddr_t vmx_pages_gva;
+ struct kvm_sregs sregs;
+ struct kvm_run *run;
+ struct ucall uc;
+
+ nested_vmx_check_supported();
+
+ vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
+
+ /* Allocate VMX pages and shared descriptors (vmx_pages). */
+ vcpu_alloc_vmx(vm, &vmx_pages_gva);
+ vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+
+ vcpu_run(vm, VCPU_ID);
+
+ run = vcpu_state(vm, VCPU_ID);
+
+ /*
+ * The first exit to L0 userspace should be an I/O access from L2.
+ * Running L1 should launch L2 without triggering an exit to userspace.
+ */
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Expected KVM_EXIT_IO, got: %u (%s)\n",
+ run->exit_reason, exit_reason_str(run->exit_reason));
+
+ TEST_ASSERT(run->io.port == ARBITRARY_IO_PORT,
+ "Expected IN from port %d from L2, got port %d",
+ ARBITRARY_IO_PORT, run->io.port);
+
+ /*
+ * Stuff invalid guest state for L2 by making TR unusable. The next
+ * KVM_RUN should induce a TRIPLE_FAULT in L2 as KVM doesn't support
+ * emulating invalid guest state for L2.
+ */
+ memset(&sregs, 0, sizeof(sregs));
+ vcpu_sregs_get(vm, VCPU_ID, &sregs);
+ sregs.tr.unusable = 1;
+ vcpu_sregs_set(vm, VCPU_ID, &sregs);
+
+ vcpu_run(vm, VCPU_ID);
+
+ switch (get_ucall(vm, VCPU_ID, &uc)) {
+ case UCALL_DONE:
+ break;
+ case UCALL_ABORT:
+ TEST_FAIL("%s", (const char *)uc.args[0]);
+ default:
+ TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
+ }
+}
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c
index 23051d84b907..2454a1f2ca0c 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c
@@ -110,22 +110,5 @@ int main(int argc, char *argv[])
ret = _vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_LBR_FMT);
TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail.");
- /* testcase 4, set capabilities when we don't have PDCM bit */
- entry_1_0->ecx &= ~X86_FEATURE_PDCM;
- vcpu_set_cpuid(vm, VCPU_ID, cpuid);
- ret = _vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);
- TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail.");
-
- /* testcase 5, set capabilities when we don't have PMU version bits */
- entry_1_0->ecx |= X86_FEATURE_PDCM;
- eax.split.version_id = 0;
- entry_1_0->ecx = eax.full;
- vcpu_set_cpuid(vm, VCPU_ID, cpuid);
- ret = _vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_FW_WRITES);
- TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail.");
-
- vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, 0);
- ASSERT_EQ(vcpu_get_msr(vm, VCPU_ID, MSR_IA32_PERF_CAPABILITIES), 0);
-
kvm_vm_free(vm);
}
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 7615f29831eb..9897fa9ab953 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -34,6 +34,7 @@ TEST_PROGS += srv6_end_dt46_l3vpn_test.sh
TEST_PROGS += srv6_end_dt4_l3vpn_test.sh
TEST_PROGS += srv6_end_dt6_l3vpn_test.sh
TEST_PROGS += vrf_strict_mode_test.sh
+TEST_PROGS += arp_ndisc_evict_nocarrier.sh
TEST_PROGS_EXTENDED := in_netns.sh setup_loopback.sh setup_veth.sh
TEST_PROGS_EXTENDED += toeplitz_client.sh toeplitz.sh
TEST_GEN_FILES = socket nettest
diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh
index 3313566ce906..ad2982b72e02 100755
--- a/tools/testing/selftests/net/fcnal-test.sh
+++ b/tools/testing/selftests/net/fcnal-test.sh
@@ -455,6 +455,22 @@ cleanup()
ip netns del ${NSC} >/dev/null 2>&1
}
+cleanup_vrf_dup()
+{
+ ip link del ${NSA_DEV2} >/dev/null 2>&1
+ ip netns pids ${NSC} | xargs kill 2>/dev/null
+ ip netns del ${NSC} >/dev/null 2>&1
+}
+
+setup_vrf_dup()
+{
+ # some VRF tests use ns-C which has the same config as
+ # ns-B but for a device NOT in the VRF
+ create_ns ${NSC} "-" "-"
+ connect_ns ${NSA} ${NSA_DEV2} ${NSA_IP}/24 ${NSA_IP6}/64 \
+ ${NSC} ${NSC_DEV} ${NSB_IP}/24 ${NSB_IP6}/64
+}
+
setup()
{
local with_vrf=${1}
@@ -484,12 +500,6 @@ setup()
ip -netns ${NSB} ro add ${VRF_IP}/32 via ${NSA_IP} dev ${NSB_DEV}
ip -netns ${NSB} -6 ro add ${VRF_IP6}/128 via ${NSA_IP6} dev ${NSB_DEV}
-
- # some VRF tests use ns-C which has the same config as
- # ns-B but for a device NOT in the VRF
- create_ns ${NSC} "-" "-"
- connect_ns ${NSA} ${NSA_DEV2} ${NSA_IP}/24 ${NSA_IP6}/64 \
- ${NSC} ${NSC_DEV} ${NSB_IP}/24 ${NSB_IP6}/64
else
ip -netns ${NSA} ro add ${NSB_LO_IP}/32 via ${NSB_IP} dev ${NSA_DEV}
ip -netns ${NSA} ro add ${NSB_LO_IP6}/128 via ${NSB_IP6} dev ${NSA_DEV}
@@ -1240,7 +1250,9 @@ ipv4_tcp_vrf()
log_test_addr ${a} $? 1 "Global server, local connection"
# run MD5 tests
+ setup_vrf_dup
ipv4_tcp_md5
+ cleanup_vrf_dup
#
# enable VRF global server
@@ -1798,8 +1810,9 @@ ipv4_addr_bind_vrf()
for a in ${NSA_IP} ${VRF_IP}
do
log_start
+ show_hint "Socket not bound to VRF, but address is in VRF"
run_cmd nettest -s -R -P icmp -l ${a} -b
- log_test_addr ${a} $? 0 "Raw socket bind to local address"
+ log_test_addr ${a} $? 1 "Raw socket bind to local address"
log_start
run_cmd nettest -s -R -P icmp -l ${a} -I ${NSA_DEV} -b
@@ -2191,7 +2204,7 @@ ipv6_ping_vrf()
log_start
show_hint "Fails since VRF device does not support linklocal or multicast"
run_cmd ${ping6} -c1 -w1 ${a}
- log_test_addr ${a} $? 2 "ping out, VRF bind"
+ log_test_addr ${a} $? 1 "ping out, VRF bind"
done
for a in ${NSB_IP6} ${NSB_LO_IP6} ${NSB_LINKIP6}%${NSA_DEV} ${MCAST}%${NSA_DEV}
@@ -2719,7 +2732,9 @@ ipv6_tcp_vrf()
log_test_addr ${a} $? 1 "Global server, local connection"
# run MD5 tests
+ setup_vrf_dup
ipv6_tcp_md5
+ cleanup_vrf_dup
#
# enable VRF global server
@@ -3414,11 +3429,14 @@ ipv6_addr_bind_novrf()
run_cmd nettest -6 -s -l ${a} -I ${NSA_DEV} -t1 -b
log_test_addr ${a} $? 0 "TCP socket bind to local address after device bind"
+ # Sadly, the kernel allows binding a socket to a device and then
+ # binding to an address not on the device. So this test passes
+ # when it really should not
a=${NSA_LO_IP6}
log_start
- show_hint "Should fail with 'Cannot assign requested address'"
+ show_hint "Tecnically should fail since address is not on device but kernel allows"
run_cmd nettest -6 -s -l ${a} -I ${NSA_DEV} -t1 -b
- log_test_addr ${a} $? 1 "TCP socket bind to out of scope local address"
+ log_test_addr ${a} $? 0 "TCP socket bind to out of scope local address"
}
ipv6_addr_bind_vrf()
@@ -3459,10 +3477,15 @@ ipv6_addr_bind_vrf()
run_cmd nettest -6 -s -l ${a} -I ${NSA_DEV} -t1 -b
log_test_addr ${a} $? 0 "TCP socket bind to local address with device bind"
+ # Sadly, the kernel allows binding a socket to a device and then
+ # binding to an address not on the device. The only restriction
+ # is that the address is valid in the L3 domain. So this test
+ # passes when it really should not
a=${VRF_IP6}
log_start
+ show_hint "Tecnically should fail since address is not on device but kernel allows"
run_cmd nettest -6 -s -l ${a} -I ${NSA_DEV} -t1 -b
- log_test_addr ${a} $? 1 "TCP socket bind to VRF address with device bind"
+ log_test_addr ${a} $? 0 "TCP socket bind to VRF address with device bind"
a=${NSA_LO_IP6}
log_start
@@ -4002,8 +4025,8 @@ EOF
################################################################################
# main
-TESTS_IPV4="ipv4_ping ipv4_tcp ipv4_udp ipv4_addr_bind ipv4_runtime ipv4_netfilter"
-TESTS_IPV6="ipv6_ping ipv6_tcp ipv6_udp ipv6_addr_bind ipv6_runtime ipv6_netfilter"
+TESTS_IPV4="ipv4_ping ipv4_tcp ipv4_udp ipv4_bind ipv4_runtime ipv4_netfilter"
+TESTS_IPV6="ipv6_ping ipv6_tcp ipv6_udp ipv6_bind ipv6_runtime ipv6_netfilter"
TESTS_OTHER="use_cases"
PAUSE_ON_FAIL=no
@@ -4077,3 +4100,11 @@ cleanup 2>/dev/null
printf "\nTests passed: %3d\n" ${nsuccess}
printf "Tests failed: %3d\n" ${nfail}
+
+if [ $nfail -ne 0 ]; then
+ exit 1 # KSFT_FAIL
+elif [ $nsuccess -eq 0 ]; then
+ exit $ksft_skip
+fi
+
+exit 0 # KSFT_PASS
diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh
index b5a69ad191b0..d444ee6aa3cb 100755
--- a/tools/testing/selftests/net/fib_nexthops.sh
+++ b/tools/testing/selftests/net/fib_nexthops.sh
@@ -629,6 +629,66 @@ ipv6_fcnal()
log_test $? 0 "Nexthops removed on admin down"
}
+ipv6_grp_refs()
+{
+ if [ ! -x "$(command -v mausezahn)" ]; then
+ echo "SKIP: Could not run test; need mausezahn tool"
+ return
+ fi
+
+ run_cmd "$IP link set dev veth1 up"
+ run_cmd "$IP link add veth1.10 link veth1 up type vlan id 10"
+ run_cmd "$IP link add veth1.20 link veth1 up type vlan id 20"
+ run_cmd "$IP -6 addr add 2001:db8:91::1/64 dev veth1.10"
+ run_cmd "$IP -6 addr add 2001:db8:92::1/64 dev veth1.20"
+ run_cmd "$IP -6 neigh add 2001:db8:91::2 lladdr 00:11:22:33:44:55 dev veth1.10"
+ run_cmd "$IP -6 neigh add 2001:db8:92::2 lladdr 00:11:22:33:44:55 dev veth1.20"
+ run_cmd "$IP nexthop add id 100 via 2001:db8:91::2 dev veth1.10"
+ run_cmd "$IP nexthop add id 101 via 2001:db8:92::2 dev veth1.20"
+ run_cmd "$IP nexthop add id 102 group 100"
+ run_cmd "$IP route add 2001:db8:101::1/128 nhid 102"
+
+ # create per-cpu dsts through nh 100
+ run_cmd "ip netns exec me mausezahn -6 veth1.10 -B 2001:db8:101::1 -A 2001:db8:91::1 -c 5 -t tcp "dp=1-1023, flags=syn" >/dev/null 2>&1"
+
+ # remove nh 100 from the group to delete the route potentially leaving
+ # a stale per-cpu dst which holds a reference to the nexthop's net
+ # device and to the IPv6 route
+ run_cmd "$IP nexthop replace id 102 group 101"
+ run_cmd "$IP route del 2001:db8:101::1/128"
+
+ # add both nexthops to the group so a reference is taken on them
+ run_cmd "$IP nexthop replace id 102 group 100/101"
+
+ # if the bug described in commit "net: nexthop: release IPv6 per-cpu
+ # dsts when replacing a nexthop group" exists at this point we have
+ # an unlinked IPv6 route (but not freed due to stale dst) with a
+ # reference over the group so we delete the group which will again
+ # only unlink it due to the route reference
+ run_cmd "$IP nexthop del id 102"
+
+ # delete the nexthop with stale dst, since we have an unlinked
+ # group with a ref to it and an unlinked IPv6 route with ref to the
+ # group, the nh will only be unlinked and not freed so the stale dst
+ # remains forever and we get a net device refcount imbalance
+ run_cmd "$IP nexthop del id 100"
+
+ # if a reference was lost this command will hang because the net device
+ # cannot be removed
+ timeout -s KILL 5 ip netns exec me ip link del veth1.10 >/dev/null 2>&1
+
+ # we can't cleanup if the command is hung trying to delete the netdev
+ if [ $? -eq 137 ]; then
+ return 1
+ fi
+
+ # cleanup
+ run_cmd "$IP link del veth1.20"
+ run_cmd "$IP nexthop flush"
+
+ return 0
+}
+
ipv6_grp_fcnal()
{
local rc
@@ -734,6 +794,9 @@ ipv6_grp_fcnal()
run_cmd "$IP nexthop add id 108 group 31/24"
log_test $? 2 "Nexthop group can not have a blackhole and another nexthop"
+
+ ipv6_grp_refs
+ log_test $? 0 "Nexthop group replace refcounts"
}
ipv6_res_grp_fcnal()
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 5abe92d55b69..996af1ae3d3d 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -444,24 +444,63 @@ fib_rp_filter_test()
setup
set -e
+ ip netns add ns2
+ ip netns set ns2 auto
+
+ ip -netns ns2 link set dev lo up
+
+ $IP link add name veth1 type veth peer name veth2
+ $IP link set dev veth2 netns ns2
+ $IP address add 192.0.2.1/24 dev veth1
+ ip -netns ns2 address add 192.0.2.1/24 dev veth2
+ $IP link set dev veth1 up
+ ip -netns ns2 link set dev veth2 up
+
$IP link set dev lo address 52:54:00:6a:c7:5e
- $IP link set dummy0 address 52:54:00:6a:c7:5e
- $IP link add dummy1 type dummy
- $IP link set dummy1 address 52:54:00:6a:c7:5e
- $IP link set dev dummy1 up
+ $IP link set dev veth1 address 52:54:00:6a:c7:5e
+ ip -netns ns2 link set dev lo address 52:54:00:6a:c7:5e
+ ip -netns ns2 link set dev veth2 address 52:54:00:6a:c7:5e
+
+ # 1. (ns2) redirect lo's egress to veth2's egress
+ ip netns exec ns2 tc qdisc add dev lo parent root handle 1: fq_codel
+ ip netns exec ns2 tc filter add dev lo parent 1: protocol arp basic \
+ action mirred egress redirect dev veth2
+ ip netns exec ns2 tc filter add dev lo parent 1: protocol ip basic \
+ action mirred egress redirect dev veth2
+
+ # 2. (ns1) redirect veth1's ingress to lo's ingress
+ $NS_EXEC tc qdisc add dev veth1 ingress
+ $NS_EXEC tc filter add dev veth1 ingress protocol arp basic \
+ action mirred ingress redirect dev lo
+ $NS_EXEC tc filter add dev veth1 ingress protocol ip basic \
+ action mirred ingress redirect dev lo
+
+ # 3. (ns1) redirect lo's egress to veth1's egress
+ $NS_EXEC tc qdisc add dev lo parent root handle 1: fq_codel
+ $NS_EXEC tc filter add dev lo parent 1: protocol arp basic \
+ action mirred egress redirect dev veth1
+ $NS_EXEC tc filter add dev lo parent 1: protocol ip basic \
+ action mirred egress redirect dev veth1
+
+ # 4. (ns2) redirect veth2's ingress to lo's ingress
+ ip netns exec ns2 tc qdisc add dev veth2 ingress
+ ip netns exec ns2 tc filter add dev veth2 ingress protocol arp basic \
+ action mirred ingress redirect dev lo
+ ip netns exec ns2 tc filter add dev veth2 ingress protocol ip basic \
+ action mirred ingress redirect dev lo
+
$NS_EXEC sysctl -qw net.ipv4.conf.all.rp_filter=1
$NS_EXEC sysctl -qw net.ipv4.conf.all.accept_local=1
$NS_EXEC sysctl -qw net.ipv4.conf.all.route_localnet=1
-
- $NS_EXEC tc qd add dev dummy1 parent root handle 1: fq_codel
- $NS_EXEC tc filter add dev dummy1 parent 1: protocol arp basic action mirred egress redirect dev lo
- $NS_EXEC tc filter add dev dummy1 parent 1: protocol ip basic action mirred egress redirect dev lo
+ ip netns exec ns2 sysctl -qw net.ipv4.conf.all.rp_filter=1
+ ip netns exec ns2 sysctl -qw net.ipv4.conf.all.accept_local=1
+ ip netns exec ns2 sysctl -qw net.ipv4.conf.all.route_localnet=1
set +e
- run_cmd "ip netns exec ns1 ping -I dummy1 -w1 -c1 198.51.100.1"
+ run_cmd "ip netns exec ns2 ping -w1 -c1 192.0.2.1"
log_test $? 0 "rp_filter passes local packets"
- run_cmd "ip netns exec ns1 ping -I dummy1 -w1 -c1 127.0.0.1"
+ run_cmd "ip netns exec ns2 ping -w1 -c1 127.0.0.1"
log_test $? 0 "rp_filter passes loopback packets"
cleanup
diff --git a/tools/testing/selftests/net/forwarding/forwarding.config.sample b/tools/testing/selftests/net/forwarding/forwarding.config.sample
index bf17e485684f..b0980a2efa31 100644
--- a/tools/testing/selftests/net/forwarding/forwarding.config.sample
+++ b/tools/testing/selftests/net/forwarding/forwarding.config.sample
@@ -13,6 +13,8 @@ NETIFS[p5]=veth4
NETIFS[p6]=veth5
NETIFS[p7]=veth6
NETIFS[p8]=veth7
+NETIFS[p9]=veth8
+NETIFS[p10]=veth9
# Port that does not have a cable connected.
NETIF_NO_CABLE=eth8
diff --git a/tools/testing/selftests/net/icmp_redirect.sh b/tools/testing/selftests/net/icmp_redirect.sh
index ecbf57f264ed..7b9d6e31b8e7 100755
--- a/tools/testing/selftests/net/icmp_redirect.sh
+++ b/tools/testing/selftests/net/icmp_redirect.sh
@@ -311,7 +311,7 @@ check_exception()
ip -netns h1 ro get ${H1_VRF_ARG} ${H2_N2_IP} | \
grep -E -v 'mtu|redirected' | grep -q "cache"
fi
- log_test $? 0 "IPv4: ${desc}"
+ log_test $? 0 "IPv4: ${desc}" 0
# No PMTU info for test "redirect" and "mtu exception plus redirect"
if [ "$with_redirect" = "yes" ] && [ "$desc" != "redirect exception plus mtu" ]; then
diff --git a/tools/testing/selftests/net/mptcp/config b/tools/testing/selftests/net/mptcp/config
index 0faaccd21447..2b82628decb1 100644
--- a/tools/testing/selftests/net/mptcp/config
+++ b/tools/testing/selftests/net/mptcp/config
@@ -9,7 +9,6 @@ CONFIG_NETFILTER=y
CONFIG_NETFILTER_ADVANCED=y
CONFIG_NETFILTER_NETLINK=m
CONFIG_NF_TABLES=m
-CONFIG_NFT_COUNTER=m
CONFIG_NFT_COMPAT=m
CONFIG_NETFILTER_XTABLES=m
CONFIG_NETFILTER_XT_MATCH_BPF=m
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index e61fc4c32ba2..6e468e0f42f7 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -31,6 +31,8 @@ struct tls_crypto_info_keys {
struct tls12_crypto_info_chacha20_poly1305 chacha20;
struct tls12_crypto_info_sm4_gcm sm4gcm;
struct tls12_crypto_info_sm4_ccm sm4ccm;
+ struct tls12_crypto_info_aes_ccm_128 aesccm128;
+ struct tls12_crypto_info_aes_gcm_256 aesgcm256;
};
size_t len;
};
@@ -61,6 +63,16 @@ static void tls_crypto_info_init(uint16_t tls_version, uint16_t cipher_type,
tls12->sm4ccm.info.version = tls_version;
tls12->sm4ccm.info.cipher_type = cipher_type;
break;
+ case TLS_CIPHER_AES_CCM_128:
+ tls12->len = sizeof(struct tls12_crypto_info_aes_ccm_128);
+ tls12->aesccm128.info.version = tls_version;
+ tls12->aesccm128.info.cipher_type = cipher_type;
+ break;
+ case TLS_CIPHER_AES_GCM_256:
+ tls12->len = sizeof(struct tls12_crypto_info_aes_gcm_256);
+ tls12->aesgcm256.info.version = tls_version;
+ tls12->aesgcm256.info.cipher_type = cipher_type;
+ break;
default:
break;
}
@@ -78,26 +90,21 @@ static void memrnd(void *s, size_t n)
*byte++ = rand();
}
-FIXTURE(tls_basic)
-{
- int fd, cfd;
- bool notls;
-};
-
-FIXTURE_SETUP(tls_basic)
+static void ulp_sock_pair(struct __test_metadata *_metadata,
+ int *fd, int *cfd, bool *notls)
{
struct sockaddr_in addr;
socklen_t len;
int sfd, ret;
- self->notls = false;
+ *notls = false;
len = sizeof(addr);
addr.sin_family = AF_INET;
addr.sin_addr.s_addr = htonl(INADDR_ANY);
addr.sin_port = 0;
- self->fd = socket(AF_INET, SOCK_STREAM, 0);
+ *fd = socket(AF_INET, SOCK_STREAM, 0);
sfd = socket(AF_INET, SOCK_STREAM, 0);
ret = bind(sfd, &addr, sizeof(addr));
@@ -108,26 +115,96 @@ FIXTURE_SETUP(tls_basic)
ret = getsockname(sfd, &addr, &len);
ASSERT_EQ(ret, 0);
- ret = connect(self->fd, &addr, sizeof(addr));
+ ret = connect(*fd, &addr, sizeof(addr));
ASSERT_EQ(ret, 0);
- self->cfd = accept(sfd, &addr, &len);
- ASSERT_GE(self->cfd, 0);
+ *cfd = accept(sfd, &addr, &len);
+ ASSERT_GE(*cfd, 0);
close(sfd);
- ret = setsockopt(self->fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
+ ret = setsockopt(*fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
if (ret != 0) {
ASSERT_EQ(errno, ENOENT);
- self->notls = true;
+ *notls = true;
printf("Failure setting TCP_ULP, testing without tls\n");
return;
}
- ret = setsockopt(self->cfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
+ ret = setsockopt(*cfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
ASSERT_EQ(ret, 0);
}
+/* Produce a basic cmsg */
+static int tls_send_cmsg(int fd, unsigned char record_type,
+ void *data, size_t len, int flags)
+{
+ char cbuf[CMSG_SPACE(sizeof(char))];
+ int cmsg_len = sizeof(char);
+ struct cmsghdr *cmsg;
+ struct msghdr msg;
+ struct iovec vec;
+
+ vec.iov_base = data;
+ vec.iov_len = len;
+ memset(&msg, 0, sizeof(struct msghdr));
+ msg.msg_iov = &vec;
+ msg.msg_iovlen = 1;
+ msg.msg_control = cbuf;
+ msg.msg_controllen = sizeof(cbuf);
+ cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = SOL_TLS;
+ /* test sending non-record types. */
+ cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
+ cmsg->cmsg_len = CMSG_LEN(cmsg_len);
+ *CMSG_DATA(cmsg) = record_type;
+ msg.msg_controllen = cmsg->cmsg_len;
+
+ return sendmsg(fd, &msg, flags);
+}
+
+static int tls_recv_cmsg(struct __test_metadata *_metadata,
+ int fd, unsigned char record_type,
+ void *data, size_t len, int flags)
+{
+ char cbuf[CMSG_SPACE(sizeof(char))];
+ struct cmsghdr *cmsg;
+ unsigned char ctype;
+ struct msghdr msg;
+ struct iovec vec;
+ int n;
+
+ vec.iov_base = data;
+ vec.iov_len = len;
+ memset(&msg, 0, sizeof(struct msghdr));
+ msg.msg_iov = &vec;
+ msg.msg_iovlen = 1;
+ msg.msg_control = cbuf;
+ msg.msg_controllen = sizeof(cbuf);
+
+ n = recvmsg(fd, &msg, flags);
+
+ cmsg = CMSG_FIRSTHDR(&msg);
+ EXPECT_NE(cmsg, NULL);
+ EXPECT_EQ(cmsg->cmsg_level, SOL_TLS);
+ EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE);
+ ctype = *((unsigned char *)CMSG_DATA(cmsg));
+ EXPECT_EQ(ctype, record_type);
+
+ return n;
+}
+
+FIXTURE(tls_basic)
+{
+ int fd, cfd;
+ bool notls;
+};
+
+FIXTURE_SETUP(tls_basic)
+{
+ ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls);
+}
+
FIXTURE_TEARDOWN(tls_basic)
{
close(self->fd);
@@ -196,63 +273,48 @@ FIXTURE_VARIANT_ADD(tls, 13_sm4_ccm)
.cipher_type = TLS_CIPHER_SM4_CCM,
};
+FIXTURE_VARIANT_ADD(tls, 12_aes_ccm)
+{
+ .tls_version = TLS_1_2_VERSION,
+ .cipher_type = TLS_CIPHER_AES_CCM_128,
+};
+
+FIXTURE_VARIANT_ADD(tls, 13_aes_ccm)
+{
+ .tls_version = TLS_1_3_VERSION,
+ .cipher_type = TLS_CIPHER_AES_CCM_128,
+};
+
+FIXTURE_VARIANT_ADD(tls, 12_aes_gcm_256)
+{
+ .tls_version = TLS_1_2_VERSION,
+ .cipher_type = TLS_CIPHER_AES_GCM_256,
+};
+
+FIXTURE_VARIANT_ADD(tls, 13_aes_gcm_256)
+{
+ .tls_version = TLS_1_3_VERSION,
+ .cipher_type = TLS_CIPHER_AES_GCM_256,
+};
+
FIXTURE_SETUP(tls)
{
struct tls_crypto_info_keys tls12;
- struct sockaddr_in addr;
- socklen_t len;
- int sfd, ret;
-
- self->notls = false;
- len = sizeof(addr);
+ int ret;
tls_crypto_info_init(variant->tls_version, variant->cipher_type,
&tls12);
- addr.sin_family = AF_INET;
- addr.sin_addr.s_addr = htonl(INADDR_ANY);
- addr.sin_port = 0;
-
- self->fd = socket(AF_INET, SOCK_STREAM, 0);
- sfd = socket(AF_INET, SOCK_STREAM, 0);
+ ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls);
- ret = bind(sfd, &addr, sizeof(addr));
- ASSERT_EQ(ret, 0);
- ret = listen(sfd, 10);
- ASSERT_EQ(ret, 0);
+ if (self->notls)
+ return;
- ret = getsockname(sfd, &addr, &len);
+ ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, tls12.len);
ASSERT_EQ(ret, 0);
- ret = connect(self->fd, &addr, sizeof(addr));
+ ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, tls12.len);
ASSERT_EQ(ret, 0);
-
- ret = setsockopt(self->fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
- if (ret != 0) {
- self->notls = true;
- printf("Failure setting TCP_ULP, testing without tls\n");
- }
-
- if (!self->notls) {
- ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12,
- tls12.len);
- ASSERT_EQ(ret, 0);
- }
-
- self->cfd = accept(sfd, &addr, &len);
- ASSERT_GE(self->cfd, 0);
-
- if (!self->notls) {
- ret = setsockopt(self->cfd, IPPROTO_TCP, TCP_ULP, "tls",
- sizeof("tls"));
- ASSERT_EQ(ret, 0);
-
- ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12,
- tls12.len);
- ASSERT_EQ(ret, 0);
- }
-
- close(sfd);
}
FIXTURE_TEARDOWN(tls)
@@ -613,6 +675,95 @@ TEST_F(tls, splice_to_pipe)
EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
}
+TEST_F(tls, splice_cmsg_to_pipe)
+{
+ char *test_str = "test_read";
+ char record_type = 100;
+ int send_len = 10;
+ char buf[10];
+ int p[2];
+
+ ASSERT_GE(pipe(p), 0);
+ EXPECT_EQ(tls_send_cmsg(self->fd, 100, test_str, send_len, 0), 10);
+ EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, send_len, 0), -1);
+ EXPECT_EQ(errno, EINVAL);
+ EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1);
+ EXPECT_EQ(errno, EIO);
+ EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type,
+ buf, sizeof(buf), MSG_WAITALL),
+ send_len);
+ EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
+}
+
+TEST_F(tls, splice_dec_cmsg_to_pipe)
+{
+ char *test_str = "test_read";
+ char record_type = 100;
+ int send_len = 10;
+ char buf[10];
+ int p[2];
+
+ ASSERT_GE(pipe(p), 0);
+ EXPECT_EQ(tls_send_cmsg(self->fd, 100, test_str, send_len, 0), 10);
+ EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1);
+ EXPECT_EQ(errno, EIO);
+ EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, send_len, 0), -1);
+ EXPECT_EQ(errno, EINVAL);
+ EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type,
+ buf, sizeof(buf), MSG_WAITALL),
+ send_len);
+ EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
+}
+
+TEST_F(tls, recv_and_splice)
+{
+ int send_len = TLS_PAYLOAD_MAX_LEN;
+ char mem_send[TLS_PAYLOAD_MAX_LEN];
+ char mem_recv[TLS_PAYLOAD_MAX_LEN];
+ int half = send_len / 2;
+ int p[2];
+
+ ASSERT_GE(pipe(p), 0);
+ EXPECT_EQ(send(self->fd, mem_send, send_len, 0), send_len);
+ /* Recv half of the record, splice the other half */
+ EXPECT_EQ(recv(self->cfd, mem_recv, half, MSG_WAITALL), half);
+ EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, half, SPLICE_F_NONBLOCK),
+ half);
+ EXPECT_EQ(read(p[0], &mem_recv[half], half), half);
+ EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
+}
+
+TEST_F(tls, peek_and_splice)
+{
+ int send_len = TLS_PAYLOAD_MAX_LEN;
+ char mem_send[TLS_PAYLOAD_MAX_LEN];
+ char mem_recv[TLS_PAYLOAD_MAX_LEN];
+ int chunk = TLS_PAYLOAD_MAX_LEN / 4;
+ int n, i, p[2];
+
+ memrnd(mem_send, sizeof(mem_send));
+
+ ASSERT_GE(pipe(p), 0);
+ for (i = 0; i < 4; i++)
+ EXPECT_EQ(send(self->fd, &mem_send[chunk * i], chunk, 0),
+ chunk);
+
+ EXPECT_EQ(recv(self->cfd, mem_recv, chunk * 5 / 2,
+ MSG_WAITALL | MSG_PEEK),
+ chunk * 5 / 2);
+ EXPECT_EQ(memcmp(mem_send, mem_recv, chunk * 5 / 2), 0);
+
+ n = 0;
+ while (n < send_len) {
+ i = splice(self->cfd, NULL, p[1], NULL, send_len - n, 0);
+ EXPECT_GT(i, 0);
+ n += i;
+ }
+ EXPECT_EQ(n, send_len);
+ EXPECT_EQ(read(p[0], mem_recv, send_len), send_len);
+ EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
+}
+
TEST_F(tls, recvmsg_single)
{
char const *test_str = "test_recvmsg_single";
@@ -1193,60 +1344,30 @@ TEST_F(tls, mutliproc_sendpage_writers)
TEST_F(tls, control_msg)
{
- if (self->notls)
- return;
-
- char cbuf[CMSG_SPACE(sizeof(char))];
- char const *test_str = "test_read";
- int cmsg_len = sizeof(char);
+ char *test_str = "test_read";
char record_type = 100;
- struct cmsghdr *cmsg;
- struct msghdr msg;
int send_len = 10;
- struct iovec vec;
char buf[10];
- vec.iov_base = (char *)test_str;
- vec.iov_len = 10;
- memset(&msg, 0, sizeof(struct msghdr));
- msg.msg_iov = &vec;
- msg.msg_iovlen = 1;
- msg.msg_control = cbuf;
- msg.msg_controllen = sizeof(cbuf);
- cmsg = CMSG_FIRSTHDR(&msg);
- cmsg->cmsg_level = SOL_TLS;
- /* test sending non-record types. */
- cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
- cmsg->cmsg_len = CMSG_LEN(cmsg_len);
- *CMSG_DATA(cmsg) = record_type;
- msg.msg_controllen = cmsg->cmsg_len;
+ if (self->notls)
+ SKIP(return, "no TLS support");
- EXPECT_EQ(sendmsg(self->fd, &msg, 0), send_len);
+ EXPECT_EQ(tls_send_cmsg(self->fd, record_type, test_str, send_len, 0),
+ send_len);
/* Should fail because we didn't provide a control message */
EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1);
- vec.iov_base = buf;
- EXPECT_EQ(recvmsg(self->cfd, &msg, MSG_WAITALL | MSG_PEEK), send_len);
-
- cmsg = CMSG_FIRSTHDR(&msg);
- EXPECT_NE(cmsg, NULL);
- EXPECT_EQ(cmsg->cmsg_level, SOL_TLS);
- EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE);
- record_type = *((unsigned char *)CMSG_DATA(cmsg));
- EXPECT_EQ(record_type, 100);
+ EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type,
+ buf, sizeof(buf), MSG_WAITALL | MSG_PEEK),
+ send_len);
EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
/* Recv the message again without MSG_PEEK */
- record_type = 0;
memset(buf, 0, sizeof(buf));
- EXPECT_EQ(recvmsg(self->cfd, &msg, MSG_WAITALL), send_len);
- cmsg = CMSG_FIRSTHDR(&msg);
- EXPECT_NE(cmsg, NULL);
- EXPECT_EQ(cmsg->cmsg_level, SOL_TLS);
- EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE);
- record_type = *((unsigned char *)CMSG_DATA(cmsg));
- EXPECT_EQ(record_type, 100);
+ EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type,
+ buf, sizeof(buf), MSG_WAITALL),
+ send_len);
EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
}
@@ -1301,6 +1422,160 @@ TEST_F(tls, shutdown_reuse)
EXPECT_EQ(errno, EISCONN);
}
+FIXTURE(tls_err)
+{
+ int fd, cfd;
+ int fd2, cfd2;
+ bool notls;
+};
+
+FIXTURE_VARIANT(tls_err)
+{
+ uint16_t tls_version;
+};
+
+FIXTURE_VARIANT_ADD(tls_err, 12_aes_gcm)
+{
+ .tls_version = TLS_1_2_VERSION,
+};
+
+FIXTURE_VARIANT_ADD(tls_err, 13_aes_gcm)
+{
+ .tls_version = TLS_1_3_VERSION,
+};
+
+FIXTURE_SETUP(tls_err)
+{
+ struct tls_crypto_info_keys tls12;
+ int ret;
+
+ tls_crypto_info_init(variant->tls_version, TLS_CIPHER_AES_GCM_128,
+ &tls12);
+
+ ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls);
+ ulp_sock_pair(_metadata, &self->fd2, &self->cfd2, &self->notls);
+ if (self->notls)
+ return;
+
+ ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, tls12.len);
+ ASSERT_EQ(ret, 0);
+
+ ret = setsockopt(self->cfd2, SOL_TLS, TLS_RX, &tls12, tls12.len);
+ ASSERT_EQ(ret, 0);
+}
+
+FIXTURE_TEARDOWN(tls_err)
+{
+ close(self->fd);
+ close(self->cfd);
+ close(self->fd2);
+ close(self->cfd2);
+}
+
+TEST_F(tls_err, bad_rec)
+{
+ char buf[64];
+
+ if (self->notls)
+ SKIP(return, "no TLS support");
+
+ memset(buf, 0x55, sizeof(buf));
+ EXPECT_EQ(send(self->fd2, buf, sizeof(buf), 0), sizeof(buf));
+ EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
+ EXPECT_EQ(errno, EMSGSIZE);
+ EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), MSG_DONTWAIT), -1);
+ EXPECT_EQ(errno, EAGAIN);
+}
+
+TEST_F(tls_err, bad_auth)
+{
+ char buf[128];
+ int n;
+
+ if (self->notls)
+ SKIP(return, "no TLS support");
+
+ memrnd(buf, sizeof(buf) / 2);
+ EXPECT_EQ(send(self->fd, buf, sizeof(buf) / 2, 0), sizeof(buf) / 2);
+ n = recv(self->cfd, buf, sizeof(buf), 0);
+ EXPECT_GT(n, sizeof(buf) / 2);
+
+ buf[n - 1]++;
+
+ EXPECT_EQ(send(self->fd2, buf, n, 0), n);
+ EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
+ EXPECT_EQ(errno, EBADMSG);
+ EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
+ EXPECT_EQ(errno, EBADMSG);
+}
+
+TEST_F(tls_err, bad_in_large_read)
+{
+ char txt[3][64];
+ char cip[3][128];
+ char buf[3 * 128];
+ int i, n;
+
+ if (self->notls)
+ SKIP(return, "no TLS support");
+
+ /* Put 3 records in the sockets */
+ for (i = 0; i < 3; i++) {
+ memrnd(txt[i], sizeof(txt[i]));
+ EXPECT_EQ(send(self->fd, txt[i], sizeof(txt[i]), 0),
+ sizeof(txt[i]));
+ n = recv(self->cfd, cip[i], sizeof(cip[i]), 0);
+ EXPECT_GT(n, sizeof(txt[i]));
+ /* Break the third message */
+ if (i == 2)
+ cip[2][n - 1]++;
+ EXPECT_EQ(send(self->fd2, cip[i], n, 0), n);
+ }
+
+ /* We should be able to receive the first two messages */
+ EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), sizeof(txt[0]) * 2);
+ EXPECT_EQ(memcmp(buf, txt[0], sizeof(txt[0])), 0);
+ EXPECT_EQ(memcmp(buf + sizeof(txt[0]), txt[1], sizeof(txt[1])), 0);
+ /* Third message is bad */
+ EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
+ EXPECT_EQ(errno, EBADMSG);
+ EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
+ EXPECT_EQ(errno, EBADMSG);
+}
+
+TEST_F(tls_err, bad_cmsg)
+{
+ char *test_str = "test_read";
+ int send_len = 10;
+ char cip[128];
+ char buf[128];
+ char txt[64];
+ int n;
+
+ if (self->notls)
+ SKIP(return, "no TLS support");
+
+ /* Queue up one data record */
+ memrnd(txt, sizeof(txt));
+ EXPECT_EQ(send(self->fd, txt, sizeof(txt), 0), sizeof(txt));
+ n = recv(self->cfd, cip, sizeof(cip), 0);
+ EXPECT_GT(n, sizeof(txt));
+ EXPECT_EQ(send(self->fd2, cip, n, 0), n);
+
+ EXPECT_EQ(tls_send_cmsg(self->fd, 100, test_str, send_len, 0), 10);
+ n = recv(self->cfd, cip, sizeof(cip), 0);
+ cip[n - 1]++; /* Break it */
+ EXPECT_GT(n, send_len);
+ EXPECT_EQ(send(self->fd2, cip, n, 0), n);
+
+ EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), sizeof(txt));
+ EXPECT_EQ(memcmp(buf, txt, sizeof(txt)), 0);
+ EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
+ EXPECT_EQ(errno, EBADMSG);
+ EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
+ EXPECT_EQ(errno, EBADMSG);
+}
+
TEST(non_established) {
struct tls12_crypto_info_aes_gcm_256 tls12;
struct sockaddr_in addr;
@@ -1355,64 +1630,82 @@ TEST(non_established) {
TEST(keysizes) {
struct tls12_crypto_info_aes_gcm_256 tls12;
- struct sockaddr_in addr;
- int sfd, ret, fd, cfd;
- socklen_t len;
+ int ret, fd, cfd;
bool notls;
- notls = false;
- len = sizeof(addr);
-
memset(&tls12, 0, sizeof(tls12));
tls12.info.version = TLS_1_2_VERSION;
tls12.info.cipher_type = TLS_CIPHER_AES_GCM_256;
- addr.sin_family = AF_INET;
- addr.sin_addr.s_addr = htonl(INADDR_ANY);
- addr.sin_port = 0;
+ ulp_sock_pair(_metadata, &fd, &cfd, &notls);
- fd = socket(AF_INET, SOCK_STREAM, 0);
- sfd = socket(AF_INET, SOCK_STREAM, 0);
+ if (!notls) {
+ ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12,
+ sizeof(tls12));
+ EXPECT_EQ(ret, 0);
+
+ ret = setsockopt(cfd, SOL_TLS, TLS_RX, &tls12,
+ sizeof(tls12));
+ EXPECT_EQ(ret, 0);
+ }
+
+ close(fd);
+ close(cfd);
+}
+
+TEST(tls_v6ops) {
+ struct tls_crypto_info_keys tls12;
+ struct sockaddr_in6 addr, addr2;
+ int sfd, ret, fd;
+ socklen_t len, len2;
+
+ tls_crypto_info_init(TLS_1_2_VERSION, TLS_CIPHER_AES_GCM_128, &tls12);
+
+ addr.sin6_family = AF_INET6;
+ addr.sin6_addr = in6addr_any;
+ addr.sin6_port = 0;
+
+ fd = socket(AF_INET6, SOCK_STREAM, 0);
+ sfd = socket(AF_INET6, SOCK_STREAM, 0);
ret = bind(sfd, &addr, sizeof(addr));
ASSERT_EQ(ret, 0);
ret = listen(sfd, 10);
ASSERT_EQ(ret, 0);
+ len = sizeof(addr);
ret = getsockname(sfd, &addr, &len);
ASSERT_EQ(ret, 0);
ret = connect(fd, &addr, sizeof(addr));
ASSERT_EQ(ret, 0);
+ len = sizeof(addr);
+ ret = getsockname(fd, &addr, &len);
+ ASSERT_EQ(ret, 0);
+
ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
- if (ret != 0) {
- notls = true;
- printf("Failure setting TCP_ULP, testing without tls\n");
+ if (ret) {
+ ASSERT_EQ(errno, ENOENT);
+ SKIP(return, "no TLS support");
}
+ ASSERT_EQ(ret, 0);
- if (!notls) {
- ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12,
- sizeof(tls12));
- EXPECT_EQ(ret, 0);
- }
+ ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12, tls12.len);
+ ASSERT_EQ(ret, 0);
- cfd = accept(sfd, &addr, &len);
- ASSERT_GE(cfd, 0);
+ ret = setsockopt(fd, SOL_TLS, TLS_RX, &tls12, tls12.len);
+ ASSERT_EQ(ret, 0);
- if (!notls) {
- ret = setsockopt(cfd, IPPROTO_TCP, TCP_ULP, "tls",
- sizeof("tls"));
- EXPECT_EQ(ret, 0);
+ len2 = sizeof(addr2);
+ ret = getsockname(fd, &addr2, &len2);
+ ASSERT_EQ(ret, 0);
- ret = setsockopt(cfd, SOL_TLS, TLS_RX, &tls12,
- sizeof(tls12));
- EXPECT_EQ(ret, 0);
- }
+ EXPECT_EQ(len2, len);
+ EXPECT_EQ(memcmp(&addr, &addr2, len), 0);
- close(sfd);
close(fd);
- close(cfd);
+ close(sfd);
}
TEST_HARNESS_MAIN
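The new fixture variants above extend coverage to AES-CCM-128 and AES-GCM-256. As a hedged, standalone sketch (not part of the patch) of how userspace turns one of these ciphers on for a connected TCP socket: the zeroed key material and the fallback defines are illustrative assumptions, and real key material would come from a TLS handshake.

/*
 * Hypothetical sketch: enable kTLS transmit with TLS 1.3 / AES-GCM-256 on a
 * connected TCP socket.  Key, IV, salt and record sequence are left zeroed
 * purely for illustration.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <linux/tls.h>

#ifndef TCP_ULP
#define TCP_ULP 31	/* assumed fallback if libc headers lack it */
#endif
#ifndef SOL_TLS
#define SOL_TLS 282	/* assumed fallback if libc headers lack it */
#endif

static int enable_ktls_tx_gcm256(int fd)
{
	struct tls12_crypto_info_aes_gcm_256 crypto = {
		.info.version = TLS_1_3_VERSION,
		.info.cipher_type = TLS_CIPHER_AES_GCM_256,
	};

	/* Attach the TLS ULP first, as ulp_sock_pair() does in the test. */
	if (setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;

	return setsockopt(fd, SOL_TLS, TLS_TX, &crypto, sizeof(crypto));
}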
diff --git a/tools/testing/selftests/net/toeplitz.c b/tools/testing/selftests/net/toeplitz.c
index 710ac956bdb3..c5489341cfb8 100644
--- a/tools/testing/selftests/net/toeplitz.c
+++ b/tools/testing/selftests/net/toeplitz.c
@@ -498,7 +498,7 @@ static void parse_opts(int argc, char **argv)
bool have_toeplitz = false;
int index, c;
- while ((c = getopt_long(argc, argv, "46C:d:i:k:r:stT:u:v", long_options, &index)) != -1) {
+ while ((c = getopt_long(argc, argv, "46C:d:i:k:r:stT:uv", long_options, &index)) != -1) {
switch (c) {
case '4':
cfg_family = AF_INET;
diff --git a/tools/testing/selftests/net/udpgro_fwd.sh b/tools/testing/selftests/net/udpgro_fwd.sh
index 7f26591f236b..3ea73013d956 100755
--- a/tools/testing/selftests/net/udpgro_fwd.sh
+++ b/tools/testing/selftests/net/udpgro_fwd.sh
@@ -132,7 +132,7 @@ run_test() {
local rcv=`ip netns exec $NS_DST $ipt"-save" -c | grep 'dport 8000' | \
sed -e 's/\[//' -e 's/:.*//'`
if [ $rcv != $pkts ]; then
- echo " fail - received $rvs packets, expected $pkts"
+ echo " fail - received $rcv packets, expected $pkts"
ret=1
return
fi
@@ -185,6 +185,7 @@ for family in 4 6; do
IPT=iptables
SUFFIX=24
VXDEV=vxlan
+ PING=ping
if [ $family = 6 ]; then
BM_NET=$BM_NET_V6
@@ -192,6 +193,7 @@ for family in 4 6; do
SUFFIX="64 nodad"
VXDEV=vxlan6
IPT=ip6tables
+ PING="ping6"
fi
echo "IPv$family"
@@ -237,7 +239,7 @@ for family in 4 6; do
# load arp cache before running the test to reduce the amount of
# stray traffic on top of the UDP tunnel
- ip netns exec $NS_SRC ping -q -c 1 $OL_NET$DST_NAT >/dev/null
+ ip netns exec $NS_SRC $PING -q -c 1 $OL_NET$DST_NAT >/dev/null
run_test "GRO fwd over UDP tunnel" $OL_NET$DST_NAT 1 1 $OL_NET$DST
cleanup
diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c
index c66da6ffd6d8..7badaf215de2 100644
--- a/tools/testing/selftests/net/udpgso.c
+++ b/tools/testing/selftests/net/udpgso.c
@@ -156,13 +156,13 @@ struct testcase testcases_v4[] = {
},
{
/* send max number of min sized segments */
- .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V4,
+ .tlen = UDP_MAX_SEGMENTS,
.gso_len = 1,
- .r_num_mss = UDP_MAX_SEGMENTS - CONST_HDRLEN_V4,
+ .r_num_mss = UDP_MAX_SEGMENTS,
},
{
/* send max number + 1 of min sized segments: fail */
- .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V4 + 1,
+ .tlen = UDP_MAX_SEGMENTS + 1,
.gso_len = 1,
.tfail = true,
},
@@ -259,13 +259,13 @@ struct testcase testcases_v6[] = {
},
{
/* send max number of min sized segments */
- .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V6,
+ .tlen = UDP_MAX_SEGMENTS,
.gso_len = 1,
- .r_num_mss = UDP_MAX_SEGMENTS - CONST_HDRLEN_V6,
+ .r_num_mss = UDP_MAX_SEGMENTS,
},
{
/* send max number + 1 of min sized segments: fail */
- .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V6 + 1,
+ .tlen = UDP_MAX_SEGMENTS + 1,
.gso_len = 1,
.tfail = true,
},
diff --git a/tools/testing/selftests/net/udpgso_bench_tx.c b/tools/testing/selftests/net/udpgso_bench_tx.c
index 17512a43885e..f1fdaa270291 100644
--- a/tools/testing/selftests/net/udpgso_bench_tx.c
+++ b/tools/testing/selftests/net/udpgso_bench_tx.c
@@ -419,6 +419,7 @@ static void usage(const char *filepath)
static void parse_opts(int argc, char **argv)
{
+ const char *bind_addr = NULL;
int max_len, hdrlen;
int c;
@@ -446,7 +447,7 @@ static void parse_opts(int argc, char **argv)
cfg_cpu = strtol(optarg, NULL, 0);
break;
case 'D':
- setup_sockaddr(cfg_family, optarg, &cfg_dst_addr);
+ bind_addr = optarg;
break;
case 'l':
cfg_runtime_ms = strtoul(optarg, NULL, 10) * 1000;
@@ -492,6 +493,11 @@ static void parse_opts(int argc, char **argv)
}
}
+ if (!bind_addr)
+ bind_addr = cfg_family == PF_INET6 ? "::" : "0.0.0.0";
+
+ setup_sockaddr(cfg_family, bind_addr, &cfg_dst_addr);
+
if (optind != argc)
usage(argv[0]);
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
index 8748199ac109..ffca314897c4 100644
--- a/tools/testing/selftests/netfilter/Makefile
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -5,7 +5,8 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh \
nft_concat_range.sh nft_conntrack_helper.sh \
nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
- ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh
+ ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \
+ conntrack_vrf.sh
LDLIBS = -lmnl
TEST_GEN_FILES = nf-queue
diff --git a/tools/testing/selftests/netfilter/conntrack_vrf.sh b/tools/testing/selftests/netfilter/conntrack_vrf.sh
new file mode 100755
index 000000000000..8b5ea9234588
--- /dev/null
+++ b/tools/testing/selftests/netfilter/conntrack_vrf.sh
@@ -0,0 +1,241 @@
+#!/bin/sh
+
+# This script demonstrates interaction of conntrack and vrf.
+# The vrf driver calls the netfilter hooks again, with oif/iif
+# pointing at the VRF device.
+#
+# For ingress, this means first iteration has iifname of lower/real
+# device. In this script, thats veth0.
+# Second iteration is iifname set to vrf device, tvrf in this script.
+#
+# For egress, this is reversed: first iteration has the vrf device,
+# second iteration is done with the lower/real/veth0 device.
+#
+# test_ct_zone_in demonstrates unexpected change of nftables
+# behavior caused by commit 09e856d54bda5f28 "vrf: Reset skb conntrack
+# connection on VRF rcv"
+#
+# It was possible to assign conntrack zone to a packet (or mark it for
+# `notracking`) in the prerouting chain before conntrack, based on real iif.
+#
+# After the change, the zone assignment is lost and the zone is assigned
+# based on the VRF master interface instead (in case such a rule exists),
+# i.e. the assignment based on the real (lower) iif no longer takes effect.
+# Thus it is impossible to distinguish packets based on the original
+# interface.
+#
+# test_masquerade_vrf and test_masquerade_veth0 demonstrate the problem
+# that was supposed to be fixed by the commit mentioned above to make sure
+# that any fix to test case 1 won't break masquerade again.
+
+ksft_skip=4
+
+IP0=172.30.30.1
+IP1=172.30.30.2
+PFXL=30
+ret=0
+
+sfx=$(mktemp -u "XXXXXXXX")
+ns0="ns0-$sfx"
+ns1="ns1-$sfx"
+
+cleanup()
+{
+ ip netns pids $ns0 | xargs kill 2>/dev/null
+ ip netns pids $ns1 | xargs kill 2>/dev/null
+
+ ip netns del $ns0 $ns1
+}
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without nft tool"
+ exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+ip netns add "$ns0"
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not create net namespace $ns0"
+ exit $ksft_skip
+fi
+ip netns add "$ns1"
+
+trap cleanup EXIT
+
+ip netns exec $ns0 sysctl -q -w net.ipv4.conf.default.rp_filter=0
+ip netns exec $ns0 sysctl -q -w net.ipv4.conf.all.rp_filter=0
+ip netns exec $ns0 sysctl -q -w net.ipv4.conf.all.rp_filter=0
+
+ip link add veth0 netns "$ns0" type veth peer name veth0 netns "$ns1" > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not add veth device"
+ exit $ksft_skip
+fi
+
+ip -net $ns0 li add tvrf type vrf table 9876
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not add vrf device"
+ exit $ksft_skip
+fi
+
+ip -net $ns0 li set lo up
+
+ip -net $ns0 li set veth0 master tvrf
+ip -net $ns0 li set tvrf up
+ip -net $ns0 li set veth0 up
+ip -net $ns1 li set veth0 up
+
+ip -net $ns0 addr add $IP0/$PFXL dev veth0
+ip -net $ns1 addr add $IP1/$PFXL dev veth0
+
+ip netns exec $ns1 iperf3 -s > /dev/null 2>&1&
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not start iperf3"
+ exit $ksft_skip
+fi
+
+# test vrf ingress handling.
+# The incoming connection should be placed in conntrack zone 1,
+# as decided by the first iteration of the ruleset.
+test_ct_zone_in()
+{
+ip netns exec $ns0 nft -f - <<EOF
+table testct {
+ chain rawpre {
+ type filter hook prerouting priority raw;
+
+ iif { veth0, tvrf } counter meta nftrace set 1
+ iif veth0 counter ct zone set 1 counter return
+ iif tvrf counter ct zone set 2 counter return
+ ip protocol icmp counter
+ notrack counter
+ }
+
+ chain rawout {
+ type filter hook output priority raw;
+
+ oif veth0 counter ct zone set 1 counter return
+ oif tvrf counter ct zone set 2 counter return
+ notrack counter
+ }
+}
+EOF
+ ip netns exec $ns1 ping -W 1 -c 1 -I veth0 $IP0 > /dev/null
+
+ # should be in zone 1, not zone 2
+ count=$(ip netns exec $ns0 conntrack -L -s $IP1 -d $IP0 -p icmp --zone 1 2>/dev/null | wc -l)
+ if [ $count -eq 1 ]; then
+ echo "PASS: entry found in conntrack zone 1"
+ else
+ echo "FAIL: entry not found in conntrack zone 1"
+ count=$(ip netns exec $ns0 conntrack -L -s $IP1 -d $IP0 -p icmp --zone 2 2> /dev/null | wc -l)
+ if [ $count -eq 1 ]; then
+ echo "FAIL: entry found in zone 2 instead"
+ else
+ echo "FAIL: entry not in zone 1 or 2, dumping table"
+ ip netns exec $ns0 conntrack -L
+ ip netns exec $ns0 nft list ruleset
+ fi
+ fi
+}
+
+# add masq rule that gets evaluated w. outif set to vrf device.
+# This tests the first iteration of the packet through conntrack,
+# oifname is the vrf device.
+test_masquerade_vrf()
+{
+ local qdisc=$1
+
+ if [ "$qdisc" != "default" ]; then
+ tc -net $ns0 qdisc add dev tvrf root $qdisc
+ fi
+
+ ip netns exec $ns0 conntrack -F 2>/dev/null
+
+ip netns exec $ns0 nft -f - <<EOF
+flush ruleset
+table ip nat {
+ chain rawout {
+ type filter hook output priority raw;
+
+ oif tvrf ct state untracked counter
+ }
+ chain postrouting2 {
+ type filter hook postrouting priority mangle;
+
+ oif tvrf ct state untracked counter
+ }
+ chain postrouting {
+ type nat hook postrouting priority 0;
+ # NB: masquerade should always be combined with 'oif(name) bla',
+ # lack of this is intentional here, we want to exercise double-snat.
+ ip saddr 172.30.30.0/30 counter masquerade random
+ }
+}
+EOF
+ ip netns exec $ns0 ip vrf exec tvrf iperf3 -t 1 -c $IP1 >/dev/null
+ if [ $? -ne 0 ]; then
+ echo "FAIL: iperf3 connect failure with masquerade + sport rewrite on vrf device"
+ ret=1
+ return
+ fi
+
+ # must also check that nat table was evaluated on second (lower device) iteration.
+ ip netns exec $ns0 nft list table ip nat |grep -q 'counter packets 2' &&
+ ip netns exec $ns0 nft list table ip nat |grep -q 'untracked counter packets [1-9]'
+ if [ $? -eq 0 ]; then
+ echo "PASS: iperf3 connect with masquerade + sport rewrite on vrf device ($qdisc qdisc)"
+ else
+ echo "FAIL: vrf rules have unexpected counter value"
+ ret=1
+ fi
+
+ if [ "$qdisc" != "default" ]; then
+ tc -net $ns0 qdisc del dev tvrf root
+ fi
+}
+
+# add masq rule that gets evaluated w. outif set to veth device.
+# This tests the 2nd iteration of the packet through conntrack,
+# oifname is the lower device (veth0 in this case).
+test_masquerade_veth()
+{
+ ip netns exec $ns0 conntrack -F 2>/dev/null
+ip netns exec $ns0 nft -f - <<EOF
+flush ruleset
+table ip nat {
+ chain postrouting {
+ type nat hook postrouting priority 0;
+ meta oif veth0 ip saddr 172.30.30.0/30 counter masquerade random
+ }
+}
+EOF
+ ip netns exec $ns0 ip vrf exec tvrf iperf3 -t 1 -c $IP1 > /dev/null
+ if [ $? -ne 0 ]; then
+ echo "FAIL: iperf3 connect failure with masquerade + sport rewrite on veth device"
+ ret=1
+ return
+ fi
+
+ # must also check that nat table was evaluated on second (lower device) iteration.
+ ip netns exec $ns0 nft list table ip nat |grep -q 'counter packets 2'
+ if [ $? -eq 0 ]; then
+ echo "PASS: iperf3 connect with masquerade + sport rewrite on veth device"
+ else
+ echo "FAIL: vrf masq rule has unexpected counter value"
+ ret=1
+ fi
+}
+
+test_ct_zone_in
+test_masquerade_vrf "default"
+test_masquerade_vrf "pfifo"
+test_masquerade_veth
+
+exit $ret
diff --git a/tools/testing/selftests/netfilter/nft_concat_range.sh b/tools/testing/selftests/netfilter/nft_concat_range.sh
index 5a4938d6dcf2..ed61f6cab60f 100755
--- a/tools/testing/selftests/netfilter/nft_concat_range.sh
+++ b/tools/testing/selftests/netfilter/nft_concat_range.sh
@@ -23,8 +23,8 @@ TESTS="reported_issues correctness concurrency timeout"
# Set types, defined by TYPE_ variables below
TYPES="net_port port_net net6_port port_proto net6_port_mac net6_port_mac_proto
- net_port_net net_mac net_mac_icmp net6_mac_icmp net6_port_net6_port
- net_port_mac_proto_net"
+ net_port_net net_mac mac_net net_mac_icmp net6_mac_icmp
+ net6_port_net6_port net_port_mac_proto_net"
# Reported bugs, also described by TYPE_ variables below
BUGS="flush_remove_add"
@@ -277,6 +277,23 @@ perf_entries 1000
perf_proto ipv4
"
+TYPE_mac_net="
+display mac,net
+type_spec ether_addr . ipv4_addr
+chain_spec ether saddr . ip saddr
+dst
+src mac addr4
+start 1
+count 5
+src_delta 2000
+tools sendip nc bash
+proto udp
+
+race_repeat 0
+
+perf_duration 0
+"
+
TYPE_net_mac_icmp="
display net,mac - ICMP
type_spec ipv4_addr . ether_addr
@@ -984,7 +1001,8 @@ format() {
fi
done
for f in ${src}; do
- __expr="${__expr} . "
+ [ "${__expr}" != "{ " ] && __expr="${__expr} . "
+
__start="$(eval format_"${f}" "${srcstart}")"
__end="$(eval format_"${f}" "${srcend}")"
diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
index da1c1e4b6c86..d88867d2fed7 100755
--- a/tools/testing/selftests/netfilter/nft_nat.sh
+++ b/tools/testing/selftests/netfilter/nft_nat.sh
@@ -759,19 +759,21 @@ test_port_shadow()
local result=""
local logmsg=""
- echo ROUTER | ip netns exec "$ns0" nc -w 5 -u -l -p 1405 >/dev/null 2>&1 &
- nc_r=$!
+ # make shadow entry, from client (ns2), going to (ns1), port 41404, sport 1405.
+ echo "fake-entry" | ip netns exec "$ns2" timeout 1 socat -u STDIN UDP:"$daddrc":41404,sourceport=1405
- echo CLIENT | ip netns exec "$ns2" nc -w 5 -u -l -p 1405 >/dev/null 2>&1 &
- nc_c=$!
+ echo ROUTER | ip netns exec "$ns0" timeout 5 socat -u STDIN UDP4-LISTEN:1405 &
+ sc_r=$!
- # make shadow entry, from client (ns2), going to (ns1), port 41404, sport 1405.
- echo "fake-entry" | ip netns exec "$ns2" nc -w 1 -p 1405 -u "$daddrc" 41404 > /dev/null
+ echo CLIENT | ip netns exec "$ns2" timeout 5 socat -u STDIN UDP4-LISTEN:1405,reuseport &
+ sc_c=$!
+
+ sleep 0.3
# ns1 tries to connect to ns0:1405. With default settings this should connect
# to client, it matches the conntrack entry created above.
- result=$(echo "" | ip netns exec "$ns1" nc -w 1 -p 41404 -u "$daddrs" 1405)
+ result=$(echo "data" | ip netns exec "$ns1" timeout 1 socat - UDP:"$daddrs":1405,sourceport=41404)
if [ "$result" = "$expect" ] ;then
echo "PASS: portshadow test $test: got reply from ${expect}${logmsg}"
@@ -780,7 +782,7 @@ test_port_shadow()
ret=1
fi
- kill $nc_r $nc_c 2>/dev/null
+ kill $sc_r $sc_c 2>/dev/null
# flush udp entries for next test round, if any
ip netns exec "$ns0" conntrack -F >/dev/null 2>&1
@@ -816,11 +818,10 @@ table $family raw {
chain prerouting {
type filter hook prerouting priority -300; policy accept;
meta iif veth0 udp dport 1405 notrack
- udp dport 1405 notrack
}
chain output {
type filter hook output priority -300; policy accept;
- udp sport 1405 notrack
+ meta oif veth0 udp sport 1405 notrack
}
}
EOF
@@ -851,6 +852,18 @@ test_port_shadowing()
{
local family="ip"
+ conntrack -h >/dev/null 2>&1
+ if [ $? -ne 0 ];then
+ echo "SKIP: Could not run nat port shadowing test without conntrack tool"
+ return
+ fi
+
+ socat -h > /dev/null 2>&1
+ if [ $? -ne 0 ];then
+ echo "SKIP: Could not run nat port shadowing test without socat tool"
+ return
+ fi
+
ip netns exec "$ns0" sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
ip netns exec "$ns0" sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
diff --git a/tools/testing/selftests/netfilter/nft_queue.sh b/tools/testing/selftests/netfilter/nft_queue.sh
index 3d202b90b33d..7d27f1f3bc01 100755
--- a/tools/testing/selftests/netfilter/nft_queue.sh
+++ b/tools/testing/selftests/netfilter/nft_queue.sh
@@ -16,6 +16,10 @@ timeout=4
cleanup()
{
+ ip netns pids ${ns1} | xargs kill 2>/dev/null
+ ip netns pids ${ns2} | xargs kill 2>/dev/null
+ ip netns pids ${nsrouter} | xargs kill 2>/dev/null
+
ip netns del ${ns1}
ip netns del ${ns2}
ip netns del ${nsrouter}
@@ -332,6 +336,55 @@ EOF
echo "PASS: tcp via loopback and re-queueing"
}
+test_icmp_vrf() {
+ ip -net $ns1 link add tvrf type vrf table 9876
+ if [ $? -ne 0 ];then
+ echo "SKIP: Could not add vrf device"
+ return
+ fi
+
+ ip -net $ns1 li set eth0 master tvrf
+ ip -net $ns1 li set tvrf up
+
+ ip -net $ns1 route add 10.0.2.0/24 via 10.0.1.1 dev eth0 table 9876
+ip netns exec ${ns1} nft -f /dev/stdin <<EOF
+flush ruleset
+table inet filter {
+ chain output {
+ type filter hook output priority 0; policy accept;
+ meta oifname "tvrf" icmp type echo-request counter queue num 1
+ meta oifname "eth0" icmp type echo-request counter queue num 1
+ }
+ chain post {
+ type filter hook postrouting priority 0; policy accept;
+ meta oifname "tvrf" icmp type echo-request counter queue num 1
+ meta oifname "eth0" icmp type echo-request counter queue num 1
+ }
+}
+EOF
+ ip netns exec ${ns1} ./nf-queue -q 1 -t $timeout &
+ local nfqpid=$!
+
+ sleep 1
+ ip netns exec ${ns1} ip vrf exec tvrf ping -c 1 10.0.2.99 > /dev/null
+
+ for n in output post; do
+ for d in tvrf eth0; do
+ ip netns exec ${ns1} nft list chain inet filter $n | grep -q "oifname \"$d\" icmp type echo-request counter packets 1"
+ if [ $? -ne 0 ] ; then
+ echo "FAIL: chain $n: icmp packet counter mismatch for device $d" 1>&2
+ ip netns exec ${ns1} nft list ruleset
+ ret=1
+ return
+ fi
+ done
+ done
+
+ wait $nfqpid
+ [ $? -eq 0 ] && echo "PASS: icmp+nfqueue via vrf"
+ wait 2>/dev/null
+}
+
ip netns exec ${nsrouter} sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
@@ -372,5 +425,6 @@ test_queue 20
test_tcp_forward
test_tcp_localhost
test_tcp_localhost_requeue
+test_icmp_vrf
exit $ret
diff --git a/tools/testing/selftests/netfilter/nft_zones_many.sh b/tools/testing/selftests/netfilter/nft_zones_many.sh
index ac646376eb01..04633119b29a 100755
--- a/tools/testing/selftests/netfilter/nft_zones_many.sh
+++ b/tools/testing/selftests/netfilter/nft_zones_many.sh
@@ -18,11 +18,17 @@ cleanup()
ip netns del $ns
}
-ip netns add $ns
-if [ $? -ne 0 ];then
- echo "SKIP: Could not create net namespace $gw"
- exit $ksft_skip
-fi
+checktool (){
+ if ! $1 > /dev/null 2>&1; then
+ echo "SKIP: Could not $2"
+ exit $ksft_skip
+ fi
+}
+
+checktool "nft --version" "run test without nft tool"
+checktool "ip -Version" "run test without ip tool"
+checktool "socat -V" "run test without socat tool"
+checktool "ip netns add $ns" "create net namespace"
trap cleanup EXIT
@@ -71,7 +77,8 @@ EOF
local start=$(date +%s%3N)
i=$((i + 10000))
j=$((j + 1))
- dd if=/dev/zero of=/dev/stdout bs=8k count=10000 2>/dev/null | ip netns exec "$ns" nc -w 1 -q 1 -u -p 12345 127.0.0.1 12345 > /dev/null
+ # nft rule in output places each packet in a different zone.
+ dd if=/dev/zero of=/dev/stdout bs=8k count=10000 2>/dev/null | ip netns exec "$ns" socat STDIN UDP:127.0.0.1:12345,sourceport=12345
if [ $? -ne 0 ] ;then
ret=1
break
diff --git a/tools/testing/selftests/tc-testing/config b/tools/testing/selftests/tc-testing/config
index b71828df5a6d..a3239d5e40c7 100644
--- a/tools/testing/selftests/tc-testing/config
+++ b/tools/testing/selftests/tc-testing/config
@@ -60,6 +60,8 @@ CONFIG_NET_IFE_SKBTCINDEX=m
CONFIG_NET_SCH_FIFO=y
CONFIG_NET_SCH_ETS=m
CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_FQ_PIE=m
+CONFIG_NETDEVSIM=m
#
## Network testing
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
index 503982b8f295..91832400ddbd 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
@@ -68,7 +68,7 @@
"cmdUnderTest": "$TC action add action bpf object-file $EBPFDIR/action.o section action-ok index 667",
"expExitCode": "0",
"verifyCmd": "$TC action get action bpf index 667",
- "matchPattern": "action order [0-9]*: bpf action.o:\\[action-ok\\] id [0-9]* tag [0-9a-f]{16}( jited)? default-action pipe.*index 667 ref",
+ "matchPattern": "action order [0-9]*: bpf action.o:\\[action-ok\\] id [0-9].* tag [0-9a-f]{16}( jited)? default-action pipe.*index 667 ref",
"matchCount": "1",
"teardown": [
"$TC action flush action bpf"
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/mq.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/mq.json
index 88a20c781e49..c6046096d9db 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/mq.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/mq.json
@@ -15,7 +15,7 @@
"cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: mq",
"expExitCode": "0",
"verifyCmd": "$TC qdisc show dev $ETH",
- "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-4] bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1",
+ "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-4]",
"matchCount": "4",
"teardown": [
"echo \"1\" > /sys/bus/netdevsim/del_device"
@@ -37,7 +37,7 @@
"cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: mq",
"expExitCode": "0",
"verifyCmd": "$TC qdisc show dev $ETH",
- "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-9,a-f][0-9,a-f]{0,2} bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1",
+ "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-9,a-f][0-9,a-f]{0,2}",
"matchCount": "256",
"teardown": [
"echo \"1\" > /sys/bus/netdevsim/del_device"
@@ -60,7 +60,7 @@
"cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: mq",
"expExitCode": "2",
"verifyCmd": "$TC qdisc show dev $ETH",
- "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-4] bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1",
+ "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-4]",
"matchCount": "4",
"teardown": [
"echo \"1\" > /sys/bus/netdevsim/del_device"
@@ -82,7 +82,7 @@
"cmdUnderTest": "$TC qdisc del dev $ETH root handle 1: mq",
"expExitCode": "2",
"verifyCmd": "$TC qdisc show dev $ETH",
- "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-4] bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1",
+ "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-4]",
"matchCount": "0",
"teardown": [
"echo \"1\" > /sys/bus/netdevsim/del_device"
@@ -106,7 +106,7 @@
"cmdUnderTest": "$TC qdisc del dev $ETH root handle 1: mq",
"expExitCode": "2",
"verifyCmd": "$TC qdisc show dev $ETH",
- "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-4] bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1",
+ "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-4]",
"matchCount": "0",
"teardown": [
"echo \"1\" > /sys/bus/netdevsim/del_device"
@@ -128,7 +128,7 @@
"cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: mq",
"expExitCode": "2",
"verifyCmd": "$TC qdisc show dev $ETH",
- "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-4] bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1",
+ "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-4]",
"matchCount": "0",
"teardown": [
"echo \"1\" > /sys/bus/netdevsim/del_device"
diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py
index a3e43189d940..ee22e3447ec7 100755
--- a/tools/testing/selftests/tc-testing/tdc.py
+++ b/tools/testing/selftests/tc-testing/tdc.py
@@ -716,6 +716,7 @@ def set_operation_mode(pm, parser, args, remaining):
list_test_cases(alltests)
exit(0)
+ exit_code = 0 # KSFT_PASS
if len(alltests):
req_plugins = pm.get_required_plugins(alltests)
try:
@@ -724,6 +725,8 @@ def set_operation_mode(pm, parser, args, remaining):
print('The following plugins were not found:')
print('{}'.format(pde.missing_pg))
catresults = test_runner(pm, args, alltests)
+ if catresults.count_failures() != 0:
+ exit_code = 1 # KSFT_FAIL
if args.format == 'none':
print('Test results output suppression requested\n')
else:
@@ -748,6 +751,8 @@ def set_operation_mode(pm, parser, args, remaining):
gid=int(os.getenv('SUDO_GID')))
else:
print('No tests found\n')
+ exit_code = 4 # KSFT_SKIP
+ exit(exit_code)
def main():
"""
@@ -767,8 +772,5 @@ def main():
set_operation_mode(pm, parser, args, remaining)
- exit(0)
-
-
if __name__ == "__main__":
main()
diff --git a/tools/testing/selftests/tc-testing/tdc.sh b/tools/testing/selftests/tc-testing/tdc.sh
index 7fe38c76db44..afb0cd86fa3d 100755
--- a/tools/testing/selftests/tc-testing/tdc.sh
+++ b/tools/testing/selftests/tc-testing/tdc.sh
@@ -1,5 +1,6 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
+modprobe netdevsim
./tdc.py -c actions --nobuildebpf
./tdc.py -c qdisc
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index 8a09057d2f22..9354a5e0321c 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -87,7 +87,7 @@ static bool test_uffdio_minor = false;
static bool map_shared;
static int shm_fd;
-static int huge_fd;
+static int huge_fd = -1; /* only used for hugetlb_shared test */
static char *huge_fd_off0;
static unsigned long long *count_verify;
static int uffd = -1;
@@ -223,6 +223,9 @@ static void noop_alias_mapping(__u64 *start, size_t len, unsigned long offset)
static void hugetlb_release_pages(char *rel_area)
{
+ if (huge_fd == -1)
+ return;
+
if (fallocate(huge_fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
rel_area == huge_fd_off0 ? 0 : nr_pages * page_size,
nr_pages * page_size))
@@ -235,16 +238,17 @@ static void hugetlb_allocate_area(void **alloc_area)
char **alloc_area_alias;
*alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
- (map_shared ? MAP_SHARED : MAP_PRIVATE) |
- MAP_HUGETLB,
- huge_fd, *alloc_area == area_src ? 0 :
- nr_pages * page_size);
+ map_shared ? MAP_SHARED :
+ MAP_PRIVATE | MAP_HUGETLB |
+ (*alloc_area == area_src ? 0 : MAP_NORESERVE),
+ huge_fd,
+ *alloc_area == area_src ? 0 : nr_pages * page_size);
if (*alloc_area == MAP_FAILED)
err("mmap of hugetlbfs file failed");
if (map_shared) {
area_alias = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_HUGETLB,
+ MAP_SHARED,
huge_fd, *alloc_area == area_src ? 0 :
nr_pages * page_size);
if (area_alias == MAP_FAILED)
diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh
index ebc4ee0fe179..8a9461aa0878 100755
--- a/tools/testing/selftests/wireguard/netns.sh
+++ b/tools/testing/selftests/wireguard/netns.sh
@@ -276,7 +276,11 @@ n0 ping -W 1 -c 1 192.168.241.2
n1 wg set wg0 peer "$pub2" endpoint 192.168.241.2:7
ip2 link del wg0
ip2 link del wg1
-! n0 ping -W 1 -c 10 -f 192.168.241.2 || false # Should not crash kernel
+read _ _ tx_bytes_before < <(n0 wg show wg1 transfer)
+! n0 ping -W 1 -c 10 -f 192.168.241.2 || false
+sleep 1
+read _ _ tx_bytes_after < <(n0 wg show wg1 transfer)
+(( tx_bytes_after - tx_bytes_before < 70000 ))
ip0 link del wg1
ip1 link del wg0
@@ -609,6 +613,28 @@ ip0 link set wg0 up
kill $ncat_pid
ip0 link del wg0
+# Ensure that dst_cache references don't outlive netns lifetime
+ip1 link add dev wg0 type wireguard
+ip2 link add dev wg0 type wireguard
+configure_peers
+ip1 link add veth1 type veth peer name veth2
+ip1 link set veth2 netns $netns2
+ip1 addr add fd00:aa::1/64 dev veth1
+ip2 addr add fd00:aa::2/64 dev veth2
+ip1 link set veth1 up
+ip2 link set veth2 up
+waitiface $netns1 veth1
+waitiface $netns2 veth2
+ip1 -6 route add default dev veth1 via fd00:aa::2
+ip2 -6 route add default dev veth2 via fd00:aa::1
+n1 wg set wg0 peer "$pub2" endpoint [fd00:aa::2]:2
+n2 wg set wg0 peer "$pub1" endpoint [fd00:aa::1]:1
+n1 ping6 -c 1 fd00::2
+pp ip netns delete $netns1
+pp ip netns delete $netns2
+pp ip netns add $netns1
+pp ip netns add $netns2
+
# Ensure there aren't circular reference loops
ip1 link add wg1 type wireguard
ip2 link add wg2 type wireguard
@@ -627,7 +653,7 @@ while read -t 0.1 -r line 2>/dev/null || [[ $? -ne 142 ]]; do
done < /dev/kmsg
alldeleted=1
for object in "${!objects[@]}"; do
- if [[ ${objects["$object"]} != *createddestroyed ]]; then
+ if [[ ${objects["$object"]} != *createddestroyed && ${objects["$object"]} != *createdcreateddestroyeddestroyed ]]; then
echo "Error: $object: merely ${objects["$object"]}" >&3
alldeleted=0
fi
diff --git a/tools/testing/selftests/wireguard/qemu/debug.config b/tools/testing/selftests/wireguard/qemu/debug.config
index fe07d97df9fa..2b321b8a96cf 100644
--- a/tools/testing/selftests/wireguard/qemu/debug.config
+++ b/tools/testing/selftests/wireguard/qemu/debug.config
@@ -47,7 +47,7 @@ CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_TRACE_IRQFLAGS=y
CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_LIST=y
-CONFIG_DEBUG_PI_LIST=y
+CONFIG_DEBUG_PLIST=y
CONFIG_PROVE_RCU=y
CONFIG_SPARSE_RCU_POINTER=y
CONFIG_RCU_CPU_STALL_TIMEOUT=21
diff --git a/tools/testing/selftests/wireguard/qemu/kernel.config b/tools/testing/selftests/wireguard/qemu/kernel.config
index 74db83a0aedd..a9b5a520a1d2 100644
--- a/tools/testing/selftests/wireguard/qemu/kernel.config
+++ b/tools/testing/selftests/wireguard/qemu/kernel.config
@@ -66,6 +66,7 @@ CONFIG_PROC_SYSCTL=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
CONFIG_CONSOLE_LOGLEVEL_DEFAULT=15
+CONFIG_LOG_BUF_SHIFT=18
CONFIG_PRINTK_TIME=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_LEGACY_VSYSCALL_NONE=y
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 9646bb9112c1..72c4e6b39389 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1531,11 +1531,10 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
static int kvm_set_memslot(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot *old,
struct kvm_memory_slot *new, int as_id,
enum kvm_mr_change change)
{
- struct kvm_memory_slot *slot;
+ struct kvm_memory_slot *slot, old;
struct kvm_memslots *slots;
int r;
@@ -1566,7 +1565,7 @@ static int kvm_set_memslot(struct kvm *kvm,
* Note, the INVALID flag needs to be in the appropriate entry
* in the freshly allocated memslots, not in @old or @new.
*/
- slot = id_to_memslot(slots, old->id);
+ slot = id_to_memslot(slots, new->id);
slot->flags |= KVM_MEMSLOT_INVALID;
/*
@@ -1597,6 +1596,26 @@ static int kvm_set_memslot(struct kvm *kvm,
kvm_copy_memslots(slots, __kvm_memslots(kvm, as_id));
}
+ /*
+ * Make a full copy of the old memslot, the pointer will become stale
+ * when the memslots are re-sorted by update_memslots(), and the old
+ * memslot needs to be referenced after calling update_memslots(), e.g.
+ * to free its resources and for arch specific behavior. This needs to
+ * happen *after* (re)acquiring slots_arch_lock.
+ */
+ slot = id_to_memslot(slots, new->id);
+ if (slot) {
+ old = *slot;
+ } else {
+ WARN_ON_ONCE(change != KVM_MR_CREATE);
+ memset(&old, 0, sizeof(old));
+ old.id = new->id;
+ old.as_id = as_id;
+ }
+
+ /* Copy the arch-specific data, again after (re)acquiring slots_arch_lock. */
+ memcpy(&new->arch, &old.arch, sizeof(old.arch));
+
r = kvm_arch_prepare_memory_region(kvm, new, mem, change);
if (r)
goto out_slots;
@@ -1604,14 +1623,18 @@ static int kvm_set_memslot(struct kvm *kvm,
update_memslots(slots, new, change);
slots = install_new_memslots(kvm, as_id, slots);
- kvm_arch_commit_memory_region(kvm, mem, old, new, change);
+ kvm_arch_commit_memory_region(kvm, mem, &old, new, change);
+
+ /* Free the old memslot's metadata. Note, this is the full copy!!! */
+ if (change == KVM_MR_DELETE)
+ kvm_free_memslot(kvm, &old);
kvfree(slots);
return 0;
out_slots:
if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
- slot = id_to_memslot(slots, old->id);
+ slot = id_to_memslot(slots, new->id);
slot->flags &= ~KVM_MEMSLOT_INVALID;
slots = install_new_memslots(kvm, as_id, slots);
} else {
@@ -1626,7 +1649,6 @@ static int kvm_delete_memslot(struct kvm *kvm,
struct kvm_memory_slot *old, int as_id)
{
struct kvm_memory_slot new;
- int r;
if (!old->npages)
return -EINVAL;
@@ -1639,12 +1661,7 @@ static int kvm_delete_memslot(struct kvm *kvm,
*/
new.as_id = as_id;
- r = kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE);
- if (r)
- return r;
-
- kvm_free_memslot(kvm, old);
- return 0;
+ return kvm_set_memslot(kvm, mem, &new, as_id, KVM_MR_DELETE);
}
/*
@@ -1672,7 +1689,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
id = (u16)mem->slot;
/* General sanity checks */
- if (mem->memory_size & (PAGE_SIZE - 1))
+ if ((mem->memory_size & (PAGE_SIZE - 1)) ||
+ (mem->memory_size != (unsigned long)mem->memory_size))
return -EINVAL;
if (mem->guest_phys_addr & (PAGE_SIZE - 1))
return -EINVAL;
@@ -1718,7 +1736,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
if (!old.npages) {
change = KVM_MR_CREATE;
new.dirty_bitmap = NULL;
- memset(&new.arch, 0, sizeof(new.arch));
} else { /* Modify an existing slot. */
if ((new.userspace_addr != old.userspace_addr) ||
(new.npages != old.npages) ||
@@ -1732,9 +1749,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
else /* Nothing to change. */
return 0;
- /* Copy dirty_bitmap and arch from the current memslot. */
+ /* Copy dirty_bitmap from the current memslot. */
new.dirty_bitmap = old.dirty_bitmap;
- memcpy(&new.arch, &old.arch, sizeof(new.arch));
}
if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
@@ -1760,7 +1776,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
bitmap_set(new.dirty_bitmap, 0, new.npages);
}
- r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change);
+ r = kvm_set_memslot(kvm, mem, &new, as_id, change);
if (r)
goto out_bitmap;
@@ -2915,7 +2931,8 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
int r;
gpa_t gpa = ghc->gpa + offset;
- BUG_ON(len + offset > ghc->len);
+ if (WARN_ON_ONCE(len + offset > ghc->len))
+ return -EINVAL;
if (slots->generation != ghc->generation) {
if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
@@ -2952,7 +2969,8 @@ int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
int r;
gpa_t gpa = ghc->gpa + offset;
- BUG_ON(len + offset > ghc->len);
+ if (WARN_ON_ONCE(len + offset > ghc->len))
+ return -EINVAL;
if (slots->generation != ghc->generation) {
if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))