94 files changed, 973 insertions, 602 deletions
diff --git a/Documentation/ABI/stable/sysfs-bus-xen-backend b/Documentation/ABI/stable/sysfs-bus-xen-backend index 3d5951c8bf5f..e8b60bd766f7 100644 --- a/Documentation/ABI/stable/sysfs-bus-xen-backend +++ b/Documentation/ABI/stable/sysfs-bus-xen-backend @@ -73,3 +73,12 @@ KernelVersion: 3.0 Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> Description: Number of sectors written by the frontend. + +What: /sys/bus/xen-backend/devices/*/state +Date: August 2018 +KernelVersion: 4.19 +Contact: Joe Jin <joe.jin@oracle.com> +Description: + The state of the device. One of: 'Unknown', + 'Initialising', 'Initialised', 'Connected', 'Closing', + 'Closed', 'Reconfiguring', 'Reconfigured'. diff --git a/Documentation/ABI/testing/sysfs-driver-xen-blkback b/Documentation/ABI/testing/sysfs-driver-xen-blkback index 8bb43b66eb55..4e7babb3ba1f 100644 --- a/Documentation/ABI/testing/sysfs-driver-xen-blkback +++ b/Documentation/ABI/testing/sysfs-driver-xen-blkback @@ -15,3 +15,13 @@ Description: blkback. If the frontend tries to use more than max_persistent_grants, the LRU kicks in and starts removing 5% of max_persistent_grants every 100ms. + +What: /sys/module/xen_blkback/parameters/persistent_grant_unused_seconds +Date: August 2018 +KernelVersion: 4.19 +Contact: Roger Pau Monné <roger.pau@citrix.com> +Description: + How long a persistent grant is allowed to remain + allocated without being in use. The time is in + seconds, 0 means indefinitely long. + The default is 60 seconds. diff --git a/Documentation/arm64/sve.txt b/Documentation/arm64/sve.txt index f128f736b4a5..7169a0ec41d8 100644 --- a/Documentation/arm64/sve.txt +++ b/Documentation/arm64/sve.txt @@ -200,7 +200,7 @@ prctl(PR_SVE_SET_VL, unsigned long arg) thread. * Changing the vector length causes all of P0..P15, FFR and all bits of - Z0..V31 except for Z0 bits [127:0] .. Z31 bits [127:0] to become + Z0..Z31 except for Z0 bits [127:0] .. Z31 bits [127:0] to become unspecified. Calling PR_SVE_SET_VL with vl equal to the thread's current vector length, or calling PR_SVE_SET_VL with the PR_SVE_SET_VL_ONEXEC flag, does not constitute a change to the vector length for this purpose. @@ -500,7 +500,7 @@ References [2] arch/arm64/include/uapi/asm/ptrace.h AArch64 Linux ptrace ABI definitions -[3] linux/Documentation/arm64/cpu-feature-registers.txt +[3] Documentation/arm64/cpu-feature-registers.txt [4] ARM IHI0055C http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055c/IHI0055C_beta_aapcs64.pdf diff --git a/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt index b0a8af51c388..265b223cd978 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt @@ -11,7 +11,7 @@ The RISC-V supervisor ISA manual specifies three interrupt sources that are attached to every HLIC: software interrupts, the timer interrupt, and external interrupts. Software interrupts are used to send IPIs between cores. The timer interrupt comes from an architecturally mandated real-time timer that is -controller via Supervisor Binary Interface (SBI) calls and CSR reads. External +controlled via Supervisor Binary Interface (SBI) calls and CSR reads. External interrupts connect all other device interrupts to the HLIC, which are routed via the platform-level interrupt controller (PLIC). @@ -25,7 +25,15 @@ in the system. 
Required properties: - compatible : "riscv,cpu-intc" -- #interrupt-cells : should be <1> +- #interrupt-cells : should be <1>. The interrupt sources are defined by the + RISC-V supervisor ISA manual, with only the following three interrupts being + defined for supervisor mode: + - Source 1 is the supervisor software interrupt, which can be sent by an SBI + call and is reserved for use by software. + - Source 5 is the supervisor timer interrupt, which can be configured by + SBI calls and implements a one-shot timer. + - Source 9 is the supervisor external interrupt, which chains to all other + device interrupts. - interrupt-controller : Identifies the node as an interrupt controller Furthermore, this interrupt-controller MUST be embedded inside the cpu @@ -38,7 +46,7 @@ An example device tree entry for a HLIC is show below. ... cpu1-intc: interrupt-controller { #interrupt-cells = <1>; - compatible = "riscv,cpu-intc", "sifive,fu540-c000-cpu-intc"; + compatible = "sifive,fu540-c000-cpu-intc", "riscv,cpu-intc"; interrupt-controller; }; }; diff --git a/Documentation/hwmon/ina2xx b/Documentation/hwmon/ina2xx index 72d16f08e431..b8df81f6d6bc 100644 --- a/Documentation/hwmon/ina2xx +++ b/Documentation/hwmon/ina2xx @@ -32,7 +32,7 @@ Supported chips: Datasheet: Publicly available at the Texas Instruments website http://www.ti.com/ -Author: Lothar Felten <l-felten@ti.com> +Author: Lothar Felten <lothar.felten@gmail.com> Description ----------- diff --git a/Documentation/i2c/DMA-considerations b/Documentation/i2c/DMA-considerations index 966610aa4620..203002054120 100644 --- a/Documentation/i2c/DMA-considerations +++ b/Documentation/i2c/DMA-considerations @@ -50,10 +50,14 @@ bounce buffer. But you don't need to care about that detail, just use the returned buffer. If NULL is returned, the threshold was not met or a bounce buffer could not be allocated. Fall back to PIO in that case. -In any case, a buffer obtained from above needs to be released. It ensures data -is copied back to the message and a potentially used bounce buffer is freed:: +In any case, a buffer obtained from above needs to be released. Another helper +function ensures a potentially used bounce buffer is freed:: - i2c_release_dma_safe_msg_buf(msg, dma_buf); + i2c_put_dma_safe_msg_buf(dma_buf, msg, xferred); + +The last argument 'xferred' controls if the buffer is synced back to the +message or not. No syncing is needed in cases setting up DMA had an error and +there was no data transferred. The bounce buffer handling from the core is generic and simple. It will always allocate a new bounce buffer. If you want a more sophisticated handling (e.g. 
@@ -807,6 +807,9 @@ KBUILD_CFLAGS += $(call cc-option,-Wdeclaration-after-statement,) # disable pointer signed / unsigned warnings in gcc 4.0 KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign) +# disable stringop warnings in gcc 8+ +KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation) + # disable invalid "can't wrap" optimizations for signed / pointers KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 29e75b47becd..1b1a0e95c751 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -763,7 +763,6 @@ config NEED_PER_CPU_EMBED_FIRST_CHUNK config HOLES_IN_ZONE def_bool y - depends on NUMA source kernel/Kconfig.hz diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c index 6e9f33d14930..067d8937d5af 100644 --- a/arch/arm64/crypto/ghash-ce-glue.c +++ b/arch/arm64/crypto/ghash-ce-glue.c @@ -417,7 +417,7 @@ static int gcm_encrypt(struct aead_request *req) __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds); put_unaligned_be32(2, iv + GCM_IV_SIZE); - while (walk.nbytes >= AES_BLOCK_SIZE) { + while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) { int blocks = walk.nbytes / AES_BLOCK_SIZE; u8 *dst = walk.dst.virt.addr; u8 *src = walk.src.virt.addr; @@ -437,11 +437,18 @@ static int gcm_encrypt(struct aead_request *req) NULL); err = skcipher_walk_done(&walk, - walk.nbytes % AES_BLOCK_SIZE); + walk.nbytes % (2 * AES_BLOCK_SIZE)); } - if (walk.nbytes) + if (walk.nbytes) { __aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv, nrounds); + if (walk.nbytes > AES_BLOCK_SIZE) { + crypto_inc(iv, AES_BLOCK_SIZE); + __aes_arm64_encrypt(ctx->aes_key.key_enc, + ks + AES_BLOCK_SIZE, iv, + nrounds); + } + } } /* handle the tail */ @@ -545,7 +552,7 @@ static int gcm_decrypt(struct aead_request *req) __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds); put_unaligned_be32(2, iv + GCM_IV_SIZE); - while (walk.nbytes >= AES_BLOCK_SIZE) { + while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) { int blocks = walk.nbytes / AES_BLOCK_SIZE; u8 *dst = walk.dst.virt.addr; u8 *src = walk.src.virt.addr; @@ -564,11 +571,21 @@ static int gcm_decrypt(struct aead_request *req) } while (--blocks > 0); err = skcipher_walk_done(&walk, - walk.nbytes % AES_BLOCK_SIZE); + walk.nbytes % (2 * AES_BLOCK_SIZE)); } - if (walk.nbytes) + if (walk.nbytes) { + if (walk.nbytes > AES_BLOCK_SIZE) { + u8 *iv2 = iv + AES_BLOCK_SIZE; + + memcpy(iv2, iv, AES_BLOCK_SIZE); + crypto_inc(iv2, AES_BLOCK_SIZE); + + __aes_arm64_encrypt(ctx->aes_key.key_enc, iv2, + iv2, nrounds); + } __aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv, nrounds); + } } /* handle the tail */ diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c index b7fb5274b250..0c4fc223f225 100644 --- a/arch/arm64/crypto/sm4-ce-glue.c +++ b/arch/arm64/crypto/sm4-ce-glue.c @@ -69,5 +69,5 @@ static void __exit sm4_ce_mod_fini(void) crypto_unregister_alg(&sm4_ce_alg); } -module_cpu_feature_match(SM3, sm4_ce_mod_init); +module_cpu_feature_match(SM4, sm4_ce_mod_init); module_exit(sm4_ce_mod_fini); diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c index 3534aa6a4dc2..1b083c500b9a 100644 --- a/arch/m68k/mac/misc.c +++ b/arch/m68k/mac/misc.c @@ -98,11 +98,10 @@ static time64_t pmu_read_time(void) if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0) return 0; - while (!req.complete) - pmu_poll(); + pmu_wait_complete(&req); - time = (u32)((req.reply[1] << 24) | (req.reply[2] << 16) | - (req.reply[3] << 8) | req.reply[4]); + time = (u32)((req.reply[0] << 24) | 
(req.reply[1] << 16) | + (req.reply[2] << 8) | req.reply[3]); return time - RTC_OFFSET; } @@ -116,8 +115,7 @@ static void pmu_write_time(time64_t time) (data >> 24) & 0xFF, (data >> 16) & 0xFF, (data >> 8) & 0xFF, data & 0xFF) < 0) return; - while (!req.complete) - pmu_poll(); + pmu_wait_complete(&req); } static __u8 pmu_read_pram(int offset) diff --git a/arch/nios2/Kconfig.debug b/arch/nios2/Kconfig.debug index 7a49f0d28d14..f1da8a7b17ff 100644 --- a/arch/nios2/Kconfig.debug +++ b/arch/nios2/Kconfig.debug @@ -3,15 +3,6 @@ config TRACE_IRQFLAGS_SUPPORT def_bool y -config DEBUG_STACK_USAGE - bool "Enable stack utilization instrumentation" - depends on DEBUG_KERNEL - help - Enables the display of the minimum amount of free stack which each - task has ever had available in the sysrq-T and sysrq-P debug output. - - This option will slow down process creation somewhat. - config EARLY_PRINTK bool "Activate early kernel debugging" default y diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index db0b6eebbfa5..a80669209155 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -177,7 +177,6 @@ config PPC select HAVE_ARCH_KGDB select HAVE_ARCH_MMAP_RND_BITS select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT - select HAVE_ARCH_PREL32_RELOCATIONS select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK select HAVE_CBPF_JIT if !PPC64 diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h index c229509288ea..439dc7072e05 100644 --- a/arch/riscv/include/asm/tlb.h +++ b/arch/riscv/include/asm/tlb.h @@ -14,6 +14,10 @@ #ifndef _ASM_RISCV_TLB_H #define _ASM_RISCV_TLB_H +struct mmu_gather; + +static void tlb_flush(struct mmu_gather *tlb); + #include <asm-generic/tlb.h> static inline void tlb_flush(struct mmu_gather *tlb) diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c index 568026ccf6e8..fb03a4482ad6 100644 --- a/arch/riscv/kernel/sys_riscv.c +++ b/arch/riscv/kernel/sys_riscv.c @@ -65,24 +65,11 @@ SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end, uintptr_t, flags) { -#ifdef CONFIG_SMP - struct mm_struct *mm = current->mm; - bool local = (flags & SYS_RISCV_FLUSH_ICACHE_LOCAL) != 0; -#endif - /* Check the reserved flags. */ if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL)) return -EINVAL; - /* - * Without CONFIG_SMP flush_icache_mm is a just a flush_icache_all(), - * which generates unused variable warnings all over this function. 
- */ -#ifdef CONFIG_SMP - flush_icache_mm(mm, local); -#else - flush_icache_all(); -#endif + flush_icache_mm(current->mm, flags & SYS_RISCV_FLUSH_ICACHE_LOCAL); return 0; } diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index 9bd139569b41..cb2deb61c5d9 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -223,34 +223,34 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff pcmpeqd TWOONE(%rip), \TMP2 pand POLY(%rip), \TMP2 pxor \TMP2, \TMP3 - movdqa \TMP3, HashKey(%arg2) + movdqu \TMP3, HashKey(%arg2) movdqa \TMP3, \TMP5 pshufd $78, \TMP3, \TMP1 pxor \TMP3, \TMP1 - movdqa \TMP1, HashKey_k(%arg2) + movdqu \TMP1, HashKey_k(%arg2) GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 # TMP5 = HashKey^2<<1 (mod poly) - movdqa \TMP5, HashKey_2(%arg2) + movdqu \TMP5, HashKey_2(%arg2) # HashKey_2 = HashKey^2<<1 (mod poly) pshufd $78, \TMP5, \TMP1 pxor \TMP5, \TMP1 - movdqa \TMP1, HashKey_2_k(%arg2) + movdqu \TMP1, HashKey_2_k(%arg2) GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 # TMP5 = HashKey^3<<1 (mod poly) - movdqa \TMP5, HashKey_3(%arg2) + movdqu \TMP5, HashKey_3(%arg2) pshufd $78, \TMP5, \TMP1 pxor \TMP5, \TMP1 - movdqa \TMP1, HashKey_3_k(%arg2) + movdqu \TMP1, HashKey_3_k(%arg2) GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 # TMP5 = HashKey^3<<1 (mod poly) - movdqa \TMP5, HashKey_4(%arg2) + movdqu \TMP5, HashKey_4(%arg2) pshufd $78, \TMP5, \TMP1 pxor \TMP5, \TMP1 - movdqa \TMP1, HashKey_4_k(%arg2) + movdqu \TMP1, HashKey_4_k(%arg2) .endm # GCM_INIT initializes a gcm_context struct to prepare for encoding/decoding. @@ -271,7 +271,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, - movdqa HashKey(%arg2), %xmm13 + movdqu HashKey(%arg2), %xmm13 CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \ %xmm4, %xmm5, %xmm6 @@ -997,7 +997,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation pshufd $78, \XMM5, \TMP6 pxor \XMM5, \TMP6 paddd ONE(%rip), \XMM0 # INCR CNT - movdqa HashKey_4(%arg2), \TMP5 + movdqu HashKey_4(%arg2), \TMP5 PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1 movdqa \XMM0, \XMM1 paddd ONE(%rip), \XMM0 # INCR CNT @@ -1016,7 +1016,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation pxor (%arg1), \XMM2 pxor (%arg1), \XMM3 pxor (%arg1), \XMM4 - movdqa HashKey_4_k(%arg2), \TMP5 + movdqu HashKey_4_k(%arg2), \TMP5 PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) movaps 0x10(%arg1), \TMP1 AESENC \TMP1, \XMM1 # Round 1 @@ -1031,7 +1031,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM6, \TMP1 pshufd $78, \XMM6, \TMP2 pxor \XMM6, \TMP2 - movdqa HashKey_3(%arg2), \TMP5 + movdqu HashKey_3(%arg2), \TMP5 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 movaps 0x30(%arg1), \TMP3 AESENC \TMP3, \XMM1 # Round 3 @@ -1044,7 +1044,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation AESENC \TMP3, \XMM2 AESENC \TMP3, \XMM3 AESENC \TMP3, \XMM4 - movdqa HashKey_3_k(%arg2), \TMP5 + movdqu HashKey_3_k(%arg2), \TMP5 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movaps 0x50(%arg1), \TMP3 AESENC \TMP3, \XMM1 # Round 5 @@ -1058,7 +1058,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM7, \TMP1 pshufd $78, \XMM7, \TMP2 pxor \XMM7, \TMP2 - movdqa HashKey_2(%arg2), \TMP5 + movdqu HashKey_2(%arg2), \TMP5 # Multiply TMP5 * HashKey using karatsuba @@ -1074,7 +1074,7 @@ TMP6 XMM0 
XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation AESENC \TMP3, \XMM2 AESENC \TMP3, \XMM3 AESENC \TMP3, \XMM4 - movdqa HashKey_2_k(%arg2), \TMP5 + movdqu HashKey_2_k(%arg2), \TMP5 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movaps 0x80(%arg1), \TMP3 AESENC \TMP3, \XMM1 # Round 8 @@ -1092,7 +1092,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM8, \TMP1 pshufd $78, \XMM8, \TMP2 pxor \XMM8, \TMP2 - movdqa HashKey(%arg2), \TMP5 + movdqu HashKey(%arg2), \TMP5 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 movaps 0x90(%arg1), \TMP3 AESENC \TMP3, \XMM1 # Round 9 @@ -1121,7 +1121,7 @@ aes_loop_par_enc_done\@: AESENCLAST \TMP3, \XMM2 AESENCLAST \TMP3, \XMM3 AESENCLAST \TMP3, \XMM4 - movdqa HashKey_k(%arg2), \TMP5 + movdqu HashKey_k(%arg2), \TMP5 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movdqu (%arg4,%r11,1), \TMP3 pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK @@ -1205,7 +1205,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation pshufd $78, \XMM5, \TMP6 pxor \XMM5, \TMP6 paddd ONE(%rip), \XMM0 # INCR CNT - movdqa HashKey_4(%arg2), \TMP5 + movdqu HashKey_4(%arg2), \TMP5 PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1 movdqa \XMM0, \XMM1 paddd ONE(%rip), \XMM0 # INCR CNT @@ -1224,7 +1224,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation pxor (%arg1), \XMM2 pxor (%arg1), \XMM3 pxor (%arg1), \XMM4 - movdqa HashKey_4_k(%arg2), \TMP5 + movdqu HashKey_4_k(%arg2), \TMP5 PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) movaps 0x10(%arg1), \TMP1 AESENC \TMP1, \XMM1 # Round 1 @@ -1239,7 +1239,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM6, \TMP1 pshufd $78, \XMM6, \TMP2 pxor \XMM6, \TMP2 - movdqa HashKey_3(%arg2), \TMP5 + movdqu HashKey_3(%arg2), \TMP5 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 movaps 0x30(%arg1), \TMP3 AESENC \TMP3, \XMM1 # Round 3 @@ -1252,7 +1252,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation AESENC \TMP3, \XMM2 AESENC \TMP3, \XMM3 AESENC \TMP3, \XMM4 - movdqa HashKey_3_k(%arg2), \TMP5 + movdqu HashKey_3_k(%arg2), \TMP5 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movaps 0x50(%arg1), \TMP3 AESENC \TMP3, \XMM1 # Round 5 @@ -1266,7 +1266,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM7, \TMP1 pshufd $78, \XMM7, \TMP2 pxor \XMM7, \TMP2 - movdqa HashKey_2(%arg2), \TMP5 + movdqu HashKey_2(%arg2), \TMP5 # Multiply TMP5 * HashKey using karatsuba @@ -1282,7 +1282,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation AESENC \TMP3, \XMM2 AESENC \TMP3, \XMM3 AESENC \TMP3, \XMM4 - movdqa HashKey_2_k(%arg2), \TMP5 + movdqu HashKey_2_k(%arg2), \TMP5 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movaps 0x80(%arg1), \TMP3 AESENC \TMP3, \XMM1 # Round 8 @@ -1300,7 +1300,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM8, \TMP1 pshufd $78, \XMM8, \TMP2 pxor \XMM8, \TMP2 - movdqa HashKey(%arg2), \TMP5 + movdqu HashKey(%arg2), \TMP5 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 movaps 0x90(%arg1), \TMP3 AESENC \TMP3, \XMM1 # Round 9 @@ -1329,7 +1329,7 @@ aes_loop_par_dec_done\@: AESENCLAST \TMP3, \XMM2 AESENCLAST \TMP3, \XMM3 AESENCLAST \TMP3, \XMM4 - movdqa HashKey_k(%arg2), \TMP5 + movdqu HashKey_k(%arg2), \TMP5 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movdqu (%arg4,%r11,1), \TMP3 pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK @@ -1405,10 +1405,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst movdqa \XMM1, \TMP6 pshufd $78, \XMM1, \TMP2 pxor \XMM1, \TMP2 - movdqa HashKey_4(%arg2), \TMP5 + movdqu 
HashKey_4(%arg2), \TMP5 PCLMULQDQ 0x11, \TMP5, \TMP6 # TMP6 = a1*b1 PCLMULQDQ 0x00, \TMP5, \XMM1 # XMM1 = a0*b0 - movdqa HashKey_4_k(%arg2), \TMP4 + movdqu HashKey_4_k(%arg2), \TMP4 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movdqa \XMM1, \XMMDst movdqa \TMP2, \XMM1 # result in TMP6, XMMDst, XMM1 @@ -1418,10 +1418,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst movdqa \XMM2, \TMP1 pshufd $78, \XMM2, \TMP2 pxor \XMM2, \TMP2 - movdqa HashKey_3(%arg2), \TMP5 + movdqu HashKey_3(%arg2), \TMP5 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 PCLMULQDQ 0x00, \TMP5, \XMM2 # XMM2 = a0*b0 - movdqa HashKey_3_k(%arg2), \TMP4 + movdqu HashKey_3_k(%arg2), \TMP4 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) pxor \TMP1, \TMP6 pxor \XMM2, \XMMDst @@ -1433,10 +1433,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst movdqa \XMM3, \TMP1 pshufd $78, \XMM3, \TMP2 pxor \XMM3, \TMP2 - movdqa HashKey_2(%arg2), \TMP5 + movdqu HashKey_2(%arg2), \TMP5 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 PCLMULQDQ 0x00, \TMP5, \XMM3 # XMM3 = a0*b0 - movdqa HashKey_2_k(%arg2), \TMP4 + movdqu HashKey_2_k(%arg2), \TMP4 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) pxor \TMP1, \TMP6 pxor \XMM3, \XMMDst @@ -1446,10 +1446,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst movdqa \XMM4, \TMP1 pshufd $78, \XMM4, \TMP2 pxor \XMM4, \TMP2 - movdqa HashKey(%arg2), \TMP5 + movdqu HashKey(%arg2), \TMP5 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 PCLMULQDQ 0x00, \TMP5, \XMM4 # XMM4 = a0*b0 - movdqa HashKey_k(%arg2), \TMP4 + movdqu HashKey_k(%arg2), \TMP4 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) pxor \TMP1, \TMP6 pxor \XMM4, \XMMDst diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index a564084c6141..f8b1ad2c3828 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -2,6 +2,8 @@ #ifndef _ASM_X86_PGTABLE_3LEVEL_H #define _ASM_X86_PGTABLE_3LEVEL_H +#include <asm/atomic64_32.h> + /* * Intel Physical Address Extension (PAE) Mode - three-level page * tables on PPro+ CPUs. 
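A note on the aesni-intel_asm.S conversion above: movdqa faults unless its memory operand is 16-byte aligned, while movdqu accepts any address, which is why every access to the gcm_context hash-key fields is switched to the unaligned form once the structure's alignment can no longer be guaranteed. A minimal intrinsics sketch of the same distinction, assuming an SSE2 target and a hypothetical helper name, not code from the patch itself:

    #include <emmintrin.h>

    /* _mm_load_si128 emits movdqa and requires a 16-byte-aligned
     * pointer; _mm_loadu_si128 emits movdqu and tolerates any
     * alignment.  The unaligned form is the safe choice when the
     * allocation's alignment is not guaranteed, at negligible cost
     * on modern CPUs. */
    static __m128i load_hash_key(const void *p, int is_aligned)
    {
            return is_aligned ? _mm_load_si128((const __m128i *)p)
                              : _mm_loadu_si128((const __m128i *)p);
    }
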
@@ -150,10 +152,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep) { pte_t res; - /* xchg acts as a barrier before the setting of the high bits */ - res.pte_low = xchg(&ptep->pte_low, 0); - res.pte_high = ptep->pte_high; - ptep->pte_high = 0; + res.pte = (pteval_t)arch_atomic64_xchg((atomic64_t *)ptep, 0); return res; } diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 45b700ac5fe7..2fe5c9b1816b 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -435,14 +435,13 @@ static void xen_set_pud(pud_t *ptr, pud_t val) static void xen_set_pte_atomic(pte_t *ptep, pte_t pte) { trace_xen_mmu_set_pte_atomic(ptep, pte); - set_64bit((u64 *)ptep, native_pte_val(pte)); + __xen_set_pte(ptep, pte); } static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { trace_xen_mmu_pte_clear(mm, addr, ptep); - if (!xen_batched_set_pte(ptep, native_make_pte(0))) - native_pte_clear(mm, addr, ptep); + __xen_set_pte(ptep, native_make_pte(0)); } static void xen_pmd_clear(pmd_t *pmdp) @@ -1570,7 +1569,7 @@ static void __init xen_set_pte_init(pte_t *ptep, pte_t pte) pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & pte_val_ma(pte)); #endif - native_set_pte(ptep, pte); + __xen_set_pte(ptep, pte); } /* Early in boot, while setting up the initial pagetable, assume @@ -2061,7 +2060,6 @@ void __init xen_relocate_p2m(void) pud_t *pud; pgd_t *pgd; unsigned long *new_p2m; - int save_pud; size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long)); n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT; @@ -2091,7 +2089,6 @@ void __init xen_relocate_p2m(void) pgd = __va(read_cr3_pa()); new_p2m = (unsigned long *)(2 * PGDIR_SIZE); - save_pud = n_pud; for (idx_pud = 0; idx_pud < n_pud; idx_pud++) { pud = early_memremap(pud_phys, PAGE_SIZE); clear_page(pud); diff --git a/block/blk-wbt.c b/block/blk-wbt.c index 84507d3e9a98..8e20a0677dcf 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c @@ -123,16 +123,11 @@ static void rwb_wake_all(struct rq_wb *rwb) } } -static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct) +static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw, + enum wbt_flags wb_acct) { - struct rq_wb *rwb = RQWB(rqos); - struct rq_wait *rqw; int inflight, limit; - if (!(wb_acct & WBT_TRACKED)) - return; - - rqw = get_rq_wait(rwb, wb_acct); inflight = atomic_dec_return(&rqw->inflight); /* @@ -166,10 +161,22 @@ static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct) int diff = limit - inflight; if (!inflight || diff >= rwb->wb_background / 2) - wake_up(&rqw->wait); + wake_up_all(&rqw->wait); } } +static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct) +{ + struct rq_wb *rwb = RQWB(rqos); + struct rq_wait *rqw; + + if (!(wb_acct & WBT_TRACKED)) + return; + + rqw = get_rq_wait(rwb, wb_acct); + wbt_rqw_done(rwb, rqw, wb_acct); +} + /* * Called on completion of a request. Note that it's also called when * a request is merged, when the request gets freed. 
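For the pgtable-3level.h hunk above: clearing a PAE PTE in two 32-bit steps (an xchg of pte_low followed by zeroing pte_high) leaves a window in which another CPU can observe a half-cleared entry, so the fix performs the whole clear as one 64-bit atomic exchange via arch_atomic64_xchg(). A user-space sketch of the same idea using C11 atomics — hypothetical names, not the kernel implementation:

    #include <stdatomic.h>
    #include <stdint.h>

    typedef uint64_t pteval_t;

    /* A single 64-bit atomic exchange both reads the old entry and
     * writes zero in one indivisible step, so no observer can see the
     * low half cleared while the high half is still stale. */
    static pteval_t pte_get_and_clear(_Atomic uint64_t *ptep)
    {
            return atomic_exchange(ptep, 0);
    }
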
@@ -481,6 +488,34 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw) return limit; } +struct wbt_wait_data { + struct wait_queue_entry wq; + struct task_struct *task; + struct rq_wb *rwb; + struct rq_wait *rqw; + unsigned long rw; + bool got_token; +}; + +static int wbt_wake_function(struct wait_queue_entry *curr, unsigned int mode, + int wake_flags, void *key) +{ + struct wbt_wait_data *data = container_of(curr, struct wbt_wait_data, + wq); + + /* + * If we fail to get a budget, return -1 to interrupt the wake up + * loop in __wake_up_common. + */ + if (!rq_wait_inc_below(data->rqw, get_limit(data->rwb, data->rw))) + return -1; + + data->got_token = true; + list_del_init(&curr->entry); + wake_up_process(data->task); + return 1; +} + /* * Block if we will exceed our limit, or if we are currently waiting for * the timer to kick off queuing again. @@ -491,19 +526,40 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct, __acquires(lock) { struct rq_wait *rqw = get_rq_wait(rwb, wb_acct); - DECLARE_WAITQUEUE(wait, current); + struct wbt_wait_data data = { + .wq = { + .func = wbt_wake_function, + .entry = LIST_HEAD_INIT(data.wq.entry), + }, + .task = current, + .rwb = rwb, + .rqw = rqw, + .rw = rw, + }; bool has_sleeper; has_sleeper = wq_has_sleeper(&rqw->wait); if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw))) return; - add_wait_queue_exclusive(&rqw->wait, &wait); + prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE); do { - set_current_state(TASK_UNINTERRUPTIBLE); + if (data.got_token) + break; - if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw))) + if (!has_sleeper && + rq_wait_inc_below(rqw, get_limit(rwb, rw))) { + finish_wait(&rqw->wait, &data.wq); + + /* + * We raced with wbt_wake_function() getting a token, + * which means we now have two. Put our local token + * and wake anyone else potentially waiting for one. 
+ */ + if (data.got_token) + wbt_rqw_done(rwb, rqw, wb_acct); break; + } if (lock) { spin_unlock_irq(lock); @@ -511,11 +567,11 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct, spin_lock_irq(lock); } else io_schedule(); + has_sleeper = false; } while (1); - __set_current_state(TASK_RUNNING); - remove_wait_queue(&rqw->wait, &wait); + finish_wait(&rqw->wait, &data.wq); } static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio) @@ -580,11 +636,6 @@ static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock) return; } - if (current_is_kswapd()) - flags |= WBT_KSWAPD; - if (bio_op(bio) == REQ_OP_DISCARD) - flags |= WBT_DISCARD; - __wbt_wait(rwb, flags, bio->bi_opf, lock); if (!blk_stat_is_active(rwb->cb)) diff --git a/block/bsg.c b/block/bsg.c index db588add6ba6..9a442c23a715 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -37,7 +37,7 @@ struct bsg_device { struct request_queue *queue; spinlock_t lock; struct hlist_node dev_list; - atomic_t ref_count; + refcount_t ref_count; char name[20]; int max_queue; }; @@ -252,7 +252,7 @@ static int bsg_put_device(struct bsg_device *bd) mutex_lock(&bsg_mutex); - if (!atomic_dec_and_test(&bd->ref_count)) { + if (!refcount_dec_and_test(&bd->ref_count)) { mutex_unlock(&bsg_mutex); return 0; } @@ -290,7 +290,7 @@ static struct bsg_device *bsg_add_device(struct inode *inode, bd->queue = rq; - atomic_set(&bd->ref_count, 1); + refcount_set(&bd->ref_count, 1); hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode))); strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1); @@ -308,7 +308,7 @@ static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q) hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) { if (bd->queue == q) { - atomic_inc(&bd->ref_count); + refcount_inc(&bd->ref_count); goto found; } } diff --git a/block/elevator.c b/block/elevator.c index 5ea6e7d600e4..6a06b5d040e5 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -895,8 +895,7 @@ int elv_register(struct elevator_type *e) spin_lock(&elv_list_lock); if (elevator_find(e->elevator_name, e->uses_mq)) { spin_unlock(&elv_list_lock); - if (e->icq_cache) - kmem_cache_destroy(e->icq_cache); + kmem_cache_destroy(e->icq_cache); return -EBUSY; } list_add_tail(&e->list, &elv_list); diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c index 5d4b72e21161..569a4a662dcd 100644 --- a/drivers/ata/pata_ftide010.c +++ b/drivers/ata/pata_ftide010.c @@ -256,14 +256,12 @@ static struct ata_port_operations pata_ftide010_port_ops = { .qc_issue = ftide010_qc_issue, }; -static struct ata_port_info ftide010_port_info[] = { - { - .flags = ATA_FLAG_SLAVE_POSS, - .mwdma_mask = ATA_MWDMA2, - .udma_mask = ATA_UDMA6, - .pio_mask = ATA_PIO4, - .port_ops = &pata_ftide010_port_ops, - }, +static struct ata_port_info ftide010_port_info = { + .flags = ATA_FLAG_SLAVE_POSS, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .pio_mask = ATA_PIO4, + .port_ops = &pata_ftide010_port_ops, }; #if IS_ENABLED(CONFIG_SATA_GEMINI) @@ -349,6 +347,7 @@ static int pata_ftide010_gemini_cable_detect(struct ata_port *ap) } static int pata_ftide010_gemini_init(struct ftide010 *ftide, + struct ata_port_info *pi, bool is_ata1) { struct device *dev = ftide->dev; @@ -373,7 +372,13 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide, /* Flag port as SATA-capable */ if (gemini_sata_bridge_enabled(sg, is_ata1)) - ftide010_port_info[0].flags |= ATA_FLAG_SATA; + pi->flags |= ATA_FLAG_SATA; + + /* This device has broken DMA, 
only PIO works */ + if (of_machine_is_compatible("itian,sq201")) { + pi->mwdma_mask = 0; + pi->udma_mask = 0; + } /* * We assume that a simple 40-wire cable is used in the PATA mode. @@ -435,6 +440,7 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide, } #else static int pata_ftide010_gemini_init(struct ftide010 *ftide, + struct ata_port_info *pi, bool is_ata1) { return -ENOTSUPP; @@ -446,7 +452,7 @@ static int pata_ftide010_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; - const struct ata_port_info pi = ftide010_port_info[0]; + struct ata_port_info pi = ftide010_port_info; const struct ata_port_info *ppi[] = { &pi, NULL }; struct ftide010 *ftide; struct resource *res; @@ -490,6 +496,7 @@ static int pata_ftide010_probe(struct platform_device *pdev) * are ATA0. This will also set up the cable types. */ ret = pata_ftide010_gemini_init(ftide, + &pi, (res->start == 0x63400000)); if (ret) goto err_dis_clk; diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index 8e2e4757adcb..5a42ae4078c2 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c @@ -185,7 +185,7 @@ EXPORT_SYMBOL_GPL(of_pm_clk_add_clk); int of_pm_clk_add_clks(struct device *dev) { struct clk **clks; - unsigned int i, count; + int i, count; int ret; if (!dev || !dev->of_node) diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index b55b245e8052..fd1e19f1a49f 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -84,6 +84,18 @@ MODULE_PARM_DESC(max_persistent_grants, "Maximum number of grants to map persistently"); /* + * How long a persistent grant is allowed to remain allocated without being in + * use. The time is in seconds, 0 means indefinitely long. + */ + +static unsigned int xen_blkif_pgrant_timeout = 60; +module_param_named(persistent_grant_unused_seconds, xen_blkif_pgrant_timeout, + uint, 0644); +MODULE_PARM_DESC(persistent_grant_unused_seconds, + "Time in seconds an unused persistent grant is allowed to " + "remain allocated. Default is 60, 0 means unlimited."); + +/* * Maximum number of rings/queues blkback supports, allow as many queues as there * are CPUs if user has not specified a value. */ @@ -123,6 +135,13 @@ module_param(log_stats, int, 0644); /* Number of free pages to remove on each call to gnttab_free_pages */ #define NUM_BATCH_FREE_PAGES 10 +static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt) +{ + return xen_blkif_pgrant_timeout && + (jiffies - persistent_gnt->last_used >= + HZ * xen_blkif_pgrant_timeout); +} + static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page) { unsigned long flags; @@ -236,8 +255,7 @@ static int add_persistent_gnt(struct xen_blkif_ring *ring, } } - bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE); - set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags); + persistent_gnt->active = true; /* Add new node and rebalance tree. 
*/ rb_link_node(&(persistent_gnt->node), parent, new); rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts); @@ -261,11 +279,11 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring, else if (gref > data->gnt) node = node->rb_right; else { - if(test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) { + if (data->active) { pr_alert_ratelimited("requesting a grant already in use\n"); return NULL; } - set_bit(PERSISTENT_GNT_ACTIVE, data->flags); + data->active = true; atomic_inc(&ring->persistent_gnt_in_use); return data; } @@ -276,10 +294,10 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring, static void put_persistent_gnt(struct xen_blkif_ring *ring, struct persistent_gnt *persistent_gnt) { - if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags)) + if (!persistent_gnt->active) pr_alert_ratelimited("freeing a grant already unused\n"); - set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags); - clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags); + persistent_gnt->last_used = jiffies; + persistent_gnt->active = false; atomic_dec(&ring->persistent_gnt_in_use); } @@ -371,26 +389,26 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring) struct persistent_gnt *persistent_gnt; struct rb_node *n; unsigned int num_clean, total; - bool scan_used = false, clean_used = false; + bool scan_used = false; struct rb_root *root; - if (ring->persistent_gnt_c < xen_blkif_max_pgrants || - (ring->persistent_gnt_c == xen_blkif_max_pgrants && - !ring->blkif->vbd.overflow_max_grants)) { - goto out; - } - if (work_busy(&ring->persistent_purge_work)) { pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n"); goto out; } - num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN; - num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants + num_clean; - num_clean = min(ring->persistent_gnt_c, num_clean); - if ((num_clean == 0) || - (num_clean > (ring->persistent_gnt_c - atomic_read(&ring->persistent_gnt_in_use)))) - goto out; + if (ring->persistent_gnt_c < xen_blkif_max_pgrants || + (ring->persistent_gnt_c == xen_blkif_max_pgrants && + !ring->blkif->vbd.overflow_max_grants)) { + num_clean = 0; + } else { + num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN; + num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants + + num_clean; + num_clean = min(ring->persistent_gnt_c, num_clean); + pr_debug("Going to purge at least %u persistent grants\n", + num_clean); + } /* * At this point, we can assure that there will be no calls @@ -401,9 +419,7 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring) * number of grants. 
*/ - total = num_clean; - - pr_debug("Going to purge %u persistent grants\n", num_clean); + total = 0; BUG_ON(!list_empty(&ring->persistent_purge_list)); root = &ring->persistent_gnts; @@ -412,46 +428,37 @@ purge_list: BUG_ON(persistent_gnt->handle == BLKBACK_INVALID_HANDLE); - if (clean_used) { - clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags); + if (persistent_gnt->active) continue; - } - - if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags)) + if (!scan_used && !persistent_gnt_timeout(persistent_gnt)) continue; - if (!scan_used && - (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags))) + if (scan_used && total >= num_clean) continue; rb_erase(&persistent_gnt->node, root); list_add(&persistent_gnt->remove_node, &ring->persistent_purge_list); - if (--num_clean == 0) - goto finished; + total++; } /* - * If we get here it means we also need to start cleaning + * Check whether we also need to start cleaning * grants that were used since last purge in order to cope * with the requested num */ - if (!scan_used && !clean_used) { - pr_debug("Still missing %u purged frames\n", num_clean); + if (!scan_used && total < num_clean) { + pr_debug("Still missing %u purged frames\n", num_clean - total); scan_used = true; goto purge_list; } -finished: - if (!clean_used) { - pr_debug("Finished scanning for grants to clean, removing used flag\n"); - clean_used = true; - goto purge_list; - } - ring->persistent_gnt_c -= (total - num_clean); - ring->blkif->vbd.overflow_max_grants = 0; + if (total) { + ring->persistent_gnt_c -= total; + ring->blkif->vbd.overflow_max_grants = 0; - /* We can defer this work */ - schedule_work(&ring->persistent_purge_work); - pr_debug("Purged %u/%u\n", (total - num_clean), total); + /* We can defer this work */ + schedule_work(&ring->persistent_purge_work); + pr_debug("Purged %u/%u\n", num_clean, total); + } out: return; diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index ecb35fe8ca8d..1d3002d773f7 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -233,16 +233,6 @@ struct xen_vbd { struct backend_info; -/* Number of available flags */ -#define PERSISTENT_GNT_FLAGS_SIZE 2 -/* This persistent grant is currently in use */ -#define PERSISTENT_GNT_ACTIVE 0 -/* - * This persistent grant has been used, this flag is set when we remove the - * PERSISTENT_GNT_ACTIVE, to know that this grant has been used recently. - */ -#define PERSISTENT_GNT_WAS_ACTIVE 1 - /* Number of requests that we can fit in a ring */ #define XEN_BLKIF_REQS_PER_PAGE 32 @@ -250,7 +240,8 @@ struct persistent_gnt { struct page *page; grant_ref_t gnt; grant_handle_t handle; - DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE); + unsigned long last_used; + bool active; struct rb_node node; struct list_head remove_node; }; @@ -278,7 +269,6 @@ struct xen_blkif_ring { wait_queue_head_t pending_free_wq; /* Tree to store persistent grants. 
*/ - spinlock_t pers_gnts_lock; struct rb_root persistent_gnts; unsigned int persistent_gnt_c; atomic_t persistent_gnt_in_use; diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 8986adab9bf5..a71d817e900d 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -46,6 +46,7 @@ #include <linux/scatterlist.h> #include <linux/bitmap.h> #include <linux/list.h> +#include <linux/workqueue.h> #include <xen/xen.h> #include <xen/xenbus.h> @@ -121,6 +122,8 @@ static inline struct blkif_req *blkif_req(struct request *rq) static DEFINE_MUTEX(blkfront_mutex); static const struct block_device_operations xlvbd_block_fops; +static struct delayed_work blkfront_work; +static LIST_HEAD(info_list); /* * Maximum number of segments in indirect requests, the actual value used by @@ -216,6 +219,7 @@ struct blkfront_info /* Save uncomplete reqs and bios for migration. */ struct list_head requests; struct bio_list bio_list; + struct list_head info_list; }; static unsigned int nr_minors; @@ -1759,6 +1763,12 @@ abort_transaction: return err; } +static void free_info(struct blkfront_info *info) +{ + list_del(&info->info_list); + kfree(info); +} + /* Common code used when first setting up, and when resuming. */ static int talk_to_blkback(struct xenbus_device *dev, struct blkfront_info *info) @@ -1880,7 +1890,10 @@ again: destroy_blkring: blkif_free(info, 0); - kfree(info); + mutex_lock(&blkfront_mutex); + free_info(info); + mutex_unlock(&blkfront_mutex); + dev_set_drvdata(&dev->dev, NULL); return err; @@ -1991,6 +2004,10 @@ static int blkfront_probe(struct xenbus_device *dev, info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0); dev_set_drvdata(&dev->dev, info); + mutex_lock(&blkfront_mutex); + list_add(&info->info_list, &info_list); + mutex_unlock(&blkfront_mutex); + return 0; } @@ -2301,6 +2318,12 @@ static void blkfront_gather_backend_features(struct blkfront_info *info) if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST) indirect_segments = 0; info->max_indirect_segments = indirect_segments; + + if (info->feature_persistent) { + mutex_lock(&blkfront_mutex); + schedule_delayed_work(&blkfront_work, HZ * 10); + mutex_unlock(&blkfront_mutex); + } } /* @@ -2482,7 +2505,9 @@ static int blkfront_remove(struct xenbus_device *xbdev) mutex_unlock(&info->mutex); if (!bdev) { - kfree(info); + mutex_lock(&blkfront_mutex); + free_info(info); + mutex_unlock(&blkfront_mutex); return 0; } @@ -2502,7 +2527,9 @@ static int blkfront_remove(struct xenbus_device *xbdev) if (info && !bdev->bd_openers) { xlvbd_release_gendisk(info); disk->private_data = NULL; - kfree(info); + mutex_lock(&blkfront_mutex); + free_info(info); + mutex_unlock(&blkfront_mutex); } mutex_unlock(&bdev->bd_mutex); @@ -2585,7 +2612,7 @@ static void blkif_release(struct gendisk *disk, fmode_t mode) dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n"); xlvbd_release_gendisk(info); disk->private_data = NULL; - kfree(info); + free_info(info); } out: @@ -2618,6 +2645,61 @@ static struct xenbus_driver blkfront_driver = { .is_ready = blkfront_is_ready, }; +static void purge_persistent_grants(struct blkfront_info *info) +{ + unsigned int i; + unsigned long flags; + + for (i = 0; i < info->nr_rings; i++) { + struct blkfront_ring_info *rinfo = &info->rinfo[i]; + struct grant *gnt_list_entry, *tmp; + + spin_lock_irqsave(&rinfo->ring_lock, flags); + + if (rinfo->persistent_gnts_c == 0) { + spin_unlock_irqrestore(&rinfo->ring_lock, flags); + continue; + } + + list_for_each_entry_safe(gnt_list_entry, tmp, 
&rinfo->grants, + node) { + if (gnt_list_entry->gref == GRANT_INVALID_REF || + gnttab_query_foreign_access(gnt_list_entry->gref)) + continue; + + list_del(&gnt_list_entry->node); + gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL); + rinfo->persistent_gnts_c--; + __free_page(gnt_list_entry->page); + kfree(gnt_list_entry); + } + + spin_unlock_irqrestore(&rinfo->ring_lock, flags); + } +} + +static void blkfront_delay_work(struct work_struct *work) +{ + struct blkfront_info *info; + bool need_schedule_work = false; + + mutex_lock(&blkfront_mutex); + + list_for_each_entry(info, &info_list, info_list) { + if (info->feature_persistent) { + need_schedule_work = true; + mutex_lock(&info->mutex); + purge_persistent_grants(info); + mutex_unlock(&info->mutex); + } + } + + if (need_schedule_work) + schedule_delayed_work(&blkfront_work, HZ * 10); + + mutex_unlock(&blkfront_mutex); +} + static int __init xlblk_init(void) { int ret; @@ -2626,6 +2708,15 @@ static int __init xlblk_init(void) if (!xen_domain()) return -ENODEV; + if (!xen_has_pv_disk_devices()) + return -ENODEV; + + if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) { + pr_warn("xen_blk: can't get major %d with name %s\n", + XENVBD_MAJOR, DEV_NAME); + return -ENODEV; + } + if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST) xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST; @@ -2641,14 +2732,7 @@ static int __init xlblk_init(void) xen_blkif_max_queues = nr_cpus; } - if (!xen_has_pv_disk_devices()) - return -ENODEV; - - if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) { - printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n", - XENVBD_MAJOR, DEV_NAME); - return -ENODEV; - } + INIT_DELAYED_WORK(&blkfront_work, blkfront_delay_work); ret = xenbus_register_frontend(&blkfront_driver); if (ret) { @@ -2663,6 +2747,8 @@ module_init(xlblk_init); static void __exit xlblk_exit(void) { + cancel_delayed_work_sync(&blkfront_work); + xenbus_unregister_driver(&blkfront_driver); unregister_blkdev(XENVBD_MAJOR, DEV_NAME); kfree(minors); diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 113fc6edb2b0..a5d5a96479bf 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -2546,7 +2546,7 @@ static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi, if (!CDROM_CAN(CDC_SELECT_DISC) || (arg == CDSL_CURRENT || arg == CDSL_NONE)) return cdi->ops->drive_status(cdi, CDSL_CURRENT); - if (((int)arg >= cdi->capacity)) + if (arg >= cdi->capacity) return -EINVAL; return cdrom_slot_status(cdi, arg); } diff --git a/drivers/clk/clk-npcm7xx.c b/drivers/clk/clk-npcm7xx.c index 740af90a9508..c5edf8f2fd19 100644 --- a/drivers/clk/clk-npcm7xx.c +++ b/drivers/clk/clk-npcm7xx.c @@ -558,8 +558,8 @@ static void __init npcm7xx_clk_init(struct device_node *clk_np) if (!clk_base) goto npcm7xx_init_error; - npcm7xx_clk_data = kzalloc(sizeof(*npcm7xx_clk_data->hws) * - NPCM7XX_NUM_CLOCKS + sizeof(npcm7xx_clk_data), GFP_KERNEL); + npcm7xx_clk_data = kzalloc(struct_size(npcm7xx_clk_data, hws, + NPCM7XX_NUM_CLOCKS), GFP_KERNEL); if (!npcm7xx_clk_data) goto npcm7xx_init_np_err; diff --git a/drivers/clk/x86/clk-st.c b/drivers/clk/x86/clk-st.c index fb62f3938008..3a0996f2d556 100644 --- a/drivers/clk/x86/clk-st.c +++ b/drivers/clk/x86/clk-st.c @@ -46,7 +46,7 @@ static int st_clk_probe(struct platform_device *pdev) clk_oscout1_parents, ARRAY_SIZE(clk_oscout1_parents), 0, st_data->base + CLKDRVSTR2, OSCOUT1CLK25MHZ, 3, 0, NULL); - clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_25M]->clk); + clk_set_parent(hws[ST_CLK_MUX]->clk, 
hws[ST_CLK_48M]->clk); hws[ST_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1", "oscout1_mux", 0, st_data->base + MISCCLKCNTL1, OSCCLKENB, diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 110483f0e3fb..e26a40971b26 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -379,9 +379,20 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, if (idx == -1) idx = i; /* first enabled state */ if (s->target_residency > data->predicted_us) { - if (!tick_nohz_tick_stopped()) + if (data->predicted_us < TICK_USEC) break; + if (!tick_nohz_tick_stopped()) { + /* + * If the state selected so far is shallow, + * waking up early won't hurt, so retain the + * tick in that case and let the governor run + * again in the next iteration of the loop. + */ + expected_interval = drv->states[idx].target_residency; + break; + } + /* * If the state selected so far is shallow and this * state's target residency matches the time till the diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 6e61cc93c2b0..d7aa7d7ff102 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -679,10 +679,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, int ret = 0; if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { - crypto_ablkcipher_set_flags(ablkcipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); dev_err(jrdev, "key size mismatch\n"); - return -EINVAL; + goto badkey; } ctx->cdata.keylen = keylen; @@ -715,7 +713,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, return ret; badkey: crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); - return 0; + return -EINVAL; } /* diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 578ea63a3109..f26d62e5533a 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -71,8 +71,8 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc, dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); - dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); - dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE); + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); + dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL); } static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, @@ -90,8 +90,8 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE); dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE); dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); - dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); - dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE); + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); + dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL); } /* RSA Job Completion handler */ @@ -417,13 +417,13 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req, goto unmap_p; } - pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE); + pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, pdb->tmp1_dma)) { dev_err(dev, "Unable to map RSA tmp1 memory\n"); goto unmap_q; } - pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE); + 
pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, pdb->tmp2_dma)) { dev_err(dev, "Unable to map RSA tmp2 memory\n"); goto unmap_tmp1; @@ -451,7 +451,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req, return 0; unmap_tmp1: - dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); unmap_q: dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); unmap_p: @@ -504,13 +504,13 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req, goto unmap_dq; } - pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE); + pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, pdb->tmp1_dma)) { dev_err(dev, "Unable to map RSA tmp1 memory\n"); goto unmap_qinv; } - pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE); + pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, pdb->tmp2_dma)) { dev_err(dev, "Unable to map RSA tmp2 memory\n"); goto unmap_tmp1; @@ -538,7 +538,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req, return 0; unmap_tmp1: - dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); unmap_qinv: dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); unmap_dq: diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index f4f258075b89..acdd72016ffe 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -190,7 +190,8 @@ static void caam_jr_dequeue(unsigned long devarg) BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0); /* Unmap just-run descriptor so we can post-process */ - dma_unmap_single(dev, jrp->outring[hw_idx].desc, + dma_unmap_single(dev, + caam_dma_to_cpu(jrp->outring[hw_idx].desc), jrp->entinfo[sw_idx].desc_size, DMA_TO_DEVICE); diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h index 9a476bb6d4c7..af596455b420 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_dev.h +++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h @@ -35,6 +35,7 @@ struct nitrox_cmdq { /* requests in backlog queues */ atomic_t backlog_count; + int write_idx; /* command size 32B/64B */ u8 instr_size; u8 qno; @@ -87,7 +88,7 @@ struct nitrox_bh { struct bh_data *slc; }; -/* NITROX-5 driver state */ +/* NITROX-V driver state */ #define NITROX_UCODE_LOADED 0 #define NITROX_READY 1 diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c index ebe267379ac9..4d31df07777f 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_lib.c +++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c @@ -36,6 +36,7 @@ static int cmdq_common_init(struct nitrox_cmdq *cmdq) cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN); cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN); cmdq->qsize = (qsize + PKT_IN_ALIGN); + cmdq->write_idx = 0; spin_lock_init(&cmdq->response_lock); spin_lock_init(&cmdq->cmdq_lock); diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c index deaefd532aaa..4a362fc22f62 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c @@ -42,6 +42,16 @@ * Invalid flag options in AES-CCM IV. 
*/ +static inline int incr_index(int index, int count, int max) +{ + if ((index + count) >= max) + index = index + count - max; + else + index += count; + + return index; +} + /** * dma_free_sglist - unmap and free the sg lists. * @ndev: N5 device @@ -426,30 +436,29 @@ static void post_se_instr(struct nitrox_softreq *sr, struct nitrox_cmdq *cmdq) { struct nitrox_device *ndev = sr->ndev; - union nps_pkt_in_instr_baoff_dbell pkt_in_baoff_dbell; - u64 offset; + int idx; u8 *ent; spin_lock_bh(&cmdq->cmdq_lock); - /* get the next write offset */ - offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(cmdq->qno); - pkt_in_baoff_dbell.value = nitrox_read_csr(ndev, offset); + idx = cmdq->write_idx; /* copy the instruction */ - ent = cmdq->head + pkt_in_baoff_dbell.s.aoff; + ent = cmdq->head + (idx * cmdq->instr_size); memcpy(ent, &sr->instr, cmdq->instr_size); - /* flush the command queue updates */ - dma_wmb(); - sr->tstamp = jiffies; atomic_set(&sr->status, REQ_POSTED); response_list_add(sr, cmdq); + sr->tstamp = jiffies; + /* flush the command queue updates */ + dma_wmb(); /* Ring doorbell with count 1 */ writeq(1, cmdq->dbell_csr_addr); /* orders the doorbell rings */ mmiowb(); + cmdq->write_idx = incr_index(idx, 1, ndev->qlen); + spin_unlock_bh(&cmdq->cmdq_lock); } @@ -459,6 +468,9 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq) struct nitrox_softreq *sr, *tmp; int ret = 0; + if (!atomic_read(&cmdq->backlog_count)) + return 0; + spin_lock_bh(&cmdq->backlog_lock); list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) { @@ -466,7 +478,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq) /* submit until space available */ if (unlikely(cmdq_full(cmdq, ndev->qlen))) { - ret = -EBUSY; + ret = -ENOSPC; break; } /* delete from backlog list */ @@ -491,23 +503,20 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr) { struct nitrox_cmdq *cmdq = sr->cmdq; struct nitrox_device *ndev = sr->ndev; - int ret = -EBUSY; + + /* try to post backlog requests */ + post_backlog_cmds(cmdq); if (unlikely(cmdq_full(cmdq, ndev->qlen))) { if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) - return -EAGAIN; - + return -ENOSPC; + /* add to backlog list */ backlog_list_add(sr, cmdq); - } else { - ret = post_backlog_cmds(cmdq); - if (ret) { - backlog_list_add(sr, cmdq); - return ret; - } - post_se_instr(sr, cmdq); - ret = -EINPROGRESS; + return -EBUSY; } - return ret; + post_se_instr(sr, cmdq); + + return -EINPROGRESS; } /** @@ -624,11 +633,9 @@ int nitrox_process_se_request(struct nitrox_device *ndev, */ sr->instr.fdata[0] = *((u64 *)&req->gph); sr->instr.fdata[1] = 0; - /* flush the soft_req changes before posting the cmd */ - wmb(); ret = nitrox_enqueue_request(sr); - if (ret == -EAGAIN) + if (ret == -ENOSPC) goto send_fail; return ret; diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h index a53a0e6ba024..7725b6ee14ef 100644 --- a/drivers/crypto/chelsio/chtls/chtls.h +++ b/drivers/crypto/chelsio/chtls/chtls.h @@ -96,6 +96,10 @@ enum csk_flags { CSK_CONN_INLINE, /* Connection on HW */ }; +enum chtls_cdev_state { + CHTLS_CDEV_STATE_UP = 1 +}; + struct listen_ctx { struct sock *lsk; struct chtls_dev *cdev; @@ -146,6 +150,7 @@ struct chtls_dev { unsigned int send_page_order; int max_host_sndbuf; struct key_map kmap; + unsigned int cdev_state; }; struct chtls_hws { diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c index 9b07f9165658..f59b044ebd25 100644 --- a/drivers/crypto/chelsio/chtls/chtls_main.c +++ 
b/drivers/crypto/chelsio/chtls/chtls_main.c @@ -160,6 +160,7 @@ static void chtls_register_dev(struct chtls_dev *cdev) tlsdev->hash = chtls_create_hash; tlsdev->unhash = chtls_destroy_hash; tls_register_device(&cdev->tlsdev); + cdev->cdev_state = CHTLS_CDEV_STATE_UP; } static void chtls_unregister_dev(struct chtls_dev *cdev) @@ -281,8 +282,10 @@ static void chtls_free_all_uld(void) struct chtls_dev *cdev, *tmp; mutex_lock(&cdev_mutex); - list_for_each_entry_safe(cdev, tmp, &cdev_list, list) - chtls_free_uld(cdev); + list_for_each_entry_safe(cdev, tmp, &cdev_list, list) { + if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) + chtls_free_uld(cdev); + } mutex_unlock(&cdev_mutex); } diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c index 5285ece4f33a..b71895871be3 100644 --- a/drivers/crypto/vmx/aes_cbc.c +++ b/drivers/crypto/vmx/aes_cbc.c @@ -107,24 +107,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, ret = crypto_skcipher_encrypt(req); skcipher_request_zero(req); } else { - preempt_disable(); - pagefault_disable(); - enable_kernel_vsx(); - blkcipher_walk_init(&walk, dst, src, nbytes); ret = blkcipher_walk_virt(desc, &walk); while ((nbytes = walk.nbytes)) { + preempt_disable(); + pagefault_disable(); + enable_kernel_vsx(); aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr, nbytes & AES_BLOCK_MASK, &ctx->enc_key, walk.iv, 1); + disable_kernel_vsx(); + pagefault_enable(); + preempt_enable(); + nbytes &= AES_BLOCK_SIZE - 1; ret = blkcipher_walk_done(desc, &walk, nbytes); } - - disable_kernel_vsx(); - pagefault_enable(); - preempt_enable(); } return ret; @@ -147,24 +146,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc, ret = crypto_skcipher_decrypt(req); skcipher_request_zero(req); } else { - preempt_disable(); - pagefault_disable(); - enable_kernel_vsx(); - blkcipher_walk_init(&walk, dst, src, nbytes); ret = blkcipher_walk_virt(desc, &walk); while ((nbytes = walk.nbytes)) { + preempt_disable(); + pagefault_disable(); + enable_kernel_vsx(); aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr, nbytes & AES_BLOCK_MASK, &ctx->dec_key, walk.iv, 0); + disable_kernel_vsx(); + pagefault_enable(); + preempt_enable(); + nbytes &= AES_BLOCK_SIZE - 1; ret = blkcipher_walk_done(desc, &walk, nbytes); } - - disable_kernel_vsx(); - pagefault_enable(); - preempt_enable(); } return ret; diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c index 8bd9aff0f55f..e9954a7d4694 100644 --- a/drivers/crypto/vmx/aes_xts.c +++ b/drivers/crypto/vmx/aes_xts.c @@ -116,32 +116,39 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc, ret = enc? 
crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req); skcipher_request_zero(req); } else { + blkcipher_walk_init(&walk, dst, src, nbytes); + + ret = blkcipher_walk_virt(desc, &walk); + preempt_disable(); pagefault_disable(); enable_kernel_vsx(); - blkcipher_walk_init(&walk, dst, src, nbytes); - - ret = blkcipher_walk_virt(desc, &walk); iv = walk.iv; memset(tweak, 0, AES_BLOCK_SIZE); aes_p8_encrypt(iv, tweak, &ctx->tweak_key); + disable_kernel_vsx(); + pagefault_enable(); + preempt_enable(); + while ((nbytes = walk.nbytes)) { + preempt_disable(); + pagefault_disable(); + enable_kernel_vsx(); if (enc) aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr, nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak); else aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr, nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak); + disable_kernel_vsx(); + pagefault_enable(); + preempt_enable(); nbytes &= AES_BLOCK_SIZE - 1; ret = blkcipher_walk_done(desc, &walk, nbytes); } - - disable_kernel_vsx(); - pagefault_enable(); - preempt_enable(); } return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 502b94fb116a..b6e9df11115d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1012,13 +1012,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, if (r) return r; - if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) { - parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT; - if (!parser->ctx->preamble_presented) { - parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST; - parser->ctx->preamble_presented = true; - } - } + if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) + parser->job->preamble_status |= + AMDGPU_PREAMBLE_IB_PRESENT; if (parser->ring && parser->ring != ring) return -EINVAL; @@ -1207,26 +1203,24 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, int r; + job = p->job; + p->job = NULL; + + r = drm_sched_job_init(&job->base, entity, p->filp); + if (r) + goto error_unlock; + + /* No memory allocation is allowed while holding the mn lock */ amdgpu_mn_lock(p->mn); amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { struct amdgpu_bo *bo = e->robj; if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) { - amdgpu_mn_unlock(p->mn); - return -ERESTARTSYS; + r = -ERESTARTSYS; + goto error_abort; } } - job = p->job; - p->job = NULL; - - r = drm_sched_job_init(&job->base, entity, p->filp); - if (r) { - amdgpu_job_free(job); - amdgpu_mn_unlock(p->mn); - return r; - } - job->owner = p->filp; p->fence = dma_fence_get(&job->base.s_fence->finished); @@ -1241,6 +1235,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, amdgpu_cs_post_dependencies(p); + if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) && + !p->ctx->preamble_presented) { + job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST; + p->ctx->preamble_presented = true; + } + cs->out.handle = seq; job->uf_sequence = seq; @@ -1258,6 +1258,15 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, amdgpu_mn_unlock(p->mn); return 0; + +error_abort: + dma_fence_put(&job->base.s_fence->finished); + job->base.s_fence = NULL; + +error_unlock: + amdgpu_job_free(job); + amdgpu_mn_unlock(p->mn); + return r; } int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 5518e623fed2..51b5e977ca88 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ 
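The amdgpu_cs_submit() rework above moves the allocating drm_sched_job_init() call ahead of amdgpu_mn_lock(), because no memory allocation is allowed while that lock is held, and funnels failures through labels instead of open-coded unlock/free pairs. The general shape, as a sketch with hypothetical helper names:

    r = init_job(job);              /* may allocate, so run it unlocked */
    if (r)
            goto err_free_job;

    lock_mn();                      /* no allocations from here on */
    if (userptr_pages_invalidated()) {
            r = -ERESTARTSYS;
            goto err_unlock;
    }

    commit_job(job);
    unlock_mn();
    return 0;

    err_unlock:
            unlock_mn();
    err_free_job:
            free_job(job);
            return r;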
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -164,8 +164,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, return r; } + need_ctx_switch = ring->current_ctx != fence_ctx; if (ring->funcs->emit_pipeline_sync && job && ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) || + (amdgpu_sriov_vf(adev) && need_ctx_switch) || amdgpu_vm_need_pipeline_sync(ring, job))) { need_pipe_sync = true; dma_fence_put(tmp); @@ -196,7 +198,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, } skip_preamble = ring->current_ctx == fence_ctx; - need_ctx_switch = ring->current_ctx != fence_ctx; if (job && ring->funcs->emit_cntxcntl) { if (need_ctx_switch) status |= AMDGPU_HAVE_CTX_SWITCH; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 8f98629fbe59..7b4e657a95c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -1932,14 +1932,6 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) amdgpu_fence_wait_empty(ring); } - mutex_lock(&adev->pm.mutex); - /* update battery/ac status */ - if (power_supply_is_system_supplied() > 0) - adev->pm.ac_power = true; - else - adev->pm.ac_power = false; - mutex_unlock(&adev->pm.mutex); - if (adev->powerplay.pp_funcs->dispatch_tasks) { if (!amdgpu_device_has_dc_support(adev)) { mutex_lock(&adev->pm.mutex); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index ece0ac703e27..b17771dd5ce7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -172,6 +172,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, * is validated on next vm use to avoid fault. * */ list_move_tail(&base->vm_status, &vm->evicted); + base->moved = true; } /** @@ -369,7 +370,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, uint64_t addr; int r; - addr = amdgpu_bo_gpu_offset(bo); entries = amdgpu_bo_size(bo) / 8; if (pte_support_ats) { @@ -401,6 +401,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, if (r) goto error; + addr = amdgpu_bo_gpu_offset(bo); if (ats_entries) { uint64_t ats_value; @@ -2483,28 +2484,52 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size) * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size * * @adev: amdgpu_device pointer - * @vm_size: the default vm size if it's set auto + * @min_vm_size: the minimum vm size in GB if it's set auto * @fragment_size_default: Default PTE fragment size * @max_level: max VMPT level * @max_bits: max address space size in bits * */ -void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size, +void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, uint32_t fragment_size_default, unsigned max_level, unsigned max_bits) { + unsigned int max_size = 1 << (max_bits - 30); + unsigned int vm_size; uint64_t tmp; /* adjust vm size first */ if (amdgpu_vm_size != -1) { - unsigned max_size = 1 << (max_bits - 30); - vm_size = amdgpu_vm_size; if (vm_size > max_size) { dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n", amdgpu_vm_size, max_size); vm_size = max_size; } + } else { + struct sysinfo si; + unsigned int phys_ram_gb; + + /* Optimal VM size depends on the amount of physical + * RAM available. 
Underlying requirements and + * assumptions: + * + * - Need to map system memory and VRAM from all GPUs + * - VRAM from other GPUs not known here + * - Assume VRAM <= system memory + * - On GFX8 and older, VM space can be segmented for + * different MTYPEs + * - Need to allow room for fragmentation, guard pages etc. + * + * This adds up to a rough guess of system memory x3. + * Round up to power of two to maximize the available + * VM size with the given page table size. + */ + si_meminfo(&si); + phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit + + (1 << 30) - 1) >> 30; + vm_size = roundup_pow_of_two( + min(max(phys_ram_gb * 3, min_vm_size), max_size)); } adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 67a15d439ac0..9fa9df0c5e7f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -321,7 +321,7 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket); void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va); -void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size, +void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, uint32_t fragment_size_default, unsigned max_level, unsigned max_bits); int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 5cd45210113f..5a9534a82d40 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -5664,6 +5664,11 @@ static int gfx_v8_0_set_powergating_state(void *handle, if (amdgpu_sriov_vf(adev)) return 0; + if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_RLC_SMU_HS | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_GFX_DMG)) + adev->gfx.rlc.funcs->enter_safe_mode(adev); switch (adev->asic_type) { case CHIP_CARRIZO: case CHIP_STONEY: @@ -5713,7 +5718,11 @@ static int gfx_v8_0_set_powergating_state(void *handle, default: break; } - + if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_RLC_SMU_HS | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_GFX_DMG)) + adev->gfx.rlc.funcs->exit_safe_mode(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 75317f283c69..ad151fefa41f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -632,12 +632,6 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev) amdgpu_gart_table_vram_unpin(adev); } -static void gmc_v6_0_gart_fini(struct amdgpu_device *adev) -{ - amdgpu_gart_table_vram_free(adev); - amdgpu_gart_fini(adev); -} - static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev, u32 status, u32 addr, u32 mc_client) { @@ -935,8 +929,9 @@ static int gmc_v6_0_sw_fini(void *handle) amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); - gmc_v6_0_gart_fini(adev); + amdgpu_gart_table_vram_free(adev); amdgpu_bo_fini(adev); + amdgpu_gart_fini(adev); release_firmware(adev->gmc.fw); adev->gmc.fw = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 36dc367c4b45..f8d8a3a73e42 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -747,19 +747,6 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev) } /** - * 
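A worked instance of the sizing rule above, under the stated assumptions (numbers purely illustrative): with 16 GB of system RAM and a 48-bit address space, the heuristic picks 16 * 3 = 48 GB, which roundup_pow_of_two() raises to a 64 GB VM size, well under the 262144 GB ceiling:

    unsigned int phys_ram_gb = 16;                 /* assumed machine */
    unsigned int min_vm_size = 8;                  /* assumed floor */
    unsigned int max_size = 1 << (48 - 30);        /* 262144 GB */
    unsigned int vm_size;

    vm_size = roundup_pow_of_two(
                    min(max(phys_ram_gb * 3, min_vm_size), max_size));
    /* vm_size == 64 */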
gmc_v7_0_gart_fini - vm fini callback - * - * @adev: amdgpu_device pointer - * - * Tears down the driver GART/VM setup (CIK). - */ -static void gmc_v7_0_gart_fini(struct amdgpu_device *adev) -{ - amdgpu_gart_table_vram_free(adev); - amdgpu_gart_fini(adev); -} - -/** * gmc_v7_0_vm_decode_fault - print human readable fault info * * @adev: amdgpu_device pointer @@ -1095,8 +1082,9 @@ static int gmc_v7_0_sw_fini(void *handle) amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); kfree(adev->gmc.vm_fault_info); - gmc_v7_0_gart_fini(adev); + amdgpu_gart_table_vram_free(adev); amdgpu_bo_fini(adev); + amdgpu_gart_fini(adev); release_firmware(adev->gmc.fw); adev->gmc.fw = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 70fc97b59b4f..9333109b210d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -969,19 +969,6 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev) } /** - * gmc_v8_0_gart_fini - vm fini callback - * - * @adev: amdgpu_device pointer - * - * Tears down the driver GART/VM setup (CIK). - */ -static void gmc_v8_0_gart_fini(struct amdgpu_device *adev) -{ - amdgpu_gart_table_vram_free(adev); - amdgpu_gart_fini(adev); -} - -/** * gmc_v8_0_vm_decode_fault - print human readable fault info * * @adev: amdgpu_device pointer @@ -1199,8 +1186,9 @@ static int gmc_v8_0_sw_fini(void *handle) amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); kfree(adev->gmc.vm_fault_info); - gmc_v8_0_gart_fini(adev); + amdgpu_gart_table_vram_free(adev); amdgpu_bo_fini(adev); + amdgpu_gart_fini(adev); release_firmware(adev->gmc.fw); adev->gmc.fw = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 399a5db27649..72f8018fa2a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -942,26 +942,12 @@ static int gmc_v9_0_sw_init(void *handle) return 0; } -/** - * gmc_v9_0_gart_fini - vm fini callback - * - * @adev: amdgpu_device pointer - * - * Tears down the driver GART/VM setup (CIK). 
- */ -static void gmc_v9_0_gart_fini(struct amdgpu_device *adev) -{ - amdgpu_gart_table_vram_free(adev); - amdgpu_gart_fini(adev); -} - static int gmc_v9_0_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); - gmc_v9_0_gart_fini(adev); /* * TODO: @@ -974,7 +960,9 @@ static int gmc_v9_0_sw_fini(void *handle) */ amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); + amdgpu_gart_table_vram_free(adev); amdgpu_bo_fini(adev); + amdgpu_gart_fini(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 3f57f6463dc8..cb79a93c2eb7 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -65,8 +65,6 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, int min_temp, int max_temp); static int kv_init_fps_limits(struct amdgpu_device *adev); -static void kv_dpm_powergate_uvd(void *handle, bool gate); -static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate); static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); @@ -1354,8 +1352,6 @@ static int kv_dpm_enable(struct amdgpu_device *adev) return ret; } - kv_update_current_ps(adev, adev->pm.dpm.boot_ps); - if (adev->irq.installed && amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); @@ -1374,6 +1370,8 @@ static int kv_dpm_enable(struct amdgpu_device *adev) static void kv_dpm_disable(struct amdgpu_device *adev) { + struct kv_power_info *pi = kv_get_pi(adev); + amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, @@ -1387,8 +1385,10 @@ static void kv_dpm_disable(struct amdgpu_device *adev) /* powerup blocks */ kv_dpm_powergate_acp(adev, false); kv_dpm_powergate_samu(adev, false); - kv_dpm_powergate_vce(adev, false); - kv_dpm_powergate_uvd(adev, false); + if (pi->caps_vce_pg) /* power on the VCE block */ + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); + if (pi->caps_uvd_pg) /* power on the UVD block */ + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); kv_enable_smc_cac(adev, false); kv_enable_didt(adev, false); @@ -1551,7 +1551,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev, int ret; if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { - kv_dpm_powergate_vce(adev, false); if (pi->caps_stable_p_state) pi->vce_boot_level = table->count - 1; else @@ -1573,7 +1572,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev, kv_enable_vce_dpm(adev, true); } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { kv_enable_vce_dpm(adev, false); - kv_dpm_powergate_vce(adev, true); } return 0; @@ -1702,24 +1700,32 @@ static void kv_dpm_powergate_uvd(void *handle, bool gate) } } -static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) +static void kv_dpm_powergate_vce(void *handle, bool gate) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct kv_power_info *pi = kv_get_pi(adev); - - if (pi->vce_power_gated == gate) - return; + int ret; pi->vce_power_gated = gate; - if (!pi->caps_vce_pg) - return; - - if (gate) - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); - else - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); + if (gate) { + /* stop 
the VCE block */ + ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); + kv_enable_vce_dpm(adev, false); + if (pi->caps_vce_pg) /* power off the VCE block */ + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); + } else { + if (pi->caps_vce_pg) /* power on the VCE block */ + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); + kv_enable_vce_dpm(adev, true); + /* re-init the VCE block */ + ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_UNGATE); + } } + static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) { struct kv_power_info *pi = kv_get_pi(adev); @@ -3061,7 +3067,7 @@ static int kv_dpm_hw_init(void *handle) else adev->pm.dpm_enabled = true; mutex_unlock(&adev->pm.mutex); - + amdgpu_pm_compute_clocks(adev); return ret; } @@ -3313,6 +3319,9 @@ static int kv_set_powergating_by_smu(void *handle, case AMD_IP_BLOCK_TYPE_UVD: kv_dpm_powergate_uvd(handle, gate); break; + case AMD_IP_BLOCK_TYPE_VCE: + kv_dpm_powergate_vce(handle, gate); + break; default: break; } diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index db327b412562..1de96995e690 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -6887,7 +6887,6 @@ static int si_dpm_enable(struct amdgpu_device *adev) si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); si_thermal_start_thermal_controller(adev); - ni_update_current_ps(adev, boot_ps); return 0; } @@ -7763,7 +7762,7 @@ static int si_dpm_hw_init(void *handle) else adev->pm.dpm_enabled = true; mutex_unlock(&adev->pm.mutex); - + amdgpu_pm_compute_clocks(adev); return ret; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index fbe878ae1e8c..4ba0003a9d32 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -480,12 +480,20 @@ void pp_rv_set_display_requirement(struct pp_smu *pp, { struct dc_context *ctx = pp->ctx; struct amdgpu_device *adev = ctx->driver_context; + void *pp_handle = adev->powerplay.pp_handle; const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + struct pp_display_clock_request clock = {0}; - if (!pp_funcs || !pp_funcs->display_configuration_changed) + if (!pp_funcs || !pp_funcs->display_clock_voltage_request) return; - amdgpu_dpm_display_configuration_changed(adev); + clock.clock_type = amd_pp_dcf_clock; + clock.clock_freq_in_khz = req->hard_min_dcefclk_khz; + pp_funcs->display_clock_voltage_request(pp_handle, &clock); + + clock.clock_type = amd_pp_f_clock; + clock.clock_freq_in_khz = req->hard_min_fclk_khz; + pp_funcs->display_clock_voltage_request(pp_handle, &clock); } void pp_rv_set_wm_ranges(struct pp_smu *pp, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 567867915d32..37eaf72ace54 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -754,8 +754,12 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) * fail-safe mode */ if (dc_is_hdmi_signal(link->connector_signal) || - dc_is_dvi_signal(link->connector_signal)) + dc_is_dvi_signal(link->connector_signal)) { + if (prev_sink != NULL) + dc_sink_release(prev_sink); + return false; + } default: break; } diff --git a/drivers/gpu/drm/i915/i915_vma.c 
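The kv_dpm_powergate_vce() rework above encodes a symmetric ordering rule for the VCE block: gating stops the consumer before power is cut, and ungating restores power before the consumer restarts. A hedged sketch of that shape (all helper names hypothetical):

    if (gate) {
            stop_block();           /* quiesce users first */
            disable_dpm();
            power_off();            /* cut power only once idle */
    } else {
            power_on();             /* power must return first */
            enable_dpm();
            reinit_block();         /* re-init now that it is powered */
    }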
b/drivers/gpu/drm/i915/i915_vma.c index 11d834f94220..98358b4b36de 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -199,7 +199,6 @@ vma_create(struct drm_i915_gem_object *obj, vma->flags |= I915_VMA_GGTT; list_add(&vma->obj_link, &obj->vma_list); } else { - i915_ppgtt_get(i915_vm_to_ppgtt(vm)); list_add_tail(&vma->obj_link, &obj->vma_list); } @@ -807,9 +806,6 @@ static void __i915_vma_destroy(struct i915_vma *vma) if (vma->obj) rb_erase(&vma->obj_node, &vma->obj->vma_tree); - if (!i915_vma_is_ggtt(vma)) - i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm)); - rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) { GEM_BUG_ON(i915_gem_active_isset(&iter->base)); kfree(iter); diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index b725835b47ef..769f3f586661 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -962,9 +962,6 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv) { int ret; - if (INTEL_INFO(dev_priv)->num_pipes == 0) - return; - ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops); if (ret < 0) { DRM_ERROR("failed to add audio component (%d)\n", ret); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index ed3fa1c8a983..4a3c8ee9a973 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2988,6 +2988,7 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state, int w = drm_rect_width(&plane_state->base.src) >> 16; int h = drm_rect_height(&plane_state->base.src) >> 16; int dst_x = plane_state->base.dst.x1; + int dst_w = drm_rect_width(&plane_state->base.dst); int pipe_src_w = crtc_state->pipe_src_w; int max_width = skl_max_plane_width(fb, 0, rotation); int max_height = 4096; @@ -3009,10 +3010,10 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state, * screen may cause FIFO underflow and display corruption. */ if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && - (dst_x + w < 4 || dst_x > pipe_src_w - 4)) { + (dst_x + dst_w < 4 || dst_x > pipe_src_w - 4)) { DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n", - dst_x + w < 4 ? "end" : "start", - dst_x + w < 4 ? dst_x + w : dst_x, + dst_x + dst_w < 4 ? "end" : "start", + dst_x + dst_w < 4 ? dst_x + dst_w : dst_x, 4, pipe_src_w - 4); return -ERANGE; } diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index a9076402dcb0..192972a7d287 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -943,8 +943,12 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port, ret = i2c_transfer(adapter, &msg, 1); if (ret == 1) - return 0; - return ret >= 0 ? 
-EIO : ret; + ret = 0; + else if (ret >= 0) + ret = -EIO; + + kfree(write_buf); + return ret; } static diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c index 5dae16ccd9f1..3e085c5f2b81 100644 --- a/drivers/gpu/drm/i915/intel_lspcon.c +++ b/drivers/gpu/drm/i915/intel_lspcon.c @@ -74,7 +74,7 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon, DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n", lspcon_mode_name(mode)); - wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 100); + wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 400); if (current_mode != mode) DRM_ERROR("LSPCON mode hasn't settled\n"); diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 978782a77629..28d191192945 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -132,6 +132,11 @@ static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w, writel(0x0, comp->regs + DISP_REG_OVL_RST); } +static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp) +{ + return 4; +} + static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx) { unsigned int reg; @@ -157,6 +162,11 @@ static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx) static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt) { + /* The return value in the "MEM_MODE_INPUT_FORMAT_XXX" switch + * is defined in the MediaTek HW data sheet. + * The alphabetical order in XXX bears no relation to the data + * arrangement in memory. + */ switch (fmt) { default: case DRM_FORMAT_RGB565: @@ -221,6 +231,7 @@ static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = { .stop = mtk_ovl_stop, .enable_vblank = mtk_ovl_enable_vblank, .disable_vblank = mtk_ovl_disable_vblank, + .layer_nr = mtk_ovl_layer_nr, .layer_on = mtk_ovl_layer_on, .layer_off = mtk_ovl_layer_off, .layer_config = mtk_ovl_layer_config, diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c index 585943c81e1f..b0a5cffe345a 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c @@ -31,14 +31,31 @@ #define RDMA_REG_UPDATE_INT BIT(0) #define DISP_REG_RDMA_GLOBAL_CON 0x0010 #define RDMA_ENGINE_EN BIT(0) +#define RDMA_MODE_MEMORY BIT(1) #define DISP_REG_RDMA_SIZE_CON_0 0x0014 +#define RDMA_MATRIX_ENABLE BIT(17) +#define RDMA_MATRIX_INT_MTX_SEL GENMASK(23, 20) +#define RDMA_MATRIX_INT_MTX_BT601_to_RGB (6 << 20) #define DISP_REG_RDMA_SIZE_CON_1 0x0018 #define DISP_REG_RDMA_TARGET_LINE 0x001c +#define DISP_RDMA_MEM_CON 0x0024 +#define MEM_MODE_INPUT_FORMAT_RGB565 (0x000 << 4) +#define MEM_MODE_INPUT_FORMAT_RGB888 (0x001 << 4) +#define MEM_MODE_INPUT_FORMAT_RGBA8888 (0x002 << 4) +#define MEM_MODE_INPUT_FORMAT_ARGB8888 (0x003 << 4) +#define MEM_MODE_INPUT_FORMAT_UYVY (0x004 << 4) +#define MEM_MODE_INPUT_FORMAT_YUYV (0x005 << 4) +#define MEM_MODE_INPUT_SWAP BIT(8) +#define DISP_RDMA_MEM_SRC_PITCH 0x002c +#define DISP_RDMA_MEM_GMC_SETTING_0 0x0030 #define DISP_REG_RDMA_FIFO_CON 0x0040 #define RDMA_FIFO_UNDERFLOW_EN BIT(31) #define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16) #define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16) #define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size) +#define DISP_RDMA_MEM_START_ADDR 0x0f00 + +#define RDMA_MEM_GMC 0x40402020 struct mtk_disp_rdma_data { unsigned int fifo_size; @@ -138,12 +155,87 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, 
unsigned int width, writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON); } +static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma, + unsigned int fmt) +{ + /* The return value in the "MEM_MODE_INPUT_FORMAT_XXX" switch + * is defined in the MediaTek HW data sheet. + * The alphabetical order in XXX bears no relation to the data + * arrangement in memory. + */ + switch (fmt) { + default: + case DRM_FORMAT_RGB565: + return MEM_MODE_INPUT_FORMAT_RGB565; + case DRM_FORMAT_BGR565: + return MEM_MODE_INPUT_FORMAT_RGB565 | MEM_MODE_INPUT_SWAP; + case DRM_FORMAT_RGB888: + return MEM_MODE_INPUT_FORMAT_RGB888; + case DRM_FORMAT_BGR888: + return MEM_MODE_INPUT_FORMAT_RGB888 | MEM_MODE_INPUT_SWAP; + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_RGBA8888: + return MEM_MODE_INPUT_FORMAT_ARGB8888; + case DRM_FORMAT_BGRX8888: + case DRM_FORMAT_BGRA8888: + return MEM_MODE_INPUT_FORMAT_ARGB8888 | MEM_MODE_INPUT_SWAP; + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB8888: + return MEM_MODE_INPUT_FORMAT_RGBA8888; + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR8888: + return MEM_MODE_INPUT_FORMAT_RGBA8888 | MEM_MODE_INPUT_SWAP; + case DRM_FORMAT_UYVY: + return MEM_MODE_INPUT_FORMAT_UYVY; + case DRM_FORMAT_YUYV: + return MEM_MODE_INPUT_FORMAT_YUYV; + } +} + +static unsigned int mtk_rdma_layer_nr(struct mtk_ddp_comp *comp) +{ + return 1; +} + +static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, + struct mtk_plane_state *state) +{ + struct mtk_disp_rdma *rdma = comp_to_rdma(comp); + struct mtk_plane_pending_state *pending = &state->pending; + unsigned int addr = pending->addr; + unsigned int pitch = pending->pitch & 0xffff; + unsigned int fmt = pending->format; + unsigned int con; + + con = rdma_fmt_convert(rdma, fmt); + writel_relaxed(con, comp->regs + DISP_RDMA_MEM_CON); + + if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) { + rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, + RDMA_MATRIX_ENABLE, RDMA_MATRIX_ENABLE); + rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, + RDMA_MATRIX_INT_MTX_SEL, + RDMA_MATRIX_INT_MTX_BT601_to_RGB); + } else { + rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, + RDMA_MATRIX_ENABLE, 0); + } + + writel_relaxed(addr, comp->regs + DISP_RDMA_MEM_START_ADDR); + writel_relaxed(pitch, comp->regs + DISP_RDMA_MEM_SRC_PITCH); + writel(RDMA_MEM_GMC, comp->regs + DISP_RDMA_MEM_GMC_SETTING_0); + rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON, + RDMA_MODE_MEMORY, RDMA_MODE_MEMORY); +} + static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = { .config = mtk_rdma_config, .start = mtk_rdma_start, .stop = mtk_rdma_stop, .enable_vblank = mtk_rdma_enable_vblank, .disable_vblank = mtk_rdma_disable_vblank, + .layer_nr = mtk_rdma_layer_nr, + .layer_config = mtk_rdma_layer_config, }; static int mtk_disp_rdma_bind(struct device *dev, struct device *master, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 2d6aa150a9ff..0b976dfd04df 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -45,7 +45,8 @@ struct mtk_drm_crtc { bool pending_needs_vblank; struct drm_pending_vblank_event *event; - struct drm_plane planes[OVL_LAYER_NR]; + struct drm_plane *planes; + unsigned int layer_nr; bool pending_planes; void __iomem *config_regs; @@ -171,9 +172,9 @@ static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; + struct 
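mtk_rdma_layer_config() above programs individual register fields through rdma_update_bits(), a helper defined elsewhere in the driver. Its assumed shape is the usual mask/value read-modify-write, sketched here rather than quoted, so that setting RDMA_MATRIX_ENABLE in DISP_REG_RDMA_SIZE_CON_0 leaves the neighbouring bits untouched:

    static void update_bits(void __iomem *reg, u32 mask, u32 val)
    {
            u32 tmp = readl(reg);

            tmp &= ~mask;           /* clear the field */
            tmp |= (val & mask);    /* insert the new value */
            writel(tmp, reg);
    }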
mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; - mtk_ddp_comp_enable_vblank(ovl, &mtk_crtc->base); + mtk_ddp_comp_enable_vblank(comp, &mtk_crtc->base); return 0; } @@ -181,9 +182,9 @@ static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc) static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; - mtk_ddp_comp_disable_vblank(ovl); + mtk_ddp_comp_disable_vblank(comp); } static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc) @@ -286,7 +287,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) } /* Initially configure all planes */ - for (i = 0; i < OVL_LAYER_NR; i++) { + for (i = 0; i < mtk_crtc->layer_nr; i++) { struct drm_plane *plane = &mtk_crtc->planes[i]; struct mtk_plane_state *plane_state; @@ -334,7 +335,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state); - struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; unsigned int i; /* @@ -343,7 +344,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) * queue update module registers on vblank. */ if (state->pending_config) { - mtk_ddp_comp_config(ovl, state->pending_width, + mtk_ddp_comp_config(comp, state->pending_width, state->pending_height, state->pending_vrefresh, 0); @@ -351,14 +352,14 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) } if (mtk_crtc->pending_planes) { - for (i = 0; i < OVL_LAYER_NR; i++) { + for (i = 0; i < mtk_crtc->layer_nr; i++) { struct drm_plane *plane = &mtk_crtc->planes[i]; struct mtk_plane_state *plane_state; plane_state = to_mtk_plane_state(plane->state); if (plane_state->pending.config) { - mtk_ddp_comp_layer_config(ovl, i, plane_state); + mtk_ddp_comp_layer_config(comp, i, plane_state); plane_state->pending.config = false; } } @@ -370,12 +371,12 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; int ret; DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); - ret = mtk_smi_larb_get(ovl->larb_dev); + ret = mtk_smi_larb_get(comp->larb_dev); if (ret) { DRM_ERROR("Failed to get larb: %d\n", ret); return; @@ -383,7 +384,7 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, ret = mtk_crtc_ddp_hw_init(mtk_crtc); if (ret) { - mtk_smi_larb_put(ovl->larb_dev); + mtk_smi_larb_put(comp->larb_dev); return; } @@ -395,7 +396,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; int i; DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); @@ -403,7 +404,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, return; /* Set all pending plane state to disabled */ - for (i = 0; i < OVL_LAYER_NR; i++) { + for (i = 0; i < mtk_crtc->layer_nr; i++) { struct drm_plane *plane = &mtk_crtc->planes[i]; struct mtk_plane_state *plane_state; @@ -418,7 +419,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, drm_crtc_vblank_off(crtc); mtk_crtc_ddp_hw_fini(mtk_crtc); - mtk_smi_larb_put(ovl->larb_dev); + 
mtk_smi_larb_put(comp->larb_dev); mtk_crtc->enabled = false; } @@ -450,7 +451,7 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc, if (mtk_crtc->event) mtk_crtc->pending_needs_vblank = true; - for (i = 0; i < OVL_LAYER_NR; i++) { + for (i = 0; i < mtk_crtc->layer_nr; i++) { struct drm_plane *plane = &mtk_crtc->planes[i]; struct mtk_plane_state *plane_state; @@ -516,7 +517,7 @@ err_cleanup_crtc: return ret; } -void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl) +void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); struct mtk_drm_private *priv = crtc->dev->dev_private; @@ -598,7 +599,12 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, mtk_crtc->ddp_comp[i] = comp; } - for (zpos = 0; zpos < OVL_LAYER_NR; zpos++) { + mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]); + mtk_crtc->planes = devm_kzalloc(dev, mtk_crtc->layer_nr * + sizeof(struct drm_plane), + GFP_KERNEL); + + for (zpos = 0; zpos < mtk_crtc->layer_nr; zpos++) { type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY : (zpos == 1) ? DRM_PLANE_TYPE_CURSOR : DRM_PLANE_TYPE_OVERLAY; @@ -609,7 +615,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, } ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0], - &mtk_crtc->planes[1], pipe); + mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] : + NULL, pipe); if (ret < 0) goto unprepare; drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h index 9d9410c67ae9..091adb2087eb 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h @@ -18,13 +18,12 @@ #include "mtk_drm_ddp_comp.h" #include "mtk_drm_plane.h" -#define OVL_LAYER_NR 4 #define MTK_LUT_SIZE 512 #define MTK_MAX_BPC 10 #define MTK_MIN_BPC 3 void mtk_drm_crtc_commit(struct drm_crtc *crtc); -void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl); +void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp); int mtk_drm_crtc_create(struct drm_device *drm_dev, const enum mtk_ddp_comp_id *path, unsigned int path_len); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c index 87e4191c250e..546b3e3b300b 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c @@ -106,6 +106,8 @@ #define OVL1_MOUT_EN_COLOR1 0x1 #define GAMMA_MOUT_EN_RDMA1 0x1 #define RDMA0_SOUT_DPI0 0x2 +#define RDMA0_SOUT_DPI1 0x3 +#define RDMA0_SOUT_DSI1 0x1 #define RDMA0_SOUT_DSI2 0x4 #define RDMA0_SOUT_DSI3 0x5 #define RDMA1_SOUT_DPI0 0x2 @@ -122,6 +124,8 @@ #define DPI0_SEL_IN_RDMA2 0x3 #define DPI1_SEL_IN_RDMA1 (0x1 << 8) #define DPI1_SEL_IN_RDMA2 (0x3 << 8) +#define DSI0_SEL_IN_RDMA1 0x1 +#define DSI0_SEL_IN_RDMA2 0x4 #define DSI1_SEL_IN_RDMA1 0x1 #define DSI1_SEL_IN_RDMA2 0x4 #define DSI2_SEL_IN_RDMA1 (0x1 << 16) @@ -224,6 +228,12 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur, } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) { *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; value = RDMA0_SOUT_DPI0; + } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI1) { + *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; + value = RDMA0_SOUT_DPI1; + } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI1) { + *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; + value = RDMA0_SOUT_DSI1; } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) { *addr = 
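mtk_drm_crtc_create() above now sizes the plane array from whatever the first path component reports through its layer_nr callback. A hedged sketch of the query-then-allocate step, with an allocation-failure check added purely for illustration (the hunk above does not show one):

    mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
    mtk_crtc->planes = devm_kzalloc(dev,
                                    mtk_crtc->layer_nr *
                                    sizeof(struct drm_plane),
                                    GFP_KERNEL);
    if (!mtk_crtc->planes)
            return -ENOMEM;         /* devm_kzalloc can fail */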
DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; value = RDMA0_SOUT_DSI2; @@ -282,6 +292,9 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur, } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) { *addr = DISP_REG_CONFIG_DPI_SEL_IN; value = DPI1_SEL_IN_RDMA1; + } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI0) { + *addr = DISP_REG_CONFIG_DSIE_SEL_IN; + value = DSI0_SEL_IN_RDMA1; } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) { *addr = DISP_REG_CONFIG_DSIO_SEL_IN; value = DSI1_SEL_IN_RDMA1; @@ -297,8 +310,11 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur, } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) { *addr = DISP_REG_CONFIG_DPI_SEL_IN; value = DPI1_SEL_IN_RDMA2; - } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) { + } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI0) { *addr = DISP_REG_CONFIG_DSIE_SEL_IN; + value = DSI0_SEL_IN_RDMA2; + } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) { + *addr = DISP_REG_CONFIG_DSIO_SEL_IN; value = DSI1_SEL_IN_RDMA2; } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) { *addr = DISP_REG_CONFIG_DSIE_SEL_IN; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h index 7413ffeb3c9d..8399229e6ad2 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h @@ -78,6 +78,7 @@ struct mtk_ddp_comp_funcs { void (*stop)(struct mtk_ddp_comp *comp); void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc); void (*disable_vblank)(struct mtk_ddp_comp *comp); + unsigned int (*layer_nr)(struct mtk_ddp_comp *comp); void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx); void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx); void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx, @@ -128,6 +129,14 @@ static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp) comp->funcs->disable_vblank(comp); } +static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->layer_nr) + return comp->funcs->layer_nr(comp); + + return 0; +} + static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp, unsigned int idx) { diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 39721119713b..47ec604289b7 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -381,7 +381,7 @@ static int mtk_drm_bind(struct device *dev) err_deinit: mtk_drm_kms_deinit(drm); err_free: - drm_dev_unref(drm); + drm_dev_put(drm); return ret; } @@ -390,7 +390,7 @@ static void mtk_drm_unbind(struct device *dev) struct mtk_drm_private *private = dev_get_drvdata(dev); drm_dev_unregister(private->drm); - drm_dev_unref(private->drm); + drm_dev_put(private->drm); private->drm = NULL; } @@ -564,7 +564,7 @@ static int mtk_drm_remove(struct platform_device *pdev) drm_dev_unregister(drm); mtk_drm_kms_deinit(drm); - drm_dev_unref(drm); + drm_dev_put(drm); component_master_del(&pdev->dev, &mtk_drm_ops); pm_runtime_disable(&pdev->dev); @@ -580,29 +580,24 @@ static int mtk_drm_sys_suspend(struct device *dev) { struct mtk_drm_private *private = dev_get_drvdata(dev); struct drm_device *drm = private->drm; + int ret; - drm_kms_helper_poll_disable(drm); - - private->suspend_state = drm_atomic_helper_suspend(drm); - if (IS_ERR(private->suspend_state)) { - 
drm_kms_helper_poll_enable(drm); - return PTR_ERR(private->suspend_state); - } - + ret = drm_mode_config_helper_suspend(drm); DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n"); - return 0; + + return ret; } static int mtk_drm_sys_resume(struct device *dev) { struct mtk_drm_private *private = dev_get_drvdata(dev); struct drm_device *drm = private->drm; + int ret; - drm_atomic_helper_resume(drm, private->suspend_state); - drm_kms_helper_poll_enable(drm); - + ret = drm_mode_config_helper_resume(drm); DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n"); - return 0; + + return ret; } #endif diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c index 90837f7c7d0f..f4c7516eb989 100644 --- a/drivers/hwmon/adt7475.c +++ b/drivers/hwmon/adt7475.c @@ -302,14 +302,18 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn) return clamp_val(reg, 0, 1023) & (0xff << 2); } -static u16 adt7475_read_word(struct i2c_client *client, int reg) +static int adt7475_read_word(struct i2c_client *client, int reg) { - u16 val; + int val1, val2; - val = i2c_smbus_read_byte_data(client, reg); - val |= (i2c_smbus_read_byte_data(client, reg + 1) << 8); + val1 = i2c_smbus_read_byte_data(client, reg); + if (val1 < 0) + return val1; + val2 = i2c_smbus_read_byte_data(client, reg + 1); + if (val2 < 0) + return val2; - return val; + return val1 | (val2 << 8); } static void adt7475_write_word(struct i2c_client *client, int reg, u16 val) @@ -962,13 +966,14 @@ static ssize_t show_pwmfreq(struct device *dev, struct device_attribute *attr, { struct adt7475_data *data = adt7475_update_device(dev); struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); - int i = clamp_val(data->range[sattr->index] & 0xf, 0, - ARRAY_SIZE(pwmfreq_table) - 1); + int idx; if (IS_ERR(data)) return PTR_ERR(data); + idx = clamp_val(data->range[sattr->index] & 0xf, 0, + ARRAY_SIZE(pwmfreq_table) - 1); - return sprintf(buf, "%d\n", pwmfreq_table[i]); + return sprintf(buf, "%d\n", pwmfreq_table[idx]); } static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr, @@ -1004,6 +1009,10 @@ static ssize_t pwm_use_point2_pwm_at_crit_show(struct device *dev, char *buf) { struct adt7475_data *data = adt7475_update_device(dev); + + if (IS_ERR(data)) + return PTR_ERR(data); + return sprintf(buf, "%d\n", !!(data->config4 & CONFIG4_MAXDUTY)); } diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c index e9e6aeabbf84..71d3445ba869 100644 --- a/drivers/hwmon/ina2xx.c +++ b/drivers/hwmon/ina2xx.c @@ -17,7 +17,7 @@ * Bi-directional Current/Power Monitor with I2C Interface * Datasheet: http://www.ti.com/product/ina230 * - * Copyright (C) 2012 Lothar Felten <l-felten@ti.com> + * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com> * Thanks to Jan Volkering * * This program is free software; you can redistribute it and/or modify @@ -329,6 +329,15 @@ static int ina2xx_set_shunt(struct ina2xx_data *data, long val) return 0; } +static ssize_t ina2xx_show_shunt(struct device *dev, + struct device_attribute *da, + char *buf) +{ + struct ina2xx_data *data = dev_get_drvdata(dev); + + return snprintf(buf, PAGE_SIZE, "%li\n", data->rshunt); +} + static ssize_t ina2xx_store_shunt(struct device *dev, struct device_attribute *da, const char *buf, size_t count) @@ -403,7 +412,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL, /* shunt resistance */ static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR, - ina2xx_show_value, ina2xx_store_shunt, + ina2xx_show_shunt, ina2xx_store_shunt, INA2XX_CALIBRATION); /* 
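With adt7475_read_word() above now returning an int, callers have to treat negative values as errnos before using the low 16 bits as the register word (hedged usage sketch; 'data->value' is a stand-in field):

    int ret = adt7475_read_word(client, reg);

    if (ret < 0)
            return ret;             /* propagate the SMBus errno */
    data->value = ret & 0xffff;     /* otherwise it holds the word */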
update interval (ina226 only) */ diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index c6bd61e4695a..944f5b63aecd 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -63,6 +63,7 @@ #include <linux/bitops.h> #include <linux/dmi.h> #include <linux/io.h> +#include <linux/nospec.h> #include "lm75.h" #define USE_ALTERNATE @@ -2689,6 +2690,7 @@ store_pwm_weight_temp_sel(struct device *dev, struct device_attribute *attr, return err; if (val > NUM_TEMP) return -EINVAL; + val = array_index_nospec(val, NUM_TEMP + 1); if (val && (!(data->have_temp & BIT(val - 1)) || !data->temp_src[val - 1])) return -EINVAL; diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c index 6ec65adaba49..c33dcfb87993 100644 --- a/drivers/i2c/algos/i2c-algo-bit.c +++ b/drivers/i2c/algos/i2c-algo-bit.c @@ -110,8 +110,8 @@ static int sclhi(struct i2c_algo_bit_data *adap) } #ifdef DEBUG if (jiffies != start && i2c_debug >= 3) - pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go " - "high\n", jiffies - start); + pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go high\n", + jiffies - start); #endif done: @@ -171,8 +171,9 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c) setsda(adap, sb); udelay((adap->udelay + 1) / 2); if (sclhi(adap) < 0) { /* timed out */ - bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, " - "timeout at bit #%d\n", (int)c, i); + bit_dbg(1, &i2c_adap->dev, + "i2c_outb: 0x%02x, timeout at bit #%d\n", + (int)c, i); return -ETIMEDOUT; } /* FIXME do arbitration here: @@ -185,8 +186,8 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c) } sdahi(adap); if (sclhi(adap) < 0) { /* timeout */ - bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, " - "timeout at ack\n", (int)c); + bit_dbg(1, &i2c_adap->dev, + "i2c_outb: 0x%02x, timeout at ack\n", (int)c); return -ETIMEDOUT; } @@ -215,8 +216,9 @@ static int i2c_inb(struct i2c_adapter *i2c_adap) sdahi(adap); for (i = 0; i < 8; i++) { if (sclhi(adap) < 0) { /* timeout */ - bit_dbg(1, &i2c_adap->dev, "i2c_inb: timeout at bit " - "#%d\n", 7 - i); + bit_dbg(1, &i2c_adap->dev, + "i2c_inb: timeout at bit #%d\n", + 7 - i); return -ETIMEDOUT; } indata *= 2; @@ -265,8 +267,9 @@ static int test_bus(struct i2c_adapter *i2c_adap) goto bailout; } if (!scl) { - printk(KERN_WARNING "%s: SCL unexpected low " - "while pulling SDA low!\n", name); + printk(KERN_WARNING + "%s: SCL unexpected low while pulling SDA low!\n", + name); goto bailout; } @@ -278,8 +281,9 @@ static int test_bus(struct i2c_adapter *i2c_adap) goto bailout; } if (!scl) { - printk(KERN_WARNING "%s: SCL unexpected low " - "while pulling SDA high!\n", name); + printk(KERN_WARNING + "%s: SCL unexpected low while pulling SDA high!\n", + name); goto bailout; } @@ -291,8 +295,9 @@ static int test_bus(struct i2c_adapter *i2c_adap) goto bailout; } if (!sda) { - printk(KERN_WARNING "%s: SDA unexpected low " - "while pulling SCL low!\n", name); + printk(KERN_WARNING + "%s: SDA unexpected low while pulling SCL low!\n", + name); goto bailout; } @@ -304,8 +309,9 @@ static int test_bus(struct i2c_adapter *i2c_adap) goto bailout; } if (!sda) { - printk(KERN_WARNING "%s: SDA unexpected low " - "while pulling SCL high!\n", name); + printk(KERN_WARNING + "%s: SDA unexpected low while pulling SCL high!\n", + name); goto bailout; } @@ -352,8 +358,8 @@ static int try_address(struct i2c_adapter *i2c_adap, i2c_start(adap); } if (i && ret) - bit_dbg(1, &i2c_adap->dev, "Used %d tries to %s client at " - "0x%02x: %s\n", i + 1, + bit_dbg(1, 
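The nct6775 hunk above is the standard Spectre-v1 hardening pattern: after the bounds check, array_index_nospec() clamps the user-supplied index under speculative execution before it selects an array element. Sketched in isolation:

    if (val > NUM_TEMP)
            return -EINVAL;
    /* clamp the index under speculation before using it */
    val = array_index_nospec(val, NUM_TEMP + 1);
    if (val && !data->temp_src[val - 1])
            return -EINVAL;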
&i2c_adap->dev, + "Used %d tries to %s client at 0x%02x: %s\n", i + 1, addr & 1 ? "read from" : "write to", addr >> 1, ret == 1 ? "success" : "failed, timeout?"); return ret; @@ -442,8 +448,9 @@ static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) if (inval <= 0 || inval > I2C_SMBUS_BLOCK_MAX) { if (!(flags & I2C_M_NO_RD_ACK)) acknak(i2c_adap, 0); - dev_err(&i2c_adap->dev, "readbytes: invalid " - "block length (%d)\n", inval); + dev_err(&i2c_adap->dev, + "readbytes: invalid block length (%d)\n", + inval); return -EPROTO; } /* The original count value accounts for the extra @@ -506,8 +513,8 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) return -ENXIO; } if (flags & I2C_M_RD) { - bit_dbg(3, &i2c_adap->dev, "emitting repeated " - "start condition\n"); + bit_dbg(3, &i2c_adap->dev, + "emitting repeated start condition\n"); i2c_repstart(adap); /* okay, now switch into reading mode */ addr |= 0x01; @@ -564,8 +571,8 @@ static int bit_xfer(struct i2c_adapter *i2c_adap, } ret = bit_doAddress(i2c_adap, pmsg); if ((ret != 0) && !nak_ok) { - bit_dbg(1, &i2c_adap->dev, "NAK from " - "device addr 0x%02x msg #%d\n", + bit_dbg(1, &i2c_adap->dev, + "NAK from device addr 0x%02x msg #%d\n", msgs[i].addr, i); goto bailout; } diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index e18442b9973a..94d94b4a9a0d 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -708,7 +708,6 @@ int i2c_dw_probe(struct dw_i2c_dev *dev) i2c_set_adapdata(adap, dev); if (dev->pm_disabled) { - dev_pm_syscore_device(dev->dev, true); irq_flags = IRQF_NO_SUSPEND; } else { irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND; diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 1a8d2da5b000..b5750fd85125 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -434,6 +434,9 @@ static int dw_i2c_plat_suspend(struct device *dev) { struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); + if (i_dev->pm_disabled) + return 0; + i_dev->disable(i_dev); i2c_dw_prepare_clk(i_dev, false); @@ -444,7 +447,9 @@ static int dw_i2c_plat_resume(struct device *dev) { struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); - i2c_dw_prepare_clk(i_dev, true); + if (!i_dev->pm_disabled) + i2c_dw_prepare_clk(i_dev, true); + i_dev->init(i_dev); return 0; diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 941c223f6491..04b60a349d7e 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -1415,6 +1415,13 @@ static void i801_add_tco(struct i801_priv *priv) } #ifdef CONFIG_ACPI +static bool i801_acpi_is_smbus_ioport(const struct i801_priv *priv, + acpi_physical_address address) +{ + return address >= priv->smba && + address <= pci_resource_end(priv->pci_dev, SMBBAR); +} + static acpi_status i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, u64 *value, void *handler_context, void *region_context) @@ -1430,7 +1437,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, */ mutex_lock(&priv->acpi_lock); - if (!priv->acpi_reserved) { + if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) { priv->acpi_reserved = true; dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n"); diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 439e8778f849..818cab14e87c 100644 --- 
a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -507,8 +507,6 @@ static void sh_mobile_i2c_dma_callback(void *data) pd->pos = pd->msg->len; pd->stop_after_dma = true; - i2c_release_dma_safe_msg_buf(pd->msg, pd->dma_buf); - iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE); } @@ -602,8 +600,8 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd) dma_async_issue_pending(chan); } -static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg, - bool do_init) +static void start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg, + bool do_init) { if (do_init) { /* Initialize channel registers */ @@ -627,7 +625,6 @@ static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg, /* Enable all interrupts to begin with */ iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); - return 0; } static int poll_dte(struct sh_mobile_i2c_data *pd) @@ -698,9 +695,7 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP; pd->stop_after_dma = false; - err = start_ch(pd, msg, do_start); - if (err) - break; + start_ch(pd, msg, do_start); if (do_start) i2c_op(pd, OP_START, 0); @@ -709,6 +704,10 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, timeout = wait_event_timeout(pd->wait, pd->sr & (ICSR_TACK | SW_DONE), adapter->timeout); + + /* 'stop_after_dma' tells if DMA transfer was complete */ + i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg, pd->stop_after_dma); + if (!timeout) { dev_err(pd->dev, "Transfer request timed out\n"); if (pd->dma_direction != DMA_NONE) diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index f15737763608..9ee9a15e7134 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -2293,21 +2293,22 @@ u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold) EXPORT_SYMBOL_GPL(i2c_get_dma_safe_msg_buf); /** - * i2c_release_dma_safe_msg_buf - release DMA safe buffer and sync with i2c_msg - * @msg: the message to be synced with + * i2c_put_dma_safe_msg_buf - release DMA safe buffer and sync with i2c_msg * @buf: the buffer obtained from i2c_get_dma_safe_msg_buf(). May be NULL. + * @msg: the message which the buffer corresponds to + * @xferred: bool saying if the message was transferred */ -void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf) +void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred) { if (!buf || buf == msg->buf) return; - if (msg->flags & I2C_M_RD) + if (xferred && msg->flags & I2C_M_RD) memcpy(msg->buf, buf, msg->len); kfree(buf); } -EXPORT_SYMBOL_GPL(i2c_release_dma_safe_msg_buf); +EXPORT_SYMBOL_GPL(i2c_put_dma_safe_msg_buf); MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>"); MODULE_DESCRIPTION("I2C-Bus main module"); diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 648eb6743ed5..6edffeed9953 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -238,10 +238,6 @@ static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req, mmc_exit_request(mq->queue, req); } -/* - * We use BLK_MQ_F_BLOCKING and have only 1 hardware queue, which means requests - * will not be dispatched in parallel. 
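The renamed i2c_put_dma_safe_msg_buf() above pairs with i2c_get_dma_safe_msg_buf(), and the new 'xferred' argument keeps a failed transfer from copying stale bounce-buffer contents back into a read message. A hedged usage sketch (do_dma_transfer() and do_pio_transfer() are hypothetical driver helpers, and 8 is an arbitrary threshold):

    u8 *dma_buf;
    bool xferred;

    dma_buf = i2c_get_dma_safe_msg_buf(msg, 8);
    if (!dma_buf)
            return do_pio_transfer(msg);    /* threshold not met or no memory */

    xferred = do_dma_transfer(dma_buf, msg->len);

    /* syncs the buffer back into a read message only if xferred is true */
    i2c_put_dma_safe_msg_buf(dma_buf, msg, xferred);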
- */ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { @@ -264,7 +260,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, spin_lock_irq(q->queue_lock); - if (mq->recovery_needed) { + if (mq->recovery_needed || mq->busy) { spin_unlock_irq(q->queue_lock); return BLK_STS_RESOURCE; } @@ -291,6 +287,9 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, break; } + /* Parallel dispatch of requests is not supported at the moment */ + mq->busy = true; + mq->in_flight[issue_type] += 1; get_card = (mmc_tot_in_flight(mq) == 1); cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1); @@ -333,9 +332,12 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, mq->in_flight[issue_type] -= 1; if (mmc_tot_in_flight(mq) == 0) put_card = true; + mq->busy = false; spin_unlock_irq(q->queue_lock); if (put_card) mmc_put_card(card, &mq->ctx); + } else { + WRITE_ONCE(mq->busy, false); } return ret; diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h index 17e59d50b496..9bf3c9245075 100644 --- a/drivers/mmc/core/queue.h +++ b/drivers/mmc/core/queue.h @@ -81,6 +81,7 @@ struct mmc_queue { unsigned int cqe_busy; #define MMC_CQE_DCMD_BUSY BIT(0) #define MMC_CQE_QUEUE_FULL BIT(1) + bool busy; bool use_cqe; bool recovery_needed; bool in_recovery; diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c index 294de177632c..61e4e2a213c9 100644 --- a/drivers/mmc/host/android-goldfish.c +++ b/drivers/mmc/host/android-goldfish.c @@ -217,7 +217,7 @@ static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host, * We don't really have DMA, so we need * to copy from our platform driver buffer */ - sg_copy_to_buffer(data->sg, 1, host->virt_base, + sg_copy_from_buffer(data->sg, 1, host->virt_base, data->sg->length); } host->data->bytes_xfered += data->sg->length; @@ -393,7 +393,7 @@ static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host, * We don't really have DMA, so we need to copy to our * platform driver buffer */ - sg_copy_from_buffer(data->sg, 1, host->virt_base, + sg_copy_to_buffer(data->sg, 1, host->virt_base, data->sg->length); } } diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 5aa2c9404e92..be53044086c7 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -1976,7 +1976,7 @@ static void atmci_read_data_pio(struct atmel_mci *host) do { value = atmci_readl(host, ATMCI_RDR); if (likely(offset + 4 <= sg->length)) { - sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset); + sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset); offset += 4; nbytes += 4; @@ -1993,7 +1993,7 @@ static void atmci_read_data_pio(struct atmel_mci *host) } else { unsigned int remaining = sg->length - offset; - sg_pcopy_to_buffer(sg, 1, &value, remaining, offset); + sg_pcopy_from_buffer(sg, 1, &value, remaining, offset); nbytes += remaining; flush_dcache_page(sg_page(sg)); @@ -2003,7 +2003,7 @@ static void atmci_read_data_pio(struct atmel_mci *host) goto done; offset = 4 - remaining; - sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining, + sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining, offset, 0); nbytes += offset; } @@ -2042,7 +2042,7 @@ static void atmci_write_data_pio(struct atmel_mci *host) do { if (likely(offset + 4 <= sg->length)) { - sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset); + sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset); atmci_writel(host, ATMCI_TDR, value); offset += 4; @@ -2059,7 +2059,7 @@ static void 
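The goldfish and atmci direction fixes above hinge on the detail that the sg_copy_*() helpers are named from the scatterlist's point of view. As a hedged mnemonic:

    /* linear bounce buffer -> scatterlist, e.g. completing a read */
    sg_copy_from_buffer(sg, nents, bounce, len);

    /* scatterlist -> linear bounce buffer, e.g. preparing a write */
    sg_copy_to_buffer(sg, nents, bounce, len);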
atmci_write_data_pio(struct atmel_mci *host) unsigned int remaining = sg->length - offset; value = 0; - sg_pcopy_from_buffer(sg, 1, &value, remaining, offset); + sg_pcopy_to_buffer(sg, 1, &value, remaining, offset); nbytes += remaining; host->sg = sg = sg_next(sg); @@ -2070,7 +2070,7 @@ static void atmci_write_data_pio(struct atmel_mci *host) } offset = 4 - remaining; - sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining, + sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining, offset, 0); atmci_writel(host, ATMCI_TDR, value); nbytes += offset; diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index 35cc0de6be67..ca0b43973769 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -45,14 +45,16 @@ /* DM_CM_RST */ #define RST_DTRANRST1 BIT(9) #define RST_DTRANRST0 BIT(8) -#define RST_RESERVED_BITS GENMASK_ULL(32, 0) +#define RST_RESERVED_BITS GENMASK_ULL(31, 0) /* DM_CM_INFO1 and DM_CM_INFO1_MASK */ #define INFO1_CLEAR 0 +#define INFO1_MASK_CLEAR GENMASK_ULL(31, 0) #define INFO1_DTRANEND1 BIT(17) #define INFO1_DTRANEND0 BIT(16) /* DM_CM_INFO2 and DM_CM_INFO2_MASK */ +#define INFO2_MASK_CLEAR GENMASK_ULL(31, 0) #define INFO2_DTRANERR1 BIT(17) #define INFO2_DTRANERR0 BIT(16) @@ -252,6 +254,12 @@ renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host, { struct renesas_sdhi *priv = host_to_priv(host); + /* Disable DMAC interrupts; we don't use them */ + renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1_MASK, + INFO1_MASK_CLEAR); + renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO2_MASK, + INFO2_MASK_CLEAR); + /* Each value is set to non-zero to assume "enabling" each DMA */ host->chan_rx = host->chan_tx = (void *)0xdeadbeaf; diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index ca18612c4201..67b2065e7a19 100644 --- a/drivers/mtd/nand/raw/denali.c +++ b/drivers/mtd/nand/raw/denali.c @@ -1338,6 +1338,11 @@ int denali_init(struct denali_nand_info *denali) denali_enable_irq(denali); denali_reset_banks(denali); + if (!denali->max_banks) { + /* Error out earlier if no chip is found for some reason. */ + ret = -ENODEV; + goto disable_irq; + } denali->active_bank = DENALI_INVALID_BANK; diff --git a/drivers/mtd/nand/raw/docg4.c b/drivers/mtd/nand/raw/docg4.c index a3f04315c05c..427fcbc1b71c 100644 --- a/drivers/mtd/nand/raw/docg4.c +++ b/drivers/mtd/nand/raw/docg4.c @@ -1218,7 +1218,7 @@ static int docg4_resume(struct platform_device *pdev) return 0; } -static void __init init_mtd_structs(struct mtd_info *mtd) +static void init_mtd_structs(struct mtd_info *mtd) { /* initialize mtd and nand data structures */ @@ -1290,7 +1290,7 @@ static void __init init_mtd_structs(struct mtd_info *mtd) } -static int __init read_id_reg(struct mtd_info *mtd) +static int read_id_reg(struct mtd_info *mtd) { struct nand_chip *nand = mtd_to_nand(mtd); struct docg4_priv *doc = nand_get_controller_data(nand); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 1b9951d2067e..d668682f91df 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -316,6 +316,14 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db, old_value = *dbbuf_db; *dbbuf_db = value; + /* + * Ensure that the doorbell is updated before reading the event + * index from memory. The controller needs to provide similar + * ordering to ensure the event index is updated before reading + * the doorbell.
+ */ + mb(); + if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value)) return false; } diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index ebf3e7a6c49e..b5ec96abd048 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -1210,7 +1210,7 @@ static int __init nvmet_init(void) error = nvmet_init_discovery(); if (error) - goto out; + goto out_free_work_queue; error = nvmet_init_configfs(); if (error) @@ -1219,6 +1219,8 @@ static int __init nvmet_init(void) out_exit_discovery: nvmet_exit_discovery(); +out_free_work_queue: + destroy_workqueue(buffered_io_wq); out: return error; } diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index 34712def81b1..5251689a1d9a 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -311,7 +311,7 @@ fcloop_tgt_lsrqst_done_work(struct work_struct *work) struct fcloop_tport *tport = tls_req->tport; struct nvmefc_ls_req *lsreq = tls_req->lsreq; - if (tport->remoteport) + if (!tport || tport->remoteport) lsreq->done(lsreq, tls_req->status); } @@ -329,6 +329,7 @@ fcloop_ls_req(struct nvme_fc_local_port *localport, if (!rport->targetport) { tls_req->status = -ECONNREFUSED; + tls_req->tport = NULL; schedule_work(&tls_req->work); return ret; } diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index f2088838f690..5b471889d723 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -402,10 +402,19 @@ static ssize_t modalias_show(struct device *dev, } static DEVICE_ATTR_RO(modalias); +static ssize_t state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%s\n", + xenbus_strstate(to_xenbus_device(dev)->state)); +} +static DEVICE_ATTR_RO(state); + static struct attribute *xenbus_dev_attrs[] = { &dev_attr_nodename.attr, &dev_attr_devtype.attr, &dev_attr_modalias.attr, + &dev_attr_state.attr, NULL, }; diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index ec3fba7d492f..488a9e7f8f66 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -24,6 +24,7 @@ #include <linux/mpage.h> #include <linux/user_namespace.h> #include <linux/seq_file.h> +#include <linux/blkdev.h> #include "isofs.h" #include "zisofs.h" @@ -653,6 +654,12 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent) /* * What if bugger tells us to go beyond page size? */ + if (bdev_logical_block_size(s->s_bdev) > 2048) { + printk(KERN_WARNING + "ISOFS: unsupported/invalid hardware sector size %d\n", + bdev_logical_block_size(s->s_bdev)); + goto out_freesbi; + } opt.blocksize = sb_min_blocksize(s, opt.blocksize); sbi->s_high_sierra = 0; /* default is iso9660 */ diff --git a/fs/notify/mark.c b/fs/notify/mark.c index 05506d60131c..59cdb27826de 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c @@ -132,13 +132,13 @@ static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn) struct fsnotify_mark *mark; assert_spin_locked(&conn->lock); + /* We can get detached connector here when inode is getting unlinked. 
*/ + if (!fsnotify_valid_obj_type(conn->type)) + return; hlist_for_each_entry(mark, &conn->list, obj_list) { if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) new_mask |= mark->mask; } - if (WARN_ON(!fsnotify_valid_obj_type(conn->type))) - return; - *fsnotify_conn_mask_p(conn) = new_mask; } diff --git a/fs/quota/quota.c b/fs/quota/quota.c index 860bfbe7a07a..f0cbf58ad4da 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c @@ -18,6 +18,7 @@ #include <linux/quotaops.h> #include <linux/types.h> #include <linux/writeback.h> +#include <linux/nospec.h> static int check_quotactl_permission(struct super_block *sb, int type, int cmd, qid_t id) @@ -120,8 +121,6 @@ static int quota_getinfo(struct super_block *sb, int type, void __user *addr) struct if_dqinfo uinfo; int ret; - /* This checks whether qc_state has enough entries... */ - BUILD_BUG_ON(MAXQUOTAS > XQM_MAXQUOTAS); if (!sb->s_qcop->get_state) return -ENOSYS; ret = sb->s_qcop->get_state(sb, &state); @@ -354,10 +353,10 @@ static int quota_getstate(struct super_block *sb, struct fs_quota_stat *fqs) * GETXSTATE quotactl has space for just one set of time limits so * report them for the first enabled quota type */ - for (type = 0; type < XQM_MAXQUOTAS; type++) + for (type = 0; type < MAXQUOTAS; type++) if (state.s_state[type].flags & QCI_ACCT_ENABLED) break; - BUG_ON(type == XQM_MAXQUOTAS); + BUG_ON(type == MAXQUOTAS); fqs->qs_btimelimit = state.s_state[type].spc_timelimit; fqs->qs_itimelimit = state.s_state[type].ino_timelimit; fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit; @@ -427,10 +426,10 @@ static int quota_getstatev(struct super_block *sb, struct fs_quota_statv *fqs) * GETXSTATV quotactl has space for just one set of time limits so * report them for the first enabled quota type */ - for (type = 0; type < XQM_MAXQUOTAS; type++) + for (type = 0; type < MAXQUOTAS; type++) if (state.s_state[type].flags & QCI_ACCT_ENABLED) break; - BUG_ON(type == XQM_MAXQUOTAS); + BUG_ON(type == MAXQUOTAS); fqs->qs_btimelimit = state.s_state[type].spc_timelimit; fqs->qs_itimelimit = state.s_state[type].ino_timelimit; fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit; @@ -701,8 +700,9 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, { int ret; - if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS)) + if (type >= MAXQUOTAS) return -EINVAL; + type = array_index_nospec(type, MAXQUOTAS); /* * Quota not supported on this fs? Check this before s_quota_types * since they needn't be set if quota is not supported at all. diff --git a/fs/udf/super.c b/fs/udf/super.c index 3040dc2a32f6..6f515651a2c2 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -764,9 +764,7 @@ static int udf_find_fileset(struct super_block *sb, struct kernel_lb_addr *root) { struct buffer_head *bh = NULL; - long lastblock; uint16_t ident; - struct udf_sb_info *sbi; if (fileset->logicalBlockNum != 0xFFFFFFFF || fileset->partitionReferenceNum != 0xFFFF) { @@ -779,69 +777,11 @@ static int udf_find_fileset(struct super_block *sb, return 1; } - } - - sbi = UDF_SB(sb); - if (!bh) { - /* Search backwards through the partitions */ - struct kernel_lb_addr newfileset; - -/* --> cvg: FIXME - is it reasonable? 
*/ - return 1; - - for (newfileset.partitionReferenceNum = sbi->s_partitions - 1; - (newfileset.partitionReferenceNum != 0xFFFF && - fileset->logicalBlockNum == 0xFFFFFFFF && - fileset->partitionReferenceNum == 0xFFFF); - newfileset.partitionReferenceNum--) { - lastblock = sbi->s_partmaps - [newfileset.partitionReferenceNum] - .s_partition_len; - newfileset.logicalBlockNum = 0; - - do { - bh = udf_read_ptagged(sb, &newfileset, 0, - &ident); - if (!bh) { - newfileset.logicalBlockNum++; - continue; - } - - switch (ident) { - case TAG_IDENT_SBD: - { - struct spaceBitmapDesc *sp; - sp = (struct spaceBitmapDesc *) - bh->b_data; - newfileset.logicalBlockNum += 1 + - ((le32_to_cpu(sp->numOfBytes) + - sizeof(struct spaceBitmapDesc) - - 1) >> sb->s_blocksize_bits); - brelse(bh); - break; - } - case TAG_IDENT_FSD: - *fileset = newfileset; - break; - default: - newfileset.logicalBlockNum++; - brelse(bh); - bh = NULL; - break; - } - } while (newfileset.logicalBlockNum < lastblock && - fileset->logicalBlockNum == 0xFFFFFFFF && - fileset->partitionReferenceNum == 0xFFFF); - } - } - - if ((fileset->logicalBlockNum != 0xFFFFFFFF || - fileset->partitionReferenceNum != 0xFFFF) && bh) { udf_debug("Fileset at block=%u, partition=%u\n", fileset->logicalBlockNum, fileset->partitionReferenceNum); - sbi->s_partition = fileset->partitionReferenceNum; + UDF_SB(sb)->s_partition = fileset->partitionReferenceNum; udf_load_fileset(sb, bh, root); brelse(bh); return 0; @@ -1570,10 +1510,16 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_ */ #define PART_DESC_ALLOC_STEP 32 +struct part_desc_seq_scan_data { + struct udf_vds_record rec; + u32 partnum; +}; + struct desc_seq_scan_data { struct udf_vds_record vds[VDS_POS_LENGTH]; unsigned int size_part_descs; - struct udf_vds_record *part_descs_loc; + unsigned int num_part_descs; + struct part_desc_seq_scan_data *part_descs_loc; }; static struct udf_vds_record *handle_partition_descriptor( @@ -1582,10 +1528,14 @@ static struct udf_vds_record *handle_partition_descriptor( { struct partitionDesc *desc = (struct partitionDesc *)bh->b_data; int partnum; + int i; partnum = le16_to_cpu(desc->partitionNumber); - if (partnum >= data->size_part_descs) { - struct udf_vds_record *new_loc; + for (i = 0; i < data->num_part_descs; i++) + if (partnum == data->part_descs_loc[i].partnum) + return &(data->part_descs_loc[i].rec); + if (data->num_part_descs >= data->size_part_descs) { + struct part_desc_seq_scan_data *new_loc; unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP); new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL); @@ -1597,7 +1547,7 @@ static struct udf_vds_record *handle_partition_descriptor( data->part_descs_loc = new_loc; data->size_part_descs = new_size; } - return &(data->part_descs_loc[partnum]); + return &(data->part_descs_loc[data->num_part_descs++].rec); } @@ -1647,6 +1597,7 @@ static noinline int udf_process_sequence( memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH); data.size_part_descs = PART_DESC_ALLOC_STEP; + data.num_part_descs = 0; data.part_descs_loc = kcalloc(data.size_part_descs, sizeof(*data.part_descs_loc), GFP_KERNEL); @@ -1658,7 +1609,6 @@ static noinline int udf_process_sequence( * are in it. 
*/ for (; (!done && block <= lastblock); block++) { - bh = udf_read_tagged(sb, block, block, &ident); if (!bh) break; @@ -1730,13 +1680,10 @@ static noinline int udf_process_sequence( } /* Now handle prevailing Partition Descriptors */ - for (i = 0; i < data.size_part_descs; i++) { - if (data.part_descs_loc[i].block) { - ret = udf_load_partdesc(sb, - data.part_descs_loc[i].block); - if (ret < 0) - return ret; - } + for (i = 0; i < data.num_part_descs; i++) { + ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block); + if (ret < 0) + return ret; } return 0; diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index ca1d2cc2cdfa..18863d56273c 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -199,47 +199,57 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, #define __declare_arg_0(a0, res) \ struct arm_smccc_res *___res = res; \ - register u32 r0 asm("r0") = a0; \ + register unsigned long r0 asm("r0") = (u32)a0; \ register unsigned long r1 asm("r1"); \ register unsigned long r2 asm("r2"); \ register unsigned long r3 asm("r3") #define __declare_arg_1(a0, a1, res) \ + typeof(a1) __a1 = a1; \ struct arm_smccc_res *___res = res; \ - register u32 r0 asm("r0") = a0; \ - register typeof(a1) r1 asm("r1") = a1; \ + register unsigned long r0 asm("r0") = (u32)a0; \ + register unsigned long r1 asm("r1") = __a1; \ register unsigned long r2 asm("r2"); \ register unsigned long r3 asm("r3") #define __declare_arg_2(a0, a1, a2, res) \ + typeof(a1) __a1 = a1; \ + typeof(a2) __a2 = a2; \ struct arm_smccc_res *___res = res; \ - register u32 r0 asm("r0") = a0; \ - register typeof(a1) r1 asm("r1") = a1; \ - register typeof(a2) r2 asm("r2") = a2; \ + register unsigned long r0 asm("r0") = (u32)a0; \ + register unsigned long r1 asm("r1") = __a1; \ + register unsigned long r2 asm("r2") = __a2; \ register unsigned long r3 asm("r3") #define __declare_arg_3(a0, a1, a2, a3, res) \ + typeof(a1) __a1 = a1; \ + typeof(a2) __a2 = a2; \ + typeof(a3) __a3 = a3; \ struct arm_smccc_res *___res = res; \ - register u32 r0 asm("r0") = a0; \ - register typeof(a1) r1 asm("r1") = a1; \ - register typeof(a2) r2 asm("r2") = a2; \ - register typeof(a3) r3 asm("r3") = a3 + register unsigned long r0 asm("r0") = (u32)a0; \ + register unsigned long r1 asm("r1") = __a1; \ + register unsigned long r2 asm("r2") = __a2; \ + register unsigned long r3 asm("r3") = __a3 #define __declare_arg_4(a0, a1, a2, a3, a4, res) \ + typeof(a4) __a4 = a4; \ __declare_arg_3(a0, a1, a2, a3, res); \ - register typeof(a4) r4 asm("r4") = a4 + register unsigned long r4 asm("r4") = __a4 #define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \ + typeof(a5) __a5 = a5; \ __declare_arg_4(a0, a1, a2, a3, a4, res); \ - register typeof(a5) r5 asm("r5") = a5 + register unsigned long r5 asm("r5") = __a5 #define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \ + typeof(a6) __a6 = a6; \ __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \ - register typeof(a6) r6 asm("r6") = a6 + register unsigned long r6 asm("r6") = __a6 #define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \ + typeof(a7) __a7 = a7; \ __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \ - register typeof(a7) r7 asm("r7") = a7 + register unsigned long r7 asm("r7") = __a7 #define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__) #define __declare_args(count, ...) 
___declare_args(count, __VA_ARGS__) diff --git a/include/linux/i2c.h b/include/linux/i2c.h index b79387fd57da..65b4eaed1d96 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -855,7 +855,7 @@ static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg) } u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold); -void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf); +void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred); int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr); /** diff --git a/include/linux/platform_data/ina2xx.h b/include/linux/platform_data/ina2xx.h index 9abc0ca7259b..9f0aa1b48c78 100644 --- a/include/linux/platform_data/ina2xx.h +++ b/include/linux/platform_data/ina2xx.h @@ -1,7 +1,7 @@ /* * Driver for Texas Instruments INA219, INA226 power monitor chips * - * Copyright (C) 2012 Lothar Felten <l-felten@ti.com> + * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/include/linux/quota.h b/include/linux/quota.h index ca9772c8e48b..f32dd270b8e3 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -408,13 +408,7 @@ struct qc_type_state { struct qc_state { unsigned int s_incoredqs; /* Number of dquots in core */ - /* - * Per quota type information. The array should really have - * max(MAXQUOTAS, XQM_MAXQUOTAS) entries. BUILD_BUG_ON in - * quota_getinfo() makes sure XQM_MAXQUOTAS is large enough. Once VFS - * supports project quotas, this can be changed to MAXQUOTAS - */ - struct qc_type_state s_state[XQM_MAXQUOTAS]; + struct qc_type_state s_state[MAXQUOTAS]; /* Per quota type information */ }; /* Structure for communicating via ->set_info */
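
Note on the i2c helper rename in the hunks above: i2c_put_dma_safe_msg_buf() replaces i2c_release_dma_safe_msg_buf() and gains an 'xferred' argument, so the core syncs a bounce buffer back into a read message only when data was actually transferred (see the "if (xferred && msg->flags & I2C_M_RD)" check in i2c-core-base.c). A minimal caller sketch follows; the foo_* driver, its foo_do_dma()/foo_do_pio() helpers and the 8-byte threshold are illustrative assumptions, not part of the patches above.

#include <linux/i2c.h>

struct foo_dev;						/* hypothetical driver state */
int foo_do_dma(struct foo_dev *dev, u8 *buf, u16 len, bool read);	/* hypothetical */
int foo_do_pio(struct foo_dev *dev, struct i2c_msg *msg);		/* hypothetical */

static int foo_xfer_one(struct foo_dev *dev, struct i2c_msg *msg)
{
	/*
	 * Ask the core for a DMA-safe buffer for messages of 8 bytes or
	 * more; a NULL return means none was set up, so fall back to PIO.
	 */
	u8 *dma_buf = i2c_get_dma_safe_msg_buf(msg, 8);
	bool xferred = false;
	int ret;

	if (!dma_buf)
		return foo_do_pio(dev, msg);

	ret = foo_do_dma(dev, dma_buf, msg->len, msg->flags & I2C_M_RD);
	xferred = (ret == 0);

	/*
	 * Frees the bounce buffer in all cases, but copies it back into a
	 * read message only when 'xferred' is true, so a failed DMA setup
	 * no longer clobbers msg->buf with stale data.
	 */
	i2c_put_dma_safe_msg_buf(dma_buf, msg, xferred);

	return ret;
}

This mirrors the i2c-sh_mobile conversion above, where pd->stop_after_dma plays the role of 'xferred'.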