Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Makefile                      |  1
-rw-r--r--  arch/arm/boot/dts/vexpress.dts         | 10
-rw-r--r--  arch/arm/mach-exynos4/mach-smdkv310.c  |  2
-rw-r--r--  arch/arm/mach-vexpress/Makefile.boot   |  2
-rw-r--r--  arch/arm/mach-vexpress/v2m.c           |  6
-rw-r--r--  arch/powerpc/kernel/time.c             |  2
-rw-r--r--  arch/x86/crypto/aesni-intel_asm.S      |  5
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c     | 14
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c        | 20
-rw-r--r--  arch/x86/kernel/head64.c               |  3
-rw-r--r--  arch/x86/kernel/setup.c                |  5
-rw-r--r--  arch/x86/mm/init.c                     | 19
-rw-r--r--  arch/x86/mm/init_64.c                  | 11
13 files changed, 79 insertions, 21 deletions
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index bb797e4bdea..4570ca76a9a 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -311,6 +311,7 @@ define archhelp
echo ' uImage - U-Boot wrapped zImage'
echo ' bootpImage - Combined zImage and initial RAM disk'
echo ' (supply initrd image via make variable INITRD=<path>)'
+ echo ' dtbs - Build device tree blobs for enabled boards'
echo ' install - Install uncompressed kernel'
echo ' zinstall - Install compressed kernel'
echo ' uinstall - Install U-Boot wrapped compressed kernel'
diff --git a/arch/arm/boot/dts/vexpress.dts b/arch/arm/boot/dts/vexpress.dts
new file mode 100644
index 00000000000..5f3bc1d1f1c
--- /dev/null
+++ b/arch/arm/boot/dts/vexpress.dts
@@ -0,0 +1,10 @@
+/dts-v1/;
+/include/ "skeleton.dtsi"
+
+/ {
+ model = "ARM Versatile Express";
+ compatible = "arm,vexpress";
+ memory {
+ reg = <0x60000000 0x40000000>;
+ };
+};
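
Read with the single-cell #address-cells/#size-cells defaults that skeleton.dtsi presumably provides, the memory node above describes 0x40000000 bytes (1 GiB) of RAM at physical address 0x60000000, which is consistent with the zreladdr/params_phys/initrd_phys addresses in mach-vexpress/Makefile.boot below.
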
diff --git a/arch/arm/mach-exynos4/mach-smdkv310.c b/arch/arm/mach-exynos4/mach-smdkv310.c
index 2691f19ad6e..82e0df737c0 100644
--- a/arch/arm/mach-exynos4/mach-smdkv310.c
+++ b/arch/arm/mach-exynos4/mach-smdkv310.c
@@ -168,9 +168,9 @@ static struct i2c_board_info i2c_devs1[] __initdata = {
};
static struct platform_device *smdkv310_devices[] __initdata = {
+ &s3c_device_hsmmc2,
&s3c_device_hsmmc0,
&s3c_device_hsmmc1,
- &s3c_device_hsmmc2,
&s3c_device_hsmmc3,
&s3c_device_i2c1,
&s3c_device_rtc,
diff --git a/arch/arm/mach-vexpress/Makefile.boot b/arch/arm/mach-vexpress/Makefile.boot
index 07c2d9c457e..9920a1053e2 100644
--- a/arch/arm/mach-vexpress/Makefile.boot
+++ b/arch/arm/mach-vexpress/Makefile.boot
@@ -1,3 +1,5 @@
zreladdr-y := 0x60008000
params_phys-y := 0x60000100
initrd_phys-y := 0x60800000
+
+dtb-$(CONFIG_ARCH_VEXPRESS_CA9X4) += vexpress.dtb
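
The dtb-$(CONFIG_ARCH_VEXPRESS_CA9X4) line ties into the 'dtbs' help entry added to arch/arm/Makefile above: with that config enabled, running "make ARCH=arm dtbs" should turn the new vexpress.dts into arch/arm/boot/vexpress.dtb (assuming the dtbs build rule itself lands with this series).
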
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index ba46e8e0743..e318df39efd 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -437,6 +437,11 @@ static void __init v2m_init(void)
ct_desc->init_tile();
}
+static const char *vexpress_dt_match[] __initdata = {
+ "arm,vexpress",
+ NULL,
+};
+
MACHINE_START(VEXPRESS, "ARM-Versatile Express")
.boot_params = PLAT_PHYS_OFFSET + 0x00000100,
.map_io = v2m_map_io,
@@ -444,4 +449,5 @@ MACHINE_START(VEXPRESS, "ARM-Versatile Express")
.init_irq = v2m_init_irq,
.timer = &v2m_timer,
.init_machine = v2m_init,
+ .dt_compat = vexpress_dt_match,
MACHINE_END
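
The .dt_compat list is what lets a device-tree boot select this machine descriptor when the DT root node's compatible property contains "arm,vexpress". As a rough illustration only (made-up names, ignoring the kernel's scored of_flat_dt_match() logic), the lookup amounts to something like:

    #include <stddef.h>
    #include <string.h>

    struct mdesc_sketch {
        const char *name;
        const char *const *dt_compat;   /* NULL-terminated compatible list */
    };

    /* Return the first descriptor whose dt_compat list contains root_compat. */
    const struct mdesc_sketch *
    match_machine(const struct mdesc_sketch *descs, size_t n, const char *root_compat)
    {
        for (size_t i = 0; i < n; i++)
            for (const char *const *c = descs[i].dt_compat; c && *c; c++)
                if (!strcmp(*c, root_compat))
                    return &descs[i];
        return NULL;
    }
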
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index fcbe3f5c074..59abf59ef6c 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -357,7 +357,7 @@ void account_system_vtime(struct task_struct *tsk)
}
get_paca()->user_time_scaled += user_scaled;
- if (in_irq() || idle_task(smp_processor_id()) != tsk) {
+ if (in_interrupt() || idle_task(smp_processor_id()) != tsk) {
account_system_time(tsk, 0, delta, sys_scaled);
if (stolen)
account_steal_time(stolen);
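
For reference, in_irq() is true only in hard-interrupt context, while in_interrupt() also covers softirq (and NMI) context; the apparent effect of this one-liner is that softirq time executed while the idle task is current is now charged as system time instead of falling through to the idle/steal accounting path.
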
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 8fe2a4966b7..4292df78c9d 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1612,6 +1612,7 @@ _zero_cipher_left_encrypt:
movdqa SHUF_MASK(%rip), %xmm10
PSHUFB_XMM %xmm10, %xmm0
+
ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn)
sub $16, %r11
add %r13, %r11
@@ -1634,7 +1635,9 @@ _zero_cipher_left_encrypt:
# GHASH computation for the last <16 byte block
sub %r13, %r11
add $16, %r11
- PSHUFB_XMM %xmm10, %xmm1
+
+ movdqa SHUF_MASK(%rip), %xmm10
+ PSHUFB_XMM %xmm10, %xmm0
# shuffle xmm0 back to output as ciphertext
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index e1e60c7d581..b375b2a7a14 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -828,9 +828,15 @@ static int rfc4106_init(struct crypto_tfm *tfm)
struct cryptd_aead *cryptd_tfm;
struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
+ struct crypto_aead *cryptd_child;
+ struct aesni_rfc4106_gcm_ctx *child_ctx;
cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
if (IS_ERR(cryptd_tfm))
return PTR_ERR(cryptd_tfm);
+
+ cryptd_child = cryptd_aead_child(cryptd_tfm);
+ child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
+ memcpy(child_ctx, ctx, sizeof(*ctx));
ctx->cryptd_tfm = cryptd_tfm;
tfm->crt_aead.reqsize = sizeof(struct aead_request)
+ crypto_aead_reqsize(&cryptd_tfm->base);
@@ -925,6 +931,9 @@ static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
int ret = 0;
struct crypto_tfm *tfm = crypto_aead_tfm(parent);
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
+ struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
+ struct aesni_rfc4106_gcm_ctx *child_ctx =
+ aesni_rfc4106_gcm_ctx_get(cryptd_child);
u8 *new_key_mem = NULL;
if (key_len < 4) {
@@ -968,6 +977,7 @@ static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
goto exit;
}
ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
+ memcpy(child_ctx, ctx, sizeof(*ctx));
exit:
kfree(new_key_mem);
return ret;
@@ -999,7 +1009,6 @@ static int rfc4106_encrypt(struct aead_request *req)
int ret;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
- struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
if (!irq_fpu_usable()) {
struct aead_request *cryptd_req =
@@ -1008,6 +1017,7 @@ static int rfc4106_encrypt(struct aead_request *req)
aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
return crypto_aead_encrypt(cryptd_req);
} else {
+ struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
kernel_fpu_begin();
ret = cryptd_child->base.crt_aead.encrypt(req);
kernel_fpu_end();
@@ -1020,7 +1030,6 @@ static int rfc4106_decrypt(struct aead_request *req)
int ret;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
- struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
if (!irq_fpu_usable()) {
struct aead_request *cryptd_req =
@@ -1029,6 +1038,7 @@ static int rfc4106_decrypt(struct aead_request *req)
aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
return crypto_aead_decrypt(cryptd_req);
} else {
+ struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
kernel_fpu_begin();
ret = cryptd_child->base.crt_aead.decrypt(req);
kernel_fpu_end();
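
Taken together, these hunks mirror the parent context into the cryptd child transform after init and at the end of setkey, and only look up cryptd_aead_child() on the irq_fpu_usable() fast path; presumably this is so the direct call into the child operates on a transform that actually carries the key material. A rough sketch of the mirroring idea, with hypothetical types rather than the real crypto API:

    #include <stddef.h>
    #include <string.h>

    struct gcm_ctx_sketch {
        unsigned char aes_key[32];
        unsigned char hash_subkey[16];
    };

    struct rfc4106_parent_sketch {
        struct gcm_ctx_sketch ctx;        /* what setkey fills in */
        struct gcm_ctx_sketch *child_ctx; /* context the inner transform reads */
    };

    void parent_setkey_sketch(struct rfc4106_parent_sketch *p,
                              const unsigned char *key, size_t len)
    {
        size_t n = len < sizeof(p->ctx.aes_key) ? len : sizeof(p->ctx.aes_key);

        memcpy(p->ctx.aes_key, key, n);
        /* ... derive p->ctx.hash_subkey from the key here ... */

        /* Mirror the whole context into the child, as the patch now does. */
        memcpy(p->child_ctx, &p->ctx, sizeof(*p->child_ctx));
    }
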
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index bebabec5b44..151787e382c 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -292,14 +292,24 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ
/*
* HACK!
- * We use this same function to initialize the mtrrs on boot.
- * The state of the boot cpu's mtrrs has been saved, and we want
- * to replicate across all the APs.
- * If we're doing that @reg is set to something special...
+ *
+ * We use this same function to initialize the mtrrs during boot,
+ * resume, runtime cpu online and on an explicit request to set a
+ * specific MTRR.
+ *
+ * During boot or suspend, the state of the boot cpu's mtrrs has been
+ * saved, and we want to replicate that across all the cpus that come
+ * online (either at the end of boot or resume or during a runtime cpu
+ * online). If we're doing that, @reg is set to something special and on
+ * this cpu we still do mtrr_if->set_all(). During boot/resume, this
+ * is unnecessary if at this point we are still on the cpu that started
+ * the boot/resume sequence. But there is no guarantee that we are still
+ * on the same cpu. So we do mtrr_if->set_all() on this cpu as well to be
+ * sure that we are in sync with everyone else.
*/
if (reg != ~0U)
mtrr_if->set(reg, base, size, type);
- else if (!mtrr_aps_delayed_init)
+ else
mtrr_if->set_all();
/* Wait for the others */
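
The last hunk is the only functional change here: when @reg is the special ~0U value, this path now always calls mtrr_if->set_all() instead of doing so only when mtrr_aps_delayed_init was clear, for the reason spelled out in the expanded comment.
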
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 5655c2272ad..2d2673c28af 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -77,6 +77,9 @@ void __init x86_64_start_kernel(char * real_mode_data)
/* Make NULL pointers segfault */
zap_identity_mappings();
+ /* Cleanup the over mapped high alias */
+ cleanup_highmap();
+
max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT;
for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) {
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index e543fe9311e..d3cfe26c025 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -297,9 +297,6 @@ static void __init init_gbpages(void)
static inline void init_gbpages(void)
{
}
-static void __init cleanup_highmap(void)
-{
-}
#endif
static void __init reserve_brk(void)
@@ -925,8 +922,6 @@ void __init setup_arch(char **cmdline_p)
*/
reserve_brk();
- cleanup_highmap();
-
memblock.current_limit = get_max_mapped();
memblock_x86_fill();
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index f13ff3a2267..947f42abe82 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -279,6 +279,25 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
load_cr3(swapper_pg_dir);
#endif
+#ifdef CONFIG_X86_64
+ if (!after_bootmem && !start) {
+ pud_t *pud;
+ pmd_t *pmd;
+
+ mmu_cr4_features = read_cr4();
+
+ /*
+ * _brk_end cannot change anymore, but it and _end may be
+ * located on different 2M pages. cleanup_highmap(), however,
+ * can only consider _end when it runs, so destroy any
+ * mappings beyond _brk_end here.
+ */
+ pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
+ pmd = pmd_offset(pud, _brk_end - 1);
+ while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
+ pmd_clear(pmd);
+ }
+#endif
__flush_tlb_all();
if (!after_bootmem && e820_table_end > e820_table_start)
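
Since pmd_offset(pud, _brk_end - 1) is the last 2 MiB entry that must stay mapped, the while loop clears every following entry up to and including the one covering _end - 1. For example (addresses purely illustrative), with _brk_end at 0xffffffff81a01000 and _end at 0xffffffff81e00000, only the single entry covering 0xffffffff81c00000-0xffffffff81dfffff gets cleared.
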
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 68f9921ae9c..c14a5422e15 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -51,7 +51,6 @@
#include <asm/numa.h>
#include <asm/cacheflush.h>
#include <asm/init.h>
-#include <asm/setup.h>
static int __init parse_direct_gbpages_off(char *arg)
{
@@ -294,18 +293,18 @@ void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
* to the compile time generated pmds. This results in invalid pmds up
* to the point where we hit the physaddr 0 mapping.
*
- * We limit the mappings to the region from _text to _brk_end. _brk_end
- * is rounded up to the 2MB boundary. This catches the invalid pmds as
+ * We limit the mappings to the region from _text to _end. _end is
+ * rounded up to the 2MB boundary. This catches the invalid pmds as
* well, as they are located before _text:
*/
void __init cleanup_highmap(void)
{
unsigned long vaddr = __START_KERNEL_map;
- unsigned long vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);
- unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
+ unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1;
pmd_t *pmd = level2_kernel_pgt;
+ pmd_t *last_pmd = pmd + PTRS_PER_PMD;
- for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
+ for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) {
if (pmd_none(*pmd))
continue;
if (vaddr < (unsigned long) _text || vaddr > end)
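
For scale: level2_kernel_pgt holds PTRS_PER_PMD (512) entries of PMD_SIZE (2 MiB) each, i.e. up to 1 GiB of kernel-image mapping starting at __START_KERNEL_map, so the rewritten loop visits every slot of that table instead of stopping at max_pfn_mapped. Everything outside [_text, roundup(_end, 2 MiB)) is cleared here; the pmds between _brk_end and _end that this now leaves in place are the ones torn down later by the init_memory_mapping() hunk in arch/x86/mm/init.c above.
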