Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/kprobes.h          |  3
-rw-r--r--  arch/powerpc/include/asm/page.h             |  4
-rw-r--r--  arch/powerpc/kvm/book3s_64_vio.c            |  4
-rw-r--r--  arch/powerpc/kvm/book3s_hv_builtin.c        |  3
-rw-r--r--  arch/powerpc/lib/code-patching.c            |  1
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c    | 39
-rw-r--r--  arch/powerpc/xmon/xmon.c                    |  2
7 files changed, 31 insertions, 25 deletions
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index d821835ade86..0503c98b2117 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -1,5 +1,8 @@
#ifndef _ASM_POWERPC_KPROBES_H
#define _ASM_POWERPC_KPROBES_H
+
+#include <asm-generic/kprobes.h>
+
#ifdef __KERNEL__
/*
* Kernel Probes (KProbes)
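
A minimal sketch of what the new include is expected to provide (example_handler is hypothetical and not part of this diff): the generic header supplies NOKPROBE_SYMBOL() and __kprobes, so the arch header only needs to pull it in.

    #include <asm-generic/kprobes.h>

    static int example_handler(void)
    {
            return 0;
    }
    /* keep this function out of the kprobes blacklist-able set */
    NOKPROBE_SYMBOL(example_handler);
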
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 47120bf2670c..2a32483c7b6c 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -230,7 +230,9 @@ extern long long virt_phys_offset;
* and needs to be executable. This means the whole heap ends
* up being executable.
*/
-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+#define VM_DATA_DEFAULT_FLAGS32 \
+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
+ VM_READ | VM_WRITE | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
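
A sketch of the effective logic behind the reworked 32-bit default (assuming the usual VM_* and READ_IMPLIES_EXEC definitions; not code from the patch): VM_EXEC is now granted only when the task's personality requests exec-on-read.

    unsigned long data_flags = VM_READ | VM_WRITE |
                               VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

    if (current->personality & READ_IMPLIES_EXEC)
            data_flags |= VM_EXEC;  /* heap stays executable only on request */
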
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 491c5d8120f7..ab9d14c0e460 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -102,9 +102,9 @@ static void release_spapr_tce_table(struct rcu_head *head)
kfree(stt);
}
-static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int kvm_spapr_tce_fault(struct vm_fault *vmf)
{
- struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
+ struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
struct page *page;
if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
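
The pattern behind this conversion and the spufs ones below, as a minimal sketch (example_fault and example_vmops are hypothetical): ->fault() no longer receives the VMA as a separate argument, so handlers read it from vmf->vma.

    static int example_fault(struct vm_fault *vmf)
    {
            struct vm_area_struct *vma = vmf->vma;  /* formerly a parameter */
            void *priv = vma->vm_file->private_data;

            if (!priv)
                    return VM_FAULT_SIGBUS;
            return VM_FAULT_NOPAGE;
    }

    static const struct vm_operations_struct example_vmops = {
            .fault = example_fault,
    };
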
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index c42a7e63b39e..4d6c64b3041c 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -56,7 +56,8 @@ struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
{
VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
- return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
+ return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
+ GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);
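
For reference, a sketch of a caller under the extended cma_alloc() prototype (alloc_example and its arguments are hypothetical): the GFP mask is now passed explicitly rather than implied by the allocator.

    static struct page *alloc_example(struct cma *area, unsigned long nr_pages)
    {
            /* GFP flags are now an explicit argument to cma_alloc() */
            return cma_alloc(area, nr_pages, order_base_2(nr_pages), GFP_KERNEL);
    }
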
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 0899315e1434..0d3002b7e2b4 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -14,6 +14,7 @@
#include <asm/page.h>
#include <asm/code-patching.h>
#include <linux/uaccess.h>
+#include <linux/kprobes.h>
int patch_instruction(unsigned int *addr, unsigned int instr)
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 03f2cdfabf23..ae2f740a82f1 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -233,8 +233,9 @@ spufs_mem_write(struct file *file, const char __user *buffer,
}
static int
-spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_mem_mmap_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct spu_context *ctx = vma->vm_file->private_data;
unsigned long pfn, offset;
@@ -311,12 +312,11 @@ static const struct file_operations spufs_mem_fops = {
.mmap = spufs_mem_mmap,
};
-static int spufs_ps_fault(struct vm_area_struct *vma,
- struct vm_fault *vmf,
+static int spufs_ps_fault(struct vm_fault *vmf,
unsigned long ps_offs,
unsigned long ps_size)
{
- struct spu_context *ctx = vma->vm_file->private_data;
+ struct spu_context *ctx = vmf->vma->vm_file->private_data;
unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
int ret = 0;
@@ -354,7 +354,7 @@ static int spufs_ps_fault(struct vm_area_struct *vma,
down_read(&current->mm->mmap_sem);
} else {
area = ctx->spu->problem_phys + ps_offs;
- vm_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);
+ vm_insert_pfn(vmf->vma, vmf->address, (area + offset) >> PAGE_SHIFT);
spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
}
@@ -367,10 +367,9 @@ refault:
}
#if SPUFS_MMAP_4K
-static int spufs_cntl_mmap_fault(struct vm_area_struct *vma,
- struct vm_fault *vmf)
+static int spufs_cntl_mmap_fault(struct vm_fault *vmf)
{
- return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
+ return spufs_ps_fault(vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
}
static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
@@ -1042,15 +1041,15 @@ static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
}
static int
-spufs_signal1_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_signal1_mmap_fault(struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
- return spufs_ps_fault(vma, vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
+ return spufs_ps_fault(vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
* signal 1 and 2 area
*/
- return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
+ return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
@@ -1180,15 +1179,15 @@ static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
#if SPUFS_MMAP_4K
static int
-spufs_signal2_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_signal2_mmap_fault(struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
- return spufs_ps_fault(vma, vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
+ return spufs_ps_fault(vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
* signal 1 and 2 area
*/
- return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
+ return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
@@ -1309,9 +1308,9 @@ DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
#if SPUFS_MMAP_4K
static int
-spufs_mss_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_mss_mmap_fault(struct vm_fault *vmf)
{
- return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
+ return spufs_ps_fault(vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
}
static const struct vm_operations_struct spufs_mss_mmap_vmops = {
@@ -1371,9 +1370,9 @@ static const struct file_operations spufs_mss_fops = {
};
static int
-spufs_psmap_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_psmap_mmap_fault(struct vm_fault *vmf)
{
- return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE);
+ return spufs_ps_fault(vmf, 0x0000, SPUFS_PS_MAP_SIZE);
}
static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
@@ -1431,9 +1430,9 @@ static const struct file_operations spufs_psmap_fops = {
#if SPUFS_MMAP_4K
static int
-spufs_mfc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_mfc_mmap_fault(struct vm_fault *vmf)
{
- return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
+ return spufs_ps_fault(vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
}
static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 1be0499f5397..5720236d0266 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -916,7 +916,7 @@ cmds(struct pt_regs *excp)
memzcan();
break;
case 'i':
- show_mem(0);
+ show_mem(0, NULL);
break;
default:
termch = cmd;
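
The xmon change tracks show_mem() gaining a nodemask argument; a rough sketch of the updated call, under the assumed prototype:

    /* Assumed prototype after the change:
     *   void show_mem(unsigned int filter, nodemask_t *nodemask);
     * A NULL nodemask asks for statistics covering all nodes.
     */
    show_mem(0, NULL);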