Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/byteorder/generic.h    4
-rw-r--r--   include/linux/mm.h                  10
-rw-r--r--   include/linux/syscalls.h             2
3 files changed, 11 insertions, 5 deletions
diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h
index 5fde6f4d6c1..04bd756efc6 100644
--- a/include/linux/byteorder/generic.h
+++ b/include/linux/byteorder/generic.h
@@ -5,6 +5,10 @@
* linux/byteorder_generic.h
* Generic Byte-reordering support
*
+ * The "... p" macros, like le64_to_cpup, can be used with pointers
+ * to unaligned data, but there will be a performance penalty on
+ * some architectures. Use get_unaligned for unaligned data.
+ *
* Francois-Rene Rideau <fare@tunes.org> 19970707
* gathered all the good ideas from all asm-foo/byteorder.h into one file,
* cleaned them up.
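The behaviour this new comment describes is easiest to see with a small sketch (hypothetical helpers, not part of the patch; the prototypes assume the __le64-annotated byteorder API and asm/unaligned.h): le64_to_cpup() dereferences its pointer argument directly, so it should be given naturally aligned data, while get_unaligned() copes with any address at some cost on architectures that lack unaligned loads.

    #include <linux/types.h>
    #include <asm/byteorder.h>      /* pulls in linux/byteorder/generic.h */
    #include <asm/unaligned.h>

    /* Aligned case: p points at a naturally aligned little-endian field. */
    static u64 read_le64_aligned(const __le64 *p)
    {
            return le64_to_cpup(p);
    }

    /* Unaligned case: buf may point anywhere, e.g. into a packet payload. */
    static u64 read_le64_unaligned(const void *buf)
    {
            return le64_to_cpu(get_unaligned((const __le64 *)buf));
    }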
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 82d7024f076..097b3a3c693 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -136,6 +136,7 @@ extern unsigned int kobjsize(const void *objp);
#define VM_EXEC 0x00000004
#define VM_SHARED 0x00000008
+/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD 0x00000010 /* limits for mprotect() etc */
#define VM_MAYWRITE 0x00000020
#define VM_MAYEXEC 0x00000040
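The layout the new comment relies on can be spelled out as compile-time assertions (an illustrative sketch, not part of the patch; BUILD_BUG_ON() from linux/kernel.h is assumed to be available). mprotect() exploits this by shifting the VM_MAY* limit bits into the position of the corresponding VM_* bits instead of testing each flag by name.

    #include <linux/kernel.h>
    #include <linux/mm.h>

    /* Each VM_MAY* limit bit sits exactly four bits above the VM_* bit it
     * gates, e.g. VM_MAYREAD (0x10) >> 4 == VM_READ (0x01). */
    static inline void vm_flag_layout_check(void)
    {
            BUILD_BUG_ON((VM_MAYREAD  >> 4) != VM_READ);
            BUILD_BUG_ON((VM_MAYWRITE >> 4) != VM_WRITE);
            BUILD_BUG_ON((VM_MAYEXEC  >> 4) != VM_EXEC);
    }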
@@ -350,7 +351,8 @@ static inline void put_page(struct page *page)
* only one copy in memory, at most, normally.
*
* For the non-reserved pages, page_count(page) denotes a reference count.
- * page_count() == 0 means the page is free.
+ * page_count() == 0 means the page is free. page->lru is then used for
+ * freelist management in the buddy allocator.
* page_count() == 1 means the page is used for exactly one purpose
* (e.g. a private data page of one process).
*
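Roughly what the added sentence means in code (an illustrative sketch, not the real allocator, and assuming the 2.6-era struct free_area with a single free_list per order): once page_count() reaches zero the buddy allocator owns the page and threads it onto a per-order free list through page->lru.

    #include <linux/mm.h>
    #include <linux/mmzone.h>
    #include <linux/list.h>

    /* Sketch of handing a reference-free page to a buddy free list. */
    static void sketch_put_on_freelist(struct zone *zone, struct page *page,
                                       unsigned int order)
    {
            BUG_ON(page_count(page) != 0);  /* free pages hold no references */
            list_add(&page->lru, &zone->free_area[order].free_list);
            zone->free_area[order].nr_free++;
    }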
@@ -376,10 +378,8 @@ static inline void put_page(struct page *page)
* attaches, plus 1 if `private' contains something, plus one for
* the page cache itself.
*
- * All pages belonging to an inode are in these doubly linked lists:
- * mapping->clean_pages, mapping->dirty_pages and mapping->locked_pages;
- * using the page->list list_head. These fields are also used for
- * freelist managemet (when page_count()==0).
+ * Instead of keeping dirty/clean pages in per address-space lists, we
+ * now tag pages as dirty/under writeback in the radix tree.
*
* There is also a per-mapping radix tree mapping index to the page
* in memory if present. The tree is rooted at mapping->root.
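A rough sketch of the tagging the new text refers to (hypothetical helper, simplified from __set_page_dirty_nobuffers-style code; the caller is assumed to hold mapping->tree_lock): dirtiness is recorded both in the page flags and as a tag on the page's slot in the mapping's radix tree, so writeback can find dirty pages with a tagged lookup rather than by walking mapping->dirty_pages.

    #include <linux/fs.h>
    #include <linux/pagemap.h>
    #include <linux/page-flags.h>
    #include <linux/radix-tree.h>

    /* Caller holds mapping->tree_lock. */
    static void sketch_tag_page_dirty(struct address_space *mapping,
                                      struct page *page)
    {
            if (!TestSetPageDirty(page))
                    radix_tree_tag_set(&mapping->page_tree, page->index,
                                       PAGECACHE_TAG_DIRTY);
    }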
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 425f58c8ea4..a6f03e47373 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -508,5 +508,7 @@ asmlinkage long sys_keyctl(int cmd, unsigned long arg2, unsigned long arg3,
asmlinkage long sys_ioprio_set(int which, int who, int ioprio);
asmlinkage long sys_ioprio_get(int which, int who);
+asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
+ unsigned long maxnode);
#endif
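For context, the declaration added above backs the set_mempolicy(2) interface. A minimal userspace sketch (an example only: it goes through syscall(2) on the assumption that the libc headers define SYS_set_mempolicy but provide no wrapper, the MPOL_BIND value is copied from include/linux/mempolicy.h, and node 0 is an arbitrary choice):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #define MPOL_BIND 2                     /* from include/linux/mempolicy.h */

    int main(void)
    {
            unsigned long nodemask = 1UL;   /* bit 0 set: allocate on node 0 only */

            /* maxnode is the number of bits in the mask being passed in */
            if (syscall(SYS_set_mempolicy, MPOL_BIND, &nodemask,
                        sizeof(nodemask) * 8) != 0) {
                    perror("set_mempolicy");
                    return 1;
            }
            return 0;
    }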