author    Mathieu Desnoyers <mathieu.desnoyers@efficios.com>    2011-03-16 19:04:36 -0400
committer Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>     2011-03-16 19:04:36 -0400
commit    0a9c8d5c51c3e20809f36bdccb55af3ea3101cbf (patch)
tree      ce6e5d5cc21bd46282fb0de50a37af88f1518028 /mm
parent    efc1681fb4d936ee043f181898a38aeec5b7851d (diff)
lttng-instrumentation/lttng-instrumentation-hugetlb
LTTng instrumentation - hugetlb

Instrumentation of hugetlb activity (alloc/free/reserve/grab/release). These tracepoints are used by LTTng.

About the performance impact of tracepoints (which is comparable to markers): even without immediate-values optimizations, tests done by Hideo Aoki on ia64 show no regression. His test case used hackbench on a kernel where scheduler instrumentation (about 5 events in the scheduler code) was added. See the "Tracepoints" patch header for detailed performance results.

Changelog:
- instrument page grab, buddy allocator alloc, and page release.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
CC: William Lee Irwin III <wli@holomorphy.com>
CC: Masami Hiramatsu <mhiramat@redhat.com>
CC: 'Peter Zijlstra' <peterz@infradead.org>
CC: "Frank Ch. Eigler" <fche@redhat.com>
CC: 'Ingo Molnar' <mingo@elte.hu>
CC: 'Hideo AOKI' <haoki@redhat.com>
CC: Takashi Nishiie <t-nishiie@np.css.fujitsu.com>
CC: 'Steven Rostedt' <rostedt@goodmis.org>
CC: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
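For reference: the trace/hugetlb.h header included by the patch is not part of this diff, so the following is only a sketch of the declarations it would have to provide. The prototypes are inferred from the call sites in the diff below, and the probe uses the data-passing tracepoint API (register_trace_*(probe, data)) of kernels from this era; all probe-side names are hypothetical.

    /*
     * Sketch (not part of the patch) of what trace/hugetlb.h could
     * declare; prototypes inferred from the trace_* call sites.
     */
    #include <linux/tracepoint.h>
    #include <linux/module.h>
    #include <linux/mm.h>

    DECLARE_TRACE(hugetlb_page_grab,
            TP_PROTO(struct page *page),
            TP_ARGS(page));

    DECLARE_TRACE(hugetlb_pages_reserve,
            TP_PROTO(struct inode *inode, long from, long to, int ret),
            TP_ARGS(inode, from, to, ret));

    /*
     * Illustrative probe: a tracer (LTTng or otherwise) attaches
     * callbacks at runtime.  Probes receive the private data pointer
     * passed at registration time as their first argument.
     */
    static void probe_hugetlb_page_grab(void *data, struct page *page)
    {
            if (page)       /* page is NULL when the grab failed */
                    pr_debug("hugetlb grab: nid %d\n", page_to_nid(page));
    }

    static int __init hugetlb_probe_init(void)
    {
            return register_trace_hugetlb_page_grab(probe_hugetlb_page_grab,
                                                    NULL);
    }

Note that DEFINE_TRACE() must still appear in exactly one compilation unit per tracepoint, which is what the hunks in mm/hugetlb.c below provide.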
Diffstat (limited to 'mm')
-rw-r--r--    mm/hugetlb.c | 63
1 file changed, 46 insertions(+), 17 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index bb0b7c12801..2114fb2615e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -21,6 +21,7 @@
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
+#include <trace/hugetlb.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -53,6 +54,14 @@ static unsigned long __initdata default_hstate_size;
*/
static DEFINE_SPINLOCK(hugetlb_lock);
+DEFINE_TRACE(hugetlb_page_release);
+DEFINE_TRACE(hugetlb_page_grab);
+DEFINE_TRACE(hugetlb_buddy_pgalloc);
+DEFINE_TRACE(hugetlb_page_alloc);
+DEFINE_TRACE(hugetlb_page_free);
+DEFINE_TRACE(hugetlb_pages_reserve);
+DEFINE_TRACE(hugetlb_pages_unreserve);
+
/*
* Region tracking -- allows tracking of reservations and instantiated pages
* across the pages in a mapping.
@@ -500,6 +509,7 @@ static void update_and_free_page(struct hstate *h, struct page *page)
VM_BUG_ON(h->order >= MAX_ORDER);
+ trace_hugetlb_page_release(page);
h->nr_huge_pages--;
h->nr_huge_pages_node[page_to_nid(page)]--;
for (i = 0; i < pages_per_huge_page(h); i++) {
@@ -534,6 +544,7 @@ static void free_huge_page(struct page *page)
int nid = page_to_nid(page);
struct address_space *mapping;
+ trace_hugetlb_page_free(page);
mapping = (struct address_space *) page_private(page);
set_page_private(page, 0);
page->mapping = NULL;
@@ -598,8 +609,10 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
struct page *page;
- if (h->order >= MAX_ORDER)
- return NULL;
+ if (h->order >= MAX_ORDER) {
+ page = NULL;
+ goto end;
+ }
page = alloc_pages_exact_node(nid,
htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
@@ -608,11 +621,13 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
if (page) {
if (arch_prepare_hugepage(page)) {
__free_pages(page, huge_page_order(h));
- return NULL;
+ page = NULL;
+ goto end;
}
prep_new_huge_page(h, page, nid);
}
-
+end:
+ trace_hugetlb_page_grab(page);
return page;
}
@@ -781,7 +796,8 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
spin_lock(&hugetlb_lock);
if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
spin_unlock(&hugetlb_lock);
- return NULL;
+ page = NULL;
+ goto end;
} else {
h->nr_huge_pages++;
h->surplus_huge_pages++;
@@ -818,7 +834,8 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
}
spin_unlock(&hugetlb_lock);
-
+end:
+ trace_hugetlb_buddy_pgalloc(page);
return page;
}
@@ -1054,6 +1071,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
vma_commit_reservation(h, vma, addr);
+ trace_hugetlb_page_alloc(page);
return page;
}
@@ -2837,7 +2855,8 @@ int hugetlb_reserve_pages(struct inode *inode,
struct vm_area_struct *vma,
int acctflag)
{
- long ret, chg;
+ int ret = 0;
+ long chg;
struct hstate *h = hstate_inode(inode);
/*
@@ -2846,7 +2865,7 @@ int hugetlb_reserve_pages(struct inode *inode,
* and filesystem quota without using reserves
*/
if (acctflag & VM_NORESERVE)
- return 0;
+ goto end;
/*
* Shared mappings base their reservation on the number of pages that
@@ -2858,8 +2877,10 @@ int hugetlb_reserve_pages(struct inode *inode,
chg = region_chg(&inode->i_mapping->private_list, from, to);
else {
struct resv_map *resv_map = resv_map_alloc();
- if (!resv_map)
- return -ENOMEM;
+ if (!resv_map) {
+ ret = -ENOMEM;
+ goto end;
+ }
chg = to - from;
@@ -2867,12 +2888,16 @@ int hugetlb_reserve_pages(struct inode *inode,
set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
}
- if (chg < 0)
- return chg;
+ if (chg < 0) {
+ ret = chg;
+ goto end;
+ }
/* There must be enough filesystem quota for the mapping */
- if (hugetlb_get_quota(inode->i_mapping, chg))
- return -ENOSPC;
+ if (hugetlb_get_quota(inode->i_mapping, chg)) {
+ ret = -ENOSPC;
+ goto end;
+ }
/*
* Check enough hugepages are available for the reservation.
@@ -2881,7 +2906,7 @@ int hugetlb_reserve_pages(struct inode *inode,
ret = hugetlb_acct_memory(h, chg);
if (ret < 0) {
hugetlb_put_quota(inode->i_mapping, chg);
- return ret;
+ goto end;
}
/*
@@ -2897,14 +2922,18 @@ int hugetlb_reserve_pages(struct inode *inode,
*/
if (!vma || vma->vm_flags & VM_MAYSHARE)
region_add(&inode->i_mapping->private_list, from, to);
- return 0;
+end:
+ trace_hugetlb_pages_reserve(inode, from, to, ret);
+ return ret;
}
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
struct hstate *h = hstate_inode(inode);
- long chg = region_truncate(&inode->i_mapping->private_list, offset);
+ long chg;
+ trace_hugetlb_pages_unreserve(inode, offset, freed);
+ chg = region_truncate(&inode->i_mapping->private_list, offset);
spin_lock(&inode->i_lock);
inode->i_blocks -= (blocks_per_huge_page(h) * freed);
spin_unlock(&inode->i_lock);
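
A note on the refactoring visible in alloc_fresh_huge_page_node(), alloc_buddy_huge_page() and hugetlb_reserve_pages(): early returns are rewritten as goto end so that the tracepoint at the single exit observes every path, including failures, with the final return value. Reduced to a sketch, with precondition() and do_work() as hypothetical stand-ins for the real checks, the pattern is:

    /* Sketch of the single-exit pattern used by the patch. */
    static int reserve_example(struct inode *inode, long from, long to)
    {
            int ret = 0;

            if (!precondition(inode)) {
                    ret = -EINVAL;
                    goto end;       /* error path falls through to the tracepoint */
            }

            ret = do_work(inode, from, to);
    end:
            /* single exit: success and failure alike are traced */
            trace_hugetlb_pages_reserve(inode, from, to, ret);
            return ret;
    }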