author	Matt Mackall <mpm@selenic.com>	2007-07-21 04:37:40 -0700
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-21 17:49:16 -0700
commit	d6269543ef24aa012aa228c27af3adb074f7b36b (patch)
tree	5e0c24ce1dd5ed3947ea00b7863782e24848b8d3 /mm
parent	41f9dc5c871600f53c8912b2975971d2a11c1c25 (diff)
slob: reduce list scanning
The version of SLOB in -mm always scans its free list from the beginning, which results in small allocations and free segments clustering at the beginning of the list over time.  This causes the average search to scan over a large stretch at the beginning on each allocation.

By starting each page search where the last one left off, we evenly distribute the allocations and greatly shorten the average search.

Without this patch, kernel compiles on a 1.5G machine take a large amount of system time for list scanning.  With this patch, compiles are within a few seconds of performance of a SLAB kernel with no notable change in system time.

Signed-off-by: Matt Mackall <mpm@selenic.com>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
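The change described above amounts to switching the free-list scan from first fit (every search starts at the head of the list) to next fit (each search resumes where the last successful one ended). Below is a minimal standalone C sketch of that idea, not the SLOB code itself; the page array, sizes and names are illustrative assumptions only.

#include <stdio.h>

#define NPAGES 8

struct page { int units; };	/* free units left on a page */

static struct page pages[NPAGES] = {
	{ 2 }, { 1 }, { 0 }, { 5 }, { 3 }, { 0 }, { 7 }, { 4 }
};

static int cursor;	/* where the last successful search left off */

/* next fit: scan at most NPAGES entries, starting at the cursor */
static int next_fit_alloc(int units_needed)
{
	int i, idx;

	for (i = 0; i < NPAGES; i++) {
		idx = (cursor + i) % NPAGES;
		if (pages[idx].units >= units_needed) {
			pages[idx].units -= units_needed;
			cursor = idx;	/* resume here next time */
			return idx;
		}
	}
	return -1;	/* no partially free page has enough room */
}

int main(void)
{
	/* successive searches pick up where the previous one stopped */
	printf("got page %d\n", next_fit_alloc(3));	/* page 3 */
	printf("got page %d\n", next_fit_alloc(3));	/* page 4 */
	printf("got page %d\n", next_fit_alloc(3));	/* page 6 */
	return 0;
}

With first fit, every one of those calls would rescan the small, mostly-full pages at the front of the list; the cursor spreads the allocations around, which is what shortens the average search.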
Diffstat (limited to 'mm')
mm/slob.c | 21 ++++++++++++++++-----
1 files changed, 16 insertions, 5 deletions
diff --git a/mm/slob.c b/mm/slob.c
index d50920ecc02..ec33fcdc852 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -293,6 +293,7 @@ static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 {
 	struct slob_page *sp;
+	struct list_head *prev;
 	slob_t *b = NULL;
 	unsigned long flags;
@@ -307,12 +308,22 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		if (node != -1 && page_to_nid(&sp->page) != node)
 			continue;
 #endif
+		/* Enough room on this page? */
+		if (sp->units < SLOB_UNITS(size))
+			continue;
 
-		if (sp->units >= SLOB_UNITS(size)) {
-			b = slob_page_alloc(sp, size, align);
-			if (b)
-				break;
-		}
+		/* Attempt to alloc */
+		prev = sp->list.prev;
+		b = slob_page_alloc(sp, size, align);
+		if (!b)
+			continue;
+
+		/* Improve fragment distribution and reduce our average
+		 * search time by starting our next search here. (see
+		 * Knuth vol 1, sec 2.5, pg 449) */
+		if (free_slob_pages.next != prev->next)
+			list_move_tail(&free_slob_pages, prev->next);
+		break;
 	}
 	spin_unlock_irqrestore(&slob_lock, flags);
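A note on how the hunk above implements "start the next search here", assuming free_slob_pages is the head of an ordinary circular list_head ring: list_for_each_entry() always begins at free_slob_pages.next, so the search position is remembered by moving the list head itself. prev is saved before slob_page_alloc() because that call may unlink sp from the list if the page becomes full; list_move_tail(&free_slob_pages, prev->next) then re-links the head immediately before prev->next, so the next scan starts right after the point this allocation came from, and the guard skips the move when the head already sits there. A small userspace model of that head rotation follows, with a hand-rolled ring standing in for <linux/list.h>; names and values are illustrative only.

#include <stdio.h>

struct node { struct node *next, *prev; int id; };

/* unlink n and re-insert it immediately before pos (the userspace
 * analogue of list_move_tail(n, pos)) */
static void move_tail(struct node *n, struct node *pos)
{
	n->prev->next = n->next;	/* unlink n */
	n->next->prev = n->prev;
	n->prev = pos->prev;		/* splice n in before pos */
	n->next = pos;
	pos->prev->next = n;
	pos->prev = n;
}

int main(void)
{
	struct node head = { .id = 0 }, a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
	struct node *ring[] = { &head, &a, &b, &c };
	struct node *p;
	int i;

	/* build the circular list: head -> a -> b -> c -> back to head */
	for (i = 0; i < 4; i++) {
		ring[i]->next = ring[(i + 1) % 4];
		ring[i]->prev = ring[(i + 3) % 4];
	}

	/* pretend the allocation came from b, whose predecessor was a */
	move_tail(&head, a.next);	/* head now sits just before b */

	/* a scan that starts at head.next now begins at b, not a */
	for (p = head.next; p != &head; p = p->next)
		printf("visit %d\n", p->id);	/* prints 2, 3, 1 */
	return 0;
}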