/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is 4 bytes on 32-bit and 8 bytes on 64-bit, though it
 * could be as low as 2 if the compiler alignment requirements allow.
 *
 * The slob heap is a linked list of pages from __get_free_page, and
 * within each page, there is a singly-linked list of free blocks (slob_t).
 * The heap is grown on demand and allocation from the heap is currently
 * first-fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are aligned to the allocator granularity and prepended
 * with a one-unit header (4 bytes on 32-bit, 8 bytes on 64-bit).
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * __get_free_pages directly, allocating compound pages so the page order
 * does not have to be separately tracked, and also stores the exact
 * allocation size in page->private so that it can be used to accurately
 * provide ksize(). These objects are detected in kfree() because slob_page()
 * is false for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors for
 * every SLAB allocation (a destructor argument is accepted for
 * compatibility but never called). Objects are returned with the
 * minimum alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling __get_free_pages. As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead, and compound pages aren't needed for multi-page
 * allocations.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <asm/atomic.h>

/* SLOB_MIN_ALIGN == sizeof(long) */
#if BITS_PER_LONG == 32
#define SLOB_MIN_ALIGN 4
#else
#define SLOB_MIN_ALIGN 8
#endif

/*
 * slob_block has a field 'units', which indicates size of block if +ve,
 * or offset of next block if -ve (in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
#if PAGE_SIZE <= (32767 * SLOB_MIN_ALIGN)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif
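
/*
 * A worked example of the encoding (unit offsets are made-up values,
 * assuming 32-bit with 4-byte units): a free block at unit offset 10
 * whose next free block lives at unit offset 20 in the same page is
 * stored as
 *
 *	3 units free:	s[0].units = 3;   s[1].units = 20;
 *	1 unit free:	s[0].units = -20;
 */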

/*
 * Align struct slob_block to long for now, though some embedded
 * architectures might be able to get away with less.
 */
struct slob_block {
	slobidx_t units;
} __attribute__((aligned(SLOB_MIN_ALIGN)));
typedef struct slob_block slob_t;

/*
 * We use struct page fields to manage some slob allocation aspects,
 * however to avoid the horrible mess in include/linux/mm_types.h, we'll
 * just define our own struct page type variant here.
 */
struct slob_page {
	union {
		struct {
			unsigned long flags;	/* mandatory */
			atomic_t _count;	/* mandatory */
			slobidx_t units;	/* free units left in page */
			unsigned long pad[2];
			slob_t *free;		/* first free slob_t in page */
			struct list_head list;	/* linked list of free pages */
		};
		struct page page;
	};
};
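
/*
 * BUILD_BUG_ON() needs function scope, so this dummy function exists
 * purely to hold the size check at file level.
 */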
static inline void struct_slob_page_wrong_size(void)
{ BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }

/*
 * free_slob_page: call before a slob_page is returned to the page allocator.
 */
static inline void free_slob_page(struct slob_page *sp)
{
	reset_page_mapcount(&sp->page);
	sp->page.mapping = NULL;
}

/*
 * All (partially) free slob pages go on this list.
 */
static LIST_HEAD(free_slob_pages);

/*
 * slob_page: True for all slob pages (false for bigblock pages)
 */
static inline int slob_page(struct slob_page *sp)
{
	return test_bit(PG_active, &sp->flags);
}

static inline void set_slob_page(struct slob_page *sp)
{
	__set_bit(PG_active, &sp->flags);
}

static inline void clear_slob_page(struct slob_page *sp)
{
	__clear_bit(PG_active, &sp->flags);
}

/*
 * slob_page_free: true for pages on free_slob_pages list.
 */
static inline int slob_page_free(struct slob_page *sp)
{
	return test_bit(PG_private, &sp->flags);
}

static inline void set_slob_page_free(struct slob_page *sp)
{
	list_add(&sp->list, &free_slob_pages);
	__set_bit(PG_private, &sp->flags);
}

static inline void clear_slob_page_free(struct slob_page *sp)
{
	list_del(&sp->list);
	__clear_bit(PG_private, &sp->flags);
}
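
/*
 * Note on the helpers above: slob pages never reach the LRU or the page
 * cache, so the PG_active and PG_private bits are free for reuse as
 * slob-private state; the non-atomic __set_bit/__clear_bit variants are
 * safe because the page is either freshly allocated or manipulated under
 * slob_lock.
 */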

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base+next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}
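
/*
 * The free list within a page is terminated by pointing the last block's
 * "next" at the first unit of the following page: base + next is then
 * page-aligned, which is exactly what the check above detects. The
 * fresh-page setup in slob_alloc() below establishes that encoding.
 */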

/*
 * Allocate a slob block within a given slob_page sp.
 */
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->free = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->free = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}
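
/*
 * Alignment example (made-up numbers, 4-byte units): a 16-unit free
 * block that starts 8 bytes past a 16-byte alignment boundary is split
 * by the delta path above into a 2-unit head block followed by a
 * 14-unit aligned block, and the allocation is then carved from the
 * aligned block.
 */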

/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align)
{
	struct slob_page *sp;
	slob_t *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, &free_slob_pages, list) {
		if (sp->units >= SLOB_UNITS(size)) {
			b = slob_page_alloc(sp, size, align);
			if (b)
				break;
		}
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = (slob_t *)__get_free_page(gfp);
		if (!b)
			return NULL;
		sp = (struct slob_page *)virt_to_page(b);
		set_slob_page(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->free = b;
		INIT_LIST_HEAD(&sp->list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	return b;
}
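
/*
 * Worked example for the new-page path (assuming 4K pages and 4-byte
 * units): the fresh page is installed as a single 1024-unit free block
 * whose next pointer is b + 1024 units, i.e. the start of the following
 * page, so slob_last() is true for it until the first split.
 */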

/*
 * slob_free: return a block to the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct slob_page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;

	if (!block)
		return;
	BUG_ON(!size);

	sp = (struct slob_page *)virt_to_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		clear_slob_page(sp);
		free_slob_page(sp);
		free_page((unsigned long)b);
		goto out;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->free = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		set_slob_page_free(sp);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
	sp->units += units;

	if (b < sp->free) {
		set_slob(b, units, sp->free);
		sp->free = b;
	} else {
		prev = sp->free;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}
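
/*
 * Coalescing example (unit offsets are made up): freeing a 4-unit block
 * at offset 12 between a 4-unit free block at offset 8 and another at
 * offset 16 first merges b with the following block (b + units == next),
 * then folds the result into the preceding one (prev + slob_units(prev)
 * == b), leaving one 12-unit free block at offset 8.
 */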

/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

void *__kmalloc(size_t size, gfp_t gfp)
{
	if (size < PAGE_SIZE - SLOB_UNIT) {
		slob_t *m;
		m = slob_alloc(size + SLOB_UNIT, gfp, 0);
		if (!m)
			return NULL;
		m->units = size;
		return m + 1;
	} else {
		void *ret;

		ret = (void *) __get_free_pages(gfp | __GFP_COMP,
						get_order(size));
		if (ret) {
			struct page *page;
			page = virt_to_page(ret);
			page->private = size;
		}
		return ret;
	}
}
EXPORT_SYMBOL(__kmalloc);
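
/*
 * Small-object layout produced by __kmalloc() (one unit is 4 bytes on
 * 32-bit, 8 bytes on 64-bit):
 *
 *	m              m + 1
 *	+--------------+------------------------------+
 *	| units = size | payload seen by the caller   |
 *	+--------------+------------------------------+
 *
 * kfree() and ksize() step back one unit from the payload to recover
 * the stored byte size.
 */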

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 *
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!p))
		return kmalloc_track_caller(new_size, flags);

	if (unlikely(!new_size)) {
		kfree(p);
		return NULL;
	}

	ret = kmalloc_track_caller(new_size, flags);
	if (ret) {
		memcpy(ret, p, min(new_size, ksize(p)));
		kfree(p);
	}
	return ret;
}
EXPORT_SYMBOL(krealloc);
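
/*
 * Typical call pattern (a sketch; the names are illustrative). The old
 * buffer is left intact when krealloc() fails, so it must not be
 * overwritten blindly:
 *
 *	new = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!new)
 *		return -ENOMEM;	(buf is still valid and still ours)
 *	buf = new;
 */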

void kfree(const void *block)
{
	struct slob_page *sp;

	if (!block)
		return;

	sp = (struct slob_page *)virt_to_page(block);
	if (slob_page(sp)) {
		slob_t *m = (slob_t *)block - 1;
		slob_free(m, m->units + SLOB_UNIT);
	} else
		put_page(&sp->page);
}
EXPORT_SYMBOL(kfree);

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
	struct slob_page *sp;

	if (!block)
		return 0;

	sp = (struct slob_page *)virt_to_page(block);
	if (slob_page(sp))
		return ((slob_t *)block - 1)->units + SLOB_UNIT;
	else
		return sp->page.private;
}

struct kmem_cache {
	unsigned int size, align;
	unsigned long flags;
	const char *name;
	void (*ctor)(void *, struct kmem_cache *, unsigned long);
};

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
	size_t align, unsigned long flags,
	void (*ctor)(void*, struct kmem_cache *, unsigned long),
	void (*dtor)(void*, struct kmem_cache *, unsigned long))
{
	struct kmem_cache *c;

	/* slab flags are not gfp flags: allocate the descriptor normally */
	c = slob_alloc(sizeof(struct kmem_cache), GFP_KERNEL, 0);

	if (c) {
		c->name = name;
		c->size = size;
		if (flags & SLAB_DESTROY_BY_RCU) {
			/* leave room for rcu footer at the end of object */
			c->size += sizeof(struct slob_rcu);
		}
		c->flags = flags;
		c->ctor = ctor;
		/* ignore alignment unless it's forced */
		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
		if (c->align < align)
			c->align = align;
	} else if (flags & SLAB_PANIC)
		panic("Cannot create slab cache %s\n", name);

	return c;
}
EXPORT_SYMBOL(kmem_cache_create);
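
/*
 * Typical cache setup (a sketch; the "foo" names are illustrative):
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL, NULL);
 *	p = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, p);
 */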

void kmem_cache_destroy(struct kmem_cache *c)
{
	slob_free(c, sizeof(struct kmem_cache));
}
EXPORT_SYMBOL(kmem_cache_destroy);

void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags)
{
	void *b;

	if (c->size < PAGE_SIZE)
		b = slob_alloc(c->size, flags, c->align);
	else
		b = (void *)__get_free_pages(flags, get_order(c->size));

	if (b && c->ctor)
		c->ctor(b, c, 0);

	return b;
}
EXPORT_SYMBOL(kmem_cache_alloc);

void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
{
	void *ret = kmem_cache_alloc(c, flags);
	if (ret)
		memset(ret, 0, c->size);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_zalloc);

static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		free_pages((unsigned long)b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}

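/*
 * SLAB_DESTROY_BY_RCU layout: kmem_cache_create() grew c->size by
 * sizeof(struct slob_rcu), so the footer sits in the last bytes of every
 * object and kmem_rcu_free() can step back from the rcu_head to the
 * object base:
 *
 *	b                       b + size - sizeof(struct slob_rcu)
 *	+-----------------------+------------------+
 *	| object payload        | struct slob_rcu  |
 *	+-----------------------+------------------+
 */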
void kmem_cache_free(struct kmem_cache *c, void *b)
{
	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		INIT_RCU_HEAD(&slob_rcu->head);
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}
}
EXPORT_SYMBOL(kmem_cache_free);

unsigned int kmem_cache_size(struct kmem_cache *c)
{
	return c->size;
}
EXPORT_SYMBOL(kmem_cache_size);

const char *kmem_cache_name(struct kmem_cache *c)
{
	return c->name;
}
EXPORT_SYMBOL(kmem_cache_name);

int kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);

int kmem_ptr_validate(struct kmem_cache *a, const void *b)
{
	return 0;
}

void __init kmem_cache_init(void)
{
}