author		Chris Zankel <chris@zankel.net>	2009-04-03 02:29:05 -0700
committer	Chris Zankel <chris@zankel.net>	2009-04-03 02:29:05 -0700
commit		65127d28e312bb6b38ce84a7bb71d762ef63ad4c (patch)
tree		d5fdf52a2d0731f7fab0ce0ed394faac50b04fbc /mm
parent		b8bb76713ec50df2f11efee386e16f93d51e1076 (diff)
parent		8fe74cf053de7ad2124a894996f84fa890a81093 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into merge
Diffstat (limited to 'mm')
-rw-r--r--	mm/Kconfig.debug	9
-rw-r--r--	mm/filemap_xip.c	4
-rw-r--r--	mm/memcontrol.c		687
-rw-r--r--	mm/mmap.c		3
-rw-r--r--	mm/nommu.c		52
-rw-r--r--	mm/oom_kill.c		1
-rw-r--r--	mm/page_cgroup.c	37
-rw-r--r--	mm/slab.c		3
-rw-r--r--	mm/vmstat.c		5
9 files changed, 495 insertions, 306 deletions
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index c8d62d49a44..bb01e298f26 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -1,3 +1,12 @@
+config DEBUG_PAGEALLOC
+ bool "Debug page memory allocations"
+ depends on DEBUG_KERNEL && ARCH_SUPPORTS_DEBUG_PAGEALLOC
+ depends on !HIBERNATION || !PPC && !SPARC
+ ---help---
+ Unmap pages from the kernel linear mapping after free_pages().
+ This results in a large slowdown, but helps to find certain types
+ of memory corruptions.
+
config WANT_PAGE_DEBUG_FLAGS
bool
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 0c04615651b..427dfe3ce78 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -89,8 +89,8 @@ do_xip_mapping_read(struct address_space *mapping,
}
}
nr = nr - offset;
- if (nr > len)
- nr = len;
+ if (nr > len - copied)
+ nr = len - copied;
error = mapping->a_ops->get_xip_mem(mapping, index, 0,
&xip_mem, &xip_pfn);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8e4be9cb2a6..2fc6d6c4823 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -27,6 +27,7 @@
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
+#include <linux/limits.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/swap.h>
@@ -95,6 +96,15 @@ static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
return ret;
}
+static s64 mem_cgroup_local_usage(struct mem_cgroup_stat *stat)
+{
+ s64 ret;
+
+ ret = mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_CACHE);
+ ret += mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_RSS);
+ return ret;
+}
+
/*
* per-zone information in memory controller.
*/
@@ -154,9 +164,9 @@ struct mem_cgroup {
/*
* While reclaiming in a hierarchy, we cache the last child we
- * reclaimed from. Protected by hierarchy_mutex
+ * reclaimed from.
*/
- struct mem_cgroup *last_scanned_child;
+ int last_scanned_child;
/*
* Should the accounting and control be hierarchical, per subtree?
*/
@@ -247,7 +257,7 @@ page_cgroup_zoneinfo(struct page_cgroup *pc)
return mem_cgroup_zoneinfo(mem, nid, zid);
}
-static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
+static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
enum lru_list idx)
{
int nid, zid;
@@ -286,6 +296,9 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
struct mem_cgroup *mem = NULL;
+
+ if (!mm)
+ return NULL;
/*
* Because we have no locks, mm->owner's may be being moved to other
* cgroup. We use css_tryget() here even if this looks
@@ -308,6 +321,42 @@ static bool mem_cgroup_is_obsolete(struct mem_cgroup *mem)
return css_is_removed(&mem->css);
}
+
+/*
+ * Call the callback function against all cgroups under the hierarchy tree.
+ */
+static int mem_cgroup_walk_tree(struct mem_cgroup *root, void *data,
+ int (*func)(struct mem_cgroup *, void *))
+{
+ int found, ret, nextid;
+ struct cgroup_subsys_state *css;
+ struct mem_cgroup *mem;
+
+ if (!root->use_hierarchy)
+ return (*func)(root, data);
+
+ nextid = 1;
+ do {
+ ret = 0;
+ mem = NULL;
+
+ rcu_read_lock();
+ css = css_get_next(&mem_cgroup_subsys, nextid, &root->css,
+ &found);
+ if (css && css_tryget(css))
+ mem = container_of(css, struct mem_cgroup, css);
+ rcu_read_unlock();
+
+ if (mem) {
+ ret = (*func)(mem, data);
+ css_put(&mem->css);
+ }
+ nextid = found + 1;
+ } while (!ret && css);
+
+ return ret;
+}
+
/*
* Following LRU functions are allowed to be used without PCG_LOCK.
* Operations are called by routine of global LRU independently from memcg.
@@ -441,31 +490,24 @@ void mem_cgroup_move_lists(struct page *page,
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
int ret;
+ struct mem_cgroup *curr = NULL;
task_lock(task);
- ret = task->mm && mm_match_cgroup(task->mm, mem);
+ rcu_read_lock();
+ curr = try_get_mem_cgroup_from_mm(task->mm);
+ rcu_read_unlock();
task_unlock(task);
+ if (!curr)
+ return 0;
+ if (curr->use_hierarchy)
+ ret = css_is_ancestor(&curr->css, &mem->css);
+ else
+ ret = (curr == mem);
+ css_put(&curr->css);
return ret;
}
/*
- * Calculate mapped_ratio under memory controller. This will be used in
- * vmscan.c for deteremining we have to reclaim mapped pages.
- */
-int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
-{
- long total, rss;
-
- /*
- * usage is recorded in bytes. But, here, we assume the number of
- * physical pages can be represented by "long" on any arch.
- */
- total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
- rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
- return (int)((rss * 100L) / total);
-}
-
-/*
* prev_priority control...this will be used in memory reclaim path.
*/
int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
@@ -501,8 +543,8 @@ static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_
unsigned long gb;
unsigned long inactive_ratio;
- inactive = mem_cgroup_get_all_zonestat(memcg, LRU_INACTIVE_ANON);
- active = mem_cgroup_get_all_zonestat(memcg, LRU_ACTIVE_ANON);
+ inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
+ active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);
gb = (inactive + active) >> (30 - PAGE_SHIFT);
if (gb)
@@ -629,172 +671,202 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
#define mem_cgroup_from_res_counter(counter, member) \
container_of(counter, struct mem_cgroup, member)
-/*
- * This routine finds the DFS walk successor. This routine should be
- * called with hierarchy_mutex held
- */
-static struct mem_cgroup *
-__mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
+static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
{
- struct cgroup *cgroup, *curr_cgroup, *root_cgroup;
-
- curr_cgroup = curr->css.cgroup;
- root_cgroup = root_mem->css.cgroup;
+ if (do_swap_account) {
+ if (res_counter_check_under_limit(&mem->res) &&
+ res_counter_check_under_limit(&mem->memsw))
+ return true;
+ } else
+ if (res_counter_check_under_limit(&mem->res))
+ return true;
+ return false;
+}
- if (!list_empty(&curr_cgroup->children)) {
- /*
- * Walk down to children
- */
- cgroup = list_entry(curr_cgroup->children.next,
- struct cgroup, sibling);
- curr = mem_cgroup_from_cont(cgroup);
- goto done;
- }
+static unsigned int get_swappiness(struct mem_cgroup *memcg)
+{
+ struct cgroup *cgrp = memcg->css.cgroup;
+ unsigned int swappiness;
-visit_parent:
- if (curr_cgroup == root_cgroup) {
- /* caller handles NULL case */
- curr = NULL;
- goto done;
- }
+ /* root ? */
+ if (cgrp->parent == NULL)
+ return vm_swappiness;
- /*
- * Goto next sibling
- */
- if (curr_cgroup->sibling.next != &curr_cgroup->parent->children) {
- cgroup = list_entry(curr_cgroup->sibling.next, struct cgroup,
- sibling);
- curr = mem_cgroup_from_cont(cgroup);
- goto done;
- }
+ spin_lock(&memcg->reclaim_param_lock);
+ swappiness = memcg->swappiness;
+ spin_unlock(&memcg->reclaim_param_lock);
- /*
- * Go up to next parent and next parent's sibling if need be
- */
- curr_cgroup = curr_cgroup->parent;
- goto visit_parent;
+ return swappiness;
+}
-done:
- return curr;
+static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data)
+{
+ int *val = data;
+ (*val)++;
+ return 0;
}
-/*
- * Visit the first child (need not be the first child as per the ordering
- * of the cgroup list, since we track last_scanned_child) of @mem and use
- * that to reclaim free pages from.
+/**
+ * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
+ * @memcg: The memory cgroup that went over limit
+ * @p: Task that is going to be killed
+ *
+ * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
+ * enabled
*/
-static struct mem_cgroup *
-mem_cgroup_get_next_node(struct mem_cgroup *root_mem)
+void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
- struct cgroup *cgroup;
- struct mem_cgroup *orig, *next;
- bool obsolete;
-
+ struct cgroup *task_cgrp;
+ struct cgroup *mem_cgrp;
/*
- * Scan all children under the mem_cgroup mem
+ * Need a buffer in BSS, can't rely on allocations. The code relies
+ * on the assumption that OOM is serialized for memory controller.
+ * If this assumption is broken, revisit this code.
*/
- mutex_lock(&mem_cgroup_subsys.hierarchy_mutex);
+ static char memcg_name[PATH_MAX];
+ int ret;
+
+ if (!memcg)
+ return;
- orig = root_mem->last_scanned_child;
- obsolete = mem_cgroup_is_obsolete(orig);
- if (list_empty(&root_mem->css.cgroup->children)) {
+ rcu_read_lock();
+
+ mem_cgrp = memcg->css.cgroup;
+ task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
+
+ ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
+ if (ret < 0) {
/*
- * root_mem might have children before and last_scanned_child
- * may point to one of them. We put it later.
+ * Unfortunately, we are unable to convert to a useful name
+ * But we'll still print out the usage information
*/
- if (orig)
- VM_BUG_ON(!obsolete);
- next = NULL;
+ rcu_read_unlock();
goto done;
}
+ rcu_read_unlock();
- if (!orig || obsolete) {
- cgroup = list_first_entry(&root_mem->css.cgroup->children,
- struct cgroup, sibling);
- next = mem_cgroup_from_cont(cgroup);
- } else
- next = __mem_cgroup_get_next_node(orig, root_mem);
+ printk(KERN_INFO "Task in %s killed", memcg_name);
+ rcu_read_lock();
+ ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
+ if (ret < 0) {
+ rcu_read_unlock();
+ goto done;
+ }
+ rcu_read_unlock();
+
+ /*
+ * Continues from above, so we don't need a KERN_ level
+ */
+ printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
done:
- if (next)
- mem_cgroup_get(next);
- root_mem->last_scanned_child = next;
- if (orig)
- mem_cgroup_put(orig);
- mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex);
- return (next) ? next : root_mem;
+
+ printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
+ res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
+ res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
+ res_counter_read_u64(&memcg->res, RES_FAILCNT));
+ printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
+ "failcnt %llu\n",
+ res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
+ res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
+ res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
}
-static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
+/*
+ * This function returns the number of memcgs under the hierarchy tree.
+ * Returns 1 (self count) if there are no children.
+ */
+static int mem_cgroup_count_children(struct mem_cgroup *mem)
{
- if (do_swap_account) {
- if (res_counter_check_under_limit(&mem->res) &&
- res_counter_check_under_limit(&mem->memsw))
- return true;
- } else
- if (res_counter_check_under_limit(&mem->res))
- return true;
- return false;
+ int num = 0;
+ mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
+ return num;
}
-static unsigned int get_swappiness(struct mem_cgroup *memcg)
+/*
+ * Visit the first child (need not be the first child as per the ordering
+ * of the cgroup list, since we track last_scanned_child) of @mem and use
+ * that to reclaim free pages from.
+ */
+static struct mem_cgroup *
+mem_cgroup_select_victim(struct mem_cgroup *root_mem)
{
- struct cgroup *cgrp = memcg->css.cgroup;
- unsigned int swappiness;
+ struct mem_cgroup *ret = NULL;
+ struct cgroup_subsys_state *css;
+ int nextid, found;
- /* root ? */
- if (cgrp->parent == NULL)
- return vm_swappiness;
+ if (!root_mem->use_hierarchy) {
+ css_get(&root_mem->css);
+ ret = root_mem;
+ }
- spin_lock(&memcg->reclaim_param_lock);
- swappiness = memcg->swappiness;
- spin_unlock(&memcg->reclaim_param_lock);
+ while (!ret) {
+ rcu_read_lock();
+ nextid = root_mem->last_scanned_child + 1;
+ css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
+ &found);
+ if (css && css_tryget(css))
+ ret = container_of(css, struct mem_cgroup, css);
+
+ rcu_read_unlock();
+ /* Updates scanning parameter */
+ spin_lock(&root_mem->reclaim_param_lock);
+ if (!css) {
+ /* this means start scan from ID:1 */
+ root_mem->last_scanned_child = 0;
+ } else
+ root_mem->last_scanned_child = found;
+ spin_unlock(&root_mem->reclaim_param_lock);
+ }
- return swappiness;
+ return ret;
}
/*
- * Dance down the hierarchy if needed to reclaim memory. We remember the
- * last child we reclaimed from, so that we don't end up penalizing
- * one child extensively based on its position in the children list.
+ * Scan the hierarchy if needed to reclaim memory. We remember the last child
+ * we reclaimed from, so that we don't end up penalizing one child extensively
+ * based on its position in the children list.
*
* root_mem is the original ancestor that we've been reclaim from.
+ *
+ * We give up and return to the caller when we visit root_mem twice.
+ * (other groups can be removed while we're walking....)
+ *
+ * If shrink==true, this returns immediately to avoid freeing too much.
*/
static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
- gfp_t gfp_mask, bool noswap)
-{
- struct mem_cgroup *next_mem;
- int ret = 0;
-
- /*
- * Reclaim unconditionally and don't check for return value.
- * We need to reclaim in the current group and down the tree.
- * One might think about checking for children before reclaiming,
- * but there might be left over accounting, even after children
- * have left.
- */
- ret += try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap,
- get_swappiness(root_mem));
- if (mem_cgroup_check_under_limit(root_mem))
- return 1; /* indicate reclaim has succeeded */
- if (!root_mem->use_hierarchy)
- return ret;
-
- next_mem = mem_cgroup_get_next_node(root_mem);
-
- while (next_mem != root_mem) {
- if (mem_cgroup_is_obsolete(next_mem)) {
- next_mem = mem_cgroup_get_next_node(root_mem);
+ gfp_t gfp_mask, bool noswap, bool shrink)
+{
+ struct mem_cgroup *victim;
+ int ret, total = 0;
+ int loop = 0;
+
+ while (loop < 2) {
+ victim = mem_cgroup_select_victim(root_mem);
+ if (victim == root_mem)
+ loop++;
+ if (!mem_cgroup_local_usage(&victim->stat)) {
+ /* this cgroup's local usage == 0 */
+ css_put(&victim->css);
continue;
}
- ret += try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap,
- get_swappiness(next_mem));
+ /* we use swappiness of local cgroup */
+ ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, noswap,
+ get_swappiness(victim));
+ css_put(&victim->css);
+ /*
+ * When shrinking usage, we can't tell here whether we should stop or
+ * reclaim more; that depends on the caller. last_scanned_child
+ * is enough to keep fairness under the tree.
+ */
+ if (shrink)
+ return ret;
+ total += ret;
if (mem_cgroup_check_under_limit(root_mem))
- return 1; /* indicate reclaim has succeeded */
- next_mem = mem_cgroup_get_next_node(root_mem);
+ return 1 + total;
}
- return ret;
+ return total;
}
bool mem_cgroup_oom_called(struct task_struct *task)
@@ -813,6 +885,19 @@ bool mem_cgroup_oom_called(struct task_struct *task)
rcu_read_unlock();
return ret;
}
+
+static int record_last_oom_cb(struct mem_cgroup *mem, void *data)
+{
+ mem->last_oom_jiffies = jiffies;
+ return 0;
+}
+
+static void record_last_oom(struct mem_cgroup *mem)
+{
+ mem_cgroup_walk_tree(mem, NULL, record_last_oom_cb);
+}
+
+
/*
* Unlike exported interface, "oom" parameter is added. if oom==true,
* oom-killer can be invoked.
@@ -875,7 +960,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
goto nomem;
ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
- noswap);
+ noswap, false);
if (ret)
continue;
@@ -895,7 +980,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
mutex_lock(&memcg_tasklist);
mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
mutex_unlock(&memcg_tasklist);
- mem_over_limit->last_oom_jiffies = jiffies;
+ record_last_oom(mem_over_limit);
}
goto nomem;
}
@@ -906,20 +991,55 @@ nomem:
return -ENOMEM;
}
+
+/*
+ * A helper function to get a mem_cgroup from an ID. Must be called under
+ * rcu_read_lock(). The caller must check css_is_removed() or the like if
+ * that is a concern. (Dropping a refcnt from swap can happen against a
+ * removed memcg.)
+ */
+static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
+{
+ struct cgroup_subsys_state *css;
+
+ /* ID 0 is unused ID */
+ if (!id)
+ return NULL;
+ css = css_lookup(&mem_cgroup_subsys, id);
+ if (!css)
+ return NULL;
+ return container_of(css, struct mem_cgroup, css);
+}
+
static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page)
{
struct mem_cgroup *mem;
+ struct page_cgroup *pc;
+ unsigned short id;
swp_entry_t ent;
+ VM_BUG_ON(!PageLocked(page));
+
if (!PageSwapCache(page))
return NULL;
- ent.val = page_private(page);
- mem = lookup_swap_cgroup(ent);
- if (!mem)
- return NULL;
- if (!css_tryget(&mem->css))
- return NULL;
+ pc = lookup_page_cgroup(page);
+ /*
+ * Used bit of swapcache is solid under page lock.
+ */
+ if (PageCgroupUsed(pc)) {
+ mem = pc->mem_cgroup;
+ if (mem && !css_tryget(&mem->css))
+ mem = NULL;
+ } else {
+ ent.val = page_private(page);
+ id = lookup_swap_cgroup(ent);
+ rcu_read_lock();
+ mem = mem_cgroup_lookup(id);
+ if (mem && !css_tryget(&mem->css))
+ mem = NULL;
+ rcu_read_unlock();
+ }
return mem;
}
@@ -1118,6 +1238,10 @@ int mem_cgroup_newpage_charge(struct page *page,
MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
}
+static void
+__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
+ enum charge_type ctype);
+
int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask)
{
@@ -1154,16 +1278,6 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
unlock_page_cgroup(pc);
}
- if (do_swap_account && PageSwapCache(page)) {
- mem = try_get_mem_cgroup_from_swapcache(page);
- if (mem)
- mm = NULL;
- else
- mem = NULL;
- /* SwapCache may be still linked to LRU now. */
- mem_cgroup_lru_del_before_commit_swapcache(page);
- }
-
if (unlikely(!mm && !mem))
mm = &init_mm;
@@ -1171,22 +1285,16 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
return mem_cgroup_charge_common(page, mm, gfp_mask,
MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
- ret = mem_cgroup_charge_common(page, mm, gfp_mask,
- MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
- if (mem)
- css_put(&mem->css);
- if (PageSwapCache(page))
- mem_cgroup_lru_add_after_commit_swapcache(page);
+ /* shmem */
+ if (PageSwapCache(page)) {
+ ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
+ if (!ret)
+ __mem_cgroup_commit_charge_swapin(page, mem,
+ MEM_CGROUP_CHARGE_TYPE_SHMEM);
+ } else
+ ret = mem_cgroup_charge_common(page, mm, gfp_mask,
+ MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
- if (do_swap_account && !ret && PageSwapCache(page)) {
- swp_entry_t ent = {.val = page_private(page)};
- /* avoid double counting */
- mem = swap_cgroup_record(ent, NULL);
- if (mem) {
- res_counter_uncharge(&mem->memsw, PAGE_SIZE);
- mem_cgroup_put(mem);
- }
- }
return ret;
}
@@ -1229,7 +1337,9 @@ charge_cur_mm:
return __mem_cgroup_try_charge(mm, mask, ptr, true);
}
-void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
+static void
+__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
+ enum charge_type ctype)
{
struct page_cgroup *pc;
@@ -1239,7 +1349,7 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
return;
pc = lookup_page_cgroup(page);
mem_cgroup_lru_del_before_commit_swapcache(page);
- __mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
+ __mem_cgroup_commit_charge(ptr, pc, ctype);
mem_cgroup_lru_add_after_commit_swapcache(page);
/*
* Now swap is on-memory. This means this page may be
@@ -1250,18 +1360,32 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
*/
if (do_swap_account && PageSwapCache(page)) {
swp_entry_t ent = {.val = page_private(page)};
+ unsigned short id;
struct mem_cgroup *memcg;
- memcg = swap_cgroup_record(ent, NULL);
+
+ id = swap_cgroup_record(ent, 0);
+ rcu_read_lock();
+ memcg = mem_cgroup_lookup(id);
if (memcg) {
+ /*
+ * This recorded memcg can be an obsolete one. So, avoid
+ * calling css_tryget
+ */
res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
mem_cgroup_put(memcg);
}
-
+ rcu_read_unlock();
}
/* add this page(page_cgroup) to the LRU we want. */
}
+void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
+{
+ __mem_cgroup_commit_charge_swapin(page, ptr,
+ MEM_CGROUP_CHARGE_TYPE_MAPPED);
+}
+
void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
{
if (mem_cgroup_disabled())
@@ -1324,8 +1448,8 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
res_counter_uncharge(&mem->res, PAGE_SIZE);
if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
res_counter_uncharge(&mem->memsw, PAGE_SIZE);
-
mem_cgroup_charge_statistics(mem, pc, false);
+
ClearPageCgroupUsed(pc);
/*
* pc->mem_cgroup is not cleared here. It will be accessed when it's
@@ -1377,7 +1501,7 @@ void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
/* record memcg information */
if (do_swap_account && memcg) {
- swap_cgroup_record(ent, memcg);
+ swap_cgroup_record(ent, css_id(&memcg->css));
mem_cgroup_get(memcg);
}
if (memcg)
@@ -1392,15 +1516,23 @@ void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
void mem_cgroup_uncharge_swap(swp_entry_t ent)
{
struct mem_cgroup *memcg;
+ unsigned short id;
if (!do_swap_account)
return;
- memcg = swap_cgroup_record(ent, NULL);
+ id = swap_cgroup_record(ent, 0);
+ rcu_read_lock();
+ memcg = mem_cgroup_lookup(id);
if (memcg) {
+ /*
+ * We uncharge this because swap is freed.
+ * This memcg can be an obsolete one. We avoid calling css_tryget
+ */
res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
mem_cgroup_put(memcg);
}
+ rcu_read_unlock();
}
#endif
@@ -1508,7 +1640,8 @@ int mem_cgroup_shrink_usage(struct page *page,
return 0;
do {
- progress = mem_cgroup_hierarchical_reclaim(mem, gfp_mask, true);
+ progress = mem_cgroup_hierarchical_reclaim(mem,
+ gfp_mask, true, false);
progress += mem_cgroup_check_under_limit(mem);
} while (!progress && --retry);
@@ -1523,11 +1656,21 @@ static DEFINE_MUTEX(set_limit_mutex);
static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
unsigned long long val)
{
-
- int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
+ int retry_count;
int progress;
u64 memswlimit;
int ret = 0;
+ int children = mem_cgroup_count_children(memcg);
+ u64 curusage, oldusage;
+
+ /*
+ * To keep hierarchical_reclaim simple, how long we should retry
+ * depends on the caller. We set our retry count to be a function
+ * of the number of children we should visit in this loop.
+ */
+ retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
+
+ oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
while (retry_count) {
if (signal_pending(current)) {
@@ -1553,8 +1696,13 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
break;
progress = mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL,
- false);
- if (!progress) retry_count--;
+ false, true);
+ curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
+ /* Usage is reduced ? */
+ if (curusage >= oldusage)
+ retry_count--;
+ else
+ oldusage = curusage;
}
return ret;
@@ -1563,13 +1711,16 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
unsigned long long val)
{
- int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
+ int retry_count;
u64 memlimit, oldusage, curusage;
- int ret;
+ int children = mem_cgroup_count_children(memcg);
+ int ret = -EBUSY;
if (!do_swap_account)
return -EINVAL;
-
+ /* see mem_cgroup_resize_limit */
+ retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
+ oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
while (retry_count) {
if (signal_pending(current)) {
ret = -EINTR;
@@ -1593,11 +1744,13 @@ int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
if (!ret)
break;
- oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
- mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true);
+ mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true, true);
curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
+ /* Usage is reduced ? */
if (curusage >= oldusage)
retry_count--;
+ else
+ oldusage = curusage;
}
return ret;
}
@@ -1893,54 +2046,90 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
return 0;
}
-static const struct mem_cgroup_stat_desc {
- const char *msg;
- u64 unit;
-} mem_cgroup_stat_desc[] = {
- [MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
- [MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
- [MEM_CGROUP_STAT_PGPGIN_COUNT] = {"pgpgin", 1, },
- [MEM_CGROUP_STAT_PGPGOUT_COUNT] = {"pgpgout", 1, },
+
+/* For read statistics */
+enum {
+ MCS_CACHE,
+ MCS_RSS,
+ MCS_PGPGIN,
+ MCS_PGPGOUT,
+ MCS_INACTIVE_ANON,
+ MCS_ACTIVE_ANON,
+ MCS_INACTIVE_FILE,
+ MCS_ACTIVE_FILE,
+ MCS_UNEVICTABLE,
+ NR_MCS_STAT,
+};
+
+struct mcs_total_stat {
+ s64 stat[NR_MCS_STAT];
+};
+
+struct {
+ char *local_name;
+ char *total_name;
+} memcg_stat_strings[NR_MCS_STAT] = {
+ {"cache", "total_cache"},
+ {"rss", "total_rss"},
+ {"pgpgin", "total_pgpgin"},
+ {"pgpgout", "total_pgpgout"},
+ {"inactive_anon", "total_inactive_anon"},
+ {"active_anon", "total_active_anon"},
+ {"inactive_file", "total_inactive_file"},
+ {"active_file", "total_active_file"},
+ {"unevictable", "total_unevictable"}
};
+
+static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
+{
+ struct mcs_total_stat *s = data;
+ s64 val;
+
+ /* per cpu stat */
+ val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_CACHE);
+ s->stat[MCS_CACHE] += val * PAGE_SIZE;
+ val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
+ s->stat[MCS_RSS] += val * PAGE_SIZE;
+ val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT);
+ s->stat[MCS_PGPGIN] += val;
+ val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);
+ s->stat[MCS_PGPGOUT] += val;
+
+ /* per zone stat */
+ val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
+ s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
+ val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON);
+ s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
+ val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE);
+ s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
+ val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE);
+ s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
+ val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
+ s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
+ return 0;
+}
+
+static void
+mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
+{
+ mem_cgroup_walk_tree(mem, s, mem_cgroup_get_local_stat);
+}
+
static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
struct cgroup_map_cb *cb)
{
struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
- struct mem_cgroup_stat *stat = &mem_cont->stat;
+ struct mcs_total_stat mystat;
int i;
- for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
- s64 val;
+ memset(&mystat, 0, sizeof(mystat));
+ mem_cgroup_get_local_stat(mem_cont, &mystat);
- val = mem_cgroup_read_stat(stat, i);
- val *= mem_cgroup_stat_desc[i].unit;
- cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
- }
- /* showing # of active pages */
- {
- unsigned long active_anon, inactive_anon;
- unsigned long active_file, inactive_file;
- unsigned long unevictable;
-
- inactive_anon = mem_cgroup_get_all_zonestat(mem_cont,
- LRU_INACTIVE_ANON);
- active_anon = mem_cgroup_get_all_zonestat(mem_cont,
- LRU_ACTIVE_ANON);
- inactive_file = mem_cgroup_get_all_zonestat(mem_cont,
- LRU_INACTIVE_FILE);
- active_file = mem_cgroup_get_all_zonestat(mem_cont,
- LRU_ACTIVE_FILE);
- unevictable = mem_cgroup_get_all_zonestat(mem_cont,
- LRU_UNEVICTABLE);
-
- cb->fill(cb, "active_anon", (active_anon) * PAGE_SIZE);
- cb->fill(cb, "inactive_anon", (inactive_anon) * PAGE_SIZE);
- cb->fill(cb, "active_file", (active_file) * PAGE_SIZE);
- cb->fill(cb, "inactive_file", (inactive_file) * PAGE_SIZE);
- cb->fill(cb, "unevictable", unevictable * PAGE_SIZE);
+ for (i = 0; i < NR_MCS_STAT; i++)
+ cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
- }
+ /* Hierarchical information */
{
unsigned long long limit, memsw_limit;
memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
@@ -1949,6 +2138,12 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
}
+ memset(&mystat, 0, sizeof(mystat));
+ mem_cgroup_get_total_stat(mem_cont, &mystat);
+ for (i = 0; i < NR_MCS_STAT; i++)
+ cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
+
+
#ifdef CONFIG_DEBUG_VM
cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
@@ -2178,6 +2373,8 @@ static void __mem_cgroup_free(struct mem_cgroup *mem)
{
int node;
+ free_css_id(&mem_cgroup_subsys, &mem->css);
+
for_each_node_state(node, N_POSSIBLE)
free_mem_cgroup_per_zone_info(mem, node);
@@ -2228,11 +2425,12 @@ static struct cgroup_subsys_state * __ref
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
struct mem_cgroup *mem, *parent;
+ long error = -ENOMEM;
int node;
mem = mem_cgroup_alloc();
if (!mem)
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(error);
for_each_node_state(node, N_POSSIBLE)
if (alloc_mem_cgroup_per_zone_info(mem, node))
@@ -2260,7 +2458,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
res_counter_init(&mem->res, NULL);
res_counter_init(&mem->memsw, NULL);
}
- mem->last_scanned_child = NULL;
+ mem->last_scanned_child = 0;
spin_lock_init(&mem->reclaim_param_lock);
if (parent)
@@ -2269,26 +2467,22 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
return &mem->css;
free_out:
__mem_cgroup_free(mem);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(error);
}
-static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
+static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
struct cgroup *cont)
{
struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
- mem_cgroup_force_empty(mem, false);
+
+ return mem_cgroup_force_empty(mem, false);
}
static void mem_cgroup_destroy(struct cgroup_subsys *ss,
struct cgroup *cont)
{
struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
- struct mem_cgroup *last_scanned_child = mem->last_scanned_child;
- if (last_scanned_child) {
- VM_BUG_ON(!mem_cgroup_is_obsolete(last_scanned_child));
- mem_cgroup_put(last_scanned_child);
- }
mem_cgroup_put(mem);
}
@@ -2327,6 +2521,7 @@ struct cgroup_subsys mem_cgroup_subsys = {
.populate = mem_cgroup_populate,
.attach = mem_cgroup_move_task,
.early_init = 0,
+ .use_id = 1,
};
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
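The memcontrol.c changes above replace the pointer-chasing DFS walk with a css-id indexed walk (mem_cgroup_walk_tree() plus an integer last_scanned_child) that invokes a callback on every cgroup in the subtree. Below is a minimal userspace sketch of that id-indexed callback walk, for illustration only: the flat id table, struct group, walk_tree() and count_cb() are simplifications standing in for css_get_next() and the real memcg structures, not the kernel API.

/*
 * Illustrative sketch (not part of the patch): a userspace model of the
 * id-indexed callback walk.  The flat "table" stands in for css_get_next()
 * and struct group for struct mem_cgroup.
 */
#include <stdio.h>

#define MAX_ID 8

struct group { int id; };

static struct group *table[MAX_ID];	/* css-id -> group, NULL if unused */

/*
 * Visit every registered group in id order, like mem_cgroup_walk_tree():
 * stop early as soon as the callback returns non-zero.
 */
static int walk_tree(void *data, int (*func)(struct group *, void *))
{
	int nextid = 1;			/* id 0 is unused, as in the patch */

	for (;;) {
		struct group *g = NULL;
		int found = 0;

		for (int i = nextid; i < MAX_ID; i++) {	/* css_get_next() analogue */
			if (table[i]) {
				g = table[i];
				found = i;
				break;
			}
		}
		if (!g)
			return 0;	/* walked past the last id */

		int ret = func(g, data);
		if (ret)
			return ret;
		nextid = found + 1;
	}
}

/* Counting callback, like mem_cgroup_count_children_cb(). */
static int count_cb(struct group *g, void *data)
{
	(void)g;
	(*(int *)data)++;
	return 0;
}

int main(void)
{
	struct group a = { 1 }, b = { 3 }, c = { 6 };
	int num = 0;

	table[1] = &a;
	table[3] = &b;
	table[6] = &c;

	walk_tree(&num, count_cb);
	printf("groups under tree: %d\n", num);	/* prints 3 */
	return 0;
}

The early exit on a non-zero callback return mirrors how the kernel walk stops as soon as a callback reports a result, while resuming from nextid keeps the scan fair across the subtree.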
diff --git a/mm/mmap.c b/mm/mmap.c
index 1abb9185a68..4a3841186c1 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2481,7 +2481,4 @@ void mm_drop_all_locks(struct mm_struct *mm)
*/
void __init mmap_init(void)
{
- vm_area_cachep = kmem_cache_create("vm_area_struct",
- sizeof(struct vm_area_struct), 0,
- SLAB_PANIC, NULL);
}
diff --git a/mm/nommu.c b/mm/nommu.c
index 2fcf47d449b..72eda4aee2c 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -69,7 +69,7 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
int sysctl_nr_trim_pages = 1; /* page trimming behaviour */
int heap_stack_gap = 0;
-atomic_t mmap_pages_allocated;
+atomic_long_t mmap_pages_allocated;
EXPORT_SYMBOL(mem_map);
EXPORT_SYMBOL(num_physpages);
@@ -463,12 +463,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
*/
void __init mmap_init(void)
{
- vm_region_jar = kmem_cache_create("vm_region_jar",
- sizeof(struct vm_region), 0,
- SLAB_PANIC, NULL);
- vm_area_cachep = kmem_cache_create("vm_area_struct",
- sizeof(struct vm_area_struct), 0,
- SLAB_PANIC, NULL);
+ vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC);
}
/*
@@ -486,27 +481,24 @@ static noinline void validate_nommu_regions(void)
return;
last = rb_entry(lastp, struct vm_region, vm_rb);
- if (unlikely(last->vm_end <= last->vm_start))
- BUG();
- if (unlikely(last->vm_top < last->vm_end))
- BUG();
+ BUG_ON(unlikely(last->vm_end <= last->vm_start));
+ BUG_ON(unlikely(last->vm_top < last->vm_end));
while ((p = rb_next(lastp))) {
region = rb_entry(p, struct vm_region, vm_rb);
last = rb_entry(lastp, struct vm_region, vm_rb);
- if (unlikely(region->vm_end <= region->vm_start))
- BUG();
- if (unlikely(region->vm_top < region->vm_end))
- BUG();
- if (unlikely(region->vm_start < last->vm_top))
- BUG();
+ BUG_ON(unlikely(region->vm_end <= region->vm_start));
+ BUG_ON(unlikely(region->vm_top < region->vm_end));
+ BUG_ON(unlikely(region->vm_start < last->vm_top));
lastp = p;
}
}
#else
-#define validate_nommu_regions() do {} while(0)
+static void validate_nommu_regions(void)
+{
+}
#endif
/*
@@ -563,16 +555,17 @@ static void free_page_series(unsigned long from, unsigned long to)
struct page *page = virt_to_page(from);
kdebug("- free %lx", from);
- atomic_dec(&mmap_pages_allocated);
+ atomic_long_dec(&mmap_pages_allocated);
if (page_count(page) != 1)
- kdebug("free page %p [%d]", page, page_count(page));
+ kdebug("free page %p: refcount not one: %d",
+ page, page_count(page));
put_page(page);
}
}
/*
* release a reference to a region
- * - the caller must hold the region semaphore, which this releases
+ * - the caller must hold the region semaphore for writing, which this releases
* - the region may not have been added to the tree yet, in which case vm_top
* will equal vm_start
*/
@@ -1096,7 +1089,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
goto enomem;
total = 1 << order;
- atomic_add(total, &mmap_pages_allocated);
+ atomic_long_add(total, &mmap_pages_allocated);
point = rlen >> PAGE_SHIFT;
@@ -1107,7 +1100,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
order = ilog2(total - point);
n = 1 << order;
kdebug("shave %lu/%lu @%lu", n, total - point, total);
- atomic_sub(n, &mmap_pages_allocated);
+ atomic_long_sub(n, &mmap_pages_allocated);
total -= n;
set_page_refcounted(pages + total);
__free_pages(pages + total, order);
@@ -1536,10 +1529,15 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
/* find the first potentially overlapping VMA */
vma = find_vma(mm, start);
if (!vma) {
- printk(KERN_WARNING
- "munmap of memory not mmapped by process %d (%s):"
- " 0x%lx-0x%lx\n",
- current->pid, current->comm, start, start + len - 1);
+ static int limit = 0;
+ if (limit < 5) {
+ printk(KERN_WARNING
+ "munmap of memory not mmapped by process %d"
+ " (%s): 0x%lx-0x%lx\n",
+ current->pid, current->comm,
+ start, start + len - 1);
+ limit++;
+ }
return -EINVAL;
}
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index d3b9bac085b..2f3166e308d 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -394,6 +394,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
cpuset_print_task_mems_allowed(current);
task_unlock(current);
dump_stack();
+ mem_cgroup_print_oom_info(mem, current);
show_mem();
if (sysctl_oom_dump_tasks)
dump_tasks(mem);
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index ceecfbb143f..791905c991d 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -285,12 +285,8 @@ struct swap_cgroup_ctrl {
struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];
-/*
- * This 8bytes seems big..maybe we can reduce this when we can use "id" for
- * cgroup rather than pointer.
- */
struct swap_cgroup {
- struct mem_cgroup *val;
+ unsigned short id;
};
#define SC_PER_PAGE (PAGE_SIZE/sizeof(struct swap_cgroup))
#define SC_POS_MASK (SC_PER_PAGE - 1)
@@ -342,10 +338,10 @@ not_enough_page:
* @ent: swap entry to be recorded into
* @mem: mem_cgroup to be recorded
*
- * Returns old value at success, NULL at failure.
- * (Of course, old value can be NULL.)
+ * Returns the old value on success, 0 on failure.
+ * (Of course, the old value can be 0.)
*/
-struct mem_cgroup *swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem)
+unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
int type = swp_type(ent);
unsigned long offset = swp_offset(ent);
@@ -354,18 +350,18 @@ struct mem_cgroup *swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem)
struct swap_cgroup_ctrl *ctrl;
struct page *mappage;
struct swap_cgroup *sc;
- struct mem_cgroup *old;
+ unsigned short old;
if (!do_swap_account)
- return NULL;
+ return 0;
ctrl = &swap_cgroup_ctrl[type];
mappage = ctrl->map[idx];
sc = page_address(mappage);
sc += pos;
- old = sc->val;
- sc->val = mem;
+ old = sc->id;
+ sc->id = id;
return old;
}
@@ -374,9 +370,9 @@ struct mem_cgroup *swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem)
* lookup_swap_cgroup - lookup mem_cgroup tied to swap entry
* @ent: swap entry to be looked up.
*
- * Returns pointer to mem_cgroup at success. NULL at failure.
+ * Returns the CSS ID of the mem_cgroup on success, 0 on failure. (0 is an invalid ID.)
*/
-struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent)
+unsigned short lookup_swap_cgroup(swp_entry_t ent)
{
int type = swp_type(ent);
unsigned long offset = swp_offset(ent);
@@ -385,16 +381,16 @@ struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent)
struct swap_cgroup_ctrl *ctrl;
struct page *mappage;
struct swap_cgroup *sc;
- struct mem_cgroup *ret;
+ unsigned short ret;
if (!do_swap_account)
- return NULL;
+ return 0;
ctrl = &swap_cgroup_ctrl[type];
mappage = ctrl->map[idx];
sc = page_address(mappage);
sc += pos;
- ret = sc->val;
+ ret = sc->id;
return ret;
}
@@ -430,13 +426,6 @@ int swap_cgroup_swapon(int type, unsigned long max_pages)
}
mutex_unlock(&swap_cgroup_mutex);
- printk(KERN_INFO
- "swap_cgroup: uses %ld bytes of vmalloc for pointer array space"
- " and %ld bytes to hold mem_cgroup pointers on swap\n",
- array_size, length * PAGE_SIZE);
- printk(KERN_INFO
- "swap_cgroup can be disabled by noswapaccount boot option.\n");
-
return 0;
nomem:
printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n");
diff --git a/mm/slab.c b/mm/slab.c
index 825c606f691..208323fd37b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3992,8 +3992,7 @@ static void cache_reap(struct work_struct *w)
struct kmem_cache *searchp;
struct kmem_list3 *l3;
int node = numa_node_id();
- struct delayed_work *work =
- container_of(w, struct delayed_work, work);
+ struct delayed_work *work = to_delayed_work(w);
if (!mutex_trylock(&cache_chain_mutex))
/* Give up. Setup the next iteration. */
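The slab.c change swaps an open-coded container_of() for the to_delayed_work() helper. As a sketch of the underlying pattern, here is a self-contained userspace model of container_of() recovering the enclosing delayed_work from its embedded work_struct; the struct layouts are simplified stand-ins, not the kernel definitions.

/*
 * Illustrative sketch (not part of the patch): container_of() recovers the
 * outer structure from a pointer to one of its members, which is all that
 * to_delayed_work() wraps.
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };

struct delayed_work {
	struct work_struct work;	/* embedded member handed to callbacks */
	long timer_expires;
};

static struct delayed_work *to_delayed_work(struct work_struct *w)
{
	return container_of(w, struct delayed_work, work);
}

int main(void)
{
	struct delayed_work dw = { .work = { .pending = 1 }, .timer_expires = 100 };
	struct work_struct *w = &dw.work;	/* what a work callback receives */

	printf("expires=%ld\n", to_delayed_work(w)->timer_expires);	/* 100 */
	return 0;
}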
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 9826766f127..66f6130976c 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -891,7 +891,7 @@ static void vmstat_update(struct work_struct *w)
{
refresh_cpu_vm_stats(smp_processor_id());
schedule_delayed_work(&__get_cpu_var(vmstat_work),
- sysctl_stat_interval);
+ round_jiffies_relative(sysctl_stat_interval));
}
static void __cpuinit start_cpu_timer(int cpu)
@@ -899,7 +899,8 @@ static void __cpuinit start_cpu_timer(int cpu)
struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);
INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
- schedule_delayed_work_on(cpu, vmstat_work, HZ + cpu);
+ schedule_delayed_work_on(cpu, vmstat_work,
+ __round_jiffies_relative(HZ, cpu));
}
/*