Diffstat (limited to 'drivers/staging/android/ashmem.c')
 drivers/staging/android/ashmem.c | 44 +++++++++++++++++++++++++++++---------------
 1 file changed, 29 insertions(+), 15 deletions(-)
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 21a3f7250531..8e76ddca0999 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -341,27 +341,26 @@ out:
/*
* ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
*
- * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how
- * many objects (pages) we have in total.
+ * 'nr_to_scan' is the number of objects to scan for freeing.
*
* 'gfp_mask' is the mask of the allocation that got us into this mess.
*
- * Return value is the number of objects (pages) remaining, or -1 if we cannot
+ * Return value is the number of objects freed or -1 if we cannot
* proceed without risk of deadlock (due to gfp_mask).
*
* We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
* chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
* pages freed.
*/
-static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
+static unsigned long
+ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
struct ashmem_range *range, *next;
+ unsigned long freed = 0;
/* We might recurse into filesystem code, so bail out if necessary */
- if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
- return -1;
- if (!sc->nr_to_scan)
- return lru_count;
+ if (!(sc->gfp_mask & __GFP_FS))
+ return SHRINK_STOP;
mutex_lock(&ashmem_mutex);
list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
@@ -374,17 +373,32 @@ static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
range->purged = ASHMEM_WAS_PURGED;
lru_del(range);
- sc->nr_to_scan -= range_size(range);
- if (sc->nr_to_scan <= 0)
+ freed += range_size(range);
+ if (--sc->nr_to_scan <= 0)
break;
}
mutex_unlock(&ashmem_mutex);
+ return freed;
+}
+static unsigned long
+ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ /*
+	 * note that lru_count is a count of pages on the lru, not a count of
+ * objects on the list. This means the scan function needs to return the
+ * number of pages freed, not the number of objects scanned.
+ */
return lru_count;
}
static struct shrinker ashmem_shrinker = {
- .shrink = ashmem_shrink,
+ .count_objects = ashmem_shrink_count,
+ .scan_objects = ashmem_shrink_scan,
+ /*
+	 * XXX (dchinner): I wish people would comment on why they need
+ * significant changes to the default value here
+ */
.seeks = DEFAULT_SEEKS * 4,
};
@@ -690,11 +704,11 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (capable(CAP_SYS_ADMIN)) {
struct shrink_control sc = {
.gfp_mask = GFP_KERNEL,
- .nr_to_scan = 0,
+ .nr_to_scan = LONG_MAX,
};
- ret = ashmem_shrink(&ashmem_shrinker, &sc);
- sc.nr_to_scan = ret;
- ashmem_shrink(&ashmem_shrinker, &sc);
+
+ nodes_setall(sc.nodes_to_scan);
+ ashmem_shrink_scan(&ashmem_shrinker, &sc);
}
break;
}
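
For context, the hunks above move ashmem from the old single .shrink callback to the split shrinker API: count_objects() returns a cheap estimate of how many objects (here, pages) could be reclaimed, and scan_objects() performs the reclaim and returns how many it actually freed, or SHRINK_STOP when proceeding would risk deadlock. A minimal sketch of that interface, under the assumption of a driver with its own simple page counter, might look like this:

	/*
	 * Illustrative sketch only: a driver-private shrinker wired to the same
	 * count/scan interface the patch above converts ashmem to.  The my_*
	 * names and the bare page counter are hypothetical stand-ins, not code
	 * from ashmem.
	 */
	#include <linux/shrinker.h>
	#include <linux/gfp.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(my_lock);
	static unsigned long my_lru_pages;	/* pages currently reclaimable */

	static unsigned long my_shrink_count(struct shrinker *shrink,
					     struct shrink_control *sc)
	{
		/* Cheap estimate of how much my_shrink_scan() could free. */
		return my_lru_pages;
	}

	static unsigned long my_shrink_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
	{
		unsigned long freed = 0;

		/* As in ashmem: don't risk recursing into filesystem code. */
		if (!(sc->gfp_mask & __GFP_FS))
			return SHRINK_STOP;

		spin_lock(&my_lock);
		while (freed < sc->nr_to_scan && my_lru_pages) {
			my_lru_pages--;		/* drop one page-sized object */
			freed++;
		}
		spin_unlock(&my_lock);

		/* Report pages actually freed, not the number of objects scanned. */
		return freed;
	}

	static struct shrinker my_shrinker = {
		.count_objects	= my_shrink_count,
		.scan_objects	= my_shrink_scan,
		.seeks		= DEFAULT_SEEKS,
	};

Registering such a shrinker with register_shrinker() hooks it into shrink_slab(); the registration call's signature has changed across kernel releases, so the exact form is version-dependent. For a manual purge-everything path such as ASHMEM_PURGE_ALL_CACHES in the last hunk, the scan callback can also be driven directly with nr_to_scan set to LONG_MAX.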