Diffstat (limited to 'drivers/staging/android/ion/ion_priv.h')
-rw-r--r--  drivers/staging/android/ion/ion_priv.h | 124
1 file changed, 101 insertions(+), 23 deletions(-)
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index fc2e4fccf69d..385cb4caea97 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -26,6 +26,10 @@
#include <linux/sched.h>
#include <linux/shrinker.h>
#include <linux/types.h>
+#include <linux/device.h>
+#ifdef CONFIG_ION_POOL_CACHE_POLICY
+#include <asm/cacheflush.h>
+#endif
#include "ion.h"
@@ -38,6 +42,7 @@ struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
* @dev: back pointer to the ion_device
* @heap: back pointer to the heap the buffer came from
* @flags: buffer specific flags
+ * @private_flags: internal buffer specific flags
* @size: size of the buffer
* @priv_virt: private data to the buffer representable as
* a void *
@@ -66,6 +71,7 @@ struct ion_buffer {
struct ion_device *dev;
struct ion_heap *heap;
unsigned long flags;
+ unsigned long private_flags;
size_t size;
union {
void *priv_virt;
@@ -98,22 +104,27 @@ void ion_buffer_destroy(struct ion_buffer *buffer);
* @map_user map memory to userspace
*
* allocate, phys, and map_user return 0 on success, -errno on error.
- * map_dma and map_kernel return pointer on success, ERR_PTR on error.
+ * map_dma and map_kernel return pointer on success, ERR_PTR on
+ * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
+ * the buffer's private_flags when called from a shrinker. In that
 * case, the pages being freed must be truly freed back to the
+ * system, not put in a page pool or otherwise cached.
*/
struct ion_heap_ops {
- int (*allocate) (struct ion_heap *heap,
- struct ion_buffer *buffer, unsigned long len,
- unsigned long align, unsigned long flags);
- void (*free) (struct ion_buffer *buffer);
- int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
- ion_phys_addr_t *addr, size_t *len);
- struct sg_table *(*map_dma) (struct ion_heap *heap,
- struct ion_buffer *buffer);
- void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
- void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
- void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
- int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
- struct vm_area_struct *vma);
+ int (*allocate)(struct ion_heap *heap,
+ struct ion_buffer *buffer, unsigned long len,
+ unsigned long align, unsigned long flags);
+ void (*free)(struct ion_buffer *buffer);
+ int (*phys)(struct ion_heap *heap, struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len);
+ struct sg_table * (*map_dma)(struct ion_heap *heap,
+ struct ion_buffer *buffer);
+ void (*unmap_dma)(struct ion_heap *heap, struct ion_buffer *buffer);
+ void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
+ void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
+ int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
+ struct vm_area_struct *vma);
+ int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
};
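As a hedged sketch of what this contract means for a heap's ->free op
(assuming, for illustration only, a heap with a single page pool held in a
static variable and the backing page kept in buffer->priv_virt; neither
detail is part of this patch):

static struct ion_page_pool *example_pool;

static void example_heap_free(struct ion_buffer *buffer)
{
	struct page *page = buffer->priv_virt;

	if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)
		/* shrinker path: give the page back to the system for real */
		ion_page_pool_free_immediate(example_pool, page);
	else
		/* normal path: recycle the page through the pool */
		ion_page_pool_free(example_pool, page);
}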
/**
@@ -122,6 +133,17 @@ struct ion_heap_ops {
#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)
/**
+ * private flags - flags internal to ion
+ */
+/*
+ * Buffer is being freed from a shrinker function. Skip any possible
+ * heap-specific caching mechanism (e.g. page pools). Guarantees that
+ * any buffer storage that came from the system allocator will be
+ * returned to the system allocator.
+ */
+#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0)
+
+/**
* struct ion_heap - represents a heap in the system
* @node: rb node to put the heap on the device's tree of heaps
* @dev: back pointer to the ion_device
@@ -132,10 +154,7 @@ struct ion_heap_ops {
* allocating. These are specified by platform data and
* MUST be unique
* @name: used for debugging
- * @shrinker: a shrinker for the heap, if the heap caches system
- * memory, it must define a shrinker to return it on low
- * memory conditions, this includes system memory cached
- * in the deferred free lists for heaps that support it
+ * @shrinker: a shrinker for the heap
* @free_list: free list head if deferred free is used
* @free_list_size size of the deferred free list in bytes
* @lock: protects the free list
@@ -219,6 +238,16 @@ int ion_heap_buffer_zero(struct ion_buffer *buffer);
int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
/**
+ * ion_heap_init_shrinker
+ * @heap: the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op,
+ * this function will be called to set up a shrinker that shrinks the
+ * freelists and calls the heap's shrink op.
+ */
+void ion_heap_init_shrinker(struct ion_heap *heap);
+
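A minimal sketch of the expected call site (presumably the ion core's heap
registration path; example_register_heap is an illustrative name, not part
of this patch):

static void example_register_heap(struct ion_device *dev,
				  struct ion_heap *heap)
{
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE || heap->ops->shrink)
		ion_heap_init_shrinker(heap);
	/* remainder of registration elided */
}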
+/**
* ion_heap_init_deferred_free -- initialize deferred free functionality
* @heap: the heap
*
@@ -250,6 +279,29 @@ void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
/**
+ * ion_heap_freelist_shrink - drain the deferred free list, skipping any
+ * heap-specific pooling or caching mechanisms
+ *
+ * @heap: the heap
+ * @size: amount of memory to drain in bytes
+ *
+ * Drains the indicated amount of memory from the deferred freelist
+ * immediately. Returns the total amount freed, which may be higher than
+ * the requested size depending on the size of the items on the list, or
+ * lower if there is insufficient total memory on the freelist.
+ *
+ * Unlike with @ion_heap_freelist_drain, this does not put any pages back
+ * into page pools or otherwise cache them; everything must be genuinely
+ * freed back to the system. If you are freeing from a shrinker, you
+ * probably want this. Note that it relies on the heap's ops.free callback
+ * honoring the ION_PRIV_FLAG_SHRINKER_FREE flag.
+ */
+size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size);
+
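A hedged sketch of a ->shrink op built on this helper (the example_shrink
name and the page-based accounting are assumptions, not part of this
patch):

static int example_shrink(struct ion_heap *heap, gfp_t gfp_mask,
			  int nr_to_scan)
{
	size_t freed;

	/* nr_to_scan == 0 is assumed to mean "just report the count" */
	if (!nr_to_scan)
		return ion_heap_freelist_size(heap) / PAGE_SIZE;

	/*
	 * Buffers drained here reach ->free with
	 * ION_PRIV_FLAG_SHRINKER_FREE set, bypassing any pools.
	 */
	freed = ion_heap_freelist_shrink(heap, nr_to_scan * PAGE_SIZE);
	return freed / PAGE_SIZE;
}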
+/**
* ion_heap_freelist_size - returns the size of the freelist in bytes
* @heap: the heap
*/
@@ -305,13 +357,8 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
* @low_count: number of lowmem items in the pool
* @high_items: list of highmem items
* @low_items: list of lowmem items
- * @shrinker: a shrinker for the items
* @mutex: lock protecting this struct and especially the count
* item list
- * @alloc: function to be used to allocate pageory when the pool
- * is empty
- * @free: function to be used to free pageory back to the system
- * when the shrinker fires
* @gfp_mask: gfp_mask to use from alloc
* @order: order of pages in the pool
* @list: plist node for list of pools
@@ -336,6 +383,37 @@ struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
void ion_page_pool_destroy(struct ion_page_pool *);
void *ion_page_pool_alloc(struct ion_page_pool *);
void ion_page_pool_free(struct ion_page_pool *, struct page *);
+void ion_page_pool_free_immediate(struct ion_page_pool *, struct page *);
+
+#ifdef CONFIG_ION_POOL_CACHE_POLICY
+static inline void ion_page_pool_alloc_set_cache_policy(struct ion_page_pool *pool,
+							 struct page *page)
+{
+	void *va = page_address(page);
+
+	if (va)
+		set_memory_wc((unsigned long)va, 1 << pool->order);
+}
+
+static inline void ion_page_pool_free_set_cache_policy(struct ion_page_pool *pool,
+							struct page *page)
+{
+	void *va = page_address(page);
+
+	if (va)
+		set_memory_wb((unsigned long)va, 1 << pool->order);
+}
+#else
+static inline void ion_page_pool_alloc_set_cache_policy(struct ion_page_pool *pool,
+							 struct page *page)
+{
+}
+
+static inline void ion_page_pool_free_set_cache_policy(struct ion_page_pool *pool,
+							struct page *page)
+{
+}
+#endif
+
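A hedged sketch of how these helpers might pair up inside a pool's
internals (the example_* names and the exact call sites are assumptions;
the idea is that pages are write-combined while owned by ion and restored
to write-back before returning to the system):

static struct page *example_pool_alloc_pages(struct ion_page_pool *pool)
{
	struct page *page = alloc_pages(pool->gfp_mask, pool->order);

	if (page)
		ion_page_pool_alloc_set_cache_policy(pool, page); /* -> WC */
	return page;
}

static void example_pool_free_pages(struct ion_page_pool *pool,
				    struct page *page)
{
	ion_page_pool_free_set_cache_policy(pool, page); /* back to WB */
	__free_pages(page, pool->order);
}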
/**
 * ion_page_pool_shrink - shrinks the size of the memory cached in the pool
* @pool: the pool