blob: 439d1a17d3b1c7b5c1a3071533cdd1e500a2db33 [file] [log] [blame]
Jerome Glisse249d6042009-04-08 17:11:16 +02001/**************************************************************************
2 *
3 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 *
27 **************************************************************************/
28/*
29 * Authors:
30 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
31 */
32
33#ifndef _DRM_MM_H_
34#define _DRM_MM_H_
35
36/*
37 * Generic range manager structs
38 */
David Herrmann86e81f0e2013-07-25 18:02:31 +020039#include <linux/bug.h>
40#include <linux/kernel.h>
Jerome Glisse249d6042009-04-08 17:11:16 +020041#include <linux/list.h>
David Herrmann86e81f0e2013-07-25 18:02:31 +020042#include <linux/spinlock.h>
Dave Airlief1938cd2009-09-08 11:32:08 +100043#ifdef CONFIG_DEBUG_FS
44#include <linux/seq_file.h>
45#endif
Jerome Glisse249d6042009-04-08 17:11:16 +020046
/*
 * Flags controlling how the free-hole search picks a hole for a new node.
 */
enum drm_mm_search_flags {
	DRM_MM_SEARCH_DEFAULT =		0,	/* take the first hole that fits */
	DRM_MM_SEARCH_BEST =		1 << 0,	/* take the smallest hole that fits */
};
51
/*
 * One block of address space managed by a drm_mm allocator.  The node is
 * embedded in the caller's own structure (or handed out by the legacy
 * get_block interfaces) and linked into the manager's lists.
 */
struct drm_mm_node {
	struct list_head node_list;	/* all nodes, in increasing start order */
	struct list_head hole_stack;	/* linked here iff a free hole follows */
	unsigned hole_follows : 1;	/* set when free space follows this node */
	unsigned scanned_block : 1;	/* eviction-scan bookkeeping flags */
	unsigned scanned_prev_free : 1;
	unsigned scanned_next_free : 1;
	unsigned scanned_preceeds_hole : 1;
	unsigned allocated : 1;		/* node currently owns a range (see drm_mm_node_allocated) */
	unsigned long color;		/* opaque tag passed to mm->color_adjust */
	unsigned long start;		/* first address of the managed range */
	unsigned long size;		/* length of the managed range */
	struct drm_mm *mm;		/* backpointer to the owning manager */
};
66
/*
 * Range allocator instance.  Tracks the allocated nodes and the holes
 * between them, plus the state of an (optional) eviction scan.
 */
struct drm_mm {
	/* List of all memory nodes that immediately precede a free hole. */
	struct list_head hole_stack;
	/* head_node.node_list is the list of all memory nodes, ordered
	 * according to the (increasing) start address of the memory node. */
	struct drm_mm_node head_node;
	/* NOTE(review): presumably a cache of pre-allocated nodes for the
	 * atomic get_block paths (filled by drm_mm_pre_get) — confirm in
	 * drm_mm.c. */
	struct list_head unused_nodes;
	int num_unused;
	spinlock_t unused_lock;
	/* Eviction-scan state, set up by drm_mm_init_scan{,_with_range}(). */
	unsigned int scan_check_range : 1;
	unsigned scan_alignment;
	unsigned long scan_color;
	unsigned long scan_size;
	unsigned long scan_hit_start;
	unsigned long scan_hit_end;
	unsigned scanned_blocks;
	unsigned long scan_start;
	unsigned long scan_end;
	struct drm_mm_node *prev_scanned_node;

	/* Optional hook letting the driver shrink a candidate hole based on
	 * the node's color (e.g. to enforce placement constraints). */
	void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
			     unsigned long *start, unsigned long *end);
};
90
Daniel Vetterb0b7af12011-02-18 17:59:14 +010091static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
92{
93 return node->allocated;
94}
95
Daniel Vetter31a5b8c2011-02-18 17:59:11 +010096static inline bool drm_mm_initialized(struct drm_mm *mm)
97{
Daniel Vetterea7b1dd2011-02-18 17:59:12 +010098 return mm->hole_stack.next;
Daniel Vetter31a5b8c2011-02-18 17:59:11 +010099}
Chris Wilson9e8944a2012-11-15 11:32:17 +0000100
101static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
102{
103 return hole_node->start + hole_node->size;
104}
105
106static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
107{
108 BUG_ON(!hole_node->hole_follows);
109 return __drm_mm_hole_node_start(hole_node);
110}
111
112static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
113{
114 return list_entry(hole_node->node_list.next,
115 struct drm_mm_node, node_list)->start;
116}
117
/*
 * drm_mm_hole_node_end - end address of the hole following @hole_node.
 *
 * NOTE(review): unlike drm_mm_hole_node_start() this does not BUG_ON a
 * missing hole — presumably intentional for the for_each_hole iterator;
 * confirm before adding a check.
 */
static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	return __drm_mm_hole_node_end(hole_node);
}
122
/* Iterate over every allocated node in @mm, in increasing address order. */
#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
						&(mm)->head_node.node_list, \
						node_list)
/*
 * Walk the nodes fed to an eviction scan, starting from the most recently
 * scanned node and following node_list.  @n is a (struct drm_mm_node *)
 * cursor caching the next element, so @entry may be unlinked inside the
 * loop body.
 *
 * Fix: the original expansion ignored the @n parameter and referenced a
 * caller-side variable literally named 'next' (an unhygienic capture).
 * Using the parameter is backward compatible — existing callers pass a
 * variable called 'next'.
 */
#define drm_mm_for_each_scanned_node_reverse(entry, n, mm) \
	for (entry = (mm)->prev_scanned_node, \
		n = entry ? list_entry(entry->node_list.next, \
			struct drm_mm_node, node_list) : NULL; \
	     entry != NULL; entry = n, \
		n = entry ? list_entry(entry->node_list.next, \
			struct drm_mm_node, node_list) : NULL)
Chris Wilson9e8944a2012-11-15 11:32:17 +0000133
/* Iterate over all free holes in @mm; on each iteration @entry is the node
 * preceding the hole and @hole_start/@hole_end are set to its bounds.
 *
 * Note that we need to unroll list_for_each_entry in order to inline
 * setting hole_start and hole_end on each iteration and keep the
 * macro sane.
 */
#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
	for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
	     &entry->hole_stack != &(mm)->hole_stack ? \
	     hole_start = drm_mm_hole_node_start(entry), \
	     hole_end = drm_mm_hole_node_end(entry), \
	     1 : 0; \
	     entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack))
145
/*
 * Basic range manager support (drm_mm.c)
 */
/* Insert @node at the start/size it already carries, if that range is free. */
extern int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
/* Legacy interface: carve a block out of the hole following @node.
 * @atomic selects the no-sleep allocation path. */
extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
						    unsigned long size,
						    unsigned alignment,
						    unsigned long color,
						    int atomic);
/* As above, but the resulting block must lie inside [start, end). */
extern struct drm_mm_node *drm_mm_get_block_range_generic(
						struct drm_mm_node *node,
						unsigned long size,
						unsigned alignment,
						unsigned long color,
						unsigned long start,
						unsigned long end,
						int atomic);
Ben Widawskyb3a070c2013-07-05 14:41:02 -0700163
/*
 * drm_mm_get_block - non-atomic block allocation with default color (0)
 * and no range restriction.  Thin wrapper around
 * drm_mm_get_block_generic().
 */
static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
						   unsigned long size,
						   unsigned alignment)
{
	struct drm_mm_node *block;

	block = drm_mm_get_block_generic(parent, size, alignment, 0, 0);
	return block;
}
/*
 * drm_mm_get_block_atomic - like drm_mm_get_block(), but uses the atomic
 * (no-sleep) allocation path of drm_mm_get_block_generic().
 */
static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
							  unsigned long size,
							  unsigned alignment)
{
	struct drm_mm_node *block;

	block = drm_mm_get_block_generic(parent, size, alignment, 0, 1);
	return block;
}
/*
 * drm_mm_get_block_range - non-atomic block allocation restricted to
 * [start, end), with default color (0).
 */
static inline struct drm_mm_node *drm_mm_get_block_range(
						struct drm_mm_node *parent,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end)
{
	struct drm_mm_node *block;

	block = drm_mm_get_block_range_generic(parent, size, alignment, 0,
					       start, end, 0);
	return block;
}
/*
 * drm_mm_get_block_atomic_range - atomic (no-sleep) block allocation
 * restricted to [start, end), with default color (0).
 */
static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
						struct drm_mm_node *parent,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end)
{
	struct drm_mm_node *block;

	block = drm_mm_get_block_range_generic(parent, size, alignment, 0,
					       start, end, 1);
	return block;
}
Chris Wilsonb8103452012-12-07 20:37:06 +0000196
/* Search for a suitable hole (per @flags) and insert the caller-provided
 * @node there.  Preferred over the legacy get_block interfaces. */
extern int drm_mm_insert_node_generic(struct drm_mm *mm,
				      struct drm_mm_node *node,
				      unsigned long size,
				      unsigned alignment,
				      unsigned long color,
				      enum drm_mm_search_flags flags);
203static inline int drm_mm_insert_node(struct drm_mm *mm,
204 struct drm_mm_node *node,
205 unsigned long size,
206 unsigned alignment,
207 enum drm_mm_search_flags flags)
208{
209 return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags);
210}
211
/* Like drm_mm_insert_node_generic(), but the node must be placed inside
 * the [start, end) range. */
extern int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
					       struct drm_mm_node *node,
					       unsigned long size,
					       unsigned alignment,
					       unsigned long color,
					       unsigned long start,
					       unsigned long end,
					       enum drm_mm_search_flags flags);
220static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
221 struct drm_mm_node *node,
222 unsigned long size,
223 unsigned alignment,
224 unsigned long start,
225 unsigned long end,
226 enum drm_mm_search_flags flags)
227{
228 return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
229 0, start, end, flags);
230}
231
/* Free a block obtained through the legacy get_block interfaces. */
extern void drm_mm_put_block(struct drm_mm_node *cur);
/* Remove a caller-owned node from its allocator (counterpart to the
 * insert/reserve interfaces; the node memory stays with the caller). */
extern void drm_mm_remove_node(struct drm_mm_node *node);
/* Atomically transfer @old's range to @new. */
extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
/* Find a hole able to fit a size/alignment/color request, per @flags. */
extern struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      unsigned long size,
						      unsigned alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags);
/* As above, but only holes intersecting [start, end) are considered. */
extern struct drm_mm_node *drm_mm_search_free_in_range_generic(
						const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long color,
						unsigned long start,
						unsigned long end,
						enum drm_mm_search_flags flags);
Chris Wilson6b9d89b2012-07-10 11:15:23 +0100248static inline struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
249 unsigned long size,
250 unsigned alignment,
David Herrmann31e5d7c2013-07-27 13:36:27 +0200251 enum drm_mm_search_flags flags)
Chris Wilson6b9d89b2012-07-10 11:15:23 +0100252{
David Herrmann31e5d7c2013-07-27 13:36:27 +0200253 return drm_mm_search_free_generic(mm,size, alignment, 0, flags);
Chris Wilson6b9d89b2012-07-10 11:15:23 +0100254}
255static inline struct drm_mm_node *drm_mm_search_free_in_range(
Jerome Glissea2e68e92009-12-07 15:52:56 +0100256 const struct drm_mm *mm,
257 unsigned long size,
258 unsigned alignment,
259 unsigned long start,
260 unsigned long end,
David Herrmann31e5d7c2013-07-27 13:36:27 +0200261 enum drm_mm_search_flags flags)
Chris Wilson6b9d89b2012-07-10 11:15:23 +0100262{
263 return drm_mm_search_free_in_range_generic(mm, size, alignment, 0,
David Herrmann31e5d7c2013-07-27 13:36:27 +0200264 start, end, flags);
Chris Wilson6b9d89b2012-07-10 11:15:23 +0100265}
Daniel Vetter69163ea2013-07-01 22:05:53 +0200266
/* Initialize an allocator managing the address range [start, start+size). */
extern void drm_mm_init(struct drm_mm *mm,
			unsigned long start,
			unsigned long size);
/* Tear down an allocator; all nodes must have been removed first. */
extern void drm_mm_takedown(struct drm_mm *mm);
/* Returns non-zero when the allocator holds no allocated nodes. */
extern int drm_mm_clean(struct drm_mm *mm);
/* Pre-allocate spare nodes so the atomic get_block paths cannot fail
 * on allocation. */
extern int drm_mm_pre_get(struct drm_mm *mm);
273
274static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
275{
276 return block->mm;
277}
278
/* Eviction scan support: prepare a scan for a size/alignment/color
 * request, feed candidate nodes with drm_mm_scan_add_block(), then roll
 * back with drm_mm_scan_remove_block() (which reports the nodes to evict). */
void drm_mm_init_scan(struct drm_mm *mm,
		      unsigned long size,
		      unsigned alignment,
		      unsigned long color);
/* As drm_mm_init_scan(), but the hole must lie inside [start, end). */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 unsigned long size,
				 unsigned alignment,
				 unsigned long color,
				 unsigned long start,
				 unsigned long end);
int drm_mm_scan_add_block(struct drm_mm_node *node);
int drm_mm_scan_remove_block(struct drm_mm_node *node);
291
/* Dump the allocator state to the kernel log, each line tagged @prefix. */
extern void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
#ifdef CONFIG_DEBUG_FS
/* Dump the allocator state into a debugfs seq_file. */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
#endif
296
Jerome Glisse249d6042009-04-08 17:11:16 +0200297#endif