/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"

static int ttm_tt_swapin(struct ttm_tt *ttm);

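/*
 * Flush a single page from the CPU caches with clflush, one cache line
 * (boot_cpu_data.x86_clflush_size bytes) at a time. The page is mapped
 * with kmap_atomic() so that highmem pages can be flushed as well.
 */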
#if defined(CONFIG_X86)
static void ttm_tt_clflush_page(struct page *page)
{
        uint8_t *page_virtual;
        unsigned int i;

        if (unlikely(page == NULL))
                return;

        page_virtual = kmap_atomic(page, KM_USER0);

        for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
                clflush(page_virtual + i);

        kunmap_atomic(page_virtual, KM_USER0);
}

static void ttm_tt_cache_flush_clflush(struct page *pages[],
                                       unsigned long num_pages)
{
        unsigned long i;

        mb();
        for (i = 0; i < num_pages; ++i)
                ttm_tt_clflush_page(*pages++);
        mb();
}
#elif !defined(__powerpc__)
static void ttm_tt_ipi_handler(void *null)
{
        ;
}
#endif

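/*
 * Flush the CPU caches for an array of pages before their caching
 * policy is changed. x86 uses clflush when available (and does nothing
 * otherwise), powerpc flushes the data cache range, and all other
 * architectures fall back to an empty IPI on each CPU, relying on the
 * IPI round-trip as a synchronization point.
 */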
void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
{

#if defined(CONFIG_X86)
        if (cpu_has_clflush) {
                ttm_tt_cache_flush_clflush(pages, num_pages);
                return;
        }
#elif defined(__powerpc__)
        unsigned long i;

        for (i = 0; i < num_pages; ++i) {
                if (pages[i]) {
                        unsigned long start = (unsigned long)page_address(pages[i]);
                        flush_dcache_range(start, start + PAGE_SIZE);
                }
        }
#else
        if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
                printk(KERN_ERR TTM_PFX
                       "Timed out waiting for drm cache flush.\n");
#endif
}

/**
 * Allocates storage for pointers to the pages that back the ttm.
 *
 * Uses kmalloc if possible. Otherwise falls back to vmalloc.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
        unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
        ttm->pages = NULL;

        if (size <= PAGE_SIZE)
                ttm->pages = kzalloc(size, GFP_KERNEL);

        if (!ttm->pages) {
                ttm->pages = vmalloc_user(size);
                if (ttm->pages)
                        ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
        }
}

static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
        if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
                vfree(ttm->pages);
                ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
        } else {
                kfree(ttm->pages);
        }
        ttm->pages = NULL;
}

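/*
 * Allocate a single page, translating the TTM page flags into GFP
 * flags: TTM_PAGE_FLAG_ZERO_ALLOC requests a zeroed page, and
 * TTM_PAGE_FLAG_DMA32 restricts the allocation to the 32-bit
 * DMA-addressable zone.
 */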
static struct page *ttm_tt_alloc_page(unsigned page_flags)
{
        gfp_t gfp_flags = GFP_HIGHUSER;

        if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
                gfp_flags |= __GFP_ZERO;

        if (page_flags & TTM_PAGE_FLAG_DMA32)
                gfp_flags |= __GFP_DMA32;

        return alloc_page(gfp_flags);
}

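/*
 * Release pages that were pinned with get_user_pages(). Pages written
 * through a write mapping are marked dirty before being put, the
 * shared dummy read page is skipped, and the memory that was charged
 * per page is returned to the global accounting.
 */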
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
        int write;
        int dirty;
        struct page *page;
        int i;
        struct ttm_backend *be = ttm->be;

        BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
        write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
        dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);

        if (be)
                be->func->clear(be);

        for (i = 0; i < ttm->num_pages; ++i) {
                page = ttm->pages[i];
                if (page == NULL)
                        continue;

                if (page == ttm->dummy_read_page) {
                        BUG_ON(write);
                        continue;
                }

                if (write && dirty && !PageReserved(page))
                        set_page_dirty_lock(page);

                ttm->pages[i] = NULL;
                ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
                put_page(page);
        }
        ttm->state = tt_unpopulated;
        ttm->first_himem_page = ttm->num_pages;
        ttm->last_lomem_page = -1;
}

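/*
 * Return the page backing the ttm at @index, allocating on demand.
 * Newly allocated pages are accounted against the global memory limit
 * and recorded at either end of the page array: highmem pages grow
 * downwards from first_himem_page, lowmem pages upwards from
 * last_lomem_page, until pages[index] is populated.
 */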
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
        struct page *p;
        struct ttm_bo_device *bdev = ttm->bdev;
        struct ttm_mem_global *mem_glob = bdev->mem_glob;
        int ret;

        while (NULL == (p = ttm->pages[index])) {
                p = ttm_tt_alloc_page(ttm->page_flags);

                if (!p)
                        return NULL;

                if (PageHighMem(p)) {
                        ret =
                            ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
                                                 false, false, true);
                        if (unlikely(ret != 0))
                                goto out_err;
                        ttm->pages[--ttm->first_himem_page] = p;
                } else {
                        ret =
                            ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
                                                 false, false, false);
                        if (unlikely(ret != 0))
                                goto out_err;
                        ttm->pages[++ttm->last_lomem_page] = p;
                }
        }
        return p;
out_err:
        put_page(p);
        return NULL;
}

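/*
 * As __ttm_tt_get_page(), but first swaps the ttm back in if its pages
 * currently live in swap storage.
 */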
struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
        int ret;

        if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
                ret = ttm_tt_swapin(ttm);
                if (unlikely(ret != 0))
                        return NULL;
        }
        return __ttm_tt_get_page(ttm, index);
}

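/*
 * Make sure all pages of the ttm are allocated, swapping the ttm back
 * in first if needed, then hand the complete page array to the backend
 * and move the ttm to the tt_unbound state.
 */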
int ttm_tt_populate(struct ttm_tt *ttm)
{
        struct page *page;
        unsigned long i;
        struct ttm_backend *be;
        int ret;

        if (ttm->state != tt_unpopulated)
                return 0;

        if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
                ret = ttm_tt_swapin(ttm);
                if (unlikely(ret != 0))
                        return ret;
        }

        be = ttm->be;

        for (i = 0; i < ttm->num_pages; ++i) {
                page = __ttm_tt_get_page(ttm, i);
                if (!page)
                        return -ENOMEM;
        }

        be->func->populate(be, ttm->num_pages, ttm->pages,
                           ttm->dummy_read_page);
        ttm->state = tt_unbound;
        return 0;
}

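/*
 * Change the kernel linear-map caching attributes of a single lowmem
 * page on x86. Highmem pages have no permanent kernel mapping, so
 * nothing needs to be done for them; non-x86 architectures currently
 * leave attributes untouched.
 */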
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
                                          enum ttm_caching_state c_state)
{
        if (PageHighMem(p))
                return 0;

        switch (c_state) {
        case tt_cached:
                return set_pages_wb(p, 1);
        case tt_wc:
                return set_memory_wc((unsigned long) page_address(p), 1);
        default:
                return set_pages_uc(p, 1);
        }
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
                                          enum ttm_caching_state c_state)
{
        return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change the caching policy for the linear kernel map
 * for a range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
                              enum ttm_caching_state c_state)
{
        int i, j;
        struct page *cur_page;
        int ret;

        if (ttm->caching_state == c_state)
                return 0;

        if (c_state != tt_cached) {
                ret = ttm_tt_populate(ttm);
                if (unlikely(ret != 0))
                        return ret;
        }

        if (ttm->caching_state == tt_cached)
                ttm_tt_cache_flush(ttm->pages, ttm->num_pages);

        for (i = 0; i < ttm->num_pages; ++i) {
                cur_page = ttm->pages[i];
                if (likely(cur_page != NULL)) {
                        ret = ttm_tt_set_page_caching(cur_page, c_state);
                        if (unlikely(ret != 0))
                                goto out_err;
                }
        }

        ttm->caching_state = c_state;

        return 0;

out_err:
        for (j = 0; j < i; ++j) {
                cur_page = ttm->pages[j];
                if (likely(cur_page != NULL)) {
                        (void)ttm_tt_set_page_caching(cur_page,
                                                      ttm->caching_state);
                }
        }

        return ret;
}

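/*
 * Translate TTM placement caching flags into a ttm_caching_state and
 * apply it: write-combined takes precedence over uncached, and cached
 * is the default.
 */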
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
        enum ttm_caching_state state;

        if (placement & TTM_PL_FLAG_WC)
                state = tt_wc;
        else if (placement & TTM_PL_FLAG_UNCACHED)
                state = tt_uncached;
        else
                state = tt_cached;

        return ttm_tt_set_caching(ttm, state);
}

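/*
 * Free all pages that were allocated by the ttm itself. The pages are
 * switched back to cached first so the linear-map attributes are
 * restored, the per-page accounting is released, and a warning is
 * printed for any page that still has an elevated refcount.
 */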
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
        int i;
        struct page *cur_page;
        struct ttm_backend *be = ttm->be;

        if (be)
                be->func->clear(be);
        (void)ttm_tt_set_caching(ttm, tt_cached);
        for (i = 0; i < ttm->num_pages; ++i) {
                cur_page = ttm->pages[i];
                ttm->pages[i] = NULL;
                if (cur_page) {
                        if (page_count(cur_page) != 1)
                                printk(KERN_ERR TTM_PFX
                                       "Erroneous page count. "
                                       "Leaking pages.\n");
                        ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
                                            PageHighMem(cur_page));
                        __free_page(cur_page);
                }
        }
        ttm->state = tt_unpopulated;
        ttm->first_himem_page = ttm->num_pages;
        ttm->last_lomem_page = -1;
}

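/*
 * Tear down a ttm: destroy the backend, return user-pinned or
 * driver-allocated pages, free the page directory, and drop the swap
 * storage reference unless it is a persistent swap file supplied by
 * the caller.
 */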
void ttm_tt_destroy(struct ttm_tt *ttm)
{
        struct ttm_backend *be;

        if (unlikely(ttm == NULL))
                return;

        be = ttm->be;
        if (likely(be != NULL)) {
                be->func->destroy(be);
                ttm->be = NULL;
        }

        if (likely(ttm->pages != NULL)) {
                if (ttm->page_flags & TTM_PAGE_FLAG_USER)
                        ttm_tt_free_user_pages(ttm);
                else
                        ttm_tt_free_alloced_pages(ttm);

                ttm_tt_free_page_directory(ttm);
        }

        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
            ttm->swap_storage)
                fput(ttm->swap_storage);

        kfree(ttm);
}

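/*
 * Pin @num_pages of @tsk's address space starting at @start with
 * get_user_pages(), accounting them (as lowmem, for now) against the
 * global memory limit. For a writable mapping a short pin is treated
 * as an error and everything is unwound.
 */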
int ttm_tt_set_user(struct ttm_tt *ttm,
                    struct task_struct *tsk,
                    unsigned long start, unsigned long num_pages)
{
        struct mm_struct *mm = tsk->mm;
        int ret;
        int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
        struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;

        BUG_ON(num_pages != ttm->num_pages);
        BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);

        /*
         * Account user pages as lowmem pages for now.
         */

        ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
                                   false, false, false);
        if (unlikely(ret != 0))
                return ret;

        down_read(&mm->mmap_sem);
        ret = get_user_pages(tsk, mm, start, num_pages,
                             write, 0, ttm->pages, NULL);
        up_read(&mm->mmap_sem);

        if (ret != num_pages && write) {
                ttm_tt_free_user_pages(ttm);
                ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
                return -ENOMEM;
        }

        ttm->tsk = tsk;
        ttm->start = start;
        ttm->state = tt_unbound;

        return 0;
}

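/*
 * Allocate and initialize a ttm for @size bytes: the page directory is
 * sized to the number of backing pages, the caching state starts out
 * cached, and the driver supplies the backend used for binding.
 * Returns NULL on failure with partial state cleaned up via
 * ttm_tt_destroy().
 */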
struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
                             uint32_t page_flags, struct page *dummy_read_page)
{
        struct ttm_bo_driver *bo_driver = bdev->driver;
        struct ttm_tt *ttm;

        if (!bo_driver)
                return NULL;

        ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
        if (!ttm)
                return NULL;

        ttm->bdev = bdev;

        ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        ttm->first_himem_page = ttm->num_pages;
        ttm->last_lomem_page = -1;
        ttm->caching_state = tt_cached;
        ttm->page_flags = page_flags;

        ttm->dummy_read_page = dummy_read_page;

        ttm_tt_alloc_page_directory(ttm);
        if (!ttm->pages) {
                ttm_tt_destroy(ttm);
                printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
                return NULL;
        }
        ttm->be = bo_driver->create_ttm_backend_entry(bdev);
        if (!ttm->be) {
                ttm_tt_destroy(ttm);
                printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
                return NULL;
        }
        ttm->state = tt_unpopulated;
        return ttm;
}

void ttm_tt_unbind(struct ttm_tt *ttm)
{
        int ret;
        struct ttm_backend *be = ttm->be;

        if (ttm->state == tt_bound) {
                ret = be->func->unbind(be);
                BUG_ON(ret);
                ttm->state = tt_unbound;
        }
}

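/*
 * Bind a ttm to the aperture region described by @bo_mem, populating
 * its pages first. A user-backed ttm is marked dirty on a successful
 * bind, since the device can write to it from then on.
 */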
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
        int ret = 0;
        struct ttm_backend *be;

        if (!ttm)
                return -EINVAL;

        if (ttm->state == tt_bound)
                return 0;

        be = ttm->be;

        ret = ttm_tt_populate(ttm);
        if (ret)
                return ret;

        ret = be->func->bind(be, bo_mem);
        if (ret) {
                printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
                return ret;
        }

        ttm->state = tt_bound;

        if (ttm->page_flags & TTM_PAGE_FLAG_USER)
                ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
        return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

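/*
 * Bring a swapped-out ttm back into memory. User ttms are simply
 * re-pinned with ttm_tt_set_user(); otherwise each page is read back
 * from the shmem swap storage and copied into a freshly allocated
 * page, after which the swap storage reference is dropped (unless it
 * is persistent).
 */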
static int ttm_tt_swapin(struct ttm_tt *ttm)
{
        struct address_space *swap_space;
        struct file *swap_storage;
        struct page *from_page;
        struct page *to_page;
        void *from_virtual;
        void *to_virtual;
        int i;
        int ret;

        if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
                ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
                                      ttm->num_pages);
                if (unlikely(ret != 0))
                        return ret;

                ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
                return 0;
        }

        swap_storage = ttm->swap_storage;
        BUG_ON(swap_storage == NULL);

        swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

        for (i = 0; i < ttm->num_pages; ++i) {
                from_page = read_mapping_page(swap_space, i, NULL);
                if (IS_ERR(from_page))
                        goto out_err;
                to_page = __ttm_tt_get_page(ttm, i);
                if (unlikely(to_page == NULL))
                        goto out_err;

                preempt_disable();
                from_virtual = kmap_atomic(from_page, KM_USER0);
                to_virtual = kmap_atomic(to_page, KM_USER1);
                memcpy(to_virtual, from_virtual, PAGE_SIZE);
                kunmap_atomic(to_virtual, KM_USER1);
                kunmap_atomic(from_virtual, KM_USER0);
                preempt_enable();
                page_cache_release(from_page);
        }

        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
                fput(swap_storage);
        ttm->swap_storage = NULL;
        ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

        return 0;
out_err:
        ttm_tt_free_alloced_pages(ttm);
        return -ENOMEM;
}

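/*
 * Move the contents of an unbound, cached ttm out to swap storage.
 * Unless the caller supplies a persistent swap file, an anonymous
 * shmem file is created to hold the pages. Each page is copied into
 * the shmem page cache, marked dirty, and released; the ttm's own
 * pages are then freed.
 */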
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
{
        struct address_space *swap_space;
        struct file *swap_storage;
        struct page *from_page;
        struct page *to_page;
        void *from_virtual;
        void *to_virtual;
        int i;

        BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
        BUG_ON(ttm->caching_state != tt_cached);

        /*
         * For user buffers, just unpin the pages, as there should be
         * vma references.
         */

        if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
                ttm_tt_free_user_pages(ttm);
                ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
                ttm->swap_storage = NULL;
                return 0;
        }

        if (!persistant_swap_storage) {
                swap_storage = shmem_file_setup("ttm swap",
                                                ttm->num_pages << PAGE_SHIFT,
                                                0);
                if (unlikely(IS_ERR(swap_storage))) {
                        printk(KERN_ERR TTM_PFX
                               "Failed allocating swap storage.\n");
                        return -ENOMEM;
                }
        } else
                swap_storage = persistant_swap_storage;

        swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

        for (i = 0; i < ttm->num_pages; ++i) {
                from_page = ttm->pages[i];
                if (unlikely(from_page == NULL))
                        continue;
                to_page = read_mapping_page(swap_space, i, NULL);
                /* read_mapping_page() returns an ERR_PTR on failure,
                 * never NULL, so check IS_ERR() here. */
                if (unlikely(IS_ERR(to_page)))
                        goto out_err;

                preempt_disable();
                from_virtual = kmap_atomic(from_page, KM_USER0);
                to_virtual = kmap_atomic(to_page, KM_USER1);
                memcpy(to_virtual, from_virtual, PAGE_SIZE);
                kunmap_atomic(to_virtual, KM_USER1);
                kunmap_atomic(from_virtual, KM_USER0);
                preempt_enable();
                set_page_dirty(to_page);
                mark_page_accessed(to_page);
                page_cache_release(to_page);
        }

        ttm_tt_free_alloced_pages(ttm);
        ttm->swap_storage = swap_storage;
        ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
        if (persistant_swap_storage)
                ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;

        return 0;
out_err:
        if (!persistant_swap_storage)
                fput(swap_storage);

        return -ENOMEM;
}