/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>

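/*
 * dax_clear_blocks() zeroes @size bytes of the block device backing @inode,
 * starting at @block, by writing through the direct-access mapping returned
 * by bdev_direct_access() rather than through the page cache.
 */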
int dax_clear_blocks(struct inode *inode, sector_t block, long size)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	sector_t sector = block << (inode->i_blkbits - 9);

	might_sleep();
	do {
		void *addr;
		unsigned long pfn;
		long count;

		count = bdev_direct_access(bdev, sector, &addr, &pfn, size);
		if (count < 0)
			return count;
		BUG_ON(size < count);
		while (count > 0) {
			unsigned pgsz = PAGE_SIZE - offset_in_page(addr);
			if (pgsz > count)
				pgsz = count;
			if (pgsz < PAGE_SIZE)
				memset(addr, 0, pgsz);
			else
				clear_page(addr);
			addr += pgsz;
			size -= pgsz;
			count -= pgsz;
			BUG_ON(pgsz & 511);
			sector += pgsz / 512;
			cond_resched();
		}
	} while (size);

	return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_blocks);

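/*
 * Map the block described by @bh for direct CPU access; returns the number
 * of bytes mapped at *@addr, or a negative errno.
 */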
static long dax_get_addr(struct buffer_head *bh, void **addr, unsigned blkbits)
{
	unsigned long pfn;
	sector_t sector = bh->b_blocknr << (blkbits - 9);
	return bdev_direct_access(bh->b_bdev, sector, addr, &pfn, bh->b_size);
}

static void dax_new_buf(void *addr, unsigned size, unsigned first, loff_t pos,
			loff_t end)
{
	loff_t final = end - pos + first; /* The final byte of the buffer */

	if (first > 0)
		memset(addr, 0, first);
	if (final < size)
		memset(addr + final, 0, size - final);
}

static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}

/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size. To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size. Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}

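/*
 * dax_io() walks the byte range [@start, @end), using @get_block to map each
 * extent and copying data directly between the device and @iter. Reads from
 * holes are satisfied by zeroing the iterator. Returns the number of bytes
 * transferred, or the error from get_block/dax_get_addr if nothing was
 * copied.
 */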
static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		      loff_t start, loff_t end, get_block_t get_block,
		      struct buffer_head *bh)
{
	ssize_t retval = 0;
	loff_t pos = start;
	loff_t max = start;
	loff_t bh_max = start;
	void *addr;
	bool hole = false;

	if (iov_iter_rw(iter) != WRITE)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		unsigned len;
		if (pos == max) {
			unsigned blkbits = inode->i_blkbits;
			sector_t block = pos >> blkbits;
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				retval = get_block(inode, block, bh,
						   iov_iter_rw(iter) == WRITE);
				if (retval)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = iov_iter_rw(iter) != WRITE && !buffer_written(bh);
			if (hole) {
				addr = NULL;
				size = bh->b_size - first;
			} else {
				retval = dax_get_addr(bh, &addr, blkbits);
				if (retval < 0)
					break;
				if (buffer_unwritten(bh) || buffer_new(bh))
					dax_new_buf(addr, retval, first, pos,
								end);
				addr += first;
				size = retval - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE)
			len = copy_from_iter(addr, max - pos, iter);
		else if (!hole)
			len = copy_to_iter(addr, max - pos, iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len)
			break;

		pos += len;
		addr += len;
	}

	return (pos == start) ? retval : pos - start;
}

/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes. For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		  struct iov_iter *iter, loff_t pos, get_block_t get_block,
		  dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
		struct address_space *mapping = inode->i_mapping;
		mutex_lock(&inode->i_mutex);
		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
		if (retval) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	/* Protects against truncate */
	inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		mutex_unlock(&inode->i_mutex);

	if ((retval > 0) && end_io)
		end_io(iocb, pos, retval, bh.b_private);

	inode_dio_end(inode);
 out:
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
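
/*
 * Example (illustrative sketch, not taken from any particular filesystem):
 * a filesystem's ->direct_IO method might wrap dax_do_io() roughly like
 * this, where "myfs_get_block" is a hypothetical get_block_t callback:
 *
 *	static ssize_t myfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 *				      loff_t offset)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		return dax_do_io(iocb, inode, iter, offset, myfs_get_block,
 *				 NULL, DIO_LOCKING);
 *	}
 */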

/*
 * The user has performed a load from a hole in the file. Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files. We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
							struct vm_fault *vmf)
{
	unsigned long size;
	struct inode *inode = mapping->host;
	if (!page)
		page = find_or_create_page(mapping, vmf->pgoff,
						GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
	/* Recheck i_size under page lock to avoid truncate race */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return VM_FAULT_LOCKED;
}

static int copy_user_bh(struct page *to, struct buffer_head *bh,
			unsigned blkbits, unsigned long vaddr)
{
	void *vfrom, *vto;
	if (dax_get_addr(bh, &vfrom, blkbits) < 0)
		return -EIO;
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	return 0;
}

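/*
 * dax_insert_mapping() maps the block described by @bh directly into the
 * faulting process's page tables with vm_insert_mixed(), zeroing it first
 * if it is new or unwritten.
 */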
static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
			struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct address_space *mapping = inode->i_mapping;
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	void *addr;
	unsigned long pfn;
	pgoff_t size;
	int error;

	i_mmap_lock_read(mapping);

	/*
	 * Check truncate didn't happen while we were allocating a block.
	 * If it did, this block may or may not be still allocated to the
	 * file. We can't tell the filesystem to free it because we can't
	 * take i_mutex here. In the worst case, the file still has blocks
	 * allocated past the end of the file.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (unlikely(vmf->pgoff >= size)) {
		error = -EIO;
		goto out;
	}

	error = bdev_direct_access(bh->b_bdev, sector, &addr, &pfn, bh->b_size);
	if (error < 0)
		goto out;
	if (error < PAGE_SIZE) {
		error = -EIO;
		goto out;
	}

	if (buffer_unwritten(bh) || buffer_new(bh))
		clear_page(addr);

	error = vm_insert_mixed(vma, vaddr, pfn);

 out:
	i_mmap_unlock_read(mapping);

	return error;
}

/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten
 *	extents to written, called once the new mapping has been (or has
 *	failed to be) inserted
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files. __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block, dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_size = PAGE_SIZE;

 repeat:
	page = find_get_page(mapping, vmf->pgoff);
	if (page) {
		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
			page_cache_release(page);
			return VM_FAULT_RETRY;
		}
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (unlikely(vmf->pgoff >= size)) {
			/*
			 * We have a struct page covering a hole in the file
			 * from a read fault and we've raced with a truncate
			 */
			error = -EIO;
			goto unlock_page;
		}
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock_page;

	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock_page;
		} else {
			return dax_load_hole(mapping, page, vmf);
		}
	}

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, &bh, blkbits, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock_page;
		vmf->page = page;
		if (!page) {
			i_mmap_lock_read(mapping);
			/* Check we didn't race with truncate */
			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
								PAGE_SHIFT;
			if (vmf->pgoff >= size) {
				i_mmap_unlock_read(mapping);
				error = -EIO;
				goto out;
			}
		}
		return VM_FAULT_LOCKED;
	}

	/* Check we didn't race with a read fault installing a new page */
	if (!page && major)
		page = find_lock_page(mapping, vmf->pgoff);

	if (page) {
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
							PAGE_CACHE_SIZE, 0);
		delete_from_page_cache(page);
		unlock_page(page);
		page_cache_release(page);
	}

	/*
	 * If we successfully insert the new mapping over an unwritten extent,
	 * we need to ensure we convert the unwritten extent. If there is an
	 * error inserting the mapping, the filesystem needs to leave it as
	 * unwritten to prevent exposure of the stale underlying data to
	 * userspace, but we still need to call the completion function so
	 * the private resources on the mapping buffer can be released. We
	 * indicate what the callback should do via the uptodate variable, same
	 * as for normal BH based IO completions.
	 */
	error = dax_insert_mapping(inode, &bh, vma, vmf);
	if (buffer_unwritten(&bh))
		complete_unwritten(&bh, !error);

 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;

 unlock_page:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	goto out;
}
EXPORT_SYMBOL(__dax_fault);

/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten
 *	extents to written, called once the new mapping has been (or has
 *	failed to be) inserted
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block, dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_fault(vma, vmf, get_block, complete_unwritten);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);
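
/*
 * Example (illustrative sketch, not taken from any particular filesystem):
 * a filesystem would typically call dax_fault() from its
 * vm_operations_struct ->fault handler, with "myfs_get_block" and
 * "myfs_end_unwritten" standing in for its own callbacks. A filesystem
 * whose get_block never returns unwritten extents may pass NULL for the
 * completion callback:
 *
 *	static int myfs_dax_fault(struct vm_area_struct *vma,
 *				  struct vm_fault *vmf)
 *	{
 *		return dax_fault(vma, vmf, myfs_get_block, myfs_end_unwritten);
 *	}
 */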

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * The 'colour' (ie low bits) within a PMD of a page offset. This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

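/*
 * __dax_pmd_fault() tries to install a single PMD-sized mapping for the
 * faulting address. It returns VM_FAULT_FALLBACK whenever the block mapping,
 * the VMA, or the file size cannot accommodate a full PMD, so the caller can
 * retry with PTE-sized faults.
 */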
int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, get_block_t get_block,
		dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct buffer_head bh;
	unsigned blkbits = inode->i_blkbits;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	long length;
	void *kaddr;
	pgoff_t size, pgoff;
	sector_t block, sector;
	unsigned long pfn;
	int result = 0;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		return VM_FAULT_FALLBACK;
	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		return VM_FAULT_FALLBACK;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		return VM_FAULT_FALLBACK;

	pgoff = ((pmd_addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size)
		return VM_FAULT_SIGBUS;
	/* If the PMD would cover blocks out of the file */
	if ((pgoff | PG_PMD_COLOUR) >= size)
		return VM_FAULT_FALLBACK;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

	bh.b_size = PMD_SIZE;
	length = get_block(inode, block, &bh, write);
	if (length)
		return VM_FAULT_SIGBUS;
	i_mmap_lock_read(mapping);

	/*
	 * If the filesystem isn't willing to tell us the length of a hole,
	 * just fall back to PTEs. Calling get_block 512 times in a loop
	 * would be silly.
	 */
	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
		goto fallback;

	/* Guard against a race with truncate */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}
	if ((pgoff | PG_PMD_COLOUR) >= size)
		goto fallback;

	if (is_huge_zero_pmd(*pmd))
		unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);

	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
		spinlock_t *ptl;
		pmd_t entry;
		struct page *zero_page = get_huge_zero_page();

		if (unlikely(!zero_page))
			goto fallback;

		ptl = pmd_lock(vma->vm_mm, pmd);
		if (!pmd_none(*pmd)) {
			spin_unlock(ptl);
			goto fallback;
		}

		entry = mk_pmd(zero_page, vma->vm_page_prot);
		entry = pmd_mkhuge(entry);
		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
		result = VM_FAULT_NOPAGE;
		spin_unlock(ptl);
	} else {
		sector = bh.b_blocknr << (blkbits - 9);
		length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
						bh.b_size);
		if (length < 0) {
			result = VM_FAULT_SIGBUS;
			goto out;
		}
		if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
			goto fallback;

		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
			int i;
			for (i = 0; i < PTRS_PER_PMD; i++)
				clear_page(kaddr + i * PAGE_SIZE);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			result |= VM_FAULT_MAJOR;
		}

		result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write);
	}

 out:
	i_mmap_unlock_read(mapping);

	if (buffer_unwritten(&bh))
		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));

	return result;

 fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	result = VM_FAULT_FALLBACK;
	goto out;
}
EXPORT_SYMBOL_GPL(__dax_pmd_fault);

/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @address: The faulting address, within @vma
 * @pmd: The PMD entry to fill in
 * @flags: The fault flags (FAULT_FLAG_WRITE, etc.)
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten
 *	extents to written
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmd, unsigned int flags, get_block_t get_block,
			dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_pmd_fault(vma, address, pmd, flags, get_block,
				 complete_unwritten);
	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
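
/*
 * Illustrative sketch (not taken from any particular filesystem): a
 * filesystem's vm_operations_struct ->pmd_fault handler might simply
 * forward to dax_pmd_fault(), with "myfs_get_block" and
 * "myfs_end_unwritten" standing in for its own callbacks:
 *
 *	static int myfs_dax_pmd_fault(struct vm_area_struct *vma,
 *			unsigned long addr, pmd_t *pmd, unsigned int flags)
 *	{
 *		return dax_pmd_fault(vma, addr, pmd, flags, myfs_get_block,
 *				     myfs_end_unwritten);
 *	}
 */
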
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	sb_end_pagefault(sb);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
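
/*
 * Illustrative note: filesystems normally hook this up directly in their
 * vm_operations_struct for DAX mappings, along the lines of
 *
 *	static const struct vm_operations_struct myfs_dax_vm_ops = {
 *		.fault		= myfs_dax_fault,
 *		.pmd_fault	= myfs_dax_pmd_fault,
 *		.page_mkwrite	= myfs_dax_fault,
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 *
 * where myfs_dax_fault() and myfs_dax_pmd_fault() are the hypothetical
 * wrappers sketched above.
 */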

/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file in which the range is being zeroed
 * @from: The file offset at which zeroing starts
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file. This is intended for hole-punch operations. If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks. Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_CACHE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_size = PAGE_CACHE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0)
		return err;
	if (buffer_written(&bh)) {
		void *addr;
		err = dax_get_addr(&bh, &addr, inode->i_blkbits);
		if (err < 0)
			return err;
		memset(addr + offset, 0, length);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);

/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks. Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_CACHE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
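
/*
 * Illustrative sketch (not taken from any particular filesystem): in a
 * filesystem's truncate path, the partial last page is typically zeroed
 * with dax_truncate_page() before the blocks beyond the new size are
 * freed, e.g.
 *
 *	error = dax_truncate_page(inode, newsize, myfs_get_block);
 *
 * where "myfs_get_block" is the filesystem's get_block_t callback and
 * "newsize" is the new i_size.
 */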