/*
 *	linux/mm/filemap_xip.c
 *
 * Copyright (C) 2005 IBM Corporation
 * Author: Carsten Otte <cotte@de.ibm.com>
 *
 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
 *
 */

#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/seqlock.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

/*
 * We use our own empty page to avoid interference with other users
 * of ZERO_PAGE(), such as /dev/zero
 */
static DEFINE_MUTEX(xip_sparse_mutex);
static seqcount_t xip_sparse_seq = SEQCNT_ZERO(xip_sparse_seq);
static struct page *__xip_sparse_page;

/* called under xip_sparse_mutex */
static struct page *xip_sparse_page(void)
{
	if (!__xip_sparse_page) {
		struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);

		if (page)
			__xip_sparse_page = page;
	}
	return __xip_sparse_page;
}

/*
 * This is a file read routine for execute in place files, and uses
 * the mapping->a_ops->get_xip_mem() function for the actual low-level
 * stuff.
 *
 * Note the struct file* is not used at all. It may be NULL.
 */
static ssize_t
do_xip_mapping_read(struct address_space *mapping,
		    struct file_ra_state *_ra,
		    struct file *filp,
		    char __user *buf,
		    size_t len,
		    loff_t *ppos)
{
	struct inode *inode = mapping->host;
	pgoff_t index, end_index;
	unsigned long offset;
	loff_t isize, pos;
	size_t copied = 0, error = 0;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	pos = *ppos;
	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & ~PAGE_CACHE_MASK;

	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	do {
		unsigned long nr, left;
		void *xip_mem;
		unsigned long xip_pfn;
		int zero = 0;

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset)
				goto out;
		}
		nr = nr - offset;
		if (nr > len - copied)
			nr = len - copied;

		error = mapping->a_ops->get_xip_mem(mapping, index, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(error)) {
			if (error == -ENODATA) {
				/* sparse */
				zero = 1;
			} else
				goto out;
		}

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			/* address based flush */ ;

		/*
		 * Ok, we have the mem, so now we can copy it to user space.
		 *
		 * NOTE! __copy_to_user()/__clear_user() return the number
		 * of bytes that could NOT be transferred; a non-zero
		 * remainder means the user buffer faulted, so we bail out
		 * with -EFAULT below rather than doing a partial update
		 * of *ppos.
		 */
		if (!zero)
			left = __copy_to_user(buf + copied, xip_mem + offset, nr);
		else
			left = __clear_user(buf + copied, nr);

		if (left) {
			error = -EFAULT;
			goto out;
		}

		copied += (nr - left);
		offset += (nr - left);
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
	} while (copied < len);

out:
	*ppos = pos + copied;
	if (filp)
		file_accessed(filp);

	return (copied ? copied : error);
}

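/*
 * xip_file_read - read() for execute-in-place files
 *
 * Checks that the user buffer is writable, then copies directly from
 * the backing memory returned by ->get_xip_mem(); no page cache pages
 * are involved at any point.
 */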
ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	return do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
				   buf, len, ppos);
}
EXPORT_SYMBOL_GPL(xip_file_read);

/*
 * __xip_unmap is invoked from xip_file_fault and __xip_file_write
 *
 * This function walks all vmas of the address_space and unmaps the
 * __xip_sparse_page when found at pgoff.
 */
static void __xip_unmap(struct address_space *mapping, unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct page *page;
	unsigned count;
	int locked = 0;

	count = read_seqcount_begin(&xip_sparse_seq);

	page = __xip_sparse_page;
	if (!page)
		return;

retry:
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		pte_t *pte, pteval;
		spinlock_t *ptl;
		struct mm_struct *mm = vma->vm_mm;
		unsigned long address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);

		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
		pte = page_check_address(page, mm, address, &ptl, 1);
		if (pte) {
			/* Nuke the page table entry. */
			flush_cache_page(vma, address, pte_pfn(*pte));
			pteval = ptep_clear_flush(vma, address, pte);
			page_remove_rmap(page);
			dec_mm_counter(mm, MM_FILEPAGES);
			BUG_ON(pte_dirty(pteval));
			pte_unmap_unlock(pte, ptl);
			/* must invalidate_page _before_ freeing the page */
			mmu_notifier_invalidate_page(mm, address);
			page_cache_release(page);
		}
	}
	i_mmap_unlock_read(mapping);

	if (locked) {
		mutex_unlock(&xip_sparse_mutex);
	} else if (read_seqcount_retry(&xip_sparse_seq, count)) {
		mutex_lock(&xip_sparse_mutex);
		locked = 1;
		goto retry;
	}
}

/*
 * xip_file_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * This function is derived from filemap_fault, but is used for execute
 * in place files.
 */
static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	pgoff_t size;
	void *xip_mem;
	unsigned long xip_pfn;
	struct page *page;
	int error;

	/* XXX: are VM_FAULT_ codes OK? */
again:
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
						&xip_mem, &xip_pfn);
	if (likely(!error))
		goto found;
	if (error != -ENODATA)
		return VM_FAULT_OOM;

	/* sparse block */
	if ((vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
	    (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
	    (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
		int err;

		/* maybe shared writable, allocate new block */
		mutex_lock(&xip_sparse_mutex);
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1,
							&xip_mem, &xip_pfn);
		mutex_unlock(&xip_sparse_mutex);
		if (error)
			return VM_FAULT_SIGBUS;
		/* unmap sparse mappings at pgoff from all other vmas */
		__xip_unmap(mapping, vmf->pgoff);

		/* also reached directly when get_xip_mem() succeeded above */
found:
		err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
							xip_pfn);
		if (err == -ENOMEM)
			return VM_FAULT_OOM;
		/*
		 * err == -EBUSY is fine, we've raced against another thread
		 * that faulted-in the same page
		 */
		if (err != -EBUSY)
			BUG_ON(err);
		return VM_FAULT_NOPAGE;
	} else {
		int err, ret = VM_FAULT_OOM;

		mutex_lock(&xip_sparse_mutex);
		write_seqcount_begin(&xip_sparse_seq);
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(!error)) {
			/* someone else faulted the block in; start over */
			write_seqcount_end(&xip_sparse_seq);
			mutex_unlock(&xip_sparse_mutex);
			goto again;
		}
		if (error != -ENODATA)
			goto out;
		/* not shared and writable, use xip_sparse_page() */
		page = xip_sparse_page();
		if (!page)
			goto out;
		err = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
							page);
		if (err == -ENOMEM)
			goto out;

		ret = VM_FAULT_NOPAGE;
out:
		write_seqcount_end(&xip_sparse_seq);
		mutex_unlock(&xip_sparse_mutex);

		return ret;
	}
}

static const struct vm_operations_struct xip_file_vm_ops = {
	.fault		= xip_file_fault,
	.page_mkwrite	= filemap_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};

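/*
 * xip_file_mmap - set up a mapping over execute-in-place memory
 *
 * Installs xip_file_vm_ops and marks the vma VM_MIXEDMAP, since the
 * backing memory is inserted by pfn (vm_insert_mixed) and may have no
 * struct page behind it.
 */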
int xip_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	BUG_ON(!file->f_mapping->a_ops->get_xip_mem);

	file_accessed(file);
	vma->vm_ops = &xip_file_vm_ops;
	vma->vm_flags |= VM_MIXEDMAP;
	return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);
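
/*
 * Example wiring, sketched after what an XIP-capable filesystem does
 * (ext2 with CONFIG_EXT2_FS_XIP is the in-tree user); the struct name
 * and the surrounding fields are illustrative only:
 *
 *	const struct file_operations my_xip_file_operations = {
 *		.llseek	= generic_file_llseek,
 *		.read	= xip_file_read,
 *		.write	= xip_file_write,
 *		.mmap	= xip_file_mmap,
 *	};
 */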
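
/*
 * __xip_file_write - copy data from user space into the XIP backing store
 *
 * For each page-sized chunk this asks ->get_xip_mem() for the target
 * block (creating it, and unmapping the sparse zero page, if the block
 * does not exist yet) and copies the user data in with
 * __copy_from_user_nocache().  Called with i_mutex held.
 */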
static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
		 size_t count, loff_t pos, loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	struct inode *inode = mapping->host;
	long status = 0;
	size_t bytes;
	ssize_t written = 0;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	do {
		unsigned long index;
		unsigned long offset;
		size_t copied;
		void *xip_mem;
		unsigned long xip_pfn;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
		if (status == -ENODATA) {
			/* allocate a new block, then unmap the sparse page */
			mutex_lock(&xip_sparse_mutex);
			status = a_ops->get_xip_mem(mapping, index, 1,
							&xip_mem, &xip_pfn);
			mutex_unlock(&xip_sparse_mutex);
			if (!status)
				/* unmap page at pgoff from all other vmas */
				__xip_unmap(mapping, index);
		}

		if (status)
			break;

		copied = bytes -
			__copy_from_user_nocache(xip_mem + offset, buf, bytes);

		if (likely(copied > 0)) {
			status = copied;

			if (status >= 0) {
				written += status;
				count -= status;
				pos += status;
				buf += status;
			}
		}
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		if (status < 0)
			break;
	} while (count);
	*ppos = pos;
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}

	return written ? written : status;
}

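/*
 * xip_file_write - write() for execute-in-place files
 *
 * Takes i_mutex, validates the user buffer, runs the usual write
 * checks (generic_write_checks, suid removal, timestamp update) and
 * hands the actual copying to __xip_file_write().
 */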
ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
	       loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count;
	loff_t pos;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);

	if (!access_ok(VERIFY_READ, buf, len)) {
		ret = -EFAULT;
		goto out_up;
	}

	pos = *ppos;
	count = len;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
	if (ret)
		goto out_backing;
	if (count == 0)
		goto out_backing;

	ret = file_remove_suid(filp);
	if (ret)
		goto out_backing;

	ret = file_update_time(filp);
	if (ret)
		goto out_backing;

	ret = __xip_file_write(filp, buf, count, pos, ppos);

 out_backing:
	current->backing_dev_info = NULL;
 out_up:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);

/*
 * truncate a page used for execute in place
 * functionality is analogous to block_truncate_page but uses
 * get_xip_mem() to get the memory instead of the page cache
 */
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize;
	unsigned length;
	void *xip_mem;
	unsigned long xip_pfn;
	int err;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	blocksize = 1 << mapping->host->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;

	err = mapping->a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
	if (unlikely(err)) {
		if (err == -ENODATA)
			/* Hole? No need to truncate */
			return 0;
		else
			return err;
	}
	memset(xip_mem + offset, 0, length);
	return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);