/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

static inline int cifs_convert_flags(unsigned int flags)
{
        if ((flags & O_ACCMODE) == O_RDONLY)
                return GENERIC_READ;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                return GENERIC_WRITE;
        else if ((flags & O_ACCMODE) == O_RDWR) {
                /*
                 * GENERIC_ALL is too much permission to request; it can
                 * cause unnecessary access-denied errors on create.
                 */
                /* return GENERIC_ALL; */
                return (GENERIC_READ | GENERIC_WRITE);
        }

        return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
                FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
                FILE_READ_DATA);
}
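
/*
 * Example (illustrative, not part of the original logic): an open(2) with
 * O_RDWR | O_CREAT maps to GENERIC_READ | GENERIC_WRITE here; only the
 * O_ACCMODE bits influence the desired access, while O_CREAT affects the
 * create disposition instead (see cifs_get_disposition() below).
 */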

static u32 cifs_posix_convert_flags(unsigned int flags)
{
        u32 posix_flags = 0;

        if ((flags & O_ACCMODE) == O_RDONLY)
                posix_flags = SMB_O_RDONLY;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                posix_flags = SMB_O_WRONLY;
        else if ((flags & O_ACCMODE) == O_RDWR)
                posix_flags = SMB_O_RDWR;

        if (flags & O_CREAT) {
                posix_flags |= SMB_O_CREAT;
                if (flags & O_EXCL)
                        posix_flags |= SMB_O_EXCL;
        } else if (flags & O_EXCL)
                cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
                         current->comm, current->tgid);

        if (flags & O_TRUNC)
                posix_flags |= SMB_O_TRUNC;
        /* be safe and imply O_SYNC for O_DSYNC */
        if (flags & O_DSYNC)
                posix_flags |= SMB_O_SYNC;
        if (flags & O_DIRECTORY)
                posix_flags |= SMB_O_DIRECTORY;
        if (flags & O_NOFOLLOW)
                posix_flags |= SMB_O_NOFOLLOW;
        if (flags & O_DIRECT)
                posix_flags |= SMB_O_DIRECT;

        return posix_flags;
}
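
/*
 * Example (illustrative): f_flags of O_WRONLY | O_CREAT | O_EXCL become
 * SMB_O_WRONLY | SMB_O_CREAT | SMB_O_EXCL, while O_EXCL without O_CREAT is
 * dropped with only a debug message - consistent with open(2), where O_EXCL
 * without O_CREAT has undefined behavior.
 */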

static inline int cifs_get_disposition(unsigned int flags)
{
        if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
                return FILE_CREATE;
        else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
                return FILE_OVERWRITE_IF;
        else if ((flags & O_CREAT) == O_CREAT)
                return FILE_OPEN_IF;
        else if ((flags & O_TRUNC) == O_TRUNC)
                return FILE_OVERWRITE;
        else
                return FILE_OPEN;
}
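
/*
 * Example (illustrative): O_CREAT | O_TRUNC yields FILE_OVERWRITE_IF, i.e.
 * "create the file if needed, truncate it if present". The full mapping is
 * spelled out in the table inside cifs_nt_open() below.
 */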

int cifs_posix_open(char *full_path, struct inode **pinode,
                    struct super_block *sb, int mode, unsigned int f_flags,
                    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
        int rc;
        FILE_UNIX_BASIC_INFO *presp_data;
        __u32 posix_flags = 0;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifs_fattr fattr;
        struct tcon_link *tlink;
        struct cifs_tcon *tcon;

        cifs_dbg(FYI, "posix open %s\n", full_path);

        presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
        if (presp_data == NULL)
                return -ENOMEM;

        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
                rc = PTR_ERR(tlink);
                goto posix_open_ret;
        }

        tcon = tlink_tcon(tlink);
        mode &= ~current_umask();

        posix_flags = cifs_posix_convert_flags(f_flags);
        rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
                             poplock, full_path, cifs_sb->local_nls,
                             cifs_sb->mnt_cifs_flags &
                                CIFS_MOUNT_MAP_SPECIAL_CHR);
        cifs_put_tlink(tlink);

        if (rc)
                goto posix_open_ret;

        if (presp_data->Type == cpu_to_le32(-1))
                goto posix_open_ret; /* open ok, caller does qpathinfo */

        if (!pinode)
                goto posix_open_ret; /* caller does not need info */

        cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

        /* get new inode and set it up */
        if (*pinode == NULL) {
                cifs_fill_uniqueid(sb, &fattr);
                *pinode = cifs_iget(sb, &fattr);
                if (!*pinode) {
                        rc = -ENOMEM;
                        goto posix_open_ret;
                }
        } else {
                cifs_fattr_to_inode(*pinode, &fattr);
        }

posix_open_ret:
        kfree(presp_data);
        return rc;
}

static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
             struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
             struct cifs_fid *fid, unsigned int xid)
{
        int rc;
        int desired_access;
        int disposition;
        int create_options = CREATE_NOT_DIR;
        FILE_ALL_INFO *buf;
        struct TCP_Server_Info *server = tcon->ses->server;

        if (!server->ops->open)
                return -ENOSYS;

        desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *      POSIX Flag            CIFS Disposition
 *      ----------            ----------------
 *      O_CREAT               FILE_OPEN_IF
 *      O_CREAT | O_EXCL      FILE_CREATE
 *      O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *      O_TRUNC               FILE_OVERWRITE
 *      none of the above     FILE_OPEN
 *
 *      Note that there is no direct match for the FILE_SUPERSEDE
 *      disposition (ie create whether or not the file exists);
 *      O_CREAT | O_TRUNC is similar but truncates the existing file
 *      rather than creating a new one as FILE_SUPERSEDE does (which
 *      uses the attributes / metadata passed in on the open call).
 *
 *      O_SYNC is a reasonable match to the CIFS writethrough flag
 *      and the read write flags match reasonably.  O_LARGEFILE
 *      is irrelevant because largefile support is always used
 *      by this client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *      O_FASYNC, O_NOFOLLOW and O_NONBLOCK need further investigation.
 *********************************************************************/

        disposition = cifs_get_disposition(f_flags);

        /* BB pass O_SYNC flag through on file attributes .. BB */

        buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (backup_cred(cifs_sb))
                create_options |= CREATE_OPEN_BACKUP_INTENT;

        rc = server->ops->open(xid, tcon, full_path, disposition,
                               desired_access, create_options, fid, oplock,
                               buf, cifs_sb);

        if (rc)
                goto out;

        if (tcon->unix_ext)
                rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
                                              xid);
        else
                rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
                                         xid, &fid->netfid);

out:
        kfree(buf);
        return rc;
}

static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
        struct cifs_fid_locks *cur;
        bool has_locks = false;

        down_read(&cinode->lock_sem);
        list_for_each_entry(cur, &cinode->llist, llist) {
                if (!list_empty(&cur->locks)) {
                        has_locks = true;
                        break;
                }
        }
        up_read(&cinode->lock_sem);
        return has_locks;
}
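
/*
 * Note: cifs_new_fileinfo() below uses this helper to refuse caching a read
 * oplock when mandatory byte-range locks are already present on the inode.
 */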

struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
                  struct tcon_link *tlink, __u32 oplock)
{
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = dentry->d_inode;
        struct cifsInodeInfo *cinode = CIFS_I(inode);
        struct cifsFileInfo *cfile;
        struct cifs_fid_locks *fdlocks;
        struct cifs_tcon *tcon = tlink_tcon(tlink);
        struct TCP_Server_Info *server = tcon->ses->server;

        cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
        if (cfile == NULL)
                return cfile;

        fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
        if (!fdlocks) {
                kfree(cfile);
                return NULL;
        }

        INIT_LIST_HEAD(&fdlocks->locks);
        fdlocks->cfile = cfile;
        cfile->llist = fdlocks;
        down_write(&cinode->lock_sem);
        list_add(&fdlocks->llist, &cinode->llist);
        up_write(&cinode->lock_sem);

        cfile->count = 1;
        cfile->pid = current->tgid;
        cfile->uid = current_fsuid();
        cfile->dentry = dget(dentry);
        cfile->f_flags = file->f_flags;
        cfile->invalidHandle = false;
        cfile->tlink = cifs_get_tlink(tlink);
        INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
        mutex_init(&cfile->fh_mutex);

        cifs_sb_active(inode->i_sb);

        /*
         * If the server returned a read oplock and we have mandatory brlocks,
         * set oplock level to None.
         */
        if (oplock == server->vals->oplock_read &&
            cifs_has_mand_locks(cinode)) {
                cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
                oplock = 0;
        }

        spin_lock(&cifs_file_list_lock);
        if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
                oplock = fid->pending_open->oplock;
        list_del(&fid->pending_open->olist);

        server->ops->set_fid(cfile, fid, oplock);

        list_add(&cfile->tlist, &tcon->openFileList);
        /* if readable file instance put first in list */
        if (file->f_mode & FMODE_READ)
                list_add(&cfile->flist, &cinode->openFileList);
        else
                list_add_tail(&cfile->flist, &cinode->openFileList);
        spin_unlock(&cifs_file_list_lock);

        file->private_data = cfile;
        return cfile;
}
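
/*
 * The returned cifsFileInfo starts with a reference count of 1; that
 * reference is what cifs_close() eventually drops through cifsFileInfo_put(),
 * so a successful open must be paired with exactly one put.
 */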

struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
        spin_lock(&cifs_file_list_lock);
        cifsFileInfo_get_locked(cifs_file);
        spin_unlock(&cifs_file_list_lock);
        return cifs_file;
}

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
        struct inode *inode = cifs_file->dentry->d_inode;
        struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
        struct TCP_Server_Info *server = tcon->ses->server;
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
        struct super_block *sb = inode->i_sb;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsLockInfo *li, *tmp;
        struct cifs_fid fid;
        struct cifs_pending_open open;

        spin_lock(&cifs_file_list_lock);
        if (--cifs_file->count > 0) {
                spin_unlock(&cifs_file_list_lock);
                return;
        }

        if (server->ops->get_lease_key)
                server->ops->get_lease_key(inode, &fid);

        /* store open in pending opens to make sure we don't miss lease break */
        cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

        /* remove it from the lists */
        list_del(&cifs_file->flist);
        list_del(&cifs_file->tlist);

        if (list_empty(&cifsi->openFileList)) {
                cifs_dbg(FYI, "closing last open instance for inode %p\n",
                         cifs_file->dentry->d_inode);
                /*
                 * In strict cache mode we need to invalidate the mapping on
                 * the last close because it may cause an error when we open
                 * this file again and get at least a level II oplock.
                 */
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
                        CIFS_I(inode)->invalid_mapping = true;
                cifs_set_oplock_level(cifsi, 0);
        }
        spin_unlock(&cifs_file_list_lock);

        cancel_work_sync(&cifs_file->oplock_break);

        if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
                struct TCP_Server_Info *server = tcon->ses->server;
                unsigned int xid;

                xid = get_xid();
                if (server->ops->close)
                        server->ops->close(xid, tcon, &cifs_file->fid);
                _free_xid(xid);
        }

        cifs_del_pending_open(&open);

        /*
         * Delete any outstanding lock records. We'll lose them when the file
         * is closed anyway.
         */
        down_write(&cifsi->lock_sem);
        list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
                list_del(&li->llist);
                cifs_del_lock_waiters(li);
                kfree(li);
        }
        list_del(&cifs_file->llist->llist);
        kfree(cifs_file->llist);
        up_write(&cifsi->lock_sem);

        cifs_put_tlink(cifs_file->tlink);
        dput(cifs_file->dentry);
        cifs_sb_deactive(sb);
        kfree(cifs_file);
}
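
/*
 * Teardown order above matters: the file is unlinked from the open-file lists
 * under cifs_file_list_lock first, the oplock break work is cancelled before
 * the handle is closed on the wire, and only then are cached lock records
 * freed under lock_sem.
 */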

int cifs_open(struct inode *inode, struct file *file)
{
        int rc = -EACCES;
        unsigned int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
        struct TCP_Server_Info *server;
        struct cifs_tcon *tcon;
        struct tcon_link *tlink;
        struct cifsFileInfo *cfile = NULL;
        char *full_path = NULL;
        bool posix_open_ok = false;
        struct cifs_fid fid;
        struct cifs_pending_open open;

        xid = get_xid();

        cifs_sb = CIFS_SB(inode->i_sb);
        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
                free_xid(xid);
                return PTR_ERR(tlink);
        }
        tcon = tlink_tcon(tlink);
        server = tcon->ses->server;

        full_path = build_path_from_dentry(file->f_path.dentry);
        if (full_path == NULL) {
                rc = -ENOMEM;
                goto out;
        }

        cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
                 inode, file->f_flags, full_path);

        if (server->oplocks)
                oplock = REQ_OPLOCK;
        else
                oplock = 0;

        if (!tcon->broken_posix_open && tcon->unix_ext &&
            cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                                le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                /* can not refresh inode info since size could be stale */
                rc = cifs_posix_open(full_path, &inode, inode->i_sb,
                                cifs_sb->mnt_file_mode /* ignored */,
                                file->f_flags, &oplock, &fid.netfid, xid);
                if (rc == 0) {
                        cifs_dbg(FYI, "posix open succeeded\n");
                        posix_open_ok = true;
                } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
                        if (tcon->ses->serverNOS)
                                cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
                                         tcon->ses->serverName,
                                         tcon->ses->serverNOS);
                        tcon->broken_posix_open = true;
                } else if ((rc != -EIO) && (rc != -EREMOTE) &&
                         (rc != -EOPNOTSUPP)) /* path not found or net err */
                        goto out;
                /*
                 * Else fallthrough to retry open the old way on network i/o
                 * or DFS errors.
                 */
        }

        if (server->ops->get_lease_key)
                server->ops->get_lease_key(inode, &fid);

        cifs_add_pending_open(&fid, tlink, &open);

        if (!posix_open_ok) {
                if (server->ops->get_lease_key)
                        server->ops->get_lease_key(inode, &fid);

                rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
                                  file->f_flags, &oplock, &fid, xid);
                if (rc) {
                        cifs_del_pending_open(&open);
                        goto out;
                }
        }

        cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
        if (cfile == NULL) {
                if (server->ops->close)
                        server->ops->close(xid, tcon, &fid);
                cifs_del_pending_open(&open);
                rc = -ENOMEM;
                goto out;
        }

        cifs_fscache_set_inode_cookie(inode, file);

        if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
                /*
                 * Time to set mode which we can not set earlier due to
                 * problems creating new read-only files.
                 */
                struct cifs_unix_set_info_args args = {
                        .mode   = inode->i_mode,
                        .uid    = INVALID_UID, /* no change */
                        .gid    = INVALID_GID, /* no change */
                        .ctime  = NO_CHANGE_64,
                        .atime  = NO_CHANGE_64,
                        .mtime  = NO_CHANGE_64,
                        .device = 0,
                };
                CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
                                       cfile->pid);
        }

out:
        kfree(full_path);
        free_xid(xid);
        cifs_put_tlink(tlink);
        return rc;
}

static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte-range locks that were released when the session
 * to the server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
        struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        int rc = 0;

        down_read(&cinode->lock_sem);
        if (cinode->can_cache_brlcks) {
                /* can cache locks - no need to relock */
                up_read(&cinode->lock_sem);
                return rc;
        }

        if (cap_unix(tcon->ses) &&
            (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
            ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
                rc = cifs_push_posix_locks(cfile);
        else
                rc = tcon->ses->server->ops->push_mand_locks(cfile);

        up_read(&cinode->lock_sem);
        return rc;
}
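
/*
 * Note that lock_sem is only taken for read here: the cached-lock lists are
 * not modified during a relock, they are merely replayed to the server after
 * a reconnect.
 */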

static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
        int rc = -EACCES;
        unsigned int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
        struct TCP_Server_Info *server;
        struct cifsInodeInfo *cinode;
        struct inode *inode;
        char *full_path = NULL;
        int desired_access;
        int disposition = FILE_OPEN;
        int create_options = CREATE_NOT_DIR;
        struct cifs_fid fid;

        xid = get_xid();
        mutex_lock(&cfile->fh_mutex);
        if (!cfile->invalidHandle) {
                mutex_unlock(&cfile->fh_mutex);
                rc = 0;
                free_xid(xid);
                return rc;
        }

        inode = cfile->dentry->d_inode;
        cifs_sb = CIFS_SB(inode->i_sb);
        tcon = tlink_tcon(cfile->tlink);
        server = tcon->ses->server;

        /*
         * Can not grab the rename sem here because various ops, including
         * those that already have the rename sem, can end up causing
         * writepage to be called, and if the server was down that means we
         * end up here, and we can never tell if the caller already has the
         * rename_sem.
         */
        full_path = build_path_from_dentry(cfile->dentry);
        if (full_path == NULL) {
                rc = -ENOMEM;
                mutex_unlock(&cfile->fh_mutex);
                free_xid(xid);
                return rc;
        }

        cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
                 inode, cfile->f_flags, full_path);

        if (tcon->ses->server->oplocks)
                oplock = REQ_OPLOCK;
        else
                oplock = 0;

        if (tcon->unix_ext && cap_unix(tcon->ses) &&
            (CIFS_UNIX_POSIX_PATH_OPS_CAP &
             le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                /*
                 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
                 * original open. Must mask them off for a reopen.
                 */
                unsigned int oflags = cfile->f_flags &
                                                ~(O_CREAT | O_EXCL | O_TRUNC);

                rc = cifs_posix_open(full_path, NULL, inode->i_sb,
                                     cifs_sb->mnt_file_mode /* ignored */,
                                     oflags, &oplock, &fid.netfid, xid);
                if (rc == 0) {
                        cifs_dbg(FYI, "posix reopen succeeded\n");
                        goto reopen_success;
                }
                /*
                 * Fall through to retry the open the old way on errors;
                 * especially in the reconnect path it is important to retry
                 * hard.
                 */
        }

        desired_access = cifs_convert_flags(cfile->f_flags);

        if (backup_cred(cifs_sb))
                create_options |= CREATE_OPEN_BACKUP_INTENT;

        if (server->ops->get_lease_key)
                server->ops->get_lease_key(inode, &fid);

        /*
         * Can not refresh the inode by passing in a file_info buf to be
         * returned by CIFSSMBOpen and then calling get_inode_info with the
         * returned buf, since the file might have write-behind data that
         * needs to be flushed and the server version of the file size can be
         * stale. If we knew for sure that the inode was not dirty locally we
         * could do this.
         */
        rc = server->ops->open(xid, tcon, full_path, disposition,
                               desired_access, create_options, &fid, &oplock,
                               NULL, cifs_sb);
        if (rc) {
                mutex_unlock(&cfile->fh_mutex);
                cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
                cifs_dbg(FYI, "oplock: %d\n", oplock);
                goto reopen_error_exit;
        }

reopen_success:
        cfile->invalidHandle = false;
        mutex_unlock(&cfile->fh_mutex);
        cinode = CIFS_I(inode);

        if (can_flush) {
                rc = filemap_write_and_wait(inode->i_mapping);
                mapping_set_error(inode->i_mapping, rc);

                if (tcon->unix_ext)
                        rc = cifs_get_inode_info_unix(&inode, full_path,
                                                      inode->i_sb, xid);
                else
                        rc = cifs_get_inode_info(&inode, full_path, NULL,
                                                 inode->i_sb, xid, NULL);
        }
        /*
         * Else we are writing out data to the server already and could
         * deadlock if we tried to flush data; and since we do not know if we
         * have data that would invalidate the current end of file on the
         * server, we can not go to the server to get the new inode info.
         */

        server->ops->set_fid(cfile, &fid, oplock);
        cifs_relock_file(cfile);

reopen_error_exit:
        kfree(full_path);
        free_xid(xid);
        return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
        if (file->private_data != NULL) {
                cifsFileInfo_put(file->private_data);
                file->private_data = NULL;
        }

        /* return code from the ->release op is always ignored */
        return 0;
}

int cifs_closedir(struct inode *inode, struct file *file)
{
        int rc = 0;
        unsigned int xid;
        struct cifsFileInfo *cfile = file->private_data;
        struct cifs_tcon *tcon;
        struct TCP_Server_Info *server;
        char *buf;

        cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

        if (cfile == NULL)
                return rc;

        xid = get_xid();
        tcon = tlink_tcon(cfile->tlink);
        server = tcon->ses->server;

        cifs_dbg(FYI, "Freeing private data in close dir\n");
        spin_lock(&cifs_file_list_lock);
        if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
                cfile->invalidHandle = true;
                spin_unlock(&cifs_file_list_lock);
                if (server->ops->close_dir)
                        rc = server->ops->close_dir(xid, tcon, &cfile->fid);
                else
                        rc = -ENOSYS;
                cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
                /* not much we can do if it fails anyway, ignore rc */
                rc = 0;
        } else
                spin_unlock(&cifs_file_list_lock);

        buf = cfile->srch_inf.ntwrk_buf_start;
        if (buf) {
                cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
                cfile->srch_inf.ntwrk_buf_start = NULL;
                if (cfile->srch_inf.smallBuf)
                        cifs_small_buf_release(buf);
                else
                        cifs_buf_release(buf);
        }

        cifs_put_tlink(cfile->tlink);
        kfree(file->private_data);
        file->private_data = NULL;
        /* BB can we lock the filestruct while this is going on? */
        free_xid(xid);
        return rc;
}

static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
        struct cifsLockInfo *lock =
                kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
        if (!lock)
                return lock;
        lock->offset = offset;
        lock->length = length;
        lock->type = type;
        lock->pid = current->tgid;
        INIT_LIST_HEAD(&lock->blist);
        init_waitqueue_head(&lock->block_q);
        return lock;
}

void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
        struct cifsLockInfo *li, *tmp;
        list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
                list_del_init(&li->blist);
                wake_up(&li->block_q);
        }
}

#define CIFS_LOCK_OP    0
#define CIFS_READ_OP    1
#define CIFS_WRITE_OP   2

/* @rw_check: CIFS_LOCK_OP - no op, CIFS_READ_OP - read, CIFS_WRITE_OP - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
                            __u64 length, __u8 type, struct cifsFileInfo *cfile,
                            struct cifsLockInfo **conf_lock, int rw_check)
{
        struct cifsLockInfo *li;
        struct cifsFileInfo *cur_cfile = fdlocks->cfile;
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

        list_for_each_entry(li, &fdlocks->locks, llist) {
                if (offset + length <= li->offset ||
                    offset >= li->offset + li->length)
                        continue;
                if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
                    server->ops->compare_fids(cfile, cur_cfile)) {
                        /* shared lock prevents write op through the same fid */
                        if (!(li->type & server->vals->shared_lock_type) ||
                            rw_check != CIFS_WRITE_OP)
                                continue;
                }
                if ((type & server->vals->shared_lock_type) &&
                    ((server->ops->compare_fids(cfile, cur_cfile) &&
                     current->tgid == li->pid) || type == li->type))
                        continue;
                if (conf_lock)
                        *conf_lock = li;
                return true;
        }
        return false;
}
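
/*
 * The overlap test above treats locks as half-open ranges: [offset,
 * offset + length) and [li->offset, li->offset + li->length) conflict unless
 * one ends at or before the start of the other. For example (illustrative),
 * a lock at offset 0 with length 10 conflicts with one at offset 9, length 1,
 * but not with one starting at offset 10.
 */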

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
                        __u8 type, struct cifsLockInfo **conf_lock,
                        int rw_check)
{
        bool rc = false;
        struct cifs_fid_locks *cur;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);

        list_for_each_entry(cur, &cinode->llist, llist) {
                rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
                                                 cfile, conf_lock, rw_check);
                if (rc)
                        break;
        }

        return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * send a request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
               __u8 type, struct file_lock *flock)
{
        int rc = 0;
        struct cifsLockInfo *conf_lock;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
        bool exist;

        down_read(&cinode->lock_sem);

        exist = cifs_find_lock_conflict(cfile, offset, length, type,
                                        &conf_lock, CIFS_LOCK_OP);
        if (exist) {
                flock->fl_start = conf_lock->offset;
                flock->fl_end = conf_lock->offset + conf_lock->length - 1;
                flock->fl_pid = conf_lock->pid;
                if (conf_lock->type & server->vals->shared_lock_type)
                        flock->fl_type = F_RDLCK;
                else
                        flock->fl_type = F_WRLCK;
        } else if (!cinode->can_cache_brlcks)
                rc = 1;
        else
                flock->fl_type = F_UNLCK;

        up_read(&cinode->lock_sem);
        return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        down_write(&cinode->lock_sem);
        list_add_tail(&lock->llist, &cfile->llist->locks);
        up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to send a request to the server;
 * 2) 1, if no locks prevent us but we need to send a request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
                 bool wait)
{
        struct cifsLockInfo *conf_lock;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        bool exist;
        int rc = 0;

try_again:
        exist = false;
        down_write(&cinode->lock_sem);

        exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
                                        lock->type, &conf_lock, CIFS_LOCK_OP);
        if (!exist && cinode->can_cache_brlcks) {
                list_add_tail(&lock->llist, &cfile->llist->locks);
                up_write(&cinode->lock_sem);
                return rc;
        }

        if (!exist)
                rc = 1;
        else if (!wait)
                rc = -EACCES;
        else {
                list_add_tail(&lock->blist, &conf_lock->blist);
                up_write(&cinode->lock_sem);
                rc = wait_event_interruptible(lock->block_q,
                                        (lock->blist.prev == &lock->blist) &&
                                        (lock->blist.next == &lock->blist));
                if (!rc)
                        goto try_again;
                down_write(&cinode->lock_sem);
                list_del_init(&lock->blist);
        }

        up_write(&cinode->lock_sem);
        return rc;
}
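
/*
 * The wait_event_interruptible() condition above - blist pointing at itself -
 * means this waiter has been unlinked from the conflicting lock's blocked
 * list by cifs_del_lock_waiters(), after which the add is retried from
 * scratch.
 */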

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * send a request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
        int rc = 0;
        struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
        unsigned char saved_type = flock->fl_type;

        if ((flock->fl_flags & FL_POSIX) == 0)
                return 1;

        down_read(&cinode->lock_sem);
        posix_test_lock(file, flock);

        if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
                flock->fl_type = saved_type;
                rc = 1;
        }

        up_read(&cinode->lock_sem);
        return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to send a request to the server;
 * 2) 1, if we need to send a request to the server;
 * 3) <0, if an error occurred while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
        struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
        int rc = 1;

        if ((flock->fl_flags & FL_POSIX) == 0)
                return rc;

try_again:
        down_write(&cinode->lock_sem);
        if (!cinode->can_cache_brlcks) {
                up_write(&cinode->lock_sem);
                return rc;
        }

        rc = posix_lock_file(file, flock, NULL);
        up_write(&cinode->lock_sem);
        if (rc == FILE_LOCK_DEFERRED) {
                rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
                if (!rc)
                        goto try_again;
                locks_delete_block(flock);
        }
        return rc;
}

int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
        unsigned int xid;
        int rc = 0, stored_rc;
        struct cifsLockInfo *li, *tmp;
        struct cifs_tcon *tcon;
        unsigned int num, max_num, max_buf;
        LOCKING_ANDX_RANGE *buf, *cur;
        int types[] = {LOCKING_ANDX_LARGE_FILES,
                       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
        int i;

        xid = get_xid();
        tcon = tlink_tcon(cfile->tlink);

        /*
         * Accessing maxBuf is racy with cifs_reconnect - need to store value
         * and check it for zero before using.
         */
        max_buf = tcon->ses->server->maxBuf;
        if (!max_buf) {
                free_xid(xid);
                return -EINVAL;
        }

        max_num = (max_buf - sizeof(struct smb_hdr)) /
                                                sizeof(LOCKING_ANDX_RANGE);
        buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
        if (!buf) {
                free_xid(xid);
                return -ENOMEM;
        }

        for (i = 0; i < 2; i++) {
                cur = buf;
                num = 0;
                list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
                        if (li->type != types[i])
                                continue;
                        cur->Pid = cpu_to_le16(li->pid);
                        cur->LengthLow = cpu_to_le32((u32)li->length);
                        cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
                        cur->OffsetLow = cpu_to_le32((u32)li->offset);
                        cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
                        if (++num == max_num) {
                                stored_rc = cifs_lockv(xid, tcon,
                                                       cfile->fid.netfid,
                                                       (__u8)li->type, 0, num,
                                                       buf);
                                if (stored_rc)
                                        rc = stored_rc;
                                cur = buf;
                                num = 0;
                        } else
                                cur++;
                }

                if (num) {
                        stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
                                               (__u8)types[i], 0, num, buf);
                        if (stored_rc)
                                rc = stored_rc;
                }
        }

        kfree(buf);
        free_xid(xid);
        return rc;
}
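
/*
 * Locks are pushed in two passes - exclusive ranges first, then shared -
 * because a single LOCKING_ANDX request carries one lock type; each pass
 * batches up to max_num ranges per cifs_lockv() call.
 */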

/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
        for (lockp = &inode->i_flock; *lockp != NULL; \
             lockp = &(*lockp)->fl_next)

struct lock_to_push {
        struct list_head llist;
        __u64 offset;
        __u64 length;
        __u32 pid;
        __u16 netfid;
        __u8 type;
};

Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001091static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001092cifs_push_posix_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001093{
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001094 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1095 struct file_lock *flock, **before;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001096 unsigned int count = 0, i = 0;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001097 int rc = 0, xid, type;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001098 struct list_head locks_to_send, *el;
1099 struct lock_to_push *lck, *tmp;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001100 __u64 length;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001101
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001102 xid = get_xid();
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001103
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001104 lock_flocks();
1105 cifs_for_each_lock(cfile->dentry->d_inode, before) {
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001106 if ((*before)->fl_flags & FL_POSIX)
1107 count++;
1108 }
1109 unlock_flocks();
1110
1111 INIT_LIST_HEAD(&locks_to_send);
1112
1113 /*
Pavel Shilovskyce858522012-03-17 09:46:55 +03001114 * Allocating count locks is enough because no FL_POSIX locks can be
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001115 * added to the list while we are holding cinode->lock_sem that
Pavel Shilovskyce858522012-03-17 09:46:55 +03001116 * protects locking operations of this inode.
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001117 */
1118 for (; i < count; i++) {
1119 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1120 if (!lck) {
1121 rc = -ENOMEM;
1122 goto err_out;
1123 }
1124 list_add_tail(&lck->llist, &locks_to_send);
1125 }
1126
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001127 el = locks_to_send.next;
1128 lock_flocks();
1129 cifs_for_each_lock(cfile->dentry->d_inode, before) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001130 flock = *before;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001131 if ((flock->fl_flags & FL_POSIX) == 0)
1132 continue;
Pavel Shilovskyce858522012-03-17 09:46:55 +03001133 if (el == &locks_to_send) {
1134 /*
1135 * The list ended. We don't have enough allocated
1136 * structures - something is really wrong.
1137 */
Joe Perchesf96637b2013-05-04 22:12:25 -05001138 cifs_dbg(VFS, "Can't push all brlocks!\n");
Pavel Shilovskyce858522012-03-17 09:46:55 +03001139 break;
1140 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001141 length = 1 + flock->fl_end - flock->fl_start;
1142 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1143 type = CIFS_RDLCK;
1144 else
1145 type = CIFS_WRLCK;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001146 lck = list_entry(el, struct lock_to_push, llist);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001147 lck->pid = flock->fl_pid;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001148 lck->netfid = cfile->fid.netfid;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001149 lck->length = length;
1150 lck->type = type;
1151 lck->offset = flock->fl_start;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001152 el = el->next;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001153 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001154 unlock_flocks();
1155
1156 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001157 int stored_rc;
1158
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001159 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001160 lck->offset, lck->length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001161 lck->type, 0);
1162 if (stored_rc)
1163 rc = stored_rc;
1164 list_del(&lck->llist);
1165 kfree(lck);
1166 }
1167
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001168out:
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001169 free_xid(xid);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001170 return rc;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001171err_out:
1172 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1173 list_del(&lck->llist);
1174 kfree(lck);
1175 }
1176 goto out;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001177}
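
/*
 * The function above uses a count/allocate/fill pattern because
 * kmalloc(GFP_KERNEL) may sleep and therefore cannot run under
 * lock_flocks(). Roughly:
 *
 *	lock_flocks(); count the FL_POSIX locks; unlock_flocks();
 *	preallocate "count" lock_to_push entries;
 *	lock_flocks(); copy each lock into an entry; unlock_flocks();
 *	send each entry via CIFSSMBPosixLock() and free it.
 *
 * If the preallocated list runs out during the second walk, the push is
 * abandoned with the "Can't push all brlocks!" warning; per the comment
 * above the allocation loop, holding lock_sem should prevent that.
 */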

static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need write access */
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}
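
/*
 * Dispatch note: POSIX byte-range locks are pushed only when the server
 * advertises CIFS_UNIX_FCNTL_CAP and the mount has not set
 * CIFS_MOUNT_NOPOSIXBRL; otherwise the dialect-specific push_mand_locks
 * server operation is used. can_cache_brlcks is cleared with lock_sem
 * held for write, so a racing second pusher returns early instead of
 * resending the same locks.
 */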

static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->fl_flags & FL_POSIX)
		cifs_dbg(FYI, "Posix\n");
	if (flock->fl_flags & FL_FLOCK)
		cifs_dbg(FYI, "Flock\n");
	if (flock->fl_flags & FL_SLEEP) {
		cifs_dbg(FYI, "Blocking lock\n");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
	if (flock->fl_flags & FL_LEASE)
		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
	       FL_ACCESS | FL_LEASE | FL_CLOSE)))
		cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);

	*type = server->vals->large_lock_type;
	if (flock->fl_type == F_WRLCK) {
		cifs_dbg(FYI, "F_WRLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cifs_dbg(FYI, "F_UNLCK\n");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cifs_dbg(FYI, "F_RDLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cifs_dbg(FYI, "F_EXLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cifs_dbg(FYI, "F_SHLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cifs_dbg(FYI, "Unknown type of lock\n");
}
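
/*
 * Summary of the decoding above (the lock type constants come from the
 * per-dialect server->vals table):
 *
 *	F_WRLCK, F_EXLCK -> exclusive_lock_type, *lock = 1
 *	F_RDLCK, F_SHLCK -> shared_lock_type,    *lock = 1
 *	F_UNLCK          -> unlock_lock_type,    *unlock = 1
 *
 * FL_SLEEP is the only flag that changes behaviour here (it makes the
 * request blocking); the other flag checks just emit debug output.
 */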

static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
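
/*
 * Note: the mandatory path above has no true "test" operation, so
 * cifs_getlk() probes by taking and immediately releasing the lock:
 * if the non-blocking set succeeds the range is reported free
 * (F_UNLCK); if an exclusive probe fails, a second shared probe
 * decides whether the conflicting server lock is reported as F_RDLCK
 * or F_WRLCK.
 */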

void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
	struct list_head *li, *tmp;
	list_for_each_safe(li, tmp, source)
		list_move(li, dest);
}

void
cifs_free_llist(struct list_head *llist)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, llist, llist) {
		cifs_del_lock_waiters(li);
		list_del(&li->llist);
		kfree(li);
	}
}

int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf)
		return -EINVAL;

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeeded -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}
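
/*
 * The unlock path batches ranges into one LOCKING_ANDX request per lock
 * type, bounded by how many LOCKING_ANDX_RANGE entries fit in a
 * server-sized buffer:
 *
 *	max_num = (max_buf - sizeof(struct smb_hdr)) /
 *			sizeof(LOCKING_ANDX_RANGE);
 *
 * As an illustration only (actual sizes depend on the build), a 16 KB
 * max_buf and a 20-byte range entry would allow on the order of 800
 * ranges per request. Unlocked entries are parked on tmp_llist so they
 * can be spliced back onto the file's list if the server rejects the
 * request.
 */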

static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = cfile->dentry->d_inode;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      current->tgid, flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type);
		if (!lock)
			return -ENOMEM;

		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapping locks due to
		 * page reading.
		 */
		if (!CIFS_I(inode)->clientCanCacheAll &&
		    CIFS_I(inode)->clientCanCacheRead) {
			cifs_invalidate_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			CIFS_I(inode)->clientCanCacheRead = false;
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if (flock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, flock);
	return rc;
}
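
/*
 * Ordering note for the mandatory path above: cifs_lock_add_if() may
 * satisfy the request against locally cached locks (rc <= 0 ends the
 * call early); only a positive return sends the lock to the server, and
 * only after the server accepts it is the cifsLockInfo recorded with
 * cifs_lock_add(). The read-oplock break before the send works around
 * servers (the comment cites Windows 7) that delay their own lease
 * break.
 */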

int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode;
	struct cifsFileInfo *cfile;
	__u16 netfid;
	__u32 type;

	rc = -EACCES;
	xid = get_xid();

	cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
		 cmd, flock->fl_flags, flock->fl_type,
		 flock->fl_start, flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	netfid = cfile->fid.netfid;
	cinode = CIFS_I(file_inode(file));

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}
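
/*
 * cifs_lock() is the fcntl entry point: it decodes the request with
 * cifs_read_flock(), decides whether the POSIX byte-range extensions
 * may be used (cap_unix, the FCNTL capability and no
 * CIFS_MOUNT_NOPOSIXBRL), then dispatches to cifs_getlk() for F_GETLK
 * and to cifs_setlk() for everything else. Requests that neither lock
 * nor unlock are rejected with -EOPNOTSUPP.
 */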

/*
 * Update the file size (if needed) after a write. Should be called with
 * the inode->i_lock held.
 */
void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}

static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cifs_dbg(FYI, "write %zd bytes to offset %lld of %s\n",
		 write_size, *offset, dentry->d_name.name);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/*
				 * We could deadlock if we called
				 * filemap_fdatawait from here, so tell
				 * reopen_file not to flush data to the
				 * server now.
				 */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			len = min((size_t)cifs_sb->wsize,
				  write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, open_file, &io_parms,
						     &bytes_written, iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			spin_lock(&dentry->d_inode->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&dentry->d_inode->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		spin_lock(&dentry->d_inode->i_lock);
		if (*offset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *offset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	free_xid(xid);
	return total_written;
}
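
/*
 * cifs_write() above is the synchronous write path: it slices the
 * buffer into chunks of at most wsize bytes, reopens the handle if it
 * was invalidated by a reconnect, and retries each chunk while the
 * server operation returns -EAGAIN. iov[0] stays reserved for the SMB
 * header; only iov[1] carries data. Both the server EOF and i_size
 * updates are made under inode->i_lock.
 */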

struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
	/*
	 * We could simply take the first list entry, since write-only
	 * entries are always at the end of the list, but the first entry
	 * might have a close pending, so we go through the whole list.
	 */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}

struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc;
	unsigned int refind = 0;

	/*
	 * Having a null inode here (because mapping->host was set to zero by
	 * the VFS or MM) should not happen, but we had reports of an oops
	 * (due to it being zero) during stress testcases, so we need to check
	 * for it.
	 */
	if (cifs_inode == NULL) {
		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
refind_writable:
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&cifs_file_list_lock);
		return NULL;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} else {
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find a usable FH with the same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get_locked(inv_file);
	}

	spin_unlock(&cifs_file_list_lock);

	if (inv_file) {
		rc = cifs_reopen_file(inv_file, false);
		if (!rc)
			return inv_file;
		else {
			spin_lock(&cifs_file_list_lock);
			list_move_tail(&inv_file->flist,
				       &cifs_inode->openFileList);
			spin_unlock(&cifs_file_list_lock);
			cifsFileInfo_put(inv_file);
			spin_lock(&cifs_file_list_lock);
			++refind;
			goto refind_writable;
		}
	}

	return NULL;
}
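
/*
 * Search strategy above: the first pass only accepts handles opened by
 * the current tgid, the second pass (any_available) accepts any
 * writable handle, and as a last resort an invalidated handle is
 * pinned, reopened outside the spinlock and retried up to
 * MAX_REOPEN_ATT times, moving it to the list tail after each failed
 * reopen.
 */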

static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cifs_dbg(FYI, "No writeable filehandles for inode\n");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
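
/*
 * cifs_partialpagewrite() writes the [from, to) slice of one page using
 * any writable handle for the inode. It deliberately returns 0 when
 * racing with truncate and clamps "to" so the write never extends the
 * file, so it cannot be used to grow i_size.
 */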

static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct TCP_Server_Info *server;
	struct page *page;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages;
		pgoff_t next = 0, tofind;
		struct page **pages;

		tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
				end - index) + 1;

		wdata = cifs_writedata_alloc((unsigned int)tofind,
					     cifs_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		/*
		 * find_get_pages_tag seems to return a max of 256 on each
		 * iteration, so we must call it several times in order to
		 * fill the array or the wsize is effectively limited to
		 * 256 * PAGE_CACHE_SIZE.
		 */
		found_pages = 0;
		pages = wdata->pages;
		do {
			nr_pages = find_get_pages_tag(mapping, &index,
							PAGECACHE_TAG_DIRTY,
							tofind, pages);
			found_pages += nr_pages;
			tofind -= nr_pages;
			pages += nr_pages;
		} while (nr_pages && tofind && index <= end);

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		nr_pages = 0;
		for (i = 0; i < found_pages; i++) {
			page = wdata->pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			if (nr_pages == 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = true;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= i_size_read(mapping->host)) {
				done = true;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			wdata->pages[i] = page;
			next = page->index + 1;
			++nr_pages;
		}

		/* reset index to refind any pages skipped */
		if (nr_pages == 0)
			index = wdata->pages[0]->index + 1;

		/* put any pages we aren't going to use */
		for (i = nr_pages; i < found_pages; i++) {
			page_cache_release(wdata->pages[i]);
			wdata->pages[i] = NULL;
		}

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			continue;
		}

		wdata->sync_mode = wbc->sync_mode;
		wdata->nr_pages = nr_pages;
		wdata->offset = page_offset(wdata->pages[0]);
		wdata->pagesz = PAGE_CACHE_SIZE;
		wdata->tailsz =
			min(i_size_read(mapping->host) -
			    page_offset(wdata->pages[nr_pages - 1]),
			    (loff_t)PAGE_CACHE_SIZE);
		wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
					wdata->tailsz;

		do {
			if (wdata->cfile != NULL)
				cifsFileInfo_put(wdata->cfile);
			wdata->cfile = find_writable_file(CIFS_I(mapping->host),
							  false);
			if (!wdata->cfile) {
				cifs_dbg(VFS, "No writable handles for inode\n");
				rc = -EBADF;
				break;
			}
			wdata->pid = wdata->cfile->pid;
			server = tlink_tcon(wdata->cfile->tlink)->ses->server;
			rc = server->ops->async_writev(wdata);
		} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				page_cache_release(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}
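
/*
 * Writeback batching above: each iteration gathers up to
 * wsize/PAGE_CACHE_SIZE contiguous dirty pages into one cifs_writedata
 * and submits it through the server's async_writev operation. The batch
 * is cut short at the first page that is non-contiguous, already under
 * writeback or past EOF, among other checks. Only the final page may be
 * partial, which is what the tailsz computation accounts for.
 */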

static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cifs_dbg(FYI, "ppw - page not up to date\n");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	page_cache_release(page);
	free_xid(xid);
	return rc;
}

static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}

static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
		 page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/*
		 * This is probably better than directly calling
		 * partialpage_write, since in this function the file handle
		 * is known, which we might as well leverage.
		 */
		/* BB check if anything else is missing out of ppw,
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}
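
/*
 * In write_end above, a page that never became uptodate (a short copy
 * into a non-uptodate page) is written through synchronously with
 * cifs_write() instead of being marked dirty - presumably because
 * dirtying a non-uptodate page could later flush stale data around the
 * copied range.
 */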

int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %s datasync: 0x%x\n",
		 file->f_path.dentry->d_name.name, datasync);

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}

int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	struct inode *inode = file->f_mapping->host;

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %s datasync: 0x%x\n",
		 file->f_path.dentry->d_name.name, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
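
/*
 * The two fsync variants differ only in cache handling:
 * cifs_strict_fsync() additionally invalidates the page cache when the
 * client holds no read oplock, so subsequent reads go to the server.
 * Both flush dirty pages first and then issue the server-level flush
 * unless the mount disabled it with CIFS_MOUNT_NOSSYNC.
 */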

/*
 * As the file closes, flush all cached write data for this inode,
 * checking for write-behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	int rc = 0;

	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);

	return rc;
}

static int
cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
{
	int rc = 0;
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!pages[i]) {
			/*
			 * save number of pages we have already allocated and
			 * return with ENOMEM error
			 */
			num_pages = i;
			rc = -ENOMEM;
			break;
		}
	}

	if (rc) {
		for (i = 0; i < num_pages; i++)
			put_page(pages[i]);
	}
	return rc;
}

static inline
size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
{
	size_t num_pages;
	size_t clen;

	clen = min_t(const size_t, len, wsize);
	num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);

	if (cur_len)
		*cur_len = clen;

	return num_pages;
}
2304
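/*
 * Completion work for an uncached write: push the cached server EOF (and
 * i_size) past the bytes just written, wake the issuer, and drop the pages
 * unless the request is about to be retried after -EAGAIN.
 */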
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	int i;
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = wdata->cfile->dentry->d_inode;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);

	if (wdata->result != -EAGAIN) {
		for (i = 0; i < wdata->nr_pages; i++)
			put_page(wdata->pages[i]);
	}

	kref_put(&wdata->refcount, cifs_writedata_release);
}

/* attempt to send write to server, retry on any -EAGAIN errors */
static int
cifs_uncached_retry_writev(struct cifs_writedata *wdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(wdata->cfile->tlink)->ses->server;

	do {
		if (wdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(wdata->cfile, false);
			if (rc != 0)
				continue;
		}
		rc = server->ops->async_writev(wdata);
	} while (rc == -EAGAIN);

	return rc;
}

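/*
 * Core of the uncached write path: carve the user iovec into wsize-sized
 * cifs_writedata requests, copy the data into freshly allocated pages, and
 * send the requests asynchronously. Replies are collected afterwards in
 * order of increasing offset so that a short write is reported correctly.
 */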
static ssize_t
cifs_iovec_write(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	unsigned long nr_pages, i;
	size_t copied, len, cur_len;
	ssize_t total_written = 0;
	loff_t offset;
	struct iov_iter it;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_writedata *wdata, *tmp;
	struct list_head wdata_list;
	int rc;
	pid_t pid;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	rc = generic_write_checks(file, poffset, &len, 0);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&wdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	offset = *poffset;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	iov_iter_init(&it, iov, nr_segs, len, 0);
	do {
		size_t save_len;

		nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			break;
		}

		save_len = cur_len;
		for (i = 0; i < nr_pages; i++) {
			copied = min_t(const size_t, cur_len, PAGE_SIZE);
			copied = iov_iter_copy_from_user(wdata->pages[i], &it,
							 0, copied);
			cur_len -= copied;
			iov_iter_advance(&it, copied);
		}
		cur_len = save_len - cur_len;

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		rc = cifs_uncached_retry_writev(wdata);
		if (rc) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		list_add_tail(&wdata->list, &wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If a later write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&wdata_list))
		rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_uncached_retry_writev(wdata);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_writedata_release);
	}

	if (total_written > 0)
		*poffset += total_written;

	cifs_stats_bytes_written(tcon, total_written);
	return total_written ? total_written : (ssize_t)rc;
}

ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
			 unsigned long nr_segs, loff_t pos)
{
	ssize_t written;
	struct inode *inode;

	inode = file_inode(iocb->ki_filp);

	/*
	 * BB - optimize for the case when signing is disabled: we could drop
	 * this extra memory-to-memory copy and build the write request
	 * directly from the iovec buffers.
	 */

	written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
	if (written > 0) {
		CIFS_I(inode)->invalid_mapping = true;
		iocb->ki_pos = pos;
	}

	return written;
}

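/*
 * Cached writev for the case where we hold an oplock but cannot rely on
 * POSIX byte-range lock semantics: take lock_sem shared and go through the
 * page cache only when no mandatory brlock conflicts with the target range.
 */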
static ssize_t
cifs_writev(struct kiocb *iocb, const struct iovec *iov,
	    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc = -EACCES;

	BUG_ON(iocb->ki_pos != pos);

	/*
	 * We need to hold the sem to be sure nobody modifies the lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
				     server->vals->exclusive_lock_type, NULL,
				     CIFS_WRITE_OP)) {
		mutex_lock(&inode->i_mutex);
		rc = __generic_file_aio_write(iocb, iov, nr_segs,
					      &iocb->ki_pos);
		mutex_unlock(&inode->i_mutex);
	}

	if (rc > 0 || rc == -EIOCBQUEUED) {
		ssize_t err;

		err = generic_write_sync(file, pos, rc);
		if (err < 0 && rc > 0)
			rc = err;
	}

	up_read(&cinode->lock_sem);
	return rc;
}

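/*
 * Entry point for writes in strict cache mode. With an exclusive oplock
 * the data may go through the page cache (via the generic path when POSIX
 * byte-range locks apply, otherwise via cifs_writev and its conflict
 * check); without one we fall back to the uncached path.
 */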
ssize_t
cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
		   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	if (cinode->clientCanCacheAll) {
		if (cap_unix(tcon->ses) &&
		(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
		  && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
			return generic_file_aio_write(iocb, iov, nr_segs, pos);
		return cifs_writev(iocb, iov, nr_segs, pos);
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the
	 * data to the server exactly from pos to pos+len-1 rather than flush
	 * all affected pages, because flushing may cause an error with
	 * mandatory locks on those pages but not on the region from pos to
	 * pos+len-1.
	 */
	written = cifs_user_writev(iocb, iov, nr_segs, pos);
	if (written > 0 && cinode->clientCanCacheRead) {
		/*
		 * Windows 7 server can delay breaking a level2 oplock when a
		 * write request comes - break it on the client to prevent
		 * reading stale data.
		 */
		cifs_invalidate_mapping(inode);
		cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
			 inode);
		cinode->clientCanCacheRead = false;
	}
	return written;
}

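/*
 * Allocate a cifs_readdata with room for nr_pages page pointers tacked
 * onto the end, and initialize its refcount, completion, and work item
 * (@complete runs once the read response has been handled).
 */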
static struct cifs_readdata *
cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
{
	struct cifs_readdata *rdata;

	rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
			GFP_KERNEL);
	if (rdata != NULL) {
		kref_init(&rdata->refcount);
		INIT_LIST_HEAD(&rdata->list);
		init_completion(&rdata->done);
		INIT_WORK(&rdata->work, complete);
	}

	return rdata;
}

void
cifs_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);

	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

	kfree(rdata);
}

static int
cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
{
	int rc = 0;
	struct page *page;
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!page) {
			rc = -ENOMEM;
			break;
		}
		rdata->pages[i] = page;
	}

	if (rc) {
		/*
		 * Only put the pages that were actually allocated; the
		 * entries past the failure point are still NULL and
		 * put_page() must not see them.
		 */
		for (i = 0; i < nr_pages; i++) {
			if (!rdata->pages[i])
				continue;
			put_page(rdata->pages[i]);
			rdata->pages[i] = NULL;
		}
	}
	return rc;
}

static void
cifs_uncached_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		put_page(rdata->pages[i]);
		rdata->pages[i] = NULL;
	}
	cifs_readdata_release(refcount);
}

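/*
 * Issue an async read, reopening the file handle if it has been
 * invalidated (e.g. by a reconnect) and retrying for as long as the send
 * fails with -EAGAIN.
 */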
static int
cifs_retry_async_readv(struct cifs_readdata *rdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(rdata->cfile->tlink)->ses->server;

	do {
		if (rdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(rdata->cfile, true);
			if (rc != 0)
				continue;
		}
		rc = server->ops->async_readv(rdata);
	} while (rc == -EAGAIN);

	return rc;
}

/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata:	the readdata response with list of pages holding data
 * @iov:	vector in which we should copy the data
 * @nr_segs:	number of segments in vector
 * @offset:	offset into file of the first iovec
 * @copied:	used to return the amount of data copied to the iov
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 */
static ssize_t
cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
			unsigned long nr_segs, loff_t offset, ssize_t *copied)
{
	int rc = 0;
	struct iov_iter ii;
	size_t pos = rdata->offset - offset;
	ssize_t remaining = rdata->bytes;
	unsigned char *pdata;
	unsigned int i;

	/* set up iov_iter and advance to the correct offset */
	iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
	iov_iter_advance(&ii, pos);

	*copied = 0;
	for (i = 0; i < rdata->nr_pages; i++) {
		ssize_t copy;
		struct page *page = rdata->pages[i];

		/* copy a whole page or whatever's left */
		copy = min_t(ssize_t, remaining, PAGE_SIZE);

		/* ...but limit it to whatever space is left in the iov */
		copy = min_t(ssize_t, copy, iov_iter_count(&ii));

		/* go while there's data to be copied and no errors */
		if (copy && !rc) {
			pdata = kmap(page);
			rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
					       (int)copy);
			kunmap(page);
			if (!rc) {
				*copied += copy;
				remaining -= copy;
				iov_iter_advance(&ii, copy);
			}
		}
	}

	return rc;
}

static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);

	complete(&rdata->done);
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}

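/*
 * Receive the payload of a read response directly into the readdata pages,
 * one kvec per page: full pages first, then a zero-padded partial tail.
 * Pages beyond the received length are released rather than held hostage.
 */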
static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_SIZE;
			cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n",
				 i, iov.iov_base, iov.iov_len);
			len -= PAGE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n",
				 i, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}

		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}

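/*
 * Core of the uncached read path: split the request into rsize-sized async
 * reads, wait for the replies in order of increasing offset, and copy the
 * returned data into the user's iovec.
 */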
static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	ssize_t rc;
	size_t len, cur_len;
	ssize_t total_read = 0;
	loff_t offset = *poffset;
	unsigned int npages;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *open_file;
	struct cifs_readdata *rdata, *tmp;
	struct list_head rdata_list;
	pid_t pid;

	if (!nr_segs)
		return 0;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	INIT_LIST_HEAD(&rdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_readv)
		return -ENOSYS;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	do {
		cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
		npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);

		/* allocate a readdata struct */
		rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
		if (!rdata) {
			/*
			 * Don't go through the error label here: rdata is
			 * NULL, so the kref_put() there would oops.
			 */
			rc = -ENOMEM;
			break;
		}

		rc = cifs_read_allocate_pages(rdata, npages);
		if (rc)
			goto error;

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->nr_pages = npages;
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_uncached_read_into_pages;

		rc = cifs_retry_async_readv(rdata);
error:
		if (rc) {
			kref_put(&rdata->refcount,
				 cifs_uncached_readdata_release);
			break;
		}

		list_add_tail(&rdata->list, &rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/* if at least one read request was sent successfully, then reset rc */
	if (!list_empty(&rdata_list))
		rc = 0;

	/* the loop below should proceed in the order of increasing offsets */
restart_loop:
	list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
		if (!rc) {
			ssize_t copied;

			/* FIXME: freezable sleep too? */
			rc = wait_for_completion_killable(&rdata->done);
			if (rc)
				rc = -EINTR;
			else if (rdata->result)
				rc = rdata->result;
			else {
				rc = cifs_readdata_to_iov(rdata, iov,
							  nr_segs, *poffset,
							  &copied);
				total_read += copied;
			}

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_retry_async_readv(rdata);
				goto restart_loop;
			}
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}

	cifs_stats_bytes_read(tcon, total_read);
	*poffset += total_read;

	/* mask nodata case */
	if (rc == -ENODATA)
		rc = 0;

	return total_read ? total_read : rc;
}

ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
			unsigned long nr_segs, loff_t pos)
{
	ssize_t read;

	read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
	if (read > 0)
		iocb->ki_pos = pos;

	return read;
}

ssize_t
cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
		  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have a level II oplock because the server can delay
	 * the mtime change - so we can't decide when to invalidate the inode.
	 * Page reads can also fail if there are mandatory locks on the pages
	 * affected by this read but not on the region from pos to pos+len-1.
	 */
	if (!cinode->clientCanCacheRead)
		return cifs_user_readv(iocb, iov, nr_segs, pos);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_aio_read(iocb, iov, nr_segs, pos);

	/*
	 * We need to hold the sem to be sure nobody modifies the lock list
	 * with a brlock that prevents reading.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
				     tcon->ses->server->vals->shared_lock_type,
				     NULL, CIFS_READ_OP))
		rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
	up_read(&cinode->lock_sem);
	return rc;
}

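/*
 * Legacy synchronous read helper used by the readpage paths: loop issuing
 * sync_read calls of at most min(rsize, CIFSMaxBufSize) bytes, reopening
 * the file handle whenever it has been invalidated by a reconnect.
 */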
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		current_read_size = min_t(uint, read_size - total_read, rsize);
		/*
		 * For Windows ME and 9x we do not want to request more than
		 * it negotiated, since it will refuse the read then.
		 */
		if ((tcon->ses) && !(tcon->ses->capabilities &
				tcon->ses->server->vals->cap_large_files)) {
			current_read_size = min_t(uint, current_read_size,
						  CIFSMaxBufSize);
		}
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			rc = server->ops->sync_read(xid, open_file, &io_parms,
						    &bytes_read, &cur_offset,
						    &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}

/*
 * If the page is mmap'ed into a process' page tables, then we need to make
 * sure that it doesn't change while being written back.
 */
static int
cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	return VM_FAULT_LOCKED;
}

static struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = cifs_page_mkwrite,
	.remap_pages = generic_file_remap_pages,
};

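/*
 * mmap in strict cache mode: without a read oplock the page cache may be
 * stale, so invalidate the mapping before wiring up cifs_file_vm_ops.
 */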
int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;
	struct inode *inode = file_inode(file);

	xid = get_xid();

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			/* don't leak the xid on the early error return */
			free_xid(xid);
			return rc;
		}
	}

	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	free_xid(xid);
	return rc;
}

int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

	xid = get_xid();
	rc = cifs_revalidate_file(file);
	if (rc) {
		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
			 rc);
		free_xid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	free_xid(xid);
	return rc;
}

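/*
 * Completion work for a readpages request: mark each page uptodate (and
 * hand it to fscache) only when the read succeeded, then unlock and
 * release all of the pages.
 */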
static void
cifs_readv_complete(struct work_struct *work)
{
	unsigned int i;
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);

	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];

		lru_cache_add_file(page);

		if (rdata->result == 0) {
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		unlock_page(page);

		if (rdata->result == 0)
			cifs_readpage_to_fscache(rdata->mapping->host, page);

		page_cache_release(page);
		rdata->pages[i] = NULL;
	}
	kref_put(&rdata->refcount, cifs_readdata_release);
}

static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
	cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);

	rdata->tailsz = PAGE_CACHE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_CACHE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_CACHE_SIZE;
			cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
				 i, page->index, iov.iov_base, iov.iov_len);
			len -= PAGE_CACHE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
				 i, page->index, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len,
			       '\0', PAGE_CACHE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_CACHE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}

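/*
 * ->readpages: batch contiguous pages from the VFS readahead list into
 * rsize-sized async read requests; see the comments below on why the list
 * is walked from its tail.
 */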
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point however since we set ra_pages to 0 when the rsize is smaller
	 * than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
		 __func__, file, mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int i;
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity ? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_CACHE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;

		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		rc = cifs_retry_async_readv(rdata);
		if (rc != 0) {
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	return rc;
}

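/*
 * Fill a single page, trying fscache first and falling back to a
 * synchronous read from the server; a short read is zero-filled to the
 * end of the page before it is marked uptodate.
 */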
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file_inode(file), page);
	if (rc == 0)
		goto read_complete;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cifs_dbg(FYI, "Bytes read %d\n", rc);

	file_inode(file)->i_atime =
		current_fs_time(file_inode(file)->i_sb);

	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file_inode(file), page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}

static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int rc = -EACCES;
	unsigned int xid;

	xid = get_xid();

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
		 page, (int)offset, (int)offset);

	rc = cifs_readpage_worker(file, page, &offset);

	unlock_page(page);

	free_xid(xid);
	return rc;
}

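/* Is there at least one open handle to this inode with write access? */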
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	spin_lock(&cifs_file_list_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&cifs_file_list_lock);
			return 1;
		}
	}
	spin_unlock(&cifs_file_list_lock);
	return 0;
}

/*
 * We do not want to update the file size from the server for inodes open
 * for write, to avoid races with writepage extending the file. In the
 * future we could consider allowing the inode to be refreshed only on
 * increases in the file size, but this is tricky to do without racing with
 * writebehind page caching in the current Linux kernel design.
 */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/*
			 * since there is no page cache to corrupt on
			 * directio we can change size safely
			 */
			return true;
		}

		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}

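/*
 * ->write_begin: grab and prepare the target page, skipping the read from
 * the server when the page is already uptodate, will be completely
 * overwritten, or (under an oplock) lies at or beyond the EOF.
 */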
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/*
		 * We could try using another file handle if there is one -
		 * but how would we lock it to prevent a close of that handle
		 * racing with this read? In any case this will be written
		 * out by write_end so is fine.
		 */
	}
out:
	*pagep = page;
	return rc;
}

static int cifs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;

	return cifs_fscache_release_page(page, gfp);
}

static void cifs_invalidate_page(struct page *page, unsigned long offset)
{
	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

	if (offset == 0)
		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}

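/*
 * ->launder_page: write a dirty page back synchronously before it is
 * invalidated, and drop any fscache copy of it as well.
 */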
static int cifs_launder_page(struct page *page)
{
	int rc = 0;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cifs_dbg(FYI, "Launder page: %p\n", page);

	if (clear_page_dirty_for_io(page))
		rc = cifs_writepage_locked(page, &wbc);

	cifs_fscache_invalidate_page(page, page->mapping->host);
	return rc;
}

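/*
 * Deferred work run when the server breaks our oplock: downgrade the cache
 * state, flush (and, if caching reads is no longer allowed, invalidate)
 * dirty pages, push cached byte-range locks to the server, and finally
 * acknowledge the break.
 */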
Tejun Heo9b646972010-07-20 22:09:02 +02003577void cifs_oplock_break(struct work_struct *work)
Jeff Layton3bc303c2009-09-21 06:47:50 -04003578{
3579 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3580 oplock_break);
Jeff Laytona5e18bc2010-10-11 15:07:18 -04003581 struct inode *inode = cfile->dentry->d_inode;
Jeff Layton3bc303c2009-09-21 06:47:50 -04003582 struct cifsInodeInfo *cinode = CIFS_I(inode);
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07003583 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytoneb4b7562010-10-22 14:52:29 -04003584 int rc = 0;
Jeff Layton3bc303c2009-09-21 06:47:50 -04003585
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04003586 if (!cinode->clientCanCacheAll && cinode->clientCanCacheRead &&
3587 cifs_has_mand_locks(cinode)) {
Joe Perchesf96637b2013-05-04 22:12:25 -05003588 cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3589 inode);
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04003590 cinode->clientCanCacheRead = false;
3591 }
3592
Jeff Layton3bc303c2009-09-21 06:47:50 -04003593 if (inode && S_ISREG(inode->i_mode)) {
Steve Frenchd54ff732010-04-27 04:38:15 +00003594 if (cinode->clientCanCacheRead)
Al Viro8737c932009-12-24 06:47:55 -05003595 break_lease(inode, O_RDONLY);
Steve Frenchd54ff732010-04-27 04:38:15 +00003596 else
Al Viro8737c932009-12-24 06:47:55 -05003597 break_lease(inode, O_WRONLY);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003598 rc = filemap_fdatawrite(inode->i_mapping);
3599 if (cinode->clientCanCacheRead == 0) {
Jeff Laytoneb4b7562010-10-22 14:52:29 -04003600 rc = filemap_fdatawait(inode->i_mapping);
3601 mapping_set_error(inode->i_mapping, rc);
Pavel Shilovsky03eca702012-12-06 21:24:33 +04003602 cifs_invalidate_mapping(inode);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003603 }
Joe Perchesf96637b2013-05-04 22:12:25 -05003604 cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003605 }
3606
Pavel Shilovsky85160e02011-10-22 15:33:29 +04003607 rc = cifs_push_locks(cfile);
3608 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05003609 cifs_dbg(VFS, "Push locks rc = %d\n", rc);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04003610
	/*
	 * Releasing a stale oplock after a recent reconnect of the SMB
	 * session, using a now-incorrect file handle, is not a data
	 * integrity issue, but do not bother sending an oplock release
	 * if the session to the server is still disconnected, since the
	 * server has already released the oplock in that case.
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	}
}
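
/*
 * Illustrative sketch, not part of the original file: cifs_oplock_break()
 * runs as deferred work. Elsewhere in the driver the work item is set up
 * with INIT_WORK() when the cifsFileInfo is created and queued when the
 * server sends an oplock break; the queueing looks roughly like this
 * (the cifs_demo_* name is hypothetical):
 */
static void cifs_demo_queue_oplock_break(struct cifsFileInfo *cfile)
{
	/* assumes INIT_WORK(&cfile->oplock_break, cifs_oplock_break)
	   was done when the file handle was set up */
	queue_work(cifsiod_wq, &cfile->oplock_break);
}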

const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
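
/*
 * Illustrative sketch, not part of the original file: an inode's mapping
 * is wired to one of the two tables above depending on whether the
 * server's negotiated buffer can hold the header plus one full page of
 * data; the selection looks roughly like this (the cifs_demo_* name is
 * hypothetical):
 */
static void cifs_demo_set_aops(struct inode *inode, bool large_buf)
{
	if (large_buf)
		inode->i_mapping->a_ops = &cifs_addr_ops;
	else
		inode->i_mapping->a_ops = &cifs_addr_ops_smallbuf;
}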