blob: 5fcc10fa62bd768d4858d0d2c3a4dae42bfb8bd1 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
Steve Frenchfb8c4b12007-07-10 01:16:18 +00005 *
Steve Frenchf19159d2010-04-21 04:12:10 +00006 * Copyright (C) International Business Machines Corp., 2002,2010
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 * Author(s): Steve French (sfrench@us.ibm.com)
Jeremy Allison7ee1af72006-08-02 21:56:33 +00008 * Jeremy Allison (jra@samba.org)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
Steve French37c0eb42005-10-05 14:50:29 -070025#include <linux/backing-dev.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
Steve French37c0eb42005-10-05 14:50:29 -070030#include <linux/writeback.h>
Andrew Morton6f88cc22006-12-10 02:19:44 -080031#include <linux/task_io_accounting_ops.h>
Steve French23e7dd72005-10-20 13:44:56 -070032#include <linux/delay.h>
Jeff Layton3bc303c2009-09-21 06:47:50 -040033#include <linux/mount.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090034#include <linux/slab.h>
Jeff Layton690c5e32011-10-19 15:30:16 -040035#include <linux/swap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
Suresh Jayaraman9451a9a2010-07-05 18:12:45 +053044#include "fscache.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
Steve French07b92d02013-02-18 10:34:26 -060046
Linus Torvalds1da177e2005-04-16 15:20:36 -070047static inline int cifs_convert_flags(unsigned int flags)
48{
49 if ((flags & O_ACCMODE) == O_RDONLY)
50 return GENERIC_READ;
51 else if ((flags & O_ACCMODE) == O_WRONLY)
52 return GENERIC_WRITE;
53 else if ((flags & O_ACCMODE) == O_RDWR) {
54 /* GENERIC_ALL is too much permission to request
55 can cause unnecessary access denied on create */
56 /* return GENERIC_ALL; */
57 return (GENERIC_READ | GENERIC_WRITE);
58 }
59
Jeff Laytone10f7b52008-05-14 10:21:33 -070060 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
61 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
62 FILE_READ_DATA);
Steve French7fc8f4e2009-02-23 20:43:11 +000063}
Jeff Laytone10f7b52008-05-14 10:21:33 -070064
Jeff Layton608712f2010-10-15 15:33:56 -040065static u32 cifs_posix_convert_flags(unsigned int flags)
Steve French7fc8f4e2009-02-23 20:43:11 +000066{
Jeff Layton608712f2010-10-15 15:33:56 -040067 u32 posix_flags = 0;
Jeff Laytone10f7b52008-05-14 10:21:33 -070068
Steve French7fc8f4e2009-02-23 20:43:11 +000069 if ((flags & O_ACCMODE) == O_RDONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040070 posix_flags = SMB_O_RDONLY;
Steve French7fc8f4e2009-02-23 20:43:11 +000071 else if ((flags & O_ACCMODE) == O_WRONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040072 posix_flags = SMB_O_WRONLY;
73 else if ((flags & O_ACCMODE) == O_RDWR)
74 posix_flags = SMB_O_RDWR;
75
Steve French07b92d02013-02-18 10:34:26 -060076 if (flags & O_CREAT) {
Jeff Layton608712f2010-10-15 15:33:56 -040077 posix_flags |= SMB_O_CREAT;
Steve French07b92d02013-02-18 10:34:26 -060078 if (flags & O_EXCL)
79 posix_flags |= SMB_O_EXCL;
80 } else if (flags & O_EXCL)
Joe Perchesf96637b2013-05-04 22:12:25 -050081 cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
82 current->comm, current->tgid);
Steve French07b92d02013-02-18 10:34:26 -060083
Jeff Layton608712f2010-10-15 15:33:56 -040084 if (flags & O_TRUNC)
85 posix_flags |= SMB_O_TRUNC;
86 /* be safe and imply O_SYNC for O_DSYNC */
Christoph Hellwig6b2f3d12009-10-27 11:05:28 +010087 if (flags & O_DSYNC)
Jeff Layton608712f2010-10-15 15:33:56 -040088 posix_flags |= SMB_O_SYNC;
Steve French7fc8f4e2009-02-23 20:43:11 +000089 if (flags & O_DIRECTORY)
Jeff Layton608712f2010-10-15 15:33:56 -040090 posix_flags |= SMB_O_DIRECTORY;
Steve French7fc8f4e2009-02-23 20:43:11 +000091 if (flags & O_NOFOLLOW)
Jeff Layton608712f2010-10-15 15:33:56 -040092 posix_flags |= SMB_O_NOFOLLOW;
Steve French7fc8f4e2009-02-23 20:43:11 +000093 if (flags & O_DIRECT)
Jeff Layton608712f2010-10-15 15:33:56 -040094 posix_flags |= SMB_O_DIRECT;
Steve French7fc8f4e2009-02-23 20:43:11 +000095
96 return posix_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -070097}
98
99static inline int cifs_get_disposition(unsigned int flags)
100{
101 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
102 return FILE_CREATE;
103 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
104 return FILE_OVERWRITE_IF;
105 else if ((flags & O_CREAT) == O_CREAT)
106 return FILE_OPEN_IF;
Steve French55aa2e02006-05-30 18:09:31 +0000107 else if ((flags & O_TRUNC) == O_TRUNC)
108 return FILE_OVERWRITE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109 else
110 return FILE_OPEN;
111}
112
/*
 * Open (or create) a file using the SMB POSIX extensions.
 *
 * On success *pnetfid and *poplock are filled in by CIFSPOSIXCreate. If
 * pinode is non-NULL, the returned FILE_UNIX_BASIC_INFO is used to either
 * instantiate a new inode (*pinode == NULL) or refresh the existing one.
 * Returns 0 on success or a negative errno.
 */
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	/* response buffer for the on-the-wire call; freed on all exit paths */
	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	/* apply the caller's umask before sending the mode to the server */
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	/* Type of -1 means the server returned no file metadata */
	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* existing inode: refresh attributes from the open response */
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
174
/*
 * Open a file the "NT" (non-POSIX-extension) way via the dialect's
 * server->ops->open hook, then refresh inode metadata for the path.
 * Fills in *fid and *oplock on success; returns 0 or a negative errno.
 */
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;

	/* dialect without an open op cannot service this request */
	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* scratch buffer to receive file metadata from the open response */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, fid, oplock, buf,
			       cifs_sb);

	if (rc)
		goto out;

	/* refresh the inode: unix path uses path-based query, NT uses buf */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, &fid->netfid);

out:
	kfree(buf);
	return rc;
}
245
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400246static bool
247cifs_has_mand_locks(struct cifsInodeInfo *cinode)
248{
249 struct cifs_fid_locks *cur;
250 bool has_locks = false;
251
252 down_read(&cinode->lock_sem);
253 list_for_each_entry(cur, &cinode->llist, llist) {
254 if (!list_empty(&cur->locks)) {
255 has_locks = true;
256 break;
257 }
258 }
259 up_read(&cinode->lock_sem);
260 return has_locks;
261}
262
/*
 * Allocate and initialize the per-open-file private data (cifsFileInfo)
 * for @file, link it onto the inode's and tcon's open-file lists, resolve
 * the effective oplock level against any pending open, and store the
 * result in file->private_data.
 *
 * Returns the new cifsFileInfo, or NULL on allocation failure.
 */
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	/* per-fid byte-range lock list; freed in cifsFileInfo_put() */
	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	/* publish the lock list on the inode under lock_sem */
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;		/* initial reference; dropped by cifsFileInfo_put */
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);

	/* pin the superblock while this file is open */
	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (oplock == server->vals->oplock_read &&
						cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	spin_lock(&cifs_file_list_lock);
	/* a lease break may have updated the oplock while the open was pending */
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	file->private_data = cfile;
	return cfile;
}
332
/*
 * Take a reference on an open file's private data under
 * cifs_file_list_lock; returns its argument for caller convenience.
 */
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file_list_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file_list_lock);
	return cifs_file;
}
341
/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 *
 * When the last reference drops this unlinks the file from the inode and
 * tcon lists, closes the server handle (if still valid), frees any
 * outstanding byte-range lock records, and frees the structure itself.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		/* not the last reference; nothing more to do */
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 cifs_file->dentry->d_inode);
		/*
		 * In strict cache mode we need invalidate mapping on the last
		 * close because it may cause a error when we open this file
		 * again and get at least level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;
		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	/* ensure no oplock-break work is still running against this file */
	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	/* drop the references taken in cifs_new_fileinfo() */
	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}
422
/*
 * VFS ->open handler for regular files. Tries an SMB POSIX-extension open
 * first (when the server advertises it), falling back to the regular NT
 * open path, then attaches a cifsFileInfo to the struct file.
 * Returns 0 on success or a negative errno.
 */
int cifs_open(struct inode *inode, struct file *file)

{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	/* only request an oplock if the server supports them */
	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			/* server mishandled posix open: disable it from now on */
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->serverName,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* register the open so a concurrent lease break is not missed */
	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		/* could not track the open locally: close the server handle */
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
541
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 *
 * Holds lock_sem for reading across the push so the cached lock list
 * cannot change underneath us; if locks are still cached locally there
 * is nothing to re-send and we return immediately.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	down_read(&cinode->lock_sem);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

	/* POSIX (unix extension) locks when supported, else mandatory locks */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}
573
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700574static int
575cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700576{
577 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400578 unsigned int xid;
Jeff Layton590a3fe2009-09-12 11:54:28 -0400579 __u32 oplock;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700580 struct cifs_sb_info *cifs_sb;
Steve French96daf2b2011-05-27 04:34:02 +0000581 struct cifs_tcon *tcon;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700582 struct TCP_Server_Info *server;
583 struct cifsInodeInfo *cinode;
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000584 struct inode *inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700585 char *full_path = NULL;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700586 int desired_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700587 int disposition = FILE_OPEN;
Shirish Pargaonkar3d3ea8e2011-09-26 09:56:44 -0500588 int create_options = CREATE_NOT_DIR;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700589 struct cifs_fid fid;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700590
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400591 xid = get_xid();
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700592 mutex_lock(&cfile->fh_mutex);
593 if (!cfile->invalidHandle) {
594 mutex_unlock(&cfile->fh_mutex);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +0530595 rc = 0;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400596 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +0530597 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700598 }
599
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700600 inode = cfile->dentry->d_inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700601 cifs_sb = CIFS_SB(inode->i_sb);
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700602 tcon = tlink_tcon(cfile->tlink);
603 server = tcon->ses->server;
Steve French3a9f4622007-04-04 17:10:24 +0000604
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700605 /*
606 * Can not grab rename sem here because various ops, including those
607 * that already have the rename sem can end up causing writepage to get
608 * called and if the server was down that means we end up here, and we
609 * can never tell if the caller already has the rename_sem.
610 */
611 full_path = build_path_from_dentry(cfile->dentry);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700612 if (full_path == NULL) {
Steve French3a9f4622007-04-04 17:10:24 +0000613 rc = -ENOMEM;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700614 mutex_unlock(&cfile->fh_mutex);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400615 free_xid(xid);
Steve French3a9f4622007-04-04 17:10:24 +0000616 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700617 }
618
Joe Perchesf96637b2013-05-04 22:12:25 -0500619 cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
620 inode, cfile->f_flags, full_path);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700621
Pavel Shilovsky10b9b982012-03-20 12:55:09 +0300622 if (tcon->ses->server->oplocks)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700623 oplock = REQ_OPLOCK;
624 else
Steve French4b18f2a2008-04-29 00:06:05 +0000625 oplock = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700626
Pavel Shilovsky29e20f92012-07-13 13:58:14 +0400627 if (tcon->unix_ext && cap_unix(tcon->ses) &&
Steve French7fc8f4e2009-02-23 20:43:11 +0000628 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
Pavel Shilovsky29e20f92012-07-13 13:58:14 +0400629 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
Jeff Layton608712f2010-10-15 15:33:56 -0400630 /*
631 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
632 * original open. Must mask them off for a reopen.
633 */
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700634 unsigned int oflags = cfile->f_flags &
Jeff Layton15886172010-10-15 15:33:59 -0400635 ~(O_CREAT | O_EXCL | O_TRUNC);
Jeff Layton608712f2010-10-15 15:33:56 -0400636
Jeff Layton2422f672010-06-16 13:40:16 -0400637 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700638 cifs_sb->mnt_file_mode /* ignored */,
639 oflags, &oplock, &fid.netfid, xid);
Steve French7fc8f4e2009-02-23 20:43:11 +0000640 if (rc == 0) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500641 cifs_dbg(FYI, "posix reopen succeeded\n");
Steve French7fc8f4e2009-02-23 20:43:11 +0000642 goto reopen_success;
643 }
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700644 /*
645 * fallthrough to retry open the old way on errors, especially
646 * in the reconnect path it is important to retry hard
647 */
Steve French7fc8f4e2009-02-23 20:43:11 +0000648 }
649
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700650 desired_access = cifs_convert_flags(cfile->f_flags);
Steve French7fc8f4e2009-02-23 20:43:11 +0000651
Shirish Pargaonkar3d3ea8e2011-09-26 09:56:44 -0500652 if (backup_cred(cifs_sb))
653 create_options |= CREATE_OPEN_BACKUP_INTENT;
654
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700655 if (server->ops->get_lease_key)
656 server->ops->get_lease_key(inode, &fid);
657
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700658 /*
659 * Can not refresh inode by passing in file_info buf to be returned by
660 * CIFSSMBOpen and then calling get_inode_info with returned buf since
661 * file might have write behind data that needs to be flushed and server
662 * version of file size can be stale. If we knew for sure that inode was
663 * not dirty locally we could do this.
664 */
665 rc = server->ops->open(xid, tcon, full_path, disposition,
666 desired_access, create_options, &fid, &oplock,
667 NULL, cifs_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700668 if (rc) {
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700669 mutex_unlock(&cfile->fh_mutex);
Joe Perchesf96637b2013-05-04 22:12:25 -0500670 cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
671 cifs_dbg(FYI, "oplock: %d\n", oplock);
Jeff Layton15886172010-10-15 15:33:59 -0400672 goto reopen_error_exit;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700673 }
Jeff Layton15886172010-10-15 15:33:59 -0400674
675reopen_success:
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700676 cfile->invalidHandle = false;
677 mutex_unlock(&cfile->fh_mutex);
678 cinode = CIFS_I(inode);
Jeff Layton15886172010-10-15 15:33:59 -0400679
680 if (can_flush) {
681 rc = filemap_write_and_wait(inode->i_mapping);
Jeff Laytoneb4b7562010-10-22 14:52:29 -0400682 mapping_set_error(inode->i_mapping, rc);
Jeff Layton15886172010-10-15 15:33:59 -0400683
Jeff Layton15886172010-10-15 15:33:59 -0400684 if (tcon->unix_ext)
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700685 rc = cifs_get_inode_info_unix(&inode, full_path,
686 inode->i_sb, xid);
Jeff Layton15886172010-10-15 15:33:59 -0400687 else
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700688 rc = cifs_get_inode_info(&inode, full_path, NULL,
689 inode->i_sb, xid, NULL);
690 }
691 /*
692 * Else we are writing out data to server already and could deadlock if
693 * we tried to flush data, and since we do not know if we have data that
694 * would invalidate the current end of file on the server we can not go
695 * to the server to get the new inode info.
696 */
Pavel Shilovskye66673e2010-11-02 12:00:42 +0300697
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700698 server->ops->set_fid(cfile, &fid, oplock);
699 cifs_relock_file(cfile);
Jeff Layton15886172010-10-15 15:33:59 -0400700
701reopen_error_exit:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700702 kfree(full_path);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400703 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700704 return rc;
705}
706
707int cifs_close(struct inode *inode, struct file *file)
708{
Jeff Layton77970692011-04-05 16:23:47 -0700709 if (file->private_data != NULL) {
710 cifsFileInfo_put(file->private_data);
711 file->private_data = NULL;
712 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700713
Steve Frenchcdff08e2010-10-21 22:46:14 +0000714 /* return code from the ->release op is always ignored */
715 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700716}
717
718int cifs_closedir(struct inode *inode, struct file *file)
719{
720 int rc = 0;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400721 unsigned int xid;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700722 struct cifsFileInfo *cfile = file->private_data;
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700723 struct cifs_tcon *tcon;
724 struct TCP_Server_Info *server;
725 char *buf;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700726
Joe Perchesf96637b2013-05-04 22:12:25 -0500727 cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700728
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700729 if (cfile == NULL)
730 return rc;
731
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400732 xid = get_xid();
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700733 tcon = tlink_tcon(cfile->tlink);
734 server = tcon->ses->server;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700735
Joe Perchesf96637b2013-05-04 22:12:25 -0500736 cifs_dbg(FYI, "Freeing private data in close dir\n");
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700737 spin_lock(&cifs_file_list_lock);
Pavel Shilovsky922798d2014-08-18 20:49:57 +0400738 if (server->ops->dir_needs_close(cfile)) {
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700739 cfile->invalidHandle = true;
740 spin_unlock(&cifs_file_list_lock);
741 if (server->ops->close_dir)
742 rc = server->ops->close_dir(xid, tcon, &cfile->fid);
743 else
744 rc = -ENOSYS;
Joe Perchesf96637b2013-05-04 22:12:25 -0500745 cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700746 /* not much we can do if it fails anyway, ignore rc */
747 rc = 0;
748 } else
749 spin_unlock(&cifs_file_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700750
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700751 buf = cfile->srch_inf.ntwrk_buf_start;
752 if (buf) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500753 cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700754 cfile->srch_inf.ntwrk_buf_start = NULL;
755 if (cfile->srch_inf.smallBuf)
756 cifs_small_buf_release(buf);
757 else
758 cifs_buf_release(buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700759 }
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700760
761 cifs_put_tlink(cfile->tlink);
762 kfree(file->private_data);
763 file->private_data = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700764 /* BB can we lock the filestruct while this is going on? */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400765 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700766 return rc;
767}
768
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400769static struct cifsLockInfo *
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300770cifs_lock_init(__u64 offset, __u64 length, __u8 type)
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000771{
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400772 struct cifsLockInfo *lock =
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000773 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400774 if (!lock)
775 return lock;
776 lock->offset = offset;
777 lock->length = length;
778 lock->type = type;
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400779 lock->pid = current->tgid;
780 INIT_LIST_HEAD(&lock->blist);
781 init_waitqueue_head(&lock->block_q);
782 return lock;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400783}
784
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -0700785void
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400786cifs_del_lock_waiters(struct cifsLockInfo *lock)
787{
788 struct cifsLockInfo *li, *tmp;
789 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
790 list_del_init(&li->blist);
791 wake_up(&li->block_q);
792 }
793}
794
Pavel Shilovsky081c0412012-11-27 18:38:53 +0400795#define CIFS_LOCK_OP 0
796#define CIFS_READ_OP 1
797#define CIFS_WRITE_OP 2
798
/* @rw_check : CIFS_LOCK_OP - no op, CIFS_READ_OP - read, CIFS_WRITE_OP - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	/*
	 * Walk the locks held through this fid and report the first one that
	 * conflicts with the [offset, offset+length) range for the requested
	 * @type/@rw_check. Returns true and (optionally) the conflicting lock
	 * via @conf_lock, false if no conflict exists on this fid.
	 */
	list_for_each_entry(li, &fdlocks->locks, llist) {
		/* ranges don't overlap - can't conflict */
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		/*
		 * For read/write checks (not lock-setting), a lock held by the
		 * same task through the same fid is normally not a conflict...
		 */
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		/*
		 * A shared request is compatible with another shared lock of
		 * the same type, or with our own lock through the same fid.
		 */
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
830
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700831bool
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300832cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700833 __u8 type, struct cifsLockInfo **conf_lock,
Pavel Shilovsky081c0412012-11-27 18:38:53 +0400834 int rw_check)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400835{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300836 bool rc = false;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700837 struct cifs_fid_locks *cur;
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300838 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300839
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700840 list_for_each_entry(cur, &cinode->llist, llist) {
841 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700842 cfile, conf_lock, rw_check);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300843 if (rc)
844 break;
845 }
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300846
847 return rc;
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400848}
849
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300850/*
851 * Check if there is another lock that prevents us to set the lock (mandatory
852 * style). If such a lock exists, update the flock structure with its
853 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
854 * or leave it the same if we can't. Returns 0 if we don't need to request to
855 * the server or 1 otherwise.
856 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	/* read access is enough: we only inspect the lock lists here */
	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, CIFS_LOCK_OP);
	if (exist) {
		/* copy the conflicting lock's properties back to the caller */
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		/* no cached conflict but brlocks aren't cached - ask server */
		rc = 1;
	else
		/* all locks are cached locally and none conflict */
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}
887
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400888static void
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300889cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400890{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300891 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700892 down_write(&cinode->lock_sem);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700893 list_add_tail(&lock->llist, &cfile->llist->locks);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700894 up_write(&cinode->lock_sem);
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000895}
896
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300897/*
898 * Set the byte-range lock (mandatory style). Returns:
899 * 1) 0, if we set the lock and don't need to request to the server;
900 * 2) 1, if no locks prevent us but we need to request to the server;
901 * 3) -EACCESS, if there is a lock that prevents us and wait is false.
902 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		/* no conflict and caching allowed: set the lock locally only */
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		/* no conflict, but we must also request it from the server */
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		/*
		 * Queue ourselves on the conflicting lock's blocked list and
		 * sleep (dropping lock_sem) until cifs_del_lock_waiters
		 * unhooks us - an empty blist means we were woken, so retry
		 * the whole conflict check from scratch.
		 */
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		/* interrupted: unhook ourselves from the waiter list */
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
943
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300944/*
945 * Check if there is another lock that prevents us to set the lock (posix
946 * style). If such a lock exists, update the flock structure with its
947 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
948 * or leave it the same if we can't. Returns 0 if we don't need to request to
949 * the server or 1 otherwise.
950 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400951static int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400952cifs_posix_lock_test(struct file *file, struct file_lock *flock)
953{
954 int rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -0500955 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400956 unsigned char saved_type = flock->fl_type;
957
Pavel Shilovsky50792762011-10-29 17:17:57 +0400958 if ((flock->fl_flags & FL_POSIX) == 0)
959 return 1;
960
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700961 down_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400962 posix_test_lock(file, flock);
963
964 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
965 flock->fl_type = saved_type;
966 rc = 1;
967 }
968
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700969 up_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400970 return rc;
971}
972
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300973/*
974 * Set the byte-range lock (posix style). Returns:
975 * 1) 0, if we set the lock and don't need to request to the server;
976 * 2) 1, if we need to request to the server;
977 * 3) <0, if the error occurs while setting the lock.
978 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = 1;

	/* non-posix requests must always go to the server */
	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* caching disabled - caller must request from the server */
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		/*
		 * The VFS queued us behind a conflicting lock; sleep until
		 * it is released (fl_next cleared), then retry. On signal,
		 * drop off the blocked list and return the error.
		 */
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		locks_delete_block(flock);
	}
	return rc;
}
1005
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001006int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001007cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001008{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001009 unsigned int xid;
1010 int rc = 0, stored_rc;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001011 struct cifsLockInfo *li, *tmp;
1012 struct cifs_tcon *tcon;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001013 unsigned int num, max_num, max_buf;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001014 LOCKING_ANDX_RANGE *buf, *cur;
1015 int types[] = {LOCKING_ANDX_LARGE_FILES,
1016 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1017 int i;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001018
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001019 xid = get_xid();
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001020 tcon = tlink_tcon(cfile->tlink);
1021
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001022 /*
1023 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1024 * and check it for zero before using.
1025 */
1026 max_buf = tcon->ses->server->maxBuf;
1027 if (!max_buf) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001028 free_xid(xid);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001029 return -EINVAL;
1030 }
1031
1032 max_num = (max_buf - sizeof(struct smb_hdr)) /
1033 sizeof(LOCKING_ANDX_RANGE);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001034 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1035 if (!buf) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001036 free_xid(xid);
Pavel Shilovskye2f28862012-08-29 21:13:38 +04001037 return -ENOMEM;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001038 }
1039
1040 for (i = 0; i < 2; i++) {
1041 cur = buf;
1042 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001043 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001044 if (li->type != types[i])
1045 continue;
1046 cur->Pid = cpu_to_le16(li->pid);
1047 cur->LengthLow = cpu_to_le32((u32)li->length);
1048 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1049 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1050 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1051 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001052 stored_rc = cifs_lockv(xid, tcon,
1053 cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001054 (__u8)li->type, 0, num,
1055 buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001056 if (stored_rc)
1057 rc = stored_rc;
1058 cur = buf;
1059 num = 0;
1060 } else
1061 cur++;
1062 }
1063
1064 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001065 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001066 (__u8)types[i], 0, num, buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001067 if (stored_rc)
1068 rc = stored_rc;
1069 }
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001070 }
1071
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001072 kfree(buf);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001073 free_xid(xid);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001074 return rc;
1075}
1076
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001077/* copied from fs/locks.c with a name change */
1078#define cifs_for_each_lock(inode, lockp) \
1079 for (lockp = &inode->i_flock; *lockp != NULL; \
1080 lockp = &(*lockp)->fl_next)
1081
/*
 * Snapshot of one posix brlock, pre-allocated so it can be filled in
 * under lock_flocks() (no sleeping allocations there) and pushed to the
 * server afterwards via CIFSSMBPosixLock.
 */
struct lock_to_push {
	struct list_head llist;	/* entry on the locks_to_send list */
	__u64 offset;		/* start of the locked range */
	__u64 length;		/* length of the locked range */
	__u32 pid;		/* lock owner (fl_pid) */
	__u16 netfid;		/* server file handle to lock through */
	__u8 type;		/* CIFS_RDLCK or CIFS_WRLCK */
};
1090
/*
 * Re-send all cached posix brlocks on this file to the server (used after
 * a reconnect / when lock caching is being turned off). Works in three
 * phases because lock_flocks() forbids sleeping: (1) count FL_POSIX locks,
 * (2) pre-allocate that many lock_to_push entries, (3) re-take lock_flocks
 * and copy each lock into a pre-allocated entry, then push them over the
 * wire with the lock dropped.
 */
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	unsigned int count = 0, i = 0;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	/* phase 1: count the posix locks we need to push */
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		if ((*before)->fl_flags & FL_POSIX)
			count++;
	}
	unlock_flocks();

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	/* phase 3: fill the pre-allocated entries under lock_flocks */
	el = locks_to_send.next;
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		flock = *before;
		if ((flock->fl_flags & FL_POSIX) == 0)
			continue;
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cifs_dbg(VFS, "Can't push all brlocks!\n");
			break;
		}
		/* fl_end is inclusive, wire format wants a length */
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = flock->fl_pid;
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	unlock_flocks();

	/* push each snapshot to the server; remember the last failure */
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	free_xid(xid);
	return rc;
err_out:
	/* allocation failed part-way: free what we already allocated */
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}
1178
1179static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001180cifs_push_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001181{
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001182 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001183 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001184 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001185 int rc = 0;
1186
1187 /* we are going to update can_cache_brlcks here - need a write access */
1188 down_write(&cinode->lock_sem);
1189 if (!cinode->can_cache_brlcks) {
1190 up_write(&cinode->lock_sem);
1191 return rc;
1192 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001193
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001194 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001195 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1196 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001197 rc = cifs_push_posix_locks(cfile);
1198 else
1199 rc = tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001200
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001201 cinode->can_cache_brlcks = false;
1202 up_write(&cinode->lock_sem);
1203 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001204}
1205
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001206static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001207cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001208 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001209{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001210 if (flock->fl_flags & FL_POSIX)
Joe Perchesf96637b2013-05-04 22:12:25 -05001211 cifs_dbg(FYI, "Posix\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001212 if (flock->fl_flags & FL_FLOCK)
Joe Perchesf96637b2013-05-04 22:12:25 -05001213 cifs_dbg(FYI, "Flock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001214 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001215 cifs_dbg(FYI, "Blocking lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001216 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001217 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001218 if (flock->fl_flags & FL_ACCESS)
Joe Perchesf96637b2013-05-04 22:12:25 -05001219 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001220 if (flock->fl_flags & FL_LEASE)
Joe Perchesf96637b2013-05-04 22:12:25 -05001221 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001222 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001223 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1224 FL_ACCESS | FL_LEASE | FL_CLOSE)))
Joe Perchesf96637b2013-05-04 22:12:25 -05001225 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001227 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001228 if (flock->fl_type == F_WRLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001229 cifs_dbg(FYI, "F_WRLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001230 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001231 *lock = 1;
1232 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001233 cifs_dbg(FYI, "F_UNLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001234 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001235 *unlock = 1;
1236 /* Check if unlock includes more than one lock range */
1237 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001238 cifs_dbg(FYI, "F_RDLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001239 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001240 *lock = 1;
1241 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001242 cifs_dbg(FYI, "F_EXLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001243 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001244 *lock = 1;
1245 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001246 cifs_dbg(FYI, "F_SHLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001247 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001248 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001249 } else
Joe Perchesf96637b2013-05-04 22:12:25 -05001250 cifs_dbg(FYI, "Unknown type of lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001251}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001253static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001254cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001255 bool wait_flag, bool posix_lck, unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001256{
1257 int rc = 0;
1258 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001259 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1260 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001261 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001262 __u16 netfid = cfile->fid.netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001264 if (posix_lck) {
1265 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001266
1267 rc = cifs_posix_lock_test(file, flock);
1268 if (!rc)
1269 return rc;
1270
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001271 if (type & server->vals->shared_lock_type)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001272 posix_lock_type = CIFS_RDLCK;
1273 else
1274 posix_lock_type = CIFS_WRLCK;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001275 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001276 flock->fl_start, length, flock,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001277 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278 return rc;
1279 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001280
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001281 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001282 if (!rc)
1283 return rc;
1284
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001285 /* BB we could chain these into one lock request BB */
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001286 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1287 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001288 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001289 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1290 type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001291 flock->fl_type = F_UNLCK;
1292 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001293 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1294 rc);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001295 return 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001296 }
1297
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001298 if (type & server->vals->shared_lock_type) {
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001299 flock->fl_type = F_WRLCK;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001300 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001301 }
1302
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001303 type &= ~server->vals->exclusive_lock_type;
1304
1305 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1306 type | server->vals->shared_lock_type,
1307 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001308 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001309 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1310 type | server->vals->shared_lock_type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001311 flock->fl_type = F_RDLCK;
1312 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001313 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1314 rc);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001315 } else
1316 flock->fl_type = F_WRLCK;
1317
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001318 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001319}
1320
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001321void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001322cifs_move_llist(struct list_head *source, struct list_head *dest)
1323{
1324 struct list_head *li, *tmp;
1325 list_for_each_safe(li, tmp, source)
1326 list_move(li, dest);
1327}
1328
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001329void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001330cifs_free_llist(struct list_head *llist)
1331{
1332 struct cifsLockInfo *li, *tmp;
1333 list_for_each_entry_safe(li, tmp, llist, llist) {
1334 cifs_del_lock_waiters(li);
1335 list_del(&li->llist);
1336 kfree(li);
1337 }
1338}
1339
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001340int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001341cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1342 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001343{
1344 int rc = 0, stored_rc;
1345 int types[] = {LOCKING_ANDX_LARGE_FILES,
1346 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1347 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001348 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001349 LOCKING_ANDX_RANGE *buf, *cur;
1350 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1351 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1352 struct cifsLockInfo *li, *tmp;
1353 __u64 length = 1 + flock->fl_end - flock->fl_start;
1354 struct list_head tmp_llist;
1355
1356 INIT_LIST_HEAD(&tmp_llist);
1357
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001358 /*
1359 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1360 * and check it for zero before using.
1361 */
1362 max_buf = tcon->ses->server->maxBuf;
1363 if (!max_buf)
1364 return -EINVAL;
1365
1366 max_num = (max_buf - sizeof(struct smb_hdr)) /
1367 sizeof(LOCKING_ANDX_RANGE);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001368 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1369 if (!buf)
1370 return -ENOMEM;
1371
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001372 down_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001373 for (i = 0; i < 2; i++) {
1374 cur = buf;
1375 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001376 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001377 if (flock->fl_start > li->offset ||
1378 (flock->fl_start + length) <
1379 (li->offset + li->length))
1380 continue;
1381 if (current->tgid != li->pid)
1382 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001383 if (types[i] != li->type)
1384 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001385 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001386 /*
1387 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001388 * a lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001389 */
1390 list_del(&li->llist);
1391 cifs_del_lock_waiters(li);
1392 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001393 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001394 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001395 cur->Pid = cpu_to_le16(li->pid);
1396 cur->LengthLow = cpu_to_le32((u32)li->length);
1397 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1398 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1399 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1400 /*
1401 * We need to save a lock here to let us add it again to
1402 * the file's list if the unlock range request fails on
1403 * the server.
1404 */
1405 list_move(&li->llist, &tmp_llist);
1406 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001407 stored_rc = cifs_lockv(xid, tcon,
1408 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001409 li->type, num, 0, buf);
1410 if (stored_rc) {
1411 /*
1412 * We failed on the unlock range
1413 * request - add all locks from the tmp
1414 * list to the head of the file's list.
1415 */
1416 cifs_move_llist(&tmp_llist,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001417 &cfile->llist->locks);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001418 rc = stored_rc;
1419 } else
1420 /*
1421 * The unlock range request succeed -
1422 * free the tmp list.
1423 */
1424 cifs_free_llist(&tmp_llist);
1425 cur = buf;
1426 num = 0;
1427 } else
1428 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001429 }
1430 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001431 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001432 types[i], num, 0, buf);
1433 if (stored_rc) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001434 cifs_move_llist(&tmp_llist,
1435 &cfile->llist->locks);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001436 rc = stored_rc;
1437 } else
1438 cifs_free_llist(&tmp_llist);
1439 }
1440 }
1441
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001442 up_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001443 kfree(buf);
1444 return rc;
1445}
1446
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001447static int
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001448cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001449 bool wait_flag, bool posix_lck, int lock, int unlock,
1450 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001451{
1452 int rc = 0;
1453 __u64 length = 1 + flock->fl_end - flock->fl_start;
1454 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1455 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001456 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001457 struct inode *inode = cfile->dentry->d_inode;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001458
1459 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001460 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001461
1462 rc = cifs_posix_lock_set(file, flock);
1463 if (!rc || rc < 0)
1464 return rc;
1465
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001466 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001467 posix_lock_type = CIFS_RDLCK;
1468 else
1469 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001470
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001471 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001472 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001473
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001474 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
1475 current->tgid, flock->fl_start, length,
1476 NULL, posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001477 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001478 }
1479
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001480 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001481 struct cifsLockInfo *lock;
1482
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001483 lock = cifs_lock_init(flock->fl_start, length, type);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001484 if (!lock)
1485 return -ENOMEM;
1486
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001487 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001488 if (rc < 0) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001489 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001490 return rc;
1491 }
1492 if (!rc)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001493 goto out;
1494
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001495 /*
1496 * Windows 7 server can delay breaking lease from read to None
1497 * if we set a byte-range lock on a file - break it explicitly
1498 * before sending the lock to the server to be sure the next
1499 * read won't conflict with non-overlapted locks due to
1500 * pagereading.
1501 */
1502 if (!CIFS_I(inode)->clientCanCacheAll &&
1503 CIFS_I(inode)->clientCanCacheRead) {
1504 cifs_invalidate_mapping(inode);
Joe Perchesf96637b2013-05-04 22:12:25 -05001505 cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
1506 inode);
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001507 CIFS_I(inode)->clientCanCacheRead = false;
1508 }
1509
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001510 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1511 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001512 if (rc) {
1513 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001514 return rc;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001515 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001516
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001517 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001518 } else if (unlock)
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001519 rc = server->ops->mand_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001520
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001521out:
1522 if (flock->fl_flags & FL_POSIX)
Steve French9ebb3892012-04-01 13:52:54 -05001523 posix_lock_file_wait(file, flock);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001524 return rc;
1525}
1526
/*
 * VFS ->lock() entry point for CIFS files.
 *
 * Decodes @flock into lock/unlock/type/wait flags via cifs_read_flock(),
 * decides whether POSIX byte-range semantics apply (unix extensions
 * negotiated, FCNTL capability present, and not disabled by the
 * "nobrl"/NOPOSIXBRL mount flag), then dispatches F_GETLK requests to
 * cifs_getlk() and set requests to cifs_setlk().
 *
 * Returns 0 on success, -EOPNOTSUPP when the request is neither a lock
 * nor an unlock, or the rc from the helper.
 */
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode;
	struct cifsFileInfo *cfile;
	__u16 netfid;
	__u32 type;

	rc = -EACCES;
	xid = get_xid();

	cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
		 cmd, flock->fl_flags, flock->fl_type,
		 flock->fl_start, flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	netfid = cfile->fid.netfid;
	cinode = CIFS_I(file_inode(file));

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}
1585
Jeff Layton597b0272012-03-23 14:40:56 -04001586/*
1587 * update the file size (if needed) after a write. Should be called with
1588 * the inode->i_lock held
1589 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001590void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001591cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1592 unsigned int bytes_written)
1593{
1594 loff_t end_of_write = offset + bytes_written;
1595
1596 if (end_of_write > cifsi->server_eof)
1597 cifsi->server_eof = end_of_write;
1598}
1599
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001600static ssize_t
1601cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1602 size_t write_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603{
1604 int rc = 0;
1605 unsigned int bytes_written = 0;
1606 unsigned int total_written;
1607 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001608 struct cifs_tcon *tcon;
1609 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001610 unsigned int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001611 struct dentry *dentry = open_file->dentry;
1612 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001613 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614
Jeff Layton7da4b492010-10-15 15:34:00 -04001615 cifs_sb = CIFS_SB(dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616
Joe Perchesf96637b2013-05-04 22:12:25 -05001617 cifs_dbg(FYI, "write %zd bytes to offset %lld of %s\n",
1618 write_size, *offset, dentry->d_name.name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001620 tcon = tlink_tcon(open_file->tlink);
1621 server = tcon->ses->server;
1622
1623 if (!server->ops->sync_write)
1624 return -ENOSYS;
Steve French50c2f752007-07-13 00:33:32 +00001625
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001626 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628 for (total_written = 0; write_size > total_written;
1629 total_written += bytes_written) {
1630 rc = -EAGAIN;
1631 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001632 struct kvec iov[2];
1633 unsigned int len;
1634
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001636 /* we could deadlock if we called
1637 filemap_fdatawait from here so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001638 reopen_file not to flush data to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639 server now */
Jeff Layton15886172010-10-15 15:33:59 -04001640 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641 if (rc != 0)
1642 break;
1643 }
Steve French3e844692005-10-03 13:37:24 -07001644
Jeff Laytonca83ce32011-04-12 09:13:44 -04001645 len = min((size_t)cifs_sb->wsize,
1646 write_size - total_written);
1647 /* iov[0] is reserved for smb header */
1648 iov[1].iov_base = (char *)write_data + total_written;
1649 iov[1].iov_len = len;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001650 io_parms.pid = pid;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001651 io_parms.tcon = tcon;
1652 io_parms.offset = *offset;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001653 io_parms.length = len;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001654 rc = server->ops->sync_write(xid, open_file, &io_parms,
1655 &bytes_written, iov, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656 }
1657 if (rc || (bytes_written == 0)) {
1658 if (total_written)
1659 break;
1660 else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001661 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662 return rc;
1663 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001664 } else {
Jeff Layton597b0272012-03-23 14:40:56 -04001665 spin_lock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001666 cifs_update_eof(cifsi, *offset, bytes_written);
Jeff Layton597b0272012-03-23 14:40:56 -04001667 spin_unlock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001668 *offset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001669 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670 }
1671
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001672 cifs_stats_bytes_written(tcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673
Jeff Layton7da4b492010-10-15 15:34:00 -04001674 if (total_written > 0) {
1675 spin_lock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001676 if (*offset > dentry->d_inode->i_size)
1677 i_size_write(dentry->d_inode, *offset);
Jeff Layton7da4b492010-10-15 15:34:00 -04001678 spin_unlock(&dentry->d_inode->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001679 }
Jeff Layton7da4b492010-10-15 15:34:00 -04001680 mark_inode_dirty_sync(dentry->d_inode);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001681 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682 return total_written;
1683}
1684
Jeff Layton6508d902010-09-29 19:51:11 -04001685struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1686 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001687{
1688 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001689 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1690
1691 /* only filter by fsuid on multiuser mounts */
1692 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1693 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001694
Jeff Layton44772882010-10-15 15:34:03 -04001695 spin_lock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001696 /* we could simply get the first_list_entry since write-only entries
1697 are always at the end of the list but since the first entry might
1698 have a close pending, we go through the whole list */
1699 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001700 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001701 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001702 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001703 if (!open_file->invalidHandle) {
1704 /* found a good file */
1705 /* lock it so it will not be closed on us */
Jeff Layton764a1b12012-07-25 14:59:54 -04001706 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001707 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001708 return open_file;
1709 } /* else might as well continue, and look for
1710 another, or simply have the caller reopen it
1711 again rather than trying to fix this handle */
1712 } else /* write only file */
1713 break; /* write only files are last so must be done */
1714 }
Jeff Layton44772882010-10-15 15:34:03 -04001715 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001716 return NULL;
1717}
Steve French630f3f0c2007-10-25 21:17:17 +00001718
Jeff Layton6508d902010-09-29 19:51:11 -04001719struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1720 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001721{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001722 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001723 struct cifs_sb_info *cifs_sb;
Jeff Layton2846d382008-09-22 21:33:33 -04001724 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001725 int rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001726 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001727
Steve French60808232006-04-22 15:53:05 +00001728 /* Having a null inode here (because mapping->host was set to zero by
1729 the VFS or MM) should not happen but we had reports of on oops (due to
1730 it being zero) during stress testcases so we need to check for it */
1731
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001732 if (cifs_inode == NULL) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001733 cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
Steve French60808232006-04-22 15:53:05 +00001734 dump_stack();
1735 return NULL;
1736 }
1737
Jeff Laytond3892292010-11-02 16:22:50 -04001738 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1739
Jeff Layton6508d902010-09-29 19:51:11 -04001740 /* only filter by fsuid on multiuser mounts */
1741 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1742 fsuid_only = false;
1743
Jeff Layton44772882010-10-15 15:34:03 -04001744 spin_lock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001745refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001746 if (refind > MAX_REOPEN_ATT) {
1747 spin_unlock(&cifs_file_list_lock);
1748 return NULL;
1749 }
Steve French6148a742005-10-05 12:23:19 -07001750 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001751 if (!any_available && open_file->pid != current->tgid)
1752 continue;
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001753 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001754 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001755 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001756 if (!open_file->invalidHandle) {
1757 /* found a good writable file */
Jeff Layton764a1b12012-07-25 14:59:54 -04001758 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001759 spin_unlock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001760 return open_file;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001761 } else {
1762 if (!inv_file)
1763 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001764 }
Steve French6148a742005-10-05 12:23:19 -07001765 }
1766 }
Jeff Layton2846d382008-09-22 21:33:33 -04001767 /* couldn't find useable FH with same pid, try any available */
1768 if (!any_available) {
1769 any_available = true;
1770 goto refind_writable;
1771 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001772
1773 if (inv_file) {
1774 any_available = false;
Jeff Layton764a1b12012-07-25 14:59:54 -04001775 cifsFileInfo_get_locked(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001776 }
1777
Jeff Layton44772882010-10-15 15:34:03 -04001778 spin_unlock(&cifs_file_list_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001779
1780 if (inv_file) {
1781 rc = cifs_reopen_file(inv_file, false);
1782 if (!rc)
1783 return inv_file;
1784 else {
1785 spin_lock(&cifs_file_list_lock);
1786 list_move_tail(&inv_file->flist,
1787 &cifs_inode->openFileList);
1788 spin_unlock(&cifs_file_list_lock);
1789 cifsFileInfo_put(inv_file);
1790 spin_lock(&cifs_file_list_lock);
1791 ++refind;
1792 goto refind_writable;
1793 }
1794 }
1795
Steve French6148a742005-10-05 12:23:19 -07001796 return NULL;
1797}
1798
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1800{
1801 struct address_space *mapping = page->mapping;
1802 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1803 char *write_data;
1804 int rc = -EFAULT;
1805 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07001807 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808
1809 if (!mapping || !mapping->host)
1810 return -EFAULT;
1811
1812 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813
1814 offset += (loff_t)from;
1815 write_data = kmap(page);
1816 write_data += from;
1817
1818 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1819 kunmap(page);
1820 return -EIO;
1821 }
1822
1823 /* racing with truncate? */
1824 if (offset > mapping->host->i_size) {
1825 kunmap(page);
1826 return 0; /* don't care */
1827 }
1828
1829 /* check to make sure that we are not extending the file */
1830 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001831 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001832
Jeff Layton6508d902010-09-29 19:51:11 -04001833 open_file = find_writable_file(CIFS_I(mapping->host), false);
Steve French6148a742005-10-05 12:23:19 -07001834 if (open_file) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001835 bytes_written = cifs_write(open_file, open_file->pid,
1836 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001837 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838 /* Does mm or vfs already set times? */
Steve French6148a742005-10-05 12:23:19 -07001839 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001840 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07001841 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001842 else if (bytes_written < 0)
1843 rc = bytes_written;
Steve French6148a742005-10-05 12:23:19 -07001844 } else {
Joe Perchesf96637b2013-05-04 22:12:25 -05001845 cifs_dbg(FYI, "No writeable filehandles for inode\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001846 rc = -EIO;
1847 }
1848
1849 kunmap(page);
1850 return rc;
1851}
1852
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07001854 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001855{
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001856 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1857 bool done = false, scanned = false, range_whole = false;
1858 pgoff_t end, index;
1859 struct cifs_writedata *wdata;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07001860 struct TCP_Server_Info *server;
Steve French37c0eb42005-10-05 14:50:29 -07001861 struct page *page;
Steve French37c0eb42005-10-05 14:50:29 -07001862 int rc = 0;
Steve French50c2f752007-07-13 00:33:32 +00001863
Steve French37c0eb42005-10-05 14:50:29 -07001864 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001865 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07001866 * one page at a time via cifs_writepage
1867 */
1868 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1869 return generic_writepages(mapping, wbc);
1870
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001871 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07001872 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001873 end = -1;
1874 } else {
1875 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1876 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1877 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001878 range_whole = true;
1879 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001880 }
1881retry:
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001882 while (!done && index <= end) {
1883 unsigned int i, nr_pages, found_pages;
1884 pgoff_t next = 0, tofind;
1885 struct page **pages;
Steve French37c0eb42005-10-05 14:50:29 -07001886
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001887 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1888 end - index) + 1;
Steve French37c0eb42005-10-05 14:50:29 -07001889
Jeff Laytonc2e87642012-03-23 14:40:55 -04001890 wdata = cifs_writedata_alloc((unsigned int)tofind,
1891 cifs_writev_complete);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001892 if (!wdata) {
1893 rc = -ENOMEM;
1894 break;
1895 }
1896
1897 /*
1898 * find_get_pages_tag seems to return a max of 256 on each
1899 * iteration, so we must call it several times in order to
1900 * fill the array or the wsize is effectively limited to
1901 * 256 * PAGE_CACHE_SIZE.
1902 */
1903 found_pages = 0;
1904 pages = wdata->pages;
1905 do {
1906 nr_pages = find_get_pages_tag(mapping, &index,
1907 PAGECACHE_TAG_DIRTY,
1908 tofind, pages);
1909 found_pages += nr_pages;
1910 tofind -= nr_pages;
1911 pages += nr_pages;
1912 } while (nr_pages && tofind && index <= end);
1913
1914 if (found_pages == 0) {
1915 kref_put(&wdata->refcount, cifs_writedata_release);
1916 break;
1917 }
1918
1919 nr_pages = 0;
1920 for (i = 0; i < found_pages; i++) {
1921 page = wdata->pages[i];
Steve French37c0eb42005-10-05 14:50:29 -07001922 /*
1923 * At this point we hold neither mapping->tree_lock nor
1924 * lock on the page itself: the page may be truncated or
1925 * invalidated (changing page->mapping to NULL), or even
1926 * swizzled back from swapper_space to tmpfs file
1927 * mapping
1928 */
1929
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001930 if (nr_pages == 0)
Steve French37c0eb42005-10-05 14:50:29 -07001931 lock_page(page);
Nick Piggin529ae9a2008-08-02 12:01:03 +02001932 else if (!trylock_page(page))
Steve French37c0eb42005-10-05 14:50:29 -07001933 break;
1934
1935 if (unlikely(page->mapping != mapping)) {
1936 unlock_page(page);
1937 break;
1938 }
1939
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001940 if (!wbc->range_cyclic && page->index > end) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001941 done = true;
Steve French37c0eb42005-10-05 14:50:29 -07001942 unlock_page(page);
1943 break;
1944 }
1945
1946 if (next && (page->index != next)) {
1947 /* Not next consecutive page */
1948 unlock_page(page);
1949 break;
1950 }
1951
1952 if (wbc->sync_mode != WB_SYNC_NONE)
1953 wait_on_page_writeback(page);
1954
1955 if (PageWriteback(page) ||
Linus Torvaldscb876f42006-12-23 16:19:07 -08001956 !clear_page_dirty_for_io(page)) {
Steve French37c0eb42005-10-05 14:50:29 -07001957 unlock_page(page);
1958 break;
1959 }
Steve French84d2f072005-10-12 15:32:05 -07001960
Linus Torvaldscb876f42006-12-23 16:19:07 -08001961 /*
1962 * This actually clears the dirty bit in the radix tree.
1963 * See cifs_writepage() for more commentary.
1964 */
1965 set_page_writeback(page);
1966
Jeff Layton3a98b862012-11-26 09:48:41 -05001967 if (page_offset(page) >= i_size_read(mapping->host)) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001968 done = true;
Steve French84d2f072005-10-12 15:32:05 -07001969 unlock_page(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08001970 end_page_writeback(page);
Steve French84d2f072005-10-12 15:32:05 -07001971 break;
1972 }
1973
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001974 wdata->pages[i] = page;
Steve French37c0eb42005-10-05 14:50:29 -07001975 next = page->index + 1;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001976 ++nr_pages;
Steve French37c0eb42005-10-05 14:50:29 -07001977 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001978
1979 /* reset index to refind any pages skipped */
1980 if (nr_pages == 0)
1981 index = wdata->pages[0]->index + 1;
1982
1983 /* put any pages we aren't going to use */
1984 for (i = nr_pages; i < found_pages; i++) {
1985 page_cache_release(wdata->pages[i]);
1986 wdata->pages[i] = NULL;
1987 }
1988
1989 /* nothing to write? */
1990 if (nr_pages == 0) {
1991 kref_put(&wdata->refcount, cifs_writedata_release);
1992 continue;
1993 }
1994
1995 wdata->sync_mode = wbc->sync_mode;
1996 wdata->nr_pages = nr_pages;
1997 wdata->offset = page_offset(wdata->pages[0]);
Jeff Laytoneddb0792012-09-18 16:20:35 -07001998 wdata->pagesz = PAGE_CACHE_SIZE;
1999 wdata->tailsz =
Jeff Layton3a98b862012-11-26 09:48:41 -05002000 min(i_size_read(mapping->host) -
2001 page_offset(wdata->pages[nr_pages - 1]),
Jeff Laytoneddb0792012-09-18 16:20:35 -07002002 (loff_t)PAGE_CACHE_SIZE);
2003 wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
2004 wdata->tailsz;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002005
2006 do {
2007 if (wdata->cfile != NULL)
2008 cifsFileInfo_put(wdata->cfile);
2009 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
2010 false);
2011 if (!wdata->cfile) {
Joe Perchesf96637b2013-05-04 22:12:25 -05002012 cifs_dbg(VFS, "No writable handles for inode\n");
Steve French23e7dd72005-10-20 13:44:56 -07002013 rc = -EBADF;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002014 break;
Steve French37c0eb42005-10-05 14:50:29 -07002015 }
Jeff Laytonfe5f5d22012-03-23 14:40:55 -04002016 wdata->pid = wdata->cfile->pid;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002017 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
2018 rc = server->ops->async_writev(wdata);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002019 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
Jeff Laytonf3983c22010-09-22 16:17:40 -07002020
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002021 for (i = 0; i < nr_pages; ++i)
2022 unlock_page(wdata->pages[i]);
Jeff Layton941b8532011-01-11 07:24:01 -05002023
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002024 /* send failure -- clean up the mess */
2025 if (rc != 0) {
2026 for (i = 0; i < nr_pages; ++i) {
Jeff Layton941b8532011-01-11 07:24:01 -05002027 if (rc == -EAGAIN)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002028 redirty_page_for_writepage(wbc,
2029 wdata->pages[i]);
2030 else
2031 SetPageError(wdata->pages[i]);
2032 end_page_writeback(wdata->pages[i]);
2033 page_cache_release(wdata->pages[i]);
Steve French37c0eb42005-10-05 14:50:29 -07002034 }
Jeff Layton941b8532011-01-11 07:24:01 -05002035 if (rc != -EAGAIN)
2036 mapping_set_error(mapping, rc);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002037 }
2038 kref_put(&wdata->refcount, cifs_writedata_release);
Jeff Layton941b8532011-01-11 07:24:01 -05002039
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002040 wbc->nr_to_write -= nr_pages;
2041 if (wbc->nr_to_write <= 0)
2042 done = true;
Dave Kleikampb066a482008-11-18 03:49:05 +00002043
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002044 index = next;
Steve French37c0eb42005-10-05 14:50:29 -07002045 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002046
Steve French37c0eb42005-10-05 14:50:29 -07002047 if (!scanned && !done) {
2048 /*
2049 * We hit the last page and there is more work to be done: wrap
2050 * back to the start of the file
2051 */
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002052 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07002053 index = 0;
2054 goto retry;
2055 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002056
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002057 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
Steve French37c0eb42005-10-05 14:50:29 -07002058 mapping->writeback_index = index;
2059
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060 return rc;
2061}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002063static int
2064cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002066 int rc;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002067 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002069 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070/* BB add check for wbc flags */
2071 page_cache_get(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00002072 if (!PageUptodate(page))
Joe Perchesf96637b2013-05-04 22:12:25 -05002073 cifs_dbg(FYI, "ppw - page not up to date\n");
Linus Torvaldscb876f42006-12-23 16:19:07 -08002074
2075 /*
2076 * Set the "writeback" flag, and clear "dirty" in the radix tree.
2077 *
2078 * A writepage() implementation always needs to do either this,
2079 * or re-dirty the page with "redirty_page_for_writepage()" in
2080 * the case of a failure.
2081 *
2082 * Just unlocking the page will cause the radix tree tag-bits
2083 * to fail to update with the state of the page correctly.
2084 */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002085 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002086retry_write:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002088 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
2089 goto retry_write;
2090 else if (rc == -EAGAIN)
2091 redirty_page_for_writepage(wbc, page);
2092 else if (rc != 0)
2093 SetPageError(page);
2094 else
2095 SetPageUptodate(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08002096 end_page_writeback(page);
2097 page_cache_release(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002098 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099 return rc;
2100}
2101
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002102static int cifs_writepage(struct page *page, struct writeback_control *wbc)
2103{
2104 int rc = cifs_writepage_locked(page, wbc);
2105 unlock_page(page);
2106 return rc;
2107}
2108
Nick Piggind9414772008-09-24 11:32:59 -04002109static int cifs_write_end(struct file *file, struct address_space *mapping,
2110 loff_t pos, unsigned len, unsigned copied,
2111 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112{
Nick Piggind9414772008-09-24 11:32:59 -04002113 int rc;
2114 struct inode *inode = mapping->host;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002115 struct cifsFileInfo *cfile = file->private_data;
2116 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2117 __u32 pid;
2118
2119 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2120 pid = cfile->pid;
2121 else
2122 pid = current->tgid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123
Joe Perchesf96637b2013-05-04 22:12:25 -05002124 cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00002125 page, pos, copied);
Steve Frenchad7a2922008-02-07 23:25:02 +00002126
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002127 if (PageChecked(page)) {
2128 if (copied == len)
2129 SetPageUptodate(page);
2130 ClearPageChecked(page);
2131 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
Nick Piggind9414772008-09-24 11:32:59 -04002132 SetPageUptodate(page);
2133
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134 if (!PageUptodate(page)) {
Nick Piggind9414772008-09-24 11:32:59 -04002135 char *page_data;
2136 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002137 unsigned int xid;
Nick Piggind9414772008-09-24 11:32:59 -04002138
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002139 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140 /* this is probably better than directly calling
2141 partialpage_write since in this function the file handle is
2142 known which we might as well leverage */
2143 /* BB check if anything else missing out of ppw
2144 such as updating last write time */
2145 page_data = kmap(page);
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002146 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
Nick Piggind9414772008-09-24 11:32:59 -04002147 /* if (rc < 0) should we set writebehind rc? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148 kunmap(page);
Nick Piggind9414772008-09-24 11:32:59 -04002149
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002150 free_xid(xid);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002151 } else {
Nick Piggind9414772008-09-24 11:32:59 -04002152 rc = copied;
2153 pos += copied;
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002154 set_page_dirty(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155 }
2156
Nick Piggind9414772008-09-24 11:32:59 -04002157 if (rc > 0) {
2158 spin_lock(&inode->i_lock);
2159 if (pos > inode->i_size)
2160 i_size_write(inode, pos);
2161 spin_unlock(&inode->i_lock);
2162 }
2163
2164 unlock_page(page);
2165 page_cache_release(page);
2166
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167 return rc;
2168}
2169
Josef Bacik02c24a82011-07-16 20:44:56 -04002170int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2171 int datasync)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002173 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002175 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002176 struct TCP_Server_Info *server;
Joe Perchesc21dfb62010-07-12 13:50:14 -07002177 struct cifsFileInfo *smbfile = file->private_data;
Al Viro496ad9a2013-01-23 17:07:38 -05002178 struct inode *inode = file_inode(file);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002179 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180
Josef Bacik02c24a82011-07-16 20:44:56 -04002181 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2182 if (rc)
2183 return rc;
2184 mutex_lock(&inode->i_mutex);
2185
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002186 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187
Joe Perchesf96637b2013-05-04 22:12:25 -05002188 cifs_dbg(FYI, "Sync file - name: %s datasync: 0x%x\n",
2189 file->f_path.dentry->d_name.name, datasync);
Steve French50c2f752007-07-13 00:33:32 +00002190
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002191 if (!CIFS_I(inode)->clientCanCacheRead) {
2192 rc = cifs_invalidate_mapping(inode);
2193 if (rc) {
Joe Perchesf96637b2013-05-04 22:12:25 -05002194 cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002195 rc = 0; /* don't care about it in fsync */
2196 }
2197 }
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002198
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002199 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002200 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2201 server = tcon->ses->server;
2202 if (server->ops->flush)
2203 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2204 else
2205 rc = -ENOSYS;
2206 }
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002207
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002208 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002209 mutex_unlock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002210 return rc;
2211}
2212
Josef Bacik02c24a82011-07-16 20:44:56 -04002213int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002214{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002215 unsigned int xid;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002216 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002217 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002218 struct TCP_Server_Info *server;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002219 struct cifsFileInfo *smbfile = file->private_data;
2220 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Josef Bacik02c24a82011-07-16 20:44:56 -04002221 struct inode *inode = file->f_mapping->host;
2222
2223 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2224 if (rc)
2225 return rc;
2226 mutex_lock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002227
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002228 xid = get_xid();
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002229
Joe Perchesf96637b2013-05-04 22:12:25 -05002230 cifs_dbg(FYI, "Sync file - name: %s datasync: 0x%x\n",
2231 file->f_path.dentry->d_name.name, datasync);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002232
2233 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002234 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2235 server = tcon->ses->server;
2236 if (server->ops->flush)
2237 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2238 else
2239 rc = -ENOSYS;
2240 }
Steve Frenchb298f222009-02-21 21:17:43 +00002241
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002242 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002243 mutex_unlock(&inode->i_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244 return rc;
2245}
2246
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247/*
2248 * As file closes, flush all cached write data for this inode checking
2249 * for write behind errors.
2250 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002251int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252{
Al Viro496ad9a2013-01-23 17:07:38 -05002253 struct inode *inode = file_inode(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254 int rc = 0;
2255
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002256 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002257 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002258
Joe Perchesf96637b2013-05-04 22:12:25 -05002259 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260
2261 return rc;
2262}
2263
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002264static int
2265cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2266{
2267 int rc = 0;
2268 unsigned long i;
2269
2270 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002271 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002272 if (!pages[i]) {
2273 /*
2274 * save number of pages we have already allocated and
2275 * return with ENOMEM error
2276 */
2277 num_pages = i;
2278 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002279 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002280 }
2281 }
2282
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002283 if (rc) {
2284 for (i = 0; i < num_pages; i++)
2285 put_page(pages[i]);
2286 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002287 return rc;
2288}
2289
2290static inline
2291size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2292{
2293 size_t num_pages;
2294 size_t clen;
2295
2296 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002297 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002298
2299 if (cur_len)
2300 *cur_len = clen;
2301
2302 return num_pages;
2303}
2304
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002305static void
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002306cifs_uncached_writev_complete(struct work_struct *work)
2307{
2308 int i;
2309 struct cifs_writedata *wdata = container_of(work,
2310 struct cifs_writedata, work);
2311 struct inode *inode = wdata->cfile->dentry->d_inode;
2312 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2313
2314 spin_lock(&inode->i_lock);
2315 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2316 if (cifsi->server_eof > inode->i_size)
2317 i_size_write(inode, cifsi->server_eof);
2318 spin_unlock(&inode->i_lock);
2319
2320 complete(&wdata->done);
2321
2322 if (wdata->result != -EAGAIN) {
2323 for (i = 0; i < wdata->nr_pages; i++)
2324 put_page(wdata->pages[i]);
2325 }
2326
2327 kref_put(&wdata->refcount, cifs_writedata_release);
2328}
2329
2330/* attempt to send write to server, retry on any -EAGAIN errors */
2331static int
2332cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2333{
2334 int rc;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002335 struct TCP_Server_Info *server;
2336
2337 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002338
2339 do {
2340 if (wdata->cfile->invalidHandle) {
2341 rc = cifs_reopen_file(wdata->cfile, false);
2342 if (rc != 0)
2343 continue;
2344 }
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002345 rc = server->ops->async_writev(wdata);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002346 } while (rc == -EAGAIN);
2347
2348 return rc;
2349}
2350
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002351static ssize_t
2352cifs_iovec_write(struct file *file, const struct iovec *iov,
2353 unsigned long nr_segs, loff_t *poffset)
2354{
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002355 unsigned long nr_pages, i;
Jeff Layton9f0afaf2014-02-14 07:20:35 -05002356 size_t bytes, copied, len, cur_len;
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002357 ssize_t total_written = 0;
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002358 loff_t offset;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002359 struct iov_iter it;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002360 struct cifsFileInfo *open_file;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002361 struct cifs_tcon *tcon;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002362 struct cifs_sb_info *cifs_sb;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002363 struct cifs_writedata *wdata, *tmp;
2364 struct list_head wdata_list;
2365 int rc;
2366 pid_t pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002367
2368 len = iov_length(iov, nr_segs);
2369 if (!len)
2370 return 0;
2371
2372 rc = generic_write_checks(file, poffset, &len, 0);
2373 if (rc)
2374 return rc;
2375
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002376 INIT_LIST_HEAD(&wdata_list);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002377 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002378 open_file = file->private_data;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002379 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002380
2381 if (!tcon->ses->server->ops->async_writev)
2382 return -ENOSYS;
2383
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002384 offset = *poffset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002385
2386 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2387 pid = open_file->pid;
2388 else
2389 pid = current->tgid;
2390
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002391 iov_iter_init(&it, iov, nr_segs, len, 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002392 do {
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002393 size_t save_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002394
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002395 nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2396 wdata = cifs_writedata_alloc(nr_pages,
2397 cifs_uncached_writev_complete);
2398 if (!wdata) {
2399 rc = -ENOMEM;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002400 break;
2401 }
2402
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002403 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2404 if (rc) {
2405 kfree(wdata);
2406 break;
2407 }
2408
2409 save_len = cur_len;
2410 for (i = 0; i < nr_pages; i++) {
Jeff Layton9f0afaf2014-02-14 07:20:35 -05002411 bytes = min_t(const size_t, cur_len, PAGE_SIZE);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002412 copied = iov_iter_copy_from_user(wdata->pages[i], &it,
Jeff Layton9f0afaf2014-02-14 07:20:35 -05002413 0, bytes);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002414 cur_len -= copied;
2415 iov_iter_advance(&it, copied);
Jeff Layton9f0afaf2014-02-14 07:20:35 -05002416 /*
2417 * If we didn't copy as much as we expected, then that
2418 * may mean we trod into an unmapped area. Stop copying
2419 * at that point. On the next pass through the big
2420 * loop, we'll likely end up getting a zero-length
2421 * write and bailing out of it.
2422 */
2423 if (copied < bytes)
2424 break;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002425 }
2426 cur_len = save_len - cur_len;
2427
Jeff Layton9f0afaf2014-02-14 07:20:35 -05002428 /*
2429 * If we have no data to send, then that probably means that
2430 * the copy above failed altogether. That's most likely because
2431 * the address in the iovec was bogus. Set the rc to -EFAULT,
2432 * free anything we allocated and bail out.
2433 */
2434 if (!cur_len) {
2435 for (i = 0; i < nr_pages; i++)
2436 put_page(wdata->pages[i]);
2437 kfree(wdata);
2438 rc = -EFAULT;
2439 break;
2440 }
2441
2442 /*
2443 * i + 1 now represents the number of pages we actually used in
2444 * the copy phase above. Bring nr_pages down to that, and free
2445 * any pages that we didn't use.
2446 */
2447 for ( ; nr_pages > i + 1; nr_pages--)
2448 put_page(wdata->pages[nr_pages - 1]);
2449
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002450 wdata->sync_mode = WB_SYNC_ALL;
2451 wdata->nr_pages = nr_pages;
2452 wdata->offset = (__u64)offset;
2453 wdata->cfile = cifsFileInfo_get(open_file);
2454 wdata->pid = pid;
2455 wdata->bytes = cur_len;
Jeff Laytoneddb0792012-09-18 16:20:35 -07002456 wdata->pagesz = PAGE_SIZE;
2457 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002458 rc = cifs_uncached_retry_writev(wdata);
2459 if (rc) {
2460 kref_put(&wdata->refcount, cifs_writedata_release);
2461 break;
2462 }
2463
2464 list_add_tail(&wdata->list, &wdata_list);
2465 offset += cur_len;
2466 len -= cur_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002467 } while (len > 0);
2468
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002469 /*
2470 * If at least one write was successfully sent, then discard any rc
2471 * value from the later writes. If the other write succeeds, then
2472 * we'll end up returning whatever was written. If it fails, then
2473 * we'll get a new rc value from that.
2474 */
2475 if (!list_empty(&wdata_list))
2476 rc = 0;
2477
2478 /*
2479 * Wait for and collect replies for any successful sends in order of
2480 * increasing offset. Once an error is hit or we get a fatal signal
2481 * while waiting, then return without waiting for any more replies.
2482 */
2483restart_loop:
2484 list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2485 if (!rc) {
2486 /* FIXME: freezable too? */
2487 rc = wait_for_completion_killable(&wdata->done);
2488 if (rc)
2489 rc = -EINTR;
2490 else if (wdata->result)
2491 rc = wdata->result;
2492 else
2493 total_written += wdata->bytes;
2494
2495 /* resend call if it's a retryable error */
2496 if (rc == -EAGAIN) {
2497 rc = cifs_uncached_retry_writev(wdata);
2498 goto restart_loop;
2499 }
2500 }
2501 list_del_init(&wdata->list);
2502 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002503 }
2504
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002505 if (total_written > 0)
2506 *poffset += total_written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002507
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002508 cifs_stats_bytes_written(tcon, total_written);
2509 return total_written ? total_written : (ssize_t)rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002510}
2511
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002512ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002513 unsigned long nr_segs, loff_t pos)
2514{
2515 ssize_t written;
2516 struct inode *inode;
2517
Al Viro496ad9a2013-01-23 17:07:38 -05002518 inode = file_inode(iocb->ki_filp);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002519
2520 /*
2521 * BB - optimize the way when signing is disabled. We can drop this
2522 * extra memory-to-memory copying and use iovec buffers for constructing
2523 * write request.
2524 */
2525
2526 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2527 if (written > 0) {
2528 CIFS_I(inode)->invalid_mapping = true;
2529 iocb->ki_pos = pos;
2530 }
2531
2532 return written;
2533}
2534
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002535static ssize_t
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002536cifs_writev(struct kiocb *iocb, const struct iovec *iov,
2537 unsigned long nr_segs, loff_t pos)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002538{
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002539 struct file *file = iocb->ki_filp;
2540 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2541 struct inode *inode = file->f_mapping->host;
2542 struct cifsInodeInfo *cinode = CIFS_I(inode);
2543 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2544 ssize_t rc = -EACCES;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002545
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002546 BUG_ON(iocb->ki_pos != pos);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002547
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002548 /*
2549 * We need to hold the sem to be sure nobody modifies lock list
2550 * with a brlock that prevents writing.
2551 */
2552 down_read(&cinode->lock_sem);
2553 if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
2554 server->vals->exclusive_lock_type, NULL,
Pavel Shilovsky081c0412012-11-27 18:38:53 +04002555 CIFS_WRITE_OP)) {
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002556 mutex_lock(&inode->i_mutex);
2557 rc = __generic_file_aio_write(iocb, iov, nr_segs,
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002558 &iocb->ki_pos);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002559 mutex_unlock(&inode->i_mutex);
2560 }
2561
2562 if (rc > 0 || rc == -EIOCBQUEUED) {
2563 ssize_t err;
2564
2565 err = generic_write_sync(file, pos, rc);
2566 if (err < 0 && rc > 0)
2567 rc = err;
2568 }
2569
2570 up_read(&cinode->lock_sem);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002571 return rc;
2572}
2573
/*
 * Strict cache mode write dispatch: when the client may cache the file
 * (clientCanCacheAll), use the cached write path — the generic one if
 * POSIX brlock semantics apply, otherwise cifs_writev() which checks
 * mandatory locks. Without that caching level, fall back to an
 * uncached write and break a cached read state afterwards.
 */
ssize_t
cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
		   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	if (cinode->clientCanCacheAll) {
		/* POSIX locks on a unix-capable mount: generic path is safe */
		if (cap_unix(tcon->ses) &&
		(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
		  && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
			return generic_file_aio_write(iocb, iov, nr_segs, pos);
		return cifs_writev(iocb, iov, nr_segs, pos);
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the data
	 * to the server exactly from the pos to pos+len-1 rather than flush all
	 * affected pages because it may cause a error with mandatory locks on
	 * these pages but not on the region from pos to ppos+len-1.
	 */
	written = cifs_user_writev(iocb, iov, nr_segs, pos);
	if (written > 0 && cinode->clientCanCacheRead) {
		/*
		 * Windows 7 server can delay breaking level2 oplock if a write
		 * request comes - break it on the client to prevent reading
		 * an old data.
		 */
		cifs_invalidate_mapping(inode);
		cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
			 inode);
		cinode->clientCanCacheRead = false;
	}
	return written;
}
2613
Jeff Layton0471ca32012-05-16 07:13:16 -04002614static struct cifs_readdata *
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002615cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04002616{
2617 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002618
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002619 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2620 GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04002621 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002622 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002623 INIT_LIST_HEAD(&rdata->list);
2624 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002625 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04002626 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002627
Jeff Layton0471ca32012-05-16 07:13:16 -04002628 return rdata;
2629}
2630
Jeff Layton6993f742012-05-16 07:13:17 -04002631void
2632cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04002633{
Jeff Layton6993f742012-05-16 07:13:17 -04002634 struct cifs_readdata *rdata = container_of(refcount,
2635 struct cifs_readdata, refcount);
2636
2637 if (rdata->cfile)
2638 cifsFileInfo_put(rdata->cfile);
2639
Jeff Layton0471ca32012-05-16 07:13:16 -04002640 kfree(rdata);
2641}
2642
Jeff Layton2a1bb132012-05-16 07:13:17 -04002643static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002644cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04002645{
2646 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002647 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04002648 unsigned int i;
2649
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002650 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002651 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2652 if (!page) {
2653 rc = -ENOMEM;
2654 break;
2655 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002656 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04002657 }
2658
2659 if (rc) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002660 for (i = 0; i < nr_pages; i++) {
2661 put_page(rdata->pages[i]);
2662 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002663 }
2664 }
2665 return rc;
2666}
2667
2668static void
2669cifs_uncached_readdata_release(struct kref *refcount)
2670{
Jeff Layton1c892542012-05-16 07:13:17 -04002671 struct cifs_readdata *rdata = container_of(refcount,
2672 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002673 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002674
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002675 for (i = 0; i < rdata->nr_pages; i++) {
2676 put_page(rdata->pages[i]);
2677 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002678 }
2679 cifs_readdata_release(refcount);
2680}
2681
2682static int
Jeff Layton2a1bb132012-05-16 07:13:17 -04002683cifs_retry_async_readv(struct cifs_readdata *rdata)
2684{
2685 int rc;
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002686 struct TCP_Server_Info *server;
2687
2688 server = tlink_tcon(rdata->cfile->tlink)->ses->server;
Jeff Layton2a1bb132012-05-16 07:13:17 -04002689
2690 do {
2691 if (rdata->cfile->invalidHandle) {
2692 rc = cifs_reopen_file(rdata->cfile, true);
2693 if (rc != 0)
2694 continue;
2695 }
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002696 rc = server->ops->async_readv(rdata);
Jeff Layton2a1bb132012-05-16 07:13:17 -04002697 } while (rc == -EAGAIN);
2698
2699 return rc;
2700}
2701
Jeff Layton1c892542012-05-16 07:13:17 -04002702/**
2703 * cifs_readdata_to_iov - copy data from pages in response to an iovec
2704 * @rdata: the readdata response with list of pages holding data
2705 * @iov: vector in which we should copy the data
2706 * @nr_segs: number of segments in vector
2707 * @offset: offset into file of the first iovec
2708 * @copied: used to return the amount of data copied to the iov
2709 *
2710 * This function copies data from a list of pages in a readdata response into
2711 * an array of iovecs. It will first calculate where the data should go
2712 * based on the info in the readdata and then copy the data into that spot.
2713 */
2714static ssize_t
2715cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
2716 unsigned long nr_segs, loff_t offset, ssize_t *copied)
2717{
2718 int rc = 0;
2719 struct iov_iter ii;
2720 size_t pos = rdata->offset - offset;
Jeff Layton1c892542012-05-16 07:13:17 -04002721 ssize_t remaining = rdata->bytes;
2722 unsigned char *pdata;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002723 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002724
2725 /* set up iov_iter and advance to the correct offset */
2726 iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
2727 iov_iter_advance(&ii, pos);
2728
2729 *copied = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002730 for (i = 0; i < rdata->nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002731 ssize_t copy;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002732 struct page *page = rdata->pages[i];
Jeff Layton1c892542012-05-16 07:13:17 -04002733
2734 /* copy a whole page or whatever's left */
2735 copy = min_t(ssize_t, remaining, PAGE_SIZE);
2736
2737 /* ...but limit it to whatever space is left in the iov */
2738 copy = min_t(ssize_t, copy, iov_iter_count(&ii));
2739
2740 /* go while there's data to be copied and no errors */
2741 if (copy && !rc) {
2742 pdata = kmap(page);
2743 rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
2744 (int)copy);
2745 kunmap(page);
2746 if (!rc) {
2747 *copied += copy;
2748 remaining -= copy;
2749 iov_iter_advance(&ii, copy);
2750 }
2751 }
Jeff Layton1c892542012-05-16 07:13:17 -04002752 }
2753
2754 return rc;
2755}
2756
2757static void
2758cifs_uncached_readv_complete(struct work_struct *work)
2759{
2760 struct cifs_readdata *rdata = container_of(work,
2761 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04002762
2763 complete(&rdata->done);
2764 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2765}
2766
/*
 * Receive the payload of a read response from the socket directly into
 * rdata's pages for an uncached (user) read.  @len is the number of data
 * bytes in the response: full pages are read whole, the last partial page
 * is zero-padded and its valid length recorded in rdata->tailsz, and any
 * pages beyond the payload are released from the request.
 *
 * Returns the number of bytes received, or a negative error.  A socket
 * result of -EAGAIN is propagated even if some bytes were read, so the
 * caller can resend the whole request.
 */
static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_SIZE;
			cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n",
				 i, iov.iov_base, iov.iov_len);
			len -= PAGE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n",
				 i, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}

		/* page stays kmapped across the socket read, unmapped after */
		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 && result != -EAGAIN ? total_read : result;
}
2814
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002815static ssize_t
2816cifs_iovec_read(struct file *file, const struct iovec *iov,
2817 unsigned long nr_segs, loff_t *poffset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002818{
Jeff Layton1c892542012-05-16 07:13:17 -04002819 ssize_t rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002820 size_t len, cur_len;
Jeff Layton1c892542012-05-16 07:13:17 -04002821 ssize_t total_read = 0;
2822 loff_t offset = *poffset;
2823 unsigned int npages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002824 struct cifs_sb_info *cifs_sb;
Jeff Layton1c892542012-05-16 07:13:17 -04002825 struct cifs_tcon *tcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002826 struct cifsFileInfo *open_file;
Jeff Layton1c892542012-05-16 07:13:17 -04002827 struct cifs_readdata *rdata, *tmp;
2828 struct list_head rdata_list;
2829 pid_t pid;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002830
2831 if (!nr_segs)
2832 return 0;
2833
2834 len = iov_length(iov, nr_segs);
2835 if (!len)
2836 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837
Jeff Layton1c892542012-05-16 07:13:17 -04002838 INIT_LIST_HEAD(&rdata_list);
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002839 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Joe Perchesc21dfb62010-07-12 13:50:14 -07002840 open_file = file->private_data;
Jeff Layton1c892542012-05-16 07:13:17 -04002841 tcon = tlink_tcon(open_file->tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002842
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002843 if (!tcon->ses->server->ops->async_readv)
2844 return -ENOSYS;
2845
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002846 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2847 pid = open_file->pid;
2848 else
2849 pid = current->tgid;
2850
Steve Frenchad7a2922008-02-07 23:25:02 +00002851 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesf96637b2013-05-04 22:12:25 -05002852 cifs_dbg(FYI, "attempting read on write only file instance\n");
Steve Frenchad7a2922008-02-07 23:25:02 +00002853
Jeff Layton1c892542012-05-16 07:13:17 -04002854 do {
2855 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
2856 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002857
Jeff Layton1c892542012-05-16 07:13:17 -04002858 /* allocate a readdata struct */
2859 rdata = cifs_readdata_alloc(npages,
2860 cifs_uncached_readv_complete);
2861 if (!rdata) {
2862 rc = -ENOMEM;
2863 goto error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002864 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002865
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002866 rc = cifs_read_allocate_pages(rdata, npages);
Jeff Layton1c892542012-05-16 07:13:17 -04002867 if (rc)
2868 goto error;
2869
2870 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002871 rdata->nr_pages = npages;
Jeff Layton1c892542012-05-16 07:13:17 -04002872 rdata->offset = offset;
2873 rdata->bytes = cur_len;
2874 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07002875 rdata->pagesz = PAGE_SIZE;
2876 rdata->read_into_pages = cifs_uncached_read_into_pages;
Jeff Layton1c892542012-05-16 07:13:17 -04002877
2878 rc = cifs_retry_async_readv(rdata);
2879error:
2880 if (rc) {
2881 kref_put(&rdata->refcount,
2882 cifs_uncached_readdata_release);
2883 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002884 }
Jeff Layton1c892542012-05-16 07:13:17 -04002885
2886 list_add_tail(&rdata->list, &rdata_list);
2887 offset += cur_len;
2888 len -= cur_len;
2889 } while (len > 0);
2890
2891 /* if at least one read request send succeeded, then reset rc */
2892 if (!list_empty(&rdata_list))
2893 rc = 0;
2894
2895 /* the loop below should proceed in the order of increasing offsets */
2896restart_loop:
2897 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
2898 if (!rc) {
2899 ssize_t copied;
2900
2901 /* FIXME: freezable sleep too? */
2902 rc = wait_for_completion_killable(&rdata->done);
2903 if (rc)
2904 rc = -EINTR;
2905 else if (rdata->result)
2906 rc = rdata->result;
2907 else {
2908 rc = cifs_readdata_to_iov(rdata, iov,
2909 nr_segs, *poffset,
2910 &copied);
2911 total_read += copied;
2912 }
2913
2914 /* resend call if it's a retryable error */
2915 if (rc == -EAGAIN) {
2916 rc = cifs_retry_async_readv(rdata);
2917 goto restart_loop;
2918 }
2919 }
2920 list_del_init(&rdata->list);
2921 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002922 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002923
Jeff Layton1c892542012-05-16 07:13:17 -04002924 cifs_stats_bytes_read(tcon, total_read);
2925 *poffset += total_read;
2926
Pavel Shilovsky09a47072012-09-18 16:20:29 -07002927 /* mask nodata case */
2928 if (rc == -ENODATA)
2929 rc = 0;
2930
Jeff Layton1c892542012-05-16 07:13:17 -04002931 return total_read ? total_read : rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002932}
2933
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002934ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002935 unsigned long nr_segs, loff_t pos)
2936{
2937 ssize_t read;
2938
2939 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2940 if (read > 0)
2941 iocb->ki_pos = pos;
2942
2943 return read;
2944}
2945
/*
 * Read path for "strict cache" mounts.  Uses the page cache only when it
 * is known to be trustworthy: a read oplock is held and no conflicting
 * mandatory byte-range lock covers the region being read.
 */
ssize_t
cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
		  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have level II oplock because the server can delay mtime
	 * change - so we can't make a decision about inode invalidating.
	 * And we can also fail with pagereading if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */
	if (!cinode->clientCanCacheRead)
		return cifs_user_readv(iocb, iov, nr_segs, pos);

	/*
	 * When POSIX locking was negotiated (and POSIX brlocks weren't
	 * disabled via mount flag), no local mandatory-lock conflict check
	 * is done here - presumably the server enforces byte-range locks
	 * itself in that case.
	 */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_aio_read(iocb, iov, nr_segs, pos);

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents reading.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
				     tcon->ses->server->vals->shared_lock_type,
				     NULL, CIFS_READ_OP))
		rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
	up_read(&cinode->lock_sem);
	/* rc stays -EACCES if a conflicting lock blocked the read */
	return rc;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002986
/*
 * Synchronous read: fill @read_data with up to @read_size bytes from the
 * file starting at *offset, issuing rsize-sized sync_read calls to the
 * server and retrying transparently on -EAGAIN (reopening an invalidated
 * handle as needed).
 *
 * Returns the total number of bytes read, or a negative error if the
 * very first chunk failed.  *offset is advanced by the amount read.
 */
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	/* read in rsize-sized chunks until done or a chunk fails/shortens */
	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		current_read_size = min_t(uint, read_size - total_read, rsize);
		/*
		 * For windows me and 9x we do not want to request more than it
		 * negotiated since it will refuse the read then.
		 */
		if ((tcon->ses) && !(tcon->ses->capabilities &
				tcon->ses->server->vals->cap_large_files)) {
			current_read_size = min_t(uint, current_read_size,
					CIFSMaxBufSize);
		}
		/* seed with -EAGAIN so the retry loop runs at least once */
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			rc = server->ops->sync_read(xid, open_file, &io_parms,
						    &bytes_read, &cur_offset,
						    &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				/* nothing read at all: report the error */
				free_xid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}
3075
Jeff Laytonca83ce32011-04-12 09:13:44 -04003076/*
3077 * If the page is mmap'ed into a process' page tables, then we need to make
3078 * sure that it doesn't change while being written back.
3079 */
3080static int
3081cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
3082{
3083 struct page *page = vmf->page;
3084
3085 lock_page(page);
3086 return VM_FAULT_LOCKED;
3087}
3088
3089static struct vm_operations_struct cifs_file_vm_ops = {
3090 .fault = filemap_fault,
3091 .page_mkwrite = cifs_page_mkwrite,
Konstantin Khlebnikov0b173bc2012-10-08 16:28:46 -07003092 .remap_pages = generic_file_remap_pages,
Jeff Laytonca83ce32011-04-12 09:13:44 -04003093};
3094
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003095int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3096{
3097 int rc, xid;
Al Viro496ad9a2013-01-23 17:07:38 -05003098 struct inode *inode = file_inode(file);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003099
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003100 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003101
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04003102 if (!CIFS_I(inode)->clientCanCacheRead) {
3103 rc = cifs_invalidate_mapping(inode);
3104 if (rc)
3105 return rc;
3106 }
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003107
3108 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003109 if (rc == 0)
3110 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003111 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003112 return rc;
3113}
3114
Linus Torvalds1da177e2005-04-16 15:20:36 -07003115int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3116{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003117 int rc, xid;
3118
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003119 xid = get_xid();
Jeff Laytonabab0952010-02-12 07:44:18 -05003120 rc = cifs_revalidate_file(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003121 if (rc) {
Joe Perchesf96637b2013-05-04 22:12:25 -05003122 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3123 rc);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003124 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003125 return rc;
3126 }
3127 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003128 if (rc == 0)
3129 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003130 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003131 return rc;
3132}
3133
/*
 * Work callback run when a readahead (readpages) request completes.
 * Hands each page back to the VM, marking it uptodate only if the read
 * succeeded, then drops the work item's reference on the readdata.
 */
static void
cifs_readv_complete(struct work_struct *work)
{
	unsigned int i;
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);

	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];

		lru_cache_add_file(page);

		if (rdata->result == 0) {
			/* must set uptodate before unlocking the page */
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		unlock_page(page);

		if (rdata->result == 0)
			/* copy into fscache once the page is usable */
			cifs_readpage_to_fscache(rdata->mapping->host, page);

		page_cache_release(page);
		rdata->pages[i] = NULL;
	}
	kref_put(&rdata->refcount, cifs_readdata_release);
}
3161
/*
 * Receive the payload of a readahead response from the socket directly
 * into rdata's pages.  @len is the number of data bytes in the response:
 * full pages are read whole, the last partial page is zero-padded (its
 * valid length recorded in rdata->tailsz), and surplus pages are either
 * zero-filled and marked uptodate (when past the server's known EOF) or
 * simply released.
 *
 * Returns the number of bytes received, or a negative error.  A socket
 * result of -EAGAIN is propagated even if some bytes were read, so the
 * caller can resend the request.
 */
static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
	cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);

	rdata->tailsz = PAGE_CACHE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_CACHE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_CACHE_SIZE;
			cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
				 i, page->index, iov.iov_base, iov.iov_len);
			len -= PAGE_CACHE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
				 i, page->index, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len,
			       '\0', PAGE_CACHE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_CACHE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		/* page stays kmapped across the socket read, unmapped after */
		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 && result != -EAGAIN ? total_read : result;
}
3237
/*
 * ->readpages() address_space operation: batch contiguous pages from
 * @page_list into rsize-sized async read requests.  Pages are added to
 * the page cache locked here; cifs_readv_complete() unlocks them and
 * marks them uptodate when each request finishes.
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point however since we set ra_pages to 0 when the rsize is smaller
	 * than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
		 __func__, file, mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int i;
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity ? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_CACHE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;

		/* move the batched pages into the readdata, lowest index first */
		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		rc = cifs_retry_async_readv(rdata);
		if (rc != 0) {
			/* send failed: release the pages ourselves */
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		/*
		 * Drop the reference taken at allocation; the completion
		 * work (cifs_readv_complete) puts its own reference,
		 * presumably taken by the receive path - see that function.
		 */
		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	return rc;
}
3384
/*
 * Fill a single locked page with data, either from fscache or by reading
 * from the server, and mark it uptodate on success.
 *
 * @file:    open file the read is issued against
 * @page:    locked page to populate (caller holds the page lock)
 * @poffset: file offset to read from; advanced by cifs_read on success
 *
 * Returns 0 on success, negative errno on failure.  The page remains
 * locked on return; the caller is responsible for unlocking it.
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? Try the local fscache copy first. */
	rc = cifs_readpage_from_fscache(file_inode(file), page);
	if (rc == 0)
		goto read_complete;

	/*
	 * Take an extra reference while the page is kmapped; dropped on
	 * the common exit path below.
	 */
	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cifs_dbg(FYI, "Bytes read %d\n", rc);

	/* reading touches atime; update it locally */
	file_inode(file)->i_atime =
		current_fs_time(file_inode(file)->i_sb);

	/* zero-fill the tail on a short read so no stale data leaks */
	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file_inode(file), page);

	/* rc held the byte count; callers expect 0 on success */
	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}
3428
3429static int cifs_readpage(struct file *file, struct page *page)
3430{
3431 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3432 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003433 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003434
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003435 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003436
3437 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303438 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003439 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303440 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003441 }
3442
Joe Perchesf96637b2013-05-04 22:12:25 -05003443 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00003444 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003445
3446 rc = cifs_readpage_worker(file, page, &offset);
3447
3448 unlock_page(page);
3449
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003450 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003451 return rc;
3452}
3453
Steve Frencha403a0a2007-07-26 15:54:16 +00003454static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3455{
3456 struct cifsFileInfo *open_file;
3457
Jeff Layton44772882010-10-15 15:34:03 -04003458 spin_lock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003459 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04003460 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Jeff Layton44772882010-10-15 15:34:03 -04003461 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003462 return 1;
3463 }
3464 }
Jeff Layton44772882010-10-15 15:34:03 -04003465 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003466 return 0;
3467}
3468
Linus Torvalds1da177e2005-04-16 15:20:36 -07003469/* We do not want to update the file size from server for inodes
3470 open for write - to avoid races with writepage extending
3471 the file - in the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003472 refreshing the inode only on increases in the file size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003473 but this is tricky to do without racing with writebehind
3474 page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00003475bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003476{
Steve Frencha403a0a2007-07-26 15:54:16 +00003477 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00003478 return true;
Steve French23e7dd72005-10-20 13:44:56 -07003479
Steve Frencha403a0a2007-07-26 15:54:16 +00003480 if (is_inode_writable(cifsInode)) {
3481 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08003482 struct cifs_sb_info *cifs_sb;
3483
Steve Frenchc32a0b62006-01-12 14:41:28 -08003484 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00003485 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003486 /* since no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08003487 we can change size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00003488 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08003489 }
3490
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003491 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00003492 return true;
Steve French7ba52632007-02-08 18:14:13 +00003493
Steve French4b18f2a2008-04-29 00:06:05 +00003494 return false;
Steve French23e7dd72005-10-20 13:44:56 -07003495 } else
Steve French4b18f2a2008-04-29 00:06:05 +00003496 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003497}
3498
Nick Piggind9414772008-09-24 11:32:59 -04003499static int cifs_write_begin(struct file *file, struct address_space *mapping,
3500 loff_t pos, unsigned len, unsigned flags,
3501 struct page **pagep, void **fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003502{
Nick Piggind9414772008-09-24 11:32:59 -04003503 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
3504 loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003505 loff_t page_start = pos & PAGE_MASK;
3506 loff_t i_size;
3507 struct page *page;
3508 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003509
Joe Perchesf96637b2013-05-04 22:12:25 -05003510 cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
Nick Piggind9414772008-09-24 11:32:59 -04003511
Nick Piggin54566b22009-01-04 12:00:53 -08003512 page = grab_cache_page_write_begin(mapping, index, flags);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003513 if (!page) {
3514 rc = -ENOMEM;
3515 goto out;
3516 }
Nick Piggind9414772008-09-24 11:32:59 -04003517
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003518 if (PageUptodate(page))
3519 goto out;
Steve French8a236262007-03-06 00:31:00 +00003520
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003521 /*
3522 * If we write a full page it will be up to date, no need to read from
3523 * the server. If the write is short, we'll end up doing a sync write
3524 * instead.
3525 */
3526 if (len == PAGE_CACHE_SIZE)
3527 goto out;
3528
3529 /*
3530 * optimize away the read when we have an oplock, and we're not
3531 * expecting to use any of the data we'd be reading in. That
3532 * is, when the page lies beyond the EOF, or straddles the EOF
3533 * and the write will cover all of the existing data.
3534 */
3535 if (CIFS_I(mapping->host)->clientCanCacheRead) {
3536 i_size = i_size_read(mapping->host);
3537 if (page_start >= i_size ||
3538 (offset == 0 && (pos + len) >= i_size)) {
3539 zero_user_segments(page, 0, offset,
3540 offset + len,
3541 PAGE_CACHE_SIZE);
3542 /*
3543 * PageChecked means that the parts of the page
3544 * to which we're not writing are considered up
3545 * to date. Once the data is copied to the
3546 * page, it can be set uptodate.
3547 */
3548 SetPageChecked(page);
3549 goto out;
3550 }
3551 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003552
Nick Piggind9414772008-09-24 11:32:59 -04003553 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003554 /*
3555 * might as well read a page, it is fast enough. If we get
3556 * an error, we don't need to return it. cifs_write_end will
3557 * do a sync write instead since PG_uptodate isn't set.
3558 */
3559 cifs_readpage_worker(file, page, &page_start);
Steve French8a236262007-03-06 00:31:00 +00003560 } else {
3561 /* we could try using another file handle if there is one -
3562 but how would we lock it to prevent close of that handle
3563 racing with this read? In any case
Nick Piggind9414772008-09-24 11:32:59 -04003564 this will be written out by write_end so is fine */
Steve French8a236262007-03-06 00:31:00 +00003565 }
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003566out:
3567 *pagep = page;
3568 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003569}
3570
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303571static int cifs_release_page(struct page *page, gfp_t gfp)
3572{
3573 if (PagePrivate(page))
3574 return 0;
3575
3576 return cifs_fscache_release_page(page, gfp);
3577}
3578
3579static void cifs_invalidate_page(struct page *page, unsigned long offset)
3580{
3581 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3582
3583 if (offset == 0)
3584 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3585}
3586
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003587static int cifs_launder_page(struct page *page)
3588{
3589 int rc = 0;
3590 loff_t range_start = page_offset(page);
3591 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
3592 struct writeback_control wbc = {
3593 .sync_mode = WB_SYNC_ALL,
3594 .nr_to_write = 0,
3595 .range_start = range_start,
3596 .range_end = range_end,
3597 };
3598
Joe Perchesf96637b2013-05-04 22:12:25 -05003599 cifs_dbg(FYI, "Launder page: %p\n", page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003600
3601 if (clear_page_dirty_for_io(page))
3602 rc = cifs_writepage_locked(page, &wbc);
3603
3604 cifs_fscache_invalidate_page(page, page->mapping->host);
3605 return rc;
3606}
3607
Tejun Heo9b646972010-07-20 22:09:02 +02003608void cifs_oplock_break(struct work_struct *work)
Jeff Layton3bc303c2009-09-21 06:47:50 -04003609{
3610 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3611 oplock_break);
Jeff Laytona5e18bc2010-10-11 15:07:18 -04003612 struct inode *inode = cfile->dentry->d_inode;
Jeff Layton3bc303c2009-09-21 06:47:50 -04003613 struct cifsInodeInfo *cinode = CIFS_I(inode);
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07003614 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytoneb4b7562010-10-22 14:52:29 -04003615 int rc = 0;
Jeff Layton3bc303c2009-09-21 06:47:50 -04003616
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04003617 if (!cinode->clientCanCacheAll && cinode->clientCanCacheRead &&
3618 cifs_has_mand_locks(cinode)) {
Joe Perchesf96637b2013-05-04 22:12:25 -05003619 cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3620 inode);
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04003621 cinode->clientCanCacheRead = false;
3622 }
3623
Jeff Layton3bc303c2009-09-21 06:47:50 -04003624 if (inode && S_ISREG(inode->i_mode)) {
Steve Frenchd54ff732010-04-27 04:38:15 +00003625 if (cinode->clientCanCacheRead)
Al Viro8737c932009-12-24 06:47:55 -05003626 break_lease(inode, O_RDONLY);
Steve Frenchd54ff732010-04-27 04:38:15 +00003627 else
Al Viro8737c932009-12-24 06:47:55 -05003628 break_lease(inode, O_WRONLY);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003629 rc = filemap_fdatawrite(inode->i_mapping);
3630 if (cinode->clientCanCacheRead == 0) {
Jeff Laytoneb4b7562010-10-22 14:52:29 -04003631 rc = filemap_fdatawait(inode->i_mapping);
3632 mapping_set_error(inode->i_mapping, rc);
Pavel Shilovsky03eca702012-12-06 21:24:33 +04003633 cifs_invalidate_mapping(inode);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003634 }
Joe Perchesf96637b2013-05-04 22:12:25 -05003635 cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003636 }
3637
Pavel Shilovsky85160e02011-10-22 15:33:29 +04003638 rc = cifs_push_locks(cfile);
3639 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05003640 cifs_dbg(VFS, "Push locks rc = %d\n", rc);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04003641
Jeff Layton3bc303c2009-09-21 06:47:50 -04003642 /*
3643 * releasing stale oplock after recent reconnect of smb session using
3644 * a now incorrect file handle is not a data integrity issue but do
3645 * not bother sending an oplock release if session to server still is
3646 * disconnected since oplock already released by the server
3647 */
Steve Frenchcdff08e2010-10-21 22:46:14 +00003648 if (!cfile->oplock_break_cancelled) {
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07003649 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
3650 cinode);
Joe Perchesf96637b2013-05-04 22:12:25 -05003651 cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003652 }
Jeff Layton3bc303c2009-09-21 06:47:50 -04003653}
3654
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07003655const struct address_space_operations cifs_addr_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003656 .readpage = cifs_readpage,
3657 .readpages = cifs_readpages,
3658 .writepage = cifs_writepage,
Steve French37c0eb42005-10-05 14:50:29 -07003659 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04003660 .write_begin = cifs_write_begin,
3661 .write_end = cifs_write_end,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003662 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303663 .releasepage = cifs_release_page,
3664 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003665 .launder_page = cifs_launder_page,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003666};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003667
/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};