author    Linus Torvalds <torvalds@linux-foundation.org>  2014-08-09 13:03:34 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-08-09 13:03:34 -0700
commit    023f78b02c729070116fa3a7ebd4107a032d3f5c (patch)
tree      5f2839a9577b852bff73df933d5604d0996f7c36 /fs
parent    63b12bdb0d21aca527996fb2c547387bfd3e14b8 (diff)
parent    f29ebb47d5bb59ef246966b047356c03629a9705 (diff)
Merge branch 'for-next' of git://git.samba.org/sfrench/cifs-2.6
Pull CIFS updates from Steve French:
 "The most visible change in this set is the addition of multi-credit
  support for SMB2/SMB3, which dramatically improves large file i/o
  performance for these dialects and significantly increases the maximum
  i/o size used on the wire for SMB2/SMB3.

  Also, reconnection behavior after network failure is improved."

* 'for-next' of git://git.samba.org/sfrench/cifs-2.6: (35 commits)
  Add worker function to set allocation size
  [CIFS] Fix incorrect hex vs. decimal in some debug print statements
  update CIFS TODO list
  Add Pavel to contributor list in cifs AUTHORS file
  Update cifs version
  CIFS: Fix STATUS_CANNOT_DELETE error mapping for SMB2
  CIFS: Optimize readpages in a short read case on reconnects
  CIFS: Optimize cifs_user_read() in a short read case on reconnects
  CIFS: Improve indentation in cifs_user_read()
  CIFS: Fix possible buffer corruption in cifs_user_read()
  CIFS: Count got bytes in read_into_pages()
  CIFS: Use separate var for the number of bytes got in async read
  CIFS: Indicate reconnect with ECONNABORTED error code
  CIFS: Use multicredits for SMB 2.1/3 reads
  CIFS: Fix rsize usage for sync read
  CIFS: Fix rsize usage in user read
  CIFS: Separate page reading from user read
  CIFS: Fix rsize usage in readpages
  CIFS: Separate page search from readpages
  CIFS: Use multicredits for SMB 2.1/3 writes
  ...
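The mechanism behind the multi-credit change is visible throughout the file.c and cifssmb.c hunks below: before building a request, the read/write paths now ask the transport layer for credits sized to the request via a new wait_mtu_credits operation, stash the grant in the wdata/rdata, and on any send failure hand the credits back with add_credits_and_wake_if() so that other waiters can proceed. The stand-alone C sketch below is a minimal user-space model of that accounting flow, not kernel code; the struct fields, sizes, and function bodies are invented for illustration only.

/*
 * Simplified model of the credit pattern this series introduces.
 * Names mirror the diff (wait_mtu_credits, add_credits_and_wake_if),
 * but the bodies are illustrative stand-ins, not the kernel versions.
 */
#include <stdio.h>

struct server {
	unsigned int credits;   /* credits currently available */
	unsigned int max_io;    /* bytes a single credit is worth */
};

/* grant as many credits as 'size' bytes need, capped by what is free */
static int wait_mtu_credits(struct server *srv, unsigned int size,
			    unsigned int *granted_bytes,
			    unsigned int *credits)
{
	unsigned int want = (size + srv->max_io - 1) / srv->max_io;

	if (srv->credits == 0)
		return -1;              /* the real code sleeps here */
	if (want > srv->credits)
		want = srv->credits;
	srv->credits -= want;
	*credits = want;
	*granted_bytes = want * srv->max_io;
	return 0;
}

/* give credits back (e.g. after a send failure) and "wake" waiters */
static void add_credits_and_wake_if(struct server *srv, unsigned int add)
{
	if (add) {
		srv->credits += add;
		printf("returned %u credits, waking waiters\n", add);
	}
}

int main(void)
{
	struct server srv = { .credits = 16, .max_io = 65536 };
	unsigned int bytes, credits;

	if (wait_mtu_credits(&srv, 1024 * 1024, &bytes, &credits) == 0) {
		printf("got %u credits covering %u bytes\n", credits, bytes);
		/* pretend the async write failed: hand the credits back */
		add_credits_and_wake_if(&srv, credits);
	}
	return 0;
}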
Diffstat (limited to 'fs')
-rw-r--r--  fs/cifs/cifs_debug.c    |    2
-rw-r--r--  fs/cifs/cifsfs.h        |    2
-rw-r--r--  fs/cifs/cifsglob.h      |   19
-rw-r--r--  fs/cifs/cifsproto.h     |    4
-rw-r--r--  fs/cifs/cifssmb.c       |  119
-rw-r--r--  fs/cifs/connect.c       |    8
-rw-r--r--  fs/cifs/file.c          |  872
-rw-r--r--  fs/cifs/misc.c          |   13
-rw-r--r--  fs/cifs/sess.c          | 1192
-rw-r--r--  fs/cifs/smb1ops.c       |    8
-rw-r--r--  fs/cifs/smb2inode.c     |    2
-rw-r--r--  fs/cifs/smb2maperror.c  |    2
-rw-r--r--  fs/cifs/smb2misc.c      |    6
-rw-r--r--  fs/cifs/smb2ops.c       |   73
-rw-r--r--  fs/cifs/smb2pdu.c       |   94
-rw-r--r--  fs/cifs/smb2proto.h     |    2
-rw-r--r--  fs/cifs/smb2transport.c |    5
-rw-r--r--  fs/cifs/transport.c     |   25
18 files changed, 1665 insertions, 783 deletions
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index f3ac4154cbb6..44ec72684df5 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -213,7 +213,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
tcon->nativeFileSystem);
}
seq_printf(m, "DevInfo: 0x%x Attributes: 0x%x"
- "\n\tPathComponentMax: %d Status: 0x%d",
+ "\n\tPathComponentMax: %d Status: %d",
le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics),
le32_to_cpu(tcon->fsAttrInfo.Attributes),
le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength),
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 70f178a7c759..560480263336 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -136,5 +136,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
-#define CIFS_VERSION "2.03"
+#define CIFS_VERSION "2.04"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index de6aed8c78e5..0012e1e291d4 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -404,6 +404,11 @@ struct smb_version_operations {
const struct cifs_fid *, u32 *);
int (*set_acl)(struct cifs_ntsd *, __u32, struct inode *, const char *,
int);
+ /* writepages retry size */
+ unsigned int (*wp_retry_size)(struct inode *);
+ /* get mtu credits */
+ int (*wait_mtu_credits)(struct TCP_Server_Info *, unsigned int,
+ unsigned int *, unsigned int *);
};
struct smb_version_values {
@@ -640,6 +645,16 @@ add_credits(struct TCP_Server_Info *server, const unsigned int add,
}
static inline void
+add_credits_and_wake_if(struct TCP_Server_Info *server, const unsigned int add,
+ const int optype)
+{
+ if (add) {
+ server->ops->add_credits(server, add, optype);
+ wake_up(&server->request_q);
+ }
+}
+
+static inline void
set_credits(struct TCP_Server_Info *server, const int val)
{
server->ops->set_credits(server, val);
@@ -1044,6 +1059,7 @@ struct cifs_readdata {
struct address_space *mapping;
__u64 offset;
unsigned int bytes;
+ unsigned int got_bytes;
pid_t pid;
int result;
struct work_struct work;
@@ -1053,6 +1069,7 @@ struct cifs_readdata {
struct kvec iov;
unsigned int pagesz;
unsigned int tailsz;
+ unsigned int credits;
unsigned int nr_pages;
struct page *pages[];
};
@@ -1073,6 +1090,7 @@ struct cifs_writedata {
int result;
unsigned int pagesz;
unsigned int tailsz;
+ unsigned int credits;
unsigned int nr_pages;
struct page *pages[];
};
@@ -1398,6 +1416,7 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
#define CIFS_OBREAK_OP 0x0100 /* oplock break request */
#define CIFS_NEG_OP 0x0200 /* negotiate request */
#define CIFS_OP_MASK 0x0380 /* mask request type */
+#define CIFS_HAS_CREDITS 0x0400 /* already has credits */
/* Security Flags: indicate type of session setup needed */
#define CIFSSEC_MAY_SIGN 0x00001
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index ca7980a1e303..c31ce98c1704 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -36,6 +36,7 @@ extern struct smb_hdr *cifs_buf_get(void);
extern void cifs_buf_release(void *);
extern struct smb_hdr *cifs_small_buf_get(void);
extern void cifs_small_buf_release(void *);
+extern void free_rsp_buf(int, void *);
extern void cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx,
struct kvec *iov);
extern int smb_send(struct TCP_Server_Info *, struct smb_hdr *,
@@ -89,6 +90,9 @@ extern struct mid_q_entry *cifs_setup_async_request(struct TCP_Server_Info *,
struct smb_rqst *);
extern int cifs_check_receive(struct mid_q_entry *mid,
struct TCP_Server_Info *server, bool log_error);
+extern int cifs_wait_mtu_credits(struct TCP_Server_Info *server,
+ unsigned int size, unsigned int *num,
+ unsigned int *credits);
extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
struct kvec *, int /* nvec to send */,
int * /* type of buf returned */ , const int flags);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 6ce4e0954b98..66f65001a6d8 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -196,10 +196,6 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
if (rc)
goto out;
- /*
- * FIXME: check if wsize needs updated due to negotiated smb buffer
- * size shrinking
- */
atomic_inc(&tconInfoReconnectCount);
/* tell server Unix caps we support */
@@ -1517,7 +1513,6 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
return length;
server->total_read += length;
- rdata->bytes = length;
cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n",
server->total_read, buflen, data_len);
@@ -1560,12 +1555,18 @@ cifs_readv_callback(struct mid_q_entry *mid)
rc);
}
/* FIXME: should this be counted toward the initiating task? */
- task_io_account_read(rdata->bytes);
- cifs_stats_bytes_read(tcon, rdata->bytes);
+ task_io_account_read(rdata->got_bytes);
+ cifs_stats_bytes_read(tcon, rdata->got_bytes);
break;
case MID_REQUEST_SUBMITTED:
case MID_RETRY_NEEDED:
rdata->result = -EAGAIN;
+ if (server->sign && rdata->got_bytes)
+ /* reset bytes number since we can not check a sign */
+ rdata->got_bytes = 0;
+ /* FIXME: should this be counted toward the initiating task? */
+ task_io_account_read(rdata->got_bytes);
+ cifs_stats_bytes_read(tcon, rdata->got_bytes);
break;
default:
rdata->result = -EIO;
@@ -1734,10 +1735,7 @@ CIFSSMBRead(const unsigned int xid, struct cifs_io_parms *io_parms,
/* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
if (*buf) {
- if (resp_buf_type == CIFS_SMALL_BUFFER)
- cifs_small_buf_release(iov[0].iov_base);
- else if (resp_buf_type == CIFS_LARGE_BUFFER)
- cifs_buf_release(iov[0].iov_base);
+ free_rsp_buf(resp_buf_type, iov[0].iov_base);
} else if (resp_buf_type != CIFS_NO_BUFFER) {
/* return buffer to caller to free */
*buf = iov[0].iov_base;
@@ -1899,28 +1897,80 @@ cifs_writedata_release(struct kref *refcount)
static void
cifs_writev_requeue(struct cifs_writedata *wdata)
{
- int i, rc;
+ int i, rc = 0;
struct inode *inode = wdata->cfile->dentry->d_inode;
struct TCP_Server_Info *server;
+ unsigned int rest_len;
- for (i = 0; i < wdata->nr_pages; i++) {
- lock_page(wdata->pages[i]);
- clear_page_dirty_for_io(wdata->pages[i]);
- }
-
+ server = tlink_tcon(wdata->cfile->tlink)->ses->server;
+ i = 0;
+ rest_len = wdata->bytes;
do {
- server = tlink_tcon(wdata->cfile->tlink)->ses->server;
- rc = server->ops->async_writev(wdata, cifs_writedata_release);
- } while (rc == -EAGAIN);
+ struct cifs_writedata *wdata2;
+ unsigned int j, nr_pages, wsize, tailsz, cur_len;
+
+ wsize = server->ops->wp_retry_size(inode);
+ if (wsize < rest_len) {
+ nr_pages = wsize / PAGE_CACHE_SIZE;
+ if (!nr_pages) {
+ rc = -ENOTSUPP;
+ break;
+ }
+ cur_len = nr_pages * PAGE_CACHE_SIZE;
+ tailsz = PAGE_CACHE_SIZE;
+ } else {
+ nr_pages = DIV_ROUND_UP(rest_len, PAGE_CACHE_SIZE);
+ cur_len = rest_len;
+ tailsz = rest_len - (nr_pages - 1) * PAGE_CACHE_SIZE;
+ }
- for (i = 0; i < wdata->nr_pages; i++) {
- unlock_page(wdata->pages[i]);
- if (rc != 0) {
- SetPageError(wdata->pages[i]);
- end_page_writeback(wdata->pages[i]);
- page_cache_release(wdata->pages[i]);
+ wdata2 = cifs_writedata_alloc(nr_pages, cifs_writev_complete);
+ if (!wdata2) {
+ rc = -ENOMEM;
+ break;
}
- }
+
+ for (j = 0; j < nr_pages; j++) {
+ wdata2->pages[j] = wdata->pages[i + j];
+ lock_page(wdata2->pages[j]);
+ clear_page_dirty_for_io(wdata2->pages[j]);
+ }
+
+ wdata2->sync_mode = wdata->sync_mode;
+ wdata2->nr_pages = nr_pages;
+ wdata2->offset = page_offset(wdata2->pages[0]);
+ wdata2->pagesz = PAGE_CACHE_SIZE;
+ wdata2->tailsz = tailsz;
+ wdata2->bytes = cur_len;
+
+ wdata2->cfile = find_writable_file(CIFS_I(inode), false);
+ if (!wdata2->cfile) {
+ cifs_dbg(VFS, "No writable handles for inode\n");
+ rc = -EBADF;
+ break;
+ }
+ wdata2->pid = wdata2->cfile->pid;
+ rc = server->ops->async_writev(wdata2, cifs_writedata_release);
+
+ for (j = 0; j < nr_pages; j++) {
+ unlock_page(wdata2->pages[j]);
+ if (rc != 0 && rc != -EAGAIN) {
+ SetPageError(wdata2->pages[j]);
+ end_page_writeback(wdata2->pages[j]);
+ page_cache_release(wdata2->pages[j]);
+ }
+ }
+
+ if (rc) {
+ kref_put(&wdata2->refcount, cifs_writedata_release);
+ if (rc == -EAGAIN)
+ continue;
+ break;
+ }
+
+ rest_len -= cur_len;
+ i += nr_pages;
+ } while (i < wdata->nr_pages);
mapping_set_error(inode->i_mapping, rc);
kref_put(&wdata->refcount, cifs_writedata_release);
@@ -2203,10 +2253,7 @@ CIFSSMBWrite2(const unsigned int xid, struct cifs_io_parms *io_parms,
}
/* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
- if (resp_buf_type == CIFS_SMALL_BUFFER)
- cifs_small_buf_release(iov[0].iov_base);
- else if (resp_buf_type == CIFS_LARGE_BUFFER)
- cifs_buf_release(iov[0].iov_base);
+ free_rsp_buf(resp_buf_type, iov[0].iov_base);
/* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
@@ -2451,10 +2498,7 @@ plk_err_exit:
if (pSMB)
cifs_small_buf_release(pSMB);
- if (resp_buf_type == CIFS_SMALL_BUFFER)
- cifs_small_buf_release(iov[0].iov_base);
- else if (resp_buf_type == CIFS_LARGE_BUFFER)
- cifs_buf_release(iov[0].iov_base);
+ free_rsp_buf(resp_buf_type, iov[0].iov_base);
/* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
@@ -3838,10 +3882,7 @@ CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid,
}
}
qsec_out:
- if (buf_type == CIFS_SMALL_BUFFER)
- cifs_small_buf_release(iov[0].iov_base);
- else if (buf_type == CIFS_LARGE_BUFFER)
- cifs_buf_release(iov[0].iov_base);
+ free_rsp_buf(buf_type, iov[0].iov_base);
/* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
return rc;
}
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index b98366f21f9e..03ed8a09581c 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -557,7 +557,7 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig,
try_to_freeze();
if (server_unresponsive(server)) {
- total_read = -EAGAIN;
+ total_read = -ECONNABORTED;
break;
}
@@ -571,7 +571,7 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig,
break;
} else if (server->tcpStatus == CifsNeedReconnect) {
cifs_reconnect(server);
- total_read = -EAGAIN;
+ total_read = -ECONNABORTED;
break;
} else if (length == -ERESTARTSYS ||
length == -EAGAIN ||
@@ -588,7 +588,7 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig,
cifs_dbg(FYI, "Received no data or error: expecting %d\n"
"got %d", to_read, length);
cifs_reconnect(server);
- total_read = -EAGAIN;
+ total_read = -ECONNABORTED;
break;
}
}
@@ -786,7 +786,7 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
cifs_dbg(VFS, "SMB response too long (%u bytes)\n", pdu_length);
cifs_reconnect(server);
wake_up(&server->response_q);
- return -EAGAIN;
+ return -ECONNABORTED;
}
/* switch to large buffer if too big for a small one */
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index b88b1ade4d3d..4ab2f79ffa7a 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1670,8 +1670,8 @@ cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
break;
}
- len = min((size_t)cifs_sb->wsize,
- write_size - total_written);
+ len = min(server->ops->wp_retry_size(dentry->d_inode),
+ (unsigned int)write_size - total_written);
/* iov[0] is reserved for smb header */
iov[1].iov_base = (char *)write_data + total_written;
iov[1].iov_len = len;
@@ -1878,15 +1878,163 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
return rc;
}
+static struct cifs_writedata *
+wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
+ pgoff_t end, pgoff_t *index,
+ unsigned int *found_pages)
+{
+ unsigned int nr_pages;
+ struct page **pages;
+ struct cifs_writedata *wdata;
+
+ wdata = cifs_writedata_alloc((unsigned int)tofind,
+ cifs_writev_complete);
+ if (!wdata)
+ return NULL;
+
+ /*
+ * find_get_pages_tag seems to return a max of 256 on each
+ * iteration, so we must call it several times in order to
+ * fill the array or the wsize is effectively limited to
+ * 256 * PAGE_CACHE_SIZE.
+ */
+ *found_pages = 0;
+ pages = wdata->pages;
+ do {
+ nr_pages = find_get_pages_tag(mapping, index,
+ PAGECACHE_TAG_DIRTY, tofind,
+ pages);
+ *found_pages += nr_pages;
+ tofind -= nr_pages;
+ pages += nr_pages;
+ } while (nr_pages && tofind && *index <= end);
+
+ return wdata;
+}
+
+static unsigned int
+wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
+ struct address_space *mapping,
+ struct writeback_control *wbc,
+ pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
+{
+ unsigned int nr_pages = 0, i;
+ struct page *page;
+
+ for (i = 0; i < found_pages; i++) {
+ page = wdata->pages[i];
+ /*
+ * At this point we hold neither mapping->tree_lock nor
+ * lock on the page itself: the page may be truncated or
+ * invalidated (changing page->mapping to NULL), or even
+ * swizzled back from swapper_space to tmpfs file
+ * mapping
+ */
+
+ if (nr_pages == 0)
+ lock_page(page);
+ else if (!trylock_page(page))
+ break;
+
+ if (unlikely(page->mapping != mapping)) {
+ unlock_page(page);
+ break;
+ }
+
+ if (!wbc->range_cyclic && page->index > end) {
+ *done = true;
+ unlock_page(page);
+ break;
+ }
+
+ if (*next && (page->index != *next)) {
+ /* Not next consecutive page */
+ unlock_page(page);
+ break;
+ }
+
+ if (wbc->sync_mode != WB_SYNC_NONE)
+ wait_on_page_writeback(page);
+
+ if (PageWriteback(page) ||
+ !clear_page_dirty_for_io(page)) {
+ unlock_page(page);
+ break;
+ }
+
+ /*
+ * This actually clears the dirty bit in the radix tree.
+ * See cifs_writepage() for more commentary.
+ */
+ set_page_writeback(page);
+ if (page_offset(page) >= i_size_read(mapping->host)) {
+ *done = true;
+ unlock_page(page);
+ end_page_writeback(page);
+ break;
+ }
+
+ wdata->pages[i] = page;
+ *next = page->index + 1;
+ ++nr_pages;
+ }
+
+ /* reset index to refind any pages skipped */
+ if (nr_pages == 0)
+ *index = wdata->pages[0]->index + 1;
+
+ /* put any pages we aren't going to use */
+ for (i = nr_pages; i < found_pages; i++) {
+ page_cache_release(wdata->pages[i]);
+ wdata->pages[i] = NULL;
+ }
+
+ return nr_pages;
+}
+
+static int
+wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
+ struct address_space *mapping, struct writeback_control *wbc)
+{
+ int rc = 0;
+ struct TCP_Server_Info *server;
+ unsigned int i;
+
+ wdata->sync_mode = wbc->sync_mode;
+ wdata->nr_pages = nr_pages;
+ wdata->offset = page_offset(wdata->pages[0]);
+ wdata->pagesz = PAGE_CACHE_SIZE;
+ wdata->tailsz = min(i_size_read(mapping->host) -
+ page_offset(wdata->pages[nr_pages - 1]),
+ (loff_t)PAGE_CACHE_SIZE);
+ wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) + wdata->tailsz;
+
+ if (wdata->cfile != NULL)
+ cifsFileInfo_put(wdata->cfile);
+ wdata->cfile = find_writable_file(CIFS_I(mapping->host), false);
+ if (!wdata->cfile) {
+ cifs_dbg(VFS, "No writable handles for inode\n");
+ rc = -EBADF;
+ } else {
+ wdata->pid = wdata->cfile->pid;
+ server = tlink_tcon(wdata->cfile->tlink)->ses->server;
+ rc = server->ops->async_writev(wdata, cifs_writedata_release);
+ }
+
+ for (i = 0; i < nr_pages; ++i)
+ unlock_page(wdata->pages[i]);
+
+ return rc;
+}
+
static int cifs_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
+ struct TCP_Server_Info *server;
bool done = false, scanned = false, range_whole = false;
pgoff_t end, index;
struct cifs_writedata *wdata;
- struct TCP_Server_Info *server;
- struct page *page;
int rc = 0;
/*
@@ -1906,152 +2054,50 @@ static int cifs_writepages(struct address_space *mapping,
range_whole = true;
scanned = true;
}
+ server = cifs_sb_master_tcon(cifs_sb)->ses->server;
retry:
while (!done && index <= end) {
- unsigned int i, nr_pages, found_pages;
- pgoff_t next = 0, tofind;
- struct page **pages;
+ unsigned int i, nr_pages, found_pages, wsize, credits;
+ pgoff_t next = 0, tofind, saved_index = index;
+
+ rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
+ &wsize, &credits);
+ if (rc)
+ break;
- tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
- end - index) + 1;
+ tofind = min((wsize / PAGE_CACHE_SIZE) - 1, end - index) + 1;
- wdata = cifs_writedata_alloc((unsigned int)tofind,
- cifs_writev_complete);
+ wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
+ &found_pages);
if (!wdata) {
rc = -ENOMEM;
+ add_credits_and_wake_if(server, credits, 0);
break;
}
- /*
- * find_get_pages_tag seems to return a max of 256 on each
- * iteration, so we must call it several times in order to
- * fill the array or the wsize is effectively limited to
- * 256 * PAGE_CACHE_SIZE.
- */
- found_pages = 0;
- pages = wdata->pages;
- do {
- nr_pages = find_get_pages_tag(mapping, &index,
- PAGECACHE_TAG_DIRTY,
- tofind, pages);
- found_pages += nr_pages;
- tofind -= nr_pages;
- pages += nr_pages;
- } while (nr_pages && tofind && index <= end);
-
if (found_pages == 0) {
kref_put(&wdata->refcount, cifs_writedata_release);
+ add_credits_and_wake_if(server, credits, 0);
break;
}
- nr_pages = 0;
- for (i = 0; i < found_pages; i++) {
- page = wdata->pages[i];
- /*
- * At this point we hold neither mapping->tree_lock nor
- * lock on the page itself: the page may be truncated or
- * invalidated (changing page->mapping to NULL), or even
- * swizzled back from swapper_space to tmpfs file
- * mapping
- */
-
- if (nr_pages == 0)
- lock_page(page);
- else if (!trylock_page(page))
- break;
-
- if (unlikely(page->mapping != mapping)) {
- unlock_page(page);
- break;
- }
-
- if (!wbc->range_cyclic && page->index > end) {
- done = true;
- unlock_page(page);
- break;
- }
-
- if (next && (page->index != next)) {
- /* Not next consecutive page */
- unlock_page(page);
- break;
- }
-
- if (wbc->sync_mode != WB_SYNC_NONE)
- wait_on_page_writeback(page);
-
- if (PageWriteback(page) ||
- !clear_page_dirty_for_io(page)) {
- unlock_page(page);
- break;
- }
-
- /*
- * This actually clears the dirty bit in the radix tree.
- * See cifs_writepage() for more commentary.
- */
- set_page_writeback(page);
-
- if (page_offset(page) >= i_size_read(mapping->host)) {
- done = true;
- unlock_page(page);
- end_page_writeback(page);
- break;
- }
-
- wdata->pages[i] = page;
- next = page->index + 1;
- ++nr_pages;
- }
-
- /* reset index to refind any pages skipped */
- if (nr_pages == 0)
- index = wdata->pages[0]->index + 1;
-
- /* put any pages we aren't going to use */
- for (i = nr_pages; i < found_pages; i++) {
- page_cache_release(wdata->pages[i]);
- wdata->pages[i] = NULL;
- }
+ nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
+ end, &index, &next, &done);
/* nothing to write? */
if (nr_pages == 0) {
kref_put(&wdata->refcount, cifs_writedata_release);
+ add_credits_and_wake_if(server, credits, 0);
continue;
}
- wdata->sync_mode = wbc->sync_mode;
- wdata->nr_pages = nr_pages;
- wdata->offset = page_offset(wdata->pages[0]);
- wdata->pagesz = PAGE_CACHE_SIZE;
- wdata->tailsz =
- min(i_size_read(mapping->host) -
- page_offset(wdata->pages[nr_pages - 1]),
- (loff_t)PAGE_CACHE_SIZE);
- wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
- wdata->tailsz;
-
- do {
- if (wdata->cfile != NULL)
- cifsFileInfo_put(wdata->cfile);
- wdata->cfile = find_writable_file(CIFS_I(mapping->host),
- false);
- if (!wdata->cfile) {
- cifs_dbg(VFS, "No writable handles for inode\n");
- rc = -EBADF;
- break;
- }
- wdata->pid = wdata->cfile->pid;
- server = tlink_tcon(wdata->cfile->tlink)->ses->server;
- rc = server->ops->async_writev(wdata,
- cifs_writedata_release);
- } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
+ wdata->credits = credits;
- for (i = 0; i < nr_pages; ++i)
- unlock_page(wdata->pages[i]);
+ rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);
/* send failure -- clean up the mess */
if (rc != 0) {
+ add_credits_and_wake_if(server, wdata->credits, 0);
for (i = 0; i < nr_pages; ++i) {
if (rc == -EAGAIN)
redirty_page_for_writepage(wbc,
@@ -2066,6 +2112,11 @@ retry:
}
kref_put(&wdata->refcount, cifs_writedata_release);
+ if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
+ index = saved_index;
+ continue;
+ }
+
wbc->nr_to_write -= nr_pages;
if (wbc->nr_to_write <= 0)
done = true;
@@ -2362,123 +2413,109 @@ cifs_uncached_writev_complete(struct work_struct *work)
kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}
-/* attempt to send write to server, retry on any -EAGAIN errors */
static int
-cifs_uncached_retry_writev(struct cifs_writedata *wdata)
+wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
+ size_t *len, unsigned long *num_pages)
{
- int rc;
- struct TCP_Server_Info *server;
+ size_t save_len, copied, bytes, cur_len = *len;
+ unsigned long i, nr_pages = *num_pages;
- server = tlink_tcon(wdata->cfile->tlink)->ses->server;
+ save_len = cur_len;
+ for (i = 0; i < nr_pages; i++) {
+ bytes = min_t(const size_t, cur_len, PAGE_SIZE);
+ copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
+ cur_len -= copied;
+ /*
+ * If we didn't copy as much as we expected, then that
+ * may mean we trod into an unmapped area. Stop copying
+ * at that point. On the next pass through the big
+ * loop, we'll likely end up getting a zero-length
+ * write and bailing out of it.
+ */
+ if (copied < bytes)
+ break;
+ }
+ cur_len = save_len - cur_len;
+ *len = cur_len;
- do {
- if (wdata->cfile->invalidHandle) {
- rc = cifs_reopen_file(wdata->cfile, false);
- if (rc != 0)
- continue;
- }
- rc = server->ops->async_writev(wdata,
- cifs_uncached_writedata_release);
- } while (rc == -EAGAIN);
+ /*
+ * If we have no data to send, then that probably means that
+ * the copy above failed altogether. That's most likely because
+ * the address in the iovec was bogus. Return -EFAULT and let
+ * the caller free anything we allocated and bail out.
+ */
+ if (!cur_len)
+ return -EFAULT;
- return rc;
+ /*
+ * i + 1 now represents the number of pages we actually used in
+ * the copy phase above.
+ */
+ *num_pages = i + 1;
+ return 0;
}
-static ssize_t
-cifs_iovec_write(struct file *file, struct iov_iter *from, loff_t *poffset)
+static int
+cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
+ struct cifsFileInfo *open_file,
+ struct cifs_sb_info *cifs_sb, struct list_head *wdata_list)
{
- unsigned long nr_pages, i;
- size_t bytes, copied, len, cur_len;
- ssize_t total_written = 0;
- loff_t offset;
- struct cifsFileInfo *open_file;
- struct cifs_tcon *tcon;
- struct cifs_sb_info *cifs_sb;
- struct cifs_writedata *wdata, *tmp;
- struct list_head wdata_list;
- int rc;
+ int rc = 0;
+ size_t cur_len;
+ unsigned long nr_pages, num_pages, i;
+ struct cifs_writedata *wdata;
+ struct iov_iter saved_from;
+ loff_t saved_offset = offset;
pid_t pid;
-
- len = iov_iter_count(from);
- rc = generic_write_checks(file, poffset, &len, 0);
- if (rc)
- return rc;
-
- if (!len)
- return 0;
-
- iov_iter_truncate(from, len);
-
- INIT_LIST_HEAD(&wdata_list);
- cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
- open_file = file->private_data;
- tcon = tlink_tcon(open_file->tlink);
-
- if (!tcon->ses->server->ops->async_writev)
- return -ENOSYS;
-
- offset = *poffset;
+ struct TCP_Server_Info *server;
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
pid = open_file->pid;
else
pid = current->tgid;
+ server = tlink_tcon(open_file->tlink)->ses->server;
+ memcpy(&saved_from, from, sizeof(struct iov_iter));
+
do {
- size_t save_len;
+ unsigned int wsize, credits;
+
+ rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
+ &wsize, &credits);
+ if (rc)
+ break;
- nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
+ nr_pages = get_numpages(wsize, len, &cur_len);
wdata = cifs_writedata_alloc(nr_pages,
cifs_uncached_writev_complete);
if (!wdata) {
rc = -ENOMEM;
+ add_credits_and_wake_if(server, credits, 0);
break;
}
rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
if (rc) {
kfree(wdata);
+ add_credits_and_wake_if(server, credits, 0);
break;
}
- save_len = cur_len;
- for (i = 0; i < nr_pages; i++) {
- bytes = min_t(size_t, cur_len, PAGE_SIZE);
- copied = copy_page_from_iter(wdata->pages[i], 0, bytes,
- from);
- cur_len -= copied;
- /*
- * If we didn't copy as much as we expected, then that
- * may mean we trod into an unmapped area. Stop copying
- * at that point. On the next pass through the big
- * loop, we'll likely end up getting a zero-length
- * write and bailing out of it.
- */
- if (copied < bytes)
- break;
- }
- cur_len = save_len - cur_len;
-
- /*
- * If we have no data to send, then that probably means that
- * the copy above failed altogether. That's most likely because
- * the address in the iovec was bogus. Set the rc to -EFAULT,
- * free anything we allocated and bail out.
- */
- if (!cur_len) {
+ num_pages = nr_pages;
+ rc = wdata_fill_from_iovec(wdata, from, &cur_len, &num_pages);
+ if (rc) {
for (i = 0; i < nr_pages; i++)
put_page(wdata->pages[i]);
kfree(wdata);
- rc = -EFAULT;
+ add_credits_and_wake_if(server, credits, 0);
break;
}
/*
- * i + 1 now represents the number of pages we actually used in
- * the copy phase above. Bring nr_pages down to that, and free
- * any pages that we didn't use.
+ * Bring nr_pages down to the number of pages we actually used,
+ * and free any pages that we didn't use.
*/
- for ( ; nr_pages > i + 1; nr_pages--)
+ for ( ; nr_pages > num_pages; nr_pages--)
put_page(wdata->pages[nr_pages - 1]);
wdata->sync_mode = WB_SYNC_ALL;
@@ -2489,18 +2526,69 @@ cifs_iovec_write(struct file *file, struct iov_iter *from, loff_t *poffset)
wdata->bytes = cur_len;
wdata->pagesz = PAGE_SIZE;
wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
- rc = cifs_uncached_retry_writev(wdata);
+ wdata->credits = credits;
+
+ if (!wdata->cfile->invalidHandle ||
+ !cifs_reopen_file(wdata->cfile, false))
+ rc = server->ops->async_writev(wdata,
+ cifs_uncached_writedata_release);
if (rc) {
+ add_credits_and_wake_if(server, wdata->credits, 0);
kref_put(&wdata->refcount,
cifs_uncached_writedata_release);
+ if (rc == -EAGAIN) {
+ memcpy(from, &saved_from,
+ sizeof(struct iov_iter));
+ iov_iter_advance(from, offset - saved_offset);
+ continue;
+ }
break;
}
- list_add_tail(&wdata->list, &wdata_list);
+ list_add_tail(&wdata->list, wdata_list);
offset += cur_len;
len -= cur_len;
} while (len > 0);
+ return rc;
+}
+
+static ssize_t
+cifs_iovec_write(struct file *file, struct iov_iter *from, loff_t *poffset)
+{
+ size_t len;
+ ssize_t total_written = 0;
+ struct cifsFileInfo *open_file;
+ struct cifs_tcon *tcon;
+ struct cifs_sb_info *cifs_sb;
+ struct cifs_writedata *wdata, *tmp;
+ struct list_head wdata_list;
+ struct iov_iter saved_from;
+ int rc;
+
+ len = iov_iter_count(from);
+ rc = generic_write_checks(file, poffset, &len, 0);
+ if (rc)
+ return rc;
+
+ if (!len)
+ return 0;
+
+ iov_iter_truncate(from, len);
+
+ INIT_LIST_HEAD(&wdata_list);
+ cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
+ open_file = file->private_data;
+ tcon = tlink_tcon(open_file->tlink);
+
+ if (!tcon->ses->server->ops->async_writev)
+ return -ENOSYS;
+
+ memcpy(&saved_from, from, sizeof(struct iov_iter));
+
+ rc = cifs_write_from_iter(*poffset, len, from, open_file, cifs_sb,
+ &wdata_list);
+
/*
* If at least one write was successfully sent, then discard any rc
* value from the later writes. If the other write succeeds, then
@@ -2529,7 +2617,25 @@ restart_loop:
/* resend call if it's a retryable error */
if (rc == -EAGAIN) {
- rc = cifs_uncached_retry_writev(wdata);
+ struct list_head tmp_list;
+ struct iov_iter tmp_from;
+
+ INIT_LIST_HEAD(&tmp_list);
+ list_del_init(&wdata->list);
+
+ memcpy(&tmp_from, &saved_from,
+ sizeof(struct iov_iter));
+ iov_iter_advance(&tmp_from,
+ wdata->offset - *poffset);
+
+ rc = cifs_write_from_iter(wdata->offset,
+ wdata->bytes, &tmp_from,
+ open_file, cifs_sb, &tmp_list);
+
+ list_splice(&tmp_list, &wdata_list);
+
+ kref_put(&wdata->refcount,
+ cifs_uncached_writedata_release);
goto restart_loop;
}
}
@@ -2722,26 +2828,6 @@ cifs_uncached_readdata_release(struct kref *refcount)
cifs_readdata_release(refcount);
}
-static int
-cifs_retry_async_readv(struct cifs_readdata *rdata)
-{
- int rc;
- struct TCP_Server_Info *server;
-
- server = tlink_tcon(rdata->cfile->tlink)->ses->server;
-
- do {
- if (rdata->cfile->invalidHandle) {
- rc = cifs_reopen_file(rdata->cfile, true);
- if (rc != 0)
- continue;
- }
- rc = server->ops->async_readv(rdata);
- } while (rc == -EAGAIN);
-
- return rc;
-}
-
/**
* cifs_readdata_to_iov - copy data from pages in response to an iovec
* @rdata: the readdata response with list of pages holding data
@@ -2754,7 +2840,7 @@ cifs_retry_async_readv(struct cifs_readdata *rdata)
static int
cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
{
- size_t remaining = rdata->bytes;
+ size_t remaining = rdata->got_bytes;
unsigned int i;
for (i = 0; i < rdata->nr_pages; i++) {
@@ -2782,11 +2868,12 @@ static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
struct cifs_readdata *rdata, unsigned int len)
{
- int total_read = 0, result = 0;
+ int result = 0;
unsigned int i;
unsigned int nr_pages = rdata->nr_pages;
struct kvec iov;
+ rdata->got_bytes = 0;
rdata->tailsz = PAGE_SIZE;
for (i = 0; i < nr_pages; i++) {
struct page *page = rdata->pages[i];
@@ -2820,55 +2907,45 @@ cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
if (result < 0)
break;
- total_read += result;
+ rdata->got_bytes += result;
}
- return total_read > 0 ? total_read : result;
+ return rdata->got_bytes > 0 && result != -ECONNABORTED ?
+ rdata->got_bytes : result;
}
-ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
+static int
+cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
+ struct cifs_sb_info *cifs_sb, struct list_head *rdata_list)
{
- struct file *file = iocb->ki_filp;
- ssize_t rc;
- size_t len, cur_len;
- ssize_t total_read = 0;
- loff_t offset = iocb->ki_pos;
- unsigned int npages;
- struct cifs_sb_info *cifs_sb;
- struct cifs_tcon *tcon;
- struct cifsFileInfo *open_file;
- struct cifs_readdata *rdata, *tmp;
- struct list_head rdata_list;
+ struct cifs_readdata *rdata;
+ unsigned int npages, rsize, credits;
+ size_t cur_len;
+ int rc;
pid_t pid;
+ struct TCP_Server_Info *server;
- len = iov_iter_count(to);
- if (!len)
- return 0;
-
- INIT_LIST_HEAD(&rdata_list);
- cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
- open_file = file->private_data;
- tcon = tlink_tcon(open_file->tlink);
-
- if (!tcon->ses->server->ops->async_readv)
- return -ENOSYS;
+ server = tlink_tcon(open_file->tlink)->ses->server;
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
pid = open_file->pid;
else
pid = current->tgid;
- if ((file->f_flags & O_ACCMODE) == O_WRONLY)
- cifs_dbg(FYI, "attempting read on write only file instance\n");
-
do {
- cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
+ rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
+ &rsize, &credits);
+ if (rc)
+ break;
+
+ cur_len = min_t(const size_t, len, rsize);
npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
/* allocate a readdata struct */
rdata = cifs_readdata_alloc(npages,
cifs_uncached_readv_complete);
if (!rdata) {
+ add_credits_and_wake_if(server, credits, 0);
rc = -ENOMEM;
break;
}
@@ -2884,44 +2961,113 @@ ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
rdata->pid = pid;
rdata->pagesz = PAGE_SIZE;
rdata->read_into_pages = cifs_uncached_read_into_pages;
+ rdata->credits = credits;
- rc = cifs_retry_async_readv(rdata);
+ if (!rdata->cfile->invalidHandle ||
+ !cifs_reopen_file(rdata->cfile, true))
+ rc = server->ops->async_readv(rdata);
error:
if (rc) {
+ add_credits_and_wake_if(server, rdata->credits, 0);
kref_put(&rdata->refcount,
cifs_uncached_readdata_release);
+ if (rc == -EAGAIN)
+ continue;
break;
}
- list_add_tail(&rdata->list, &rdata_list);
+ list_add_tail(&rdata->list, rdata_list);
offset += cur_len;
len -= cur_len;
} while (len > 0);
+ return rc;
+}
+
+ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct file *file = iocb->ki_filp;
+ ssize_t rc;
+ size_t len;
+ ssize_t total_read = 0;
+ loff_t offset = iocb->ki_pos;
+ struct cifs_sb_info *cifs_sb;
+ struct cifs_tcon *tcon;
+ struct cifsFileInfo *open_file;
+ struct cifs_readdata *rdata, *tmp;
+ struct list_head rdata_list;
+
+ len = iov_iter_count(to);
+ if (!len)
+ return 0;
+
+ INIT_LIST_HEAD(&rdata_list);
+ cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
+ open_file = file->private_data;
+ tcon = tlink_tcon(open_file->tlink);
+
+ if (!tcon->ses->server->ops->async_readv)
+ return -ENOSYS;
+
+ if ((file->f_flags & O_ACCMODE) == O_WRONLY)
+ cifs_dbg(FYI, "attempting read on write only file instance\n");
+
+ rc = cifs_send_async_read(offset, len, open_file, cifs_sb, &rdata_list);
+
/* if at least one read request send succeeded, then reset rc */
if (!list_empty(&rdata_list))
rc = 0;
len = iov_iter_count(to);
/* the loop below should proceed in the order of increasing offsets */
+again:
list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
- again:
if (!rc) {
/* FIXME: freezable sleep too? */
rc = wait_for_completion_killable(&rdata->done);
if (rc)
rc = -EINTR;
- else if (rdata->result) {
- rc = rdata->result;
+ else if (rdata->result == -EAGAIN) {
/* resend call if it's a retryable error */
- if (rc == -EAGAIN) {
- rc = cifs_retry_async_readv(rdata);
- goto again;
+ struct list_head tmp_list;
+ unsigned int got_bytes = rdata->got_bytes;
+
+ list_del_init(&rdata->list);
+ INIT_LIST_HEAD(&tmp_list);
+
+ /*
+ * Got a part of data and then reconnect has
+ * happened -- fill the buffer and continue
+ * reading.
+ */
+ if (got_bytes && got_bytes < rdata->bytes) {
+ rc = cifs_readdata_to_iov(rdata, to);
+ if (rc) {
+ kref_put(&rdata->refcount,
+ cifs_uncached_readdata_release);
+ continue;
+ }
}
- } else {
+
+ rc = cifs_send_async_read(
+ rdata->offset + got_bytes,
+ rdata->bytes - got_bytes,
+ rdata->cfile, cifs_sb,
+ &tmp_list);
+
+ list_splice(&tmp_list, &rdata_list);
+
+ kref_put(&rdata->refcount,
+ cifs_uncached_readdata_release);
+ goto again;
+ } else if (rdata->result)
+ rc = rdata->result;
+ else
rc = cifs_readdata_to_iov(rdata, to);
- }
+ /* if there was a short read -- discard anything left */
+ if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
+ rc = -ENODATA;
}
list_del_init(&rdata->list);
kref_put(&rdata->refcount, cifs_uncached_readdata_release);
@@ -3030,18 +3176,19 @@ cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
for (total_read = 0, cur_offset = read_data; read_size > total_read;
total_read += bytes_read, cur_offset += bytes_read) {
- current_read_size = min_t(uint, read_size - total_read, rsize);
- /*
- * For windows me and 9x we do not want to request more than it
- * negotiated since it will refuse the read then.
- */
- if ((tcon->ses) && !(tcon->ses->capabilities &
+ do {
+ current_read_size = min_t(uint, read_size - total_read,
+ rsize);
+ /*
+ * For windows me and 9x we do not want to request more
+ * than it negotiated since it will refuse the read
+ * then.
+ */
+ if ((tcon->ses) && !(tcon->ses->capabilities &
tcon->ses->server->vals->cap_large_files)) {
- current_read_size = min_t(uint, current_read_size,
- CIFSMaxBufSize);
- }
- rc = -EAGAIN;
- while (rc == -EAGAIN) {
+ current_read_size = min_t(uint,
+ current_read_size, CIFSMaxBufSize);
+ }
if (open_file->invalidHandle) {
rc = cifs_reopen_file(open_file, true);
if (rc != 0)
@@ -3054,7 +3201,8 @@ cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
rc = server->ops->sync_read(xid, open_file, &io_parms,
&bytes_read, &cur_offset,
&buf_type);
- }
+ } while (rc == -EAGAIN);
+
if (rc || (bytes_read == 0)) {
if (total_read) {
break;
@@ -3133,25 +3281,30 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
static void
cifs_readv_complete(struct work_struct *work)
{
- unsigned int i;
+ unsigned int i, got_bytes;
struct cifs_readdata *rdata = container_of(work,
struct cifs_readdata, work);
+ got_bytes = rdata->got_bytes;
for (i = 0; i < rdata->nr_pages; i++) {
struct page *page = rdata->pages[i];
lru_cache_add_file(page);
- if (rdata->result == 0) {
+ if (rdata->result == 0 ||
+ (rdata->result == -EAGAIN && got_bytes)) {
flush_dcache_page(page);
SetPageUptodate(page);
}
unlock_page(page);
- if (rdata->result == 0)
+ if (rdata->result == 0 ||
+ (rdata->result == -EAGAIN && got_bytes))
cifs_readpage_to_fscache(rdata->mapping->host, page);
+ got_bytes -= min_t(unsigned int, PAGE_CACHE_SIZE, got_bytes);
+
page_cache_release(page);
rdata->pages[i] = NULL;
}
@@ -3162,7 +3315,7 @@ static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
struct cifs_readdata *rdata, unsigned int len)
{
- int total_read = 0, result = 0;
+ int result = 0;
unsigned int i;
u64 eof;
pgoff_t eof_index;
@@ -3174,6 +3327,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
+ rdata->got_bytes = 0;
rdata->tailsz = PAGE_CACHE_SIZE;
for (i = 0; i < nr_pages; i++) {
struct page *page = rdata->pages[i];
@@ -3228,10 +3382,70 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
if (result < 0)
break;
- total_read += result;
+ rdata->got_bytes += result;
}
- return total_read > 0 ? total_read : result;
+ return rdata->got_bytes > 0 && result != -ECONNABORTED ?
+ rdata->got_bytes : result;
+}
+
+static int
+readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
+ unsigned int rsize, struct list_head *tmplist,
+ unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
+{
+ struct page *page, *tpage;
+ unsigned int expected_index;
+ int rc;
+
+ INIT_LIST_HEAD(tmplist);
+
+ page = list_entry(page_list->prev, struct page, lru);
+
+ /*
+ * Lock the page and put it in the cache. Since no one else
+ * should have access to this page, we're safe to simply set
+ * PG_locked without checking it first.
+ */
+ __set_page_locked(page);
+ rc = add_to_page_cache_locked(page, mapping,
+ page->index, GFP_KERNEL);
+
+ /* give up if we can't stick it in the cache */
+ if (rc) {
+ __clear_page_locked(page);
+ return rc;
+ }
+
+ /* move first page to the tmplist */
+ *offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
+ *bytes = PAGE_CACHE_SIZE;
+ *nr_pages = 1;
+ list_move_tail(&page->lru, tmplist);
+
+ /* now try and add more pages onto the request */
+ expected_index = page->index + 1;
+ list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
+ /* discontinuity ? */
+ if (page->index != expected_index)
+ break;
+
+ /* would this page push the read over the rsize? */
+ if (*bytes + PAGE_CACHE_SIZE > rsize)
+ break;
+
+ __set_page_locked(page);
+ if (add_to_page_cache_locked(page, mapping, page->index,
+ GFP_KERNEL)) {
+ __clear_page_locked(page);
+ break;
+ }
+ list_move_tail(&page->lru, tmplist);
+ (*bytes) += PAGE_CACHE_SIZE;
+ expected_index++;
+ (*nr_pages)++;
+ }
+ return rc;
}
static int cifs_readpages(struct file *file, struct address_space *mapping,
@@ -3241,19 +3455,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
struct list_head tmplist;
struct cifsFileInfo *open_file = file->private_data;
struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
- unsigned int rsize = cifs_sb->rsize;
+ struct TCP_Server_Info *server;
pid_t pid;
/*
- * Give up immediately if rsize is too small to read an entire page.
- * The VFS will fall back to readpage. We should never reach this
- * point however since we set ra_pages to 0 when the rsize is smaller
- * than a cache page.
- */
- if (unlikely(rsize < PAGE_CACHE_SIZE))
- return 0;
-
- /*
* Reads as many pages as possible from fscache. Returns -ENOBUFS
* immediately if the cookie is negative
*
@@ -3271,7 +3476,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
pid = current->tgid;
rc = 0;
- INIT_LIST_HEAD(&tmplist);
+ server = tlink_tcon(open_file->tlink)->ses->server;
cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
__func__, file, mapping, num_pages);
@@ -3288,58 +3493,35 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
* the rdata->pages, then we want them in increasing order.
*/
while (!list_empty(page_list)) {
- unsigned int i;
- unsigned int bytes = PAGE_CACHE_SIZE;
- unsigned int expected_index;
- unsigned int nr_pages = 1;
+ unsigned int i, nr_pages, bytes, rsize;
loff_t offset;
struct page *page, *tpage;
struct cifs_readdata *rdata;
+ unsigned credits;
- page = list_entry(page_list->prev, struct page, lru);
+ rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
+ &rsize, &credits);
+ if (rc)
+ break;
/*
- * Lock the page and put it in the cache. Since no one else
- * should have access to this page, we're safe to simply set
- * PG_locked without checking it first.
+ * Give up immediately if rsize is too small to read an entire
+ * page. The VFS will fall back to readpage. We should never
+ * reach this point however since we set ra_pages to 0 when the
+ * rsize is smaller than a cache page.
*/
- __set_page_locked(page);
- rc = add_to_page_cache_locked(page, mapping,
- page->index, GFP_KERNEL);
+ if (unlikely(rsize < PAGE_CACHE_SIZE)) {
+ add_credits_and_wake_if(server, credits, 0);
+ return 0;
+ }
- /* give up if we can't stick it in the cache */
+ rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
+ &nr_pages, &offset, &bytes);
if (rc) {
- __clear_page_locked(page);
+ add_credits_and_wake_if(server, credits, 0);
break;
}
- /* move first page to the tmplist */
- offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
- list_move_tail(&page->lru, &tmplist);
-
- /* now try and add more pages onto the request */
- expected_index = page->index + 1;
- list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
- /* discontinuity ? */
- if (page->index != expected_index)
- break;
-
- /* would this page push the read over the rsize? */
- if (bytes + PAGE_CACHE_SIZE > rsize)
- break;
-
- __set_page_locked(page);
- if (add_to_page_cache_locked(page, mapping,
- page->index, GFP_KERNEL)) {
- __clear_page_locked(page);
- break;
- }
- list_move_tail(&page->lru, &tmplist);
- bytes += PAGE_CACHE_SIZE;
- expected_index++;
- nr_pages++;
- }
-
rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
if (!rdata) {
/* best to give up if we're out of mem */
@@ -3350,6 +3532,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
page_cache_release(page);
}
rc = -ENOMEM;
+ add_credits_and_wake_if(server, credits, 0);
break;
}
@@ -3360,21 +3543,32 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
rdata->pid = pid;
rdata->pagesz = PAGE_CACHE_SIZE;
rdata->read_into_pages = cifs_readpages_read_into_pages;
+ rdata->credits = credits;
list_for_each_entry_safe(page, tpage, &tmplist, lru) {
list_del(&page->lru);
rdata->pages[rdata->nr_pages++] = page;
}
- rc = cifs_retry_async_readv(rdata);
- if (rc != 0) {
+ if (!rdata->cfile->invalidHandle ||
+ !cifs_reopen_file(rdata->cfile, true))
+ rc = server->ops->async_readv(rdata);
+ if (rc) {
+ add_credits_and_wake_if(server, rdata->credits, 0);
for (i = 0; i < rdata->nr_pages; i++) {
page = rdata->pages[i];
lru_cache_add_file(page);
unlock_page(page);
page_cache_release(page);
+ if (rc == -EAGAIN)
+ list_add_tail(&page->lru, &tmplist);
}
kref_put(&rdata->refcount, cifs_readdata_release);
+ if (rc == -EAGAIN) {
+ /* Re-add pages to the page_list and retry */
+ list_splice(&tmplist, page_list);
+ continue;
+ }
break;
}
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 6bf55d0ed494..81340c6253eb 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -226,6 +226,15 @@ cifs_small_buf_release(void *buf_to_free)
return;
}
+void
+free_rsp_buf(int resp_buftype, void *rsp)
+{
+ if (resp_buftype == CIFS_SMALL_BUFFER)
+ cifs_small_buf_release(rsp);
+ else if (resp_buftype == CIFS_LARGE_BUFFER)
+ cifs_buf_release(rsp);
+}
+
/* NB: MID can not be set if treeCon not passed in, in that
case it is responsbility of caller to set the mid */
void
@@ -414,7 +423,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
return true;
}
if (pSMBr->hdr.Status.CifsError) {
- cifs_dbg(FYI, "notify err 0x%d\n",
+ cifs_dbg(FYI, "notify err 0x%x\n",
pSMBr->hdr.Status.CifsError);
return true;
}
@@ -441,7 +450,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
if (pSMB->hdr.WordCount != 8)
return false;
- cifs_dbg(FYI, "oplock type 0x%d level 0x%d\n",
+ cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
pSMB->LockType, pSMB->OplockLevel);
if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
return false;
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index e87387dbf39f..39ee32688eac 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -520,382 +520,559 @@ select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
}
}
-int
-CIFS_SessSetup(const unsigned int xid, struct cifs_ses *ses,
- const struct nls_table *nls_cp)
+struct sess_data {
+ unsigned int xid;
+ struct cifs_ses *ses;
+ struct nls_table *nls_cp;
+ void (*func)(struct sess_data *);
+ int result;
+
+ /* we will send the SMB in three pieces:
+ * a fixed length beginning part, an optional
+ * SPNEGO blob (which can be zero length), and a
+ * last part which will include the strings
+ * and rest of bcc area. This allows us to avoid
+ * a large buffer 17K allocation
+ */
+ int buf0_type;
+ struct kvec iov[3];
+};
+
+static int
+sess_alloc_buffer(struct sess_data *sess_data, int wct)
{
- int rc = 0;
- int wct;
+ int rc;
+ struct cifs_ses *ses = sess_data->ses;
struct smb_hdr *smb_buf;
- char *bcc_ptr;
- char *str_area;
- SESSION_SETUP_ANDX *pSMB;
- __u32 capabilities;
- __u16 count;
- int resp_buf_type;
- struct kvec iov[3];
- enum securityEnum type;
- __u16 action, bytes_remaining;
- struct key *spnego_key = NULL;
- __le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
- u16 blob_len;
- char *ntlmsspblob = NULL;
- if (ses == NULL) {
- WARN(1, "%s: ses == NULL!", __func__);
- return -EINVAL;
- }
+ rc = small_smb_init_no_tc(SMB_COM_SESSION_SETUP_ANDX, wct, ses,
+ (void **)&smb_buf);
- type = select_sectype(ses->server, ses->sectype);
- cifs_dbg(FYI, "sess setup type %d\n", type);
- if (type == Unspecified) {
- cifs_dbg(VFS,
- "Unable to select appropriate authentication method!");
- return -EINVAL;
+ if (rc)
+ return rc;
+
+ sess_data->iov[0].iov_base = (char *)smb_buf;
+ sess_data->iov[0].iov_len = be32_to_cpu(smb_buf->smb_buf_length) + 4;
+ /*
+ * This variable will be used to clear the buffer
+ * allocated above in case of any error in the calling function.
+ */
+ sess_data->buf0_type = CIFS_SMALL_BUFFER;
+
+ /* 2000 big enough to fit max user, domain, NOS name etc. */
+ sess_data->iov[2].iov_base = kmalloc(2000, GFP_KERNEL);
+ if (!sess_data->iov[2].iov_base) {
+ rc = -ENOMEM;
+ goto out_free_smb_buf;
}
- if (type == RawNTLMSSP) {
- /* if memory allocation is successful, caller of this function
- * frees it.
- */
- ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
- if (!ses->ntlmssp)
- return -ENOMEM;
- ses->ntlmssp->sesskey_per_smbsess = false;
+ return 0;
+
+out_free_smb_buf:
+ kfree(smb_buf);
+ sess_data->iov[0].iov_base = NULL;
+ sess_data->iov[0].iov_len = 0;
+ sess_data->buf0_type = CIFS_NO_BUFFER;
+ return rc;
+}
+
+static void
+sess_free_buffer(struct sess_data *sess_data)
+{
+ free_rsp_buf(sess_data->buf0_type, sess_data->iov[0].iov_base);
+ sess_data->buf0_type = CIFS_NO_BUFFER;
+ kfree(sess_data->iov[2].iov_base);
+}
+
+static int
+sess_establish_session(struct sess_data *sess_data)
+{
+ struct cifs_ses *ses = sess_data->ses;
+
+ mutex_lock(&ses->server->srv_mutex);
+ if (!ses->server->session_estab) {
+ if (ses->server->sign) {
+ ses->server->session_key.response =
+ kmemdup(ses->auth_key.response,
+ ses->auth_key.len, GFP_KERNEL);
+ if (!ses->server->session_key.response) {
+ mutex_unlock(&ses->server->srv_mutex);
+ return -ENOMEM;
+ }
+ ses->server->session_key.len =
+ ses->auth_key.len;
+ }
+ ses->server->sequence_number = 0x2;
+ ses->server->session_estab = true;
}
+ mutex_unlock(&ses->server->srv_mutex);
-ssetup_ntlmssp_authenticate:
- if (phase == NtLmChallenge)
- phase = NtLmAuthenticate; /* if ntlmssp, now final phase */
+ cifs_dbg(FYI, "CIFS session established successfully\n");
+ spin_lock(&GlobalMid_Lock);
+ ses->status = CifsGood;
+ ses->need_reconnect = false;
+ spin_unlock(&GlobalMid_Lock);
- if (type == LANMAN) {
-#ifndef CONFIG_CIFS_WEAK_PW_HASH
- /* LANMAN and plaintext are less secure and off by default.
- So we make this explicitly be turned on in kconfig (in the
- build) and turned on at runtime (changed from the default)
- in proc/fs/cifs or via mount parm. Unfortunately this is
- needed for old Win (e.g. Win95), some obscure NAS and OS/2 */
- return -EOPNOTSUPP;
-#endif
- wct = 10; /* lanman 2 style sessionsetup */
- } else if ((type == NTLM) || (type == NTLMv2)) {
- /* For NTLMv2 failures eventually may need to retry NTLM */
- wct = 13; /* old style NTLM sessionsetup */
- } else /* same size: negotiate or auth, NTLMSSP or extended security */
- wct = 12;
+ return 0;
+}
- rc = small_smb_init_no_tc(SMB_COM_SESSION_SETUP_ANDX, wct, ses,
- (void **)&smb_buf);
- if (rc)
- return rc;
+static int
+sess_sendreceive(struct sess_data *sess_data)
+{
+ int rc;
+ struct smb_hdr *smb_buf = (struct smb_hdr *) sess_data->iov[0].iov_base;
+ __u16 count;
- pSMB = (SESSION_SETUP_ANDX *)smb_buf;
+ count = sess_data->iov[1].iov_len + sess_data->iov[2].iov_len;
+ smb_buf->smb_buf_length =
+ cpu_to_be32(be32_to_cpu(smb_buf->smb_buf_length) + count);
+ put_bcc(count, smb_buf);
+
+ rc = SendReceive2(sess_data->xid, sess_data->ses,
+ sess_data->iov, 3 /* num_iovecs */,
+ &sess_data->buf0_type,
+ CIFS_LOG_ERROR);
+
+ return rc;
+}
+/*
+ * LANMAN and plaintext are less secure and off by default.
+ * So we make this explicitly be turned on in kconfig (in the
+ * build) and turned on at runtime (changed from the default)
+ * in proc/fs/cifs or via mount parm. Unfortunately this is
+ * needed for old Win (e.g. Win95), some obscure NAS and OS/2
+ */
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+static void
+sess_auth_lanman(struct sess_data *sess_data)
+{
+ int rc = 0;
+ struct smb_hdr *smb_buf;
+ SESSION_SETUP_ANDX *pSMB;
+ char *bcc_ptr;
+ struct cifs_ses *ses = sess_data->ses;
+ char lnm_session_key[CIFS_AUTH_RESP_SIZE];
+ __u32 capabilities;
+ __u16 bytes_remaining;
+
+ /* lanman 2 style sessionsetup */
+ /* wct = 10 */
+ rc = sess_alloc_buffer(sess_data, 10);
+ if (rc)
+ goto out;
+
+ pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
+ bcc_ptr = sess_data->iov[2].iov_base;
capabilities = cifs_ssetup_hdr(ses, pSMB);
- /* we will send the SMB in three pieces:
- a fixed length beginning part, an optional
- SPNEGO blob (which can be zero length), and a
- last part which will include the strings
- and rest of bcc area. This allows us to avoid
- a large buffer 17K allocation */
- iov[0].iov_base = (char *)pSMB;
- iov[0].iov_len = be32_to_cpu(smb_buf->smb_buf_length) + 4;
-
- /* setting this here allows the code at the end of the function
- to free the request buffer if there's an error */
- resp_buf_type = CIFS_SMALL_BUFFER;
+ pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE;
- /* 2000 big enough to fit max user, domain, NOS name etc. */
- str_area = kmalloc(2000, GFP_KERNEL);
- if (str_area == NULL) {
- rc = -ENOMEM;
- goto ssetup_exit;
- }
- bcc_ptr = str_area;
+ /* no capabilities flags in old lanman negotiation */
+ pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
- iov[1].iov_base = NULL;
- iov[1].iov_len = 0;
+ /* Calculate hash with password and copy into bcc_ptr.
+ * Encryption Key (stored as in cryptkey) gets used if the
+ * security mode bit in Negottiate Protocol response states
+ * to use challenge/response method (i.e. Password bit is 1).
+ */
+ rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
+ ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
+ true : false, lnm_session_key);
- if (type == LANMAN) {
-#ifdef CONFIG_CIFS_WEAK_PW_HASH
- char lnm_session_key[CIFS_AUTH_RESP_SIZE];
+ memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
+ bcc_ptr += CIFS_AUTH_RESP_SIZE;
+
+ /*
+ * can not sign if LANMAN negotiated so no need
+ * to calculate signing key? but what if server
+ * changed to do higher than lanman dialect and
+ * we reconnected would we ever calc signing_key?
+ */
- pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE;
+ cifs_dbg(FYI, "Negotiating LANMAN setting up strings\n");
+ /* Unicode not allowed for LANMAN dialects */
+ ascii_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp);
- /* no capabilities flags in old lanman negotiation */
+ sess_data->iov[2].iov_len = (long) bcc_ptr -
+ (long) sess_data->iov[2].iov_base;
- pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
+ rc = sess_sendreceive(sess_data);
+ if (rc)
+ goto out;
- /* Calculate hash with password and copy into bcc_ptr.
- * Encryption Key (stored as in cryptkey) gets used if the
- * security mode bit in Negottiate Protocol response states
- * to use challenge/response method (i.e. Password bit is 1).
- */
+ pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
+ smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base;
- rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
- ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
- true : false, lnm_session_key);
+ /* lanman response has a word count of 3 */
+ if (smb_buf->WordCount != 3) {
+ rc = -EIO;
+ cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount);
+ goto out;
+ }
- memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
- bcc_ptr += CIFS_AUTH_RESP_SIZE;
+ if (le16_to_cpu(pSMB->resp.Action) & GUEST_LOGIN)
+ cifs_dbg(FYI, "Guest login\n"); /* BB mark SesInfo struct? */
+
+ ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */
+ cifs_dbg(FYI, "UID = %llu\n", ses->Suid);
- /* can not sign if LANMAN negotiated so no need
- to calculate signing key? but what if server
- changed to do higher than lanman dialect and
- we reconnected would we ever calc signing_key? */
+ bytes_remaining = get_bcc(smb_buf);
+ bcc_ptr = pByteArea(smb_buf);
- cifs_dbg(FYI, "Negotiating LANMAN setting up strings\n");
- /* Unicode not allowed for LANMAN dialects */
- ascii_ssetup_strings(&bcc_ptr, ses, nls_cp);
+ /* BB check if Unicode and decode strings */
+ if (bytes_remaining == 0) {
+ /* no string area to decode, do nothing */
+ } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) {
+ /* unicode string area must be word-aligned */
+ if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) {
+ ++bcc_ptr;
+ --bytes_remaining;
+ }
+ decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses,
+ sess_data->nls_cp);
+ } else {
+ decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses,
+ sess_data->nls_cp);
+ }
+
+ rc = sess_establish_session(sess_data);
+out:
+ sess_data->result = rc;
+ sess_data->func = NULL;
+ sess_free_buffer(sess_data);
+}
+
+#else
+
+static void
+sess_auth_lanman(struct sess_data *sess_data)
+{
+ sess_data->result = -EOPNOTSUPP;
+ sess_data->func = NULL;
+}
#endif
- } else if (type == NTLM) {
- pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
- pSMB->req_no_secext.CaseInsensitivePasswordLength =
+
+static void
+sess_auth_ntlm(struct sess_data *sess_data)
+{
+ int rc = 0;
+ struct smb_hdr *smb_buf;
+ SESSION_SETUP_ANDX *pSMB;
+ char *bcc_ptr;
+ struct cifs_ses *ses = sess_data->ses;
+ __u32 capabilities;
+ __u16 bytes_remaining;
+
+ /* old style NTLM sessionsetup */
+ /* wct = 13 */
+ rc = sess_alloc_buffer(sess_data, 13);
+ if (rc)
+ goto out;
+
+ pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
+ bcc_ptr = sess_data->iov[2].iov_base;
+ capabilities = cifs_ssetup_hdr(ses, pSMB);
+
+ pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
+ pSMB->req_no_secext.CaseInsensitivePasswordLength =
cpu_to_le16(CIFS_AUTH_RESP_SIZE);
- pSMB->req_no_secext.CaseSensitivePasswordLength =
+ pSMB->req_no_secext.CaseSensitivePasswordLength =
cpu_to_le16(CIFS_AUTH_RESP_SIZE);
- /* calculate ntlm response and session key */
- rc = setup_ntlm_response(ses, nls_cp);
- if (rc) {
- cifs_dbg(VFS, "Error %d during NTLM authentication\n",
+ /* calculate ntlm response and session key */
+ rc = setup_ntlm_response(ses, sess_data->nls_cp);
+ if (rc) {
+ cifs_dbg(VFS, "Error %d during NTLM authentication\n",
rc);
- goto ssetup_exit;
- }
+ goto out;
+ }
- /* copy ntlm response */
- memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
- CIFS_AUTH_RESP_SIZE);
- bcc_ptr += CIFS_AUTH_RESP_SIZE;
- memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
- CIFS_AUTH_RESP_SIZE);
- bcc_ptr += CIFS_AUTH_RESP_SIZE;
-
- if (ses->capabilities & CAP_UNICODE) {
- /* unicode strings must be word aligned */
- if (iov[0].iov_len % 2) {
- *bcc_ptr = 0;
- bcc_ptr++;
- }
- unicode_ssetup_strings(&bcc_ptr, ses, nls_cp);
- } else
- ascii_ssetup_strings(&bcc_ptr, ses, nls_cp);
- } else if (type == NTLMv2) {
- pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
-
- /* LM2 password would be here if we supported it */
- pSMB->req_no_secext.CaseInsensitivePasswordLength = 0;
-
- /* calculate nlmv2 response and session key */
- rc = setup_ntlmv2_rsp(ses, nls_cp);
- if (rc) {
- cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n",
- rc);
- goto ssetup_exit;
+ /* copy ntlm response */
+ memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+ CIFS_AUTH_RESP_SIZE);
+ bcc_ptr += CIFS_AUTH_RESP_SIZE;
+ memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+ CIFS_AUTH_RESP_SIZE);
+ bcc_ptr += CIFS_AUTH_RESP_SIZE;
+
+ if (ses->capabilities & CAP_UNICODE) {
+ /* unicode strings must be word aligned */
+ if (sess_data->iov[0].iov_len % 2) {
+ *bcc_ptr = 0;
+ bcc_ptr++;
}
- memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
- ses->auth_key.len - CIFS_SESS_KEY_SIZE);
- bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
-
- /* set case sensitive password length after tilen may get
- * assigned, tilen is 0 otherwise.
- */
- pSMB->req_no_secext.CaseSensitivePasswordLength =
- cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+ unicode_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp);
+ } else {
+ ascii_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp);
+ }
- if (ses->capabilities & CAP_UNICODE) {
- if (iov[0].iov_len % 2) {
- *bcc_ptr = 0;
- bcc_ptr++;
- }
- unicode_ssetup_strings(&bcc_ptr, ses, nls_cp);
- } else
- ascii_ssetup_strings(&bcc_ptr, ses, nls_cp);
- } else if (type == Kerberos) {
-#ifdef CONFIG_CIFS_UPCALL
- struct cifs_spnego_msg *msg;
- spnego_key = cifs_get_spnego_key(ses);
- if (IS_ERR(spnego_key)) {
- rc = PTR_ERR(spnego_key);
- spnego_key = NULL;
- goto ssetup_exit;
- }
+ sess_data->iov[2].iov_len = (long) bcc_ptr -
+ (long) sess_data->iov[2].iov_base;
- msg = spnego_key->payload.data;
- /* check version field to make sure that cifs.upcall is
- sending us a response in an expected form */
- if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
- cifs_dbg(VFS, "incorrect version of cifs.upcall "
- "expected %d but got %d)",
- CIFS_SPNEGO_UPCALL_VERSION, msg->version);
- rc = -EKEYREJECTED;
- goto ssetup_exit;
- }
+ rc = sess_sendreceive(sess_data);
+ if (rc)
+ goto out;
- ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
- GFP_KERNEL);
- if (!ses->auth_key.response) {
- cifs_dbg(VFS,
- "Kerberos can't allocate (%u bytes) memory",
- msg->sesskey_len);
- rc = -ENOMEM;
- goto ssetup_exit;
- }
- ses->auth_key.len = msg->sesskey_len;
-
- pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
- capabilities |= CAP_EXTENDED_SECURITY;
- pSMB->req.Capabilities = cpu_to_le32(capabilities);
- iov[1].iov_base = msg->data + msg->sesskey_len;
- iov[1].iov_len = msg->secblob_len;
- pSMB->req.SecurityBlobLength = cpu_to_le16(iov[1].iov_len);
-
- if (ses->capabilities & CAP_UNICODE) {
- /* unicode strings must be word aligned */
- if ((iov[0].iov_len + iov[1].iov_len) % 2) {
- *bcc_ptr = 0;
- bcc_ptr++;
- }
- unicode_oslm_strings(&bcc_ptr, nls_cp);
- unicode_domain_string(&bcc_ptr, ses, nls_cp);
- } else
- /* BB: is this right? */
- ascii_ssetup_strings(&bcc_ptr, ses, nls_cp);
-#else /* ! CONFIG_CIFS_UPCALL */
- cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n");
- rc = -ENOSYS;
- goto ssetup_exit;
-#endif /* CONFIG_CIFS_UPCALL */
- } else if (type == RawNTLMSSP) {
- if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) {
- cifs_dbg(VFS, "NTLMSSP requires Unicode support\n");
- rc = -ENOSYS;
- goto ssetup_exit;
- }
+ pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
+ smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base;
- cifs_dbg(FYI, "ntlmssp session setup phase %d\n", phase);
- pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
- capabilities |= CAP_EXTENDED_SECURITY;
- pSMB->req.Capabilities |= cpu_to_le32(capabilities);
- switch(phase) {
- case NtLmNegotiate:
- build_ntlmssp_negotiate_blob(
- pSMB->req.SecurityBlob, ses);
- iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE);
- iov[1].iov_base = pSMB->req.SecurityBlob;
- pSMB->req.SecurityBlobLength =
- cpu_to_le16(sizeof(NEGOTIATE_MESSAGE));
- break;
- case NtLmAuthenticate:
- /*
- * 5 is an empirical value, large enough to hold
- * authenticate message plus max 10 of av paris,
- * domain, user, workstation names, flags, etc.
- */
- ntlmsspblob = kzalloc(
- 5*sizeof(struct _AUTHENTICATE_MESSAGE),
- GFP_KERNEL);
- if (!ntlmsspblob) {
- rc = -ENOMEM;
- goto ssetup_exit;
- }
+ if (smb_buf->WordCount != 3) {
+ rc = -EIO;
+ cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount);
+ goto out;
+ }
- rc = build_ntlmssp_auth_blob(ntlmsspblob,
- &blob_len, ses, nls_cp);
- if (rc)
- goto ssetup_exit;
- iov[1].iov_len = blob_len;
- iov[1].iov_base = ntlmsspblob;
- pSMB->req.SecurityBlobLength = cpu_to_le16(blob_len);
- /*
- * Make sure that we tell the server that we are using
- * the uid that it just gave us back on the response
- * (challenge)
- */
- smb_buf->Uid = ses->Suid;
- break;
- default:
- cifs_dbg(VFS, "invalid phase %d\n", phase);
- rc = -ENOSYS;
- goto ssetup_exit;
+ if (le16_to_cpu(pSMB->resp.Action) & GUEST_LOGIN)
+ cifs_dbg(FYI, "Guest login\n"); /* BB mark SesInfo struct? */
+
+ ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */
+ cifs_dbg(FYI, "UID = %llu\n", ses->Suid);
+
+ bytes_remaining = get_bcc(smb_buf);
+ bcc_ptr = pByteArea(smb_buf);
+
+ /* BB check if Unicode and decode strings */
+ if (bytes_remaining == 0) {
+ /* no string area to decode, do nothing */
+ } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) {
+ /* unicode string area must be word-aligned */
+ if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) {
+ ++bcc_ptr;
+ --bytes_remaining;
}
- /* unicode strings must be word aligned */
- if ((iov[0].iov_len + iov[1].iov_len) % 2) {
+ decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses,
+ sess_data->nls_cp);
+ } else {
+ decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses,
+ sess_data->nls_cp);
+ }
+
+ rc = sess_establish_session(sess_data);
+out:
+ sess_data->result = rc;
+ sess_data->func = NULL;
+ sess_free_buffer(sess_data);
+ kfree(ses->auth_key.response);
+ ses->auth_key.response = NULL;
+}
+
+static void
+sess_auth_ntlmv2(struct sess_data *sess_data)
+{
+ int rc = 0;
+ struct smb_hdr *smb_buf;
+ SESSION_SETUP_ANDX *pSMB;
+ char *bcc_ptr;
+ struct cifs_ses *ses = sess_data->ses;
+ __u32 capabilities;
+ __u16 bytes_remaining;
+
+	/* old style NTLMv2 sessionsetup */
+ /* wct = 13 */
+ rc = sess_alloc_buffer(sess_data, 13);
+ if (rc)
+ goto out;
+
+ pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
+ bcc_ptr = sess_data->iov[2].iov_base;
+ capabilities = cifs_ssetup_hdr(ses, pSMB);
+
+ pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
+
+ /* LM2 password would be here if we supported it */
+ pSMB->req_no_secext.CaseInsensitivePasswordLength = 0;
+
+	/* calculate ntlmv2 response and session key */
+ rc = setup_ntlmv2_rsp(ses, sess_data->nls_cp);
+ if (rc) {
+ cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", rc);
+ goto out;
+ }
+
+ memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+ ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+ bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
+
+ /* set case sensitive password length after tilen may get
+ * assigned, tilen is 0 otherwise.
+ */
+ pSMB->req_no_secext.CaseSensitivePasswordLength =
+ cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+
+ if (ses->capabilities & CAP_UNICODE) {
+ if (sess_data->iov[0].iov_len % 2) {
*bcc_ptr = 0;
bcc_ptr++;
}
- unicode_oslm_strings(&bcc_ptr, nls_cp);
+ unicode_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp);
} else {
- cifs_dbg(VFS, "secType %d not supported!\n", type);
- rc = -ENOSYS;
- goto ssetup_exit;
+ ascii_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp);
}
- iov[2].iov_base = str_area;
- iov[2].iov_len = (long) bcc_ptr - (long) str_area;
- count = iov[1].iov_len + iov[2].iov_len;
- smb_buf->smb_buf_length =
- cpu_to_be32(be32_to_cpu(smb_buf->smb_buf_length) + count);
+ sess_data->iov[2].iov_len = (long) bcc_ptr -
+ (long) sess_data->iov[2].iov_base;
- put_bcc(count, smb_buf);
+ rc = sess_sendreceive(sess_data);
+ if (rc)
+ goto out;
- rc = SendReceive2(xid, ses, iov, 3 /* num_iovecs */, &resp_buf_type,
- CIFS_LOG_ERROR);
- /* SMB request buf freed in SendReceive2 */
+ pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
+ smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base;
+
+ if (smb_buf->WordCount != 3) {
+ rc = -EIO;
+ cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount);
+ goto out;
+ }
+
+ if (le16_to_cpu(pSMB->resp.Action) & GUEST_LOGIN)
+ cifs_dbg(FYI, "Guest login\n"); /* BB mark SesInfo struct? */
+
+ ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */
+ cifs_dbg(FYI, "UID = %llu\n", ses->Suid);
- pSMB = (SESSION_SETUP_ANDX *)iov[0].iov_base;
- smb_buf = (struct smb_hdr *)iov[0].iov_base;
+ bytes_remaining = get_bcc(smb_buf);
+ bcc_ptr = pByteArea(smb_buf);
- if ((type == RawNTLMSSP) && (resp_buf_type != CIFS_NO_BUFFER) &&
- (smb_buf->Status.CifsError ==
- cpu_to_le32(NT_STATUS_MORE_PROCESSING_REQUIRED))) {
- if (phase != NtLmNegotiate) {
- cifs_dbg(VFS, "Unexpected more processing error\n");
- goto ssetup_exit;
+ /* BB check if Unicode and decode strings */
+ if (bytes_remaining == 0) {
+ /* no string area to decode, do nothing */
+ } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) {
+ /* unicode string area must be word-aligned */
+ if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) {
+ ++bcc_ptr;
+ --bytes_remaining;
}
- /* NTLMSSP Negotiate sent now processing challenge (response) */
- phase = NtLmChallenge; /* process ntlmssp challenge */
- rc = 0; /* MORE_PROC rc is not an error here, but expected */
+ decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses,
+ sess_data->nls_cp);
+ } else {
+ decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses,
+ sess_data->nls_cp);
}
+
+ rc = sess_establish_session(sess_data);
+out:
+ sess_data->result = rc;
+ sess_data->func = NULL;
+ sess_free_buffer(sess_data);
+ kfree(ses->auth_key.response);
+ ses->auth_key.response = NULL;
+}
+
+#ifdef CONFIG_CIFS_UPCALL
+static void
+sess_auth_kerberos(struct sess_data *sess_data)
+{
+ int rc = 0;
+ struct smb_hdr *smb_buf;
+ SESSION_SETUP_ANDX *pSMB;
+ char *bcc_ptr;
+ struct cifs_ses *ses = sess_data->ses;
+ __u32 capabilities;
+ __u16 bytes_remaining;
+ struct key *spnego_key = NULL;
+ struct cifs_spnego_msg *msg;
+ u16 blob_len;
+
+ /* extended security */
+ /* wct = 12 */
+ rc = sess_alloc_buffer(sess_data, 12);
if (rc)
- goto ssetup_exit;
+ goto out;
- if ((smb_buf->WordCount != 3) && (smb_buf->WordCount != 4)) {
+ pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
+ bcc_ptr = sess_data->iov[2].iov_base;
+ capabilities = cifs_ssetup_hdr(ses, pSMB);
+
+ spnego_key = cifs_get_spnego_key(ses);
+ if (IS_ERR(spnego_key)) {
+ rc = PTR_ERR(spnego_key);
+ spnego_key = NULL;
+ goto out;
+ }
+
+ msg = spnego_key->payload.data;
+ /*
+ * check version field to make sure that cifs.upcall is
+ * sending us a response in an expected form
+ */
+ if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
+ cifs_dbg(VFS,
+ "incorrect version of cifs.upcall (expected %d but got %d)",
+ CIFS_SPNEGO_UPCALL_VERSION, msg->version);
+ rc = -EKEYREJECTED;
+ goto out_put_spnego_key;
+ }
+
+ ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
+ GFP_KERNEL);
+ if (!ses->auth_key.response) {
+ cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory",
+ msg->sesskey_len);
+ rc = -ENOMEM;
+ goto out_put_spnego_key;
+ }
+ ses->auth_key.len = msg->sesskey_len;
+
+ pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
+ capabilities |= CAP_EXTENDED_SECURITY;
+ pSMB->req.Capabilities = cpu_to_le32(capabilities);
+ sess_data->iov[1].iov_base = msg->data + msg->sesskey_len;
+ sess_data->iov[1].iov_len = msg->secblob_len;
+ pSMB->req.SecurityBlobLength = cpu_to_le16(sess_data->iov[1].iov_len);
+
+ if (ses->capabilities & CAP_UNICODE) {
+ /* unicode strings must be word aligned */
+ if ((sess_data->iov[0].iov_len
+ + sess_data->iov[1].iov_len) % 2) {
+ *bcc_ptr = 0;
+ bcc_ptr++;
+ }
+ unicode_oslm_strings(&bcc_ptr, sess_data->nls_cp);
+ unicode_domain_string(&bcc_ptr, ses, sess_data->nls_cp);
+ } else {
+ /* BB: is this right? */
+ ascii_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp);
+ }
+
+ sess_data->iov[2].iov_len = (long) bcc_ptr -
+ (long) sess_data->iov[2].iov_base;
+
+ rc = sess_sendreceive(sess_data);
+ if (rc)
+ goto out_put_spnego_key;
+
+ pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
+ smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base;
+
+ if (smb_buf->WordCount != 4) {
rc = -EIO;
cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount);
- goto ssetup_exit;
+ goto out_put_spnego_key;
}
- action = le16_to_cpu(pSMB->resp.Action);
- if (action & GUEST_LOGIN)
+
+ if (le16_to_cpu(pSMB->resp.Action) & GUEST_LOGIN)
cifs_dbg(FYI, "Guest login\n"); /* BB mark SesInfo struct? */
+
ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */
cifs_dbg(FYI, "UID = %llu\n", ses->Suid);
- /* response can have either 3 or 4 word count - Samba sends 3 */
- /* and lanman response is 3 */
+
bytes_remaining = get_bcc(smb_buf);
bcc_ptr = pByteArea(smb_buf);
- if (smb_buf->WordCount == 4) {
- blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength);
- if (blob_len > bytes_remaining) {
- cifs_dbg(VFS, "bad security blob length %d\n",
- blob_len);
- rc = -EINVAL;
- goto ssetup_exit;
- }
- if (phase == NtLmChallenge) {
- rc = decode_ntlmssp_challenge(bcc_ptr, blob_len, ses);
- /* now goto beginning for ntlmssp authenticate phase */
- if (rc)
- goto ssetup_exit;
- }
- bcc_ptr += blob_len;
- bytes_remaining -= blob_len;
+ blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength);
+ if (blob_len > bytes_remaining) {
+ cifs_dbg(VFS, "bad security blob length %d\n",
+ blob_len);
+ rc = -EINVAL;
+ goto out_put_spnego_key;
}
+ bcc_ptr += blob_len;
+ bytes_remaining -= blob_len;
/* BB check if Unicode and decode strings */
if (bytes_remaining == 0) {
@@ -906,60 +1083,371 @@ ssetup_ntlmssp_authenticate:
++bcc_ptr;
--bytes_remaining;
}
- decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses, nls_cp);
+ decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses,
+ sess_data->nls_cp);
} else {
- decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses, nls_cp);
+ decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses,
+ sess_data->nls_cp);
}
-ssetup_exit:
- if (spnego_key) {
- key_invalidate(spnego_key);
- key_put(spnego_key);
+ rc = sess_establish_session(sess_data);
+out_put_spnego_key:
+ key_invalidate(spnego_key);
+ key_put(spnego_key);
+out:
+ sess_data->result = rc;
+ sess_data->func = NULL;
+ sess_free_buffer(sess_data);
+ kfree(ses->auth_key.response);
+ ses->auth_key.response = NULL;
+}
+
+#else
+
+static void
+sess_auth_kerberos(struct sess_data *sess_data)
+{
+ cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n");
+ sess_data->result = -ENOSYS;
+ sess_data->func = NULL;
+}
+#endif /* ! CONFIG_CIFS_UPCALL */
+
+/*
+ * The required kvec buffers have to be allocated before calling this
+ * function.
+ */
+static int
+_sess_auth_rawntlmssp_assemble_req(struct sess_data *sess_data)
+{
+ struct smb_hdr *smb_buf;
+ SESSION_SETUP_ANDX *pSMB;
+ struct cifs_ses *ses = sess_data->ses;
+ __u32 capabilities;
+ char *bcc_ptr;
+
+ pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
+ smb_buf = (struct smb_hdr *)pSMB;
+
+ capabilities = cifs_ssetup_hdr(ses, pSMB);
+ if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) {
+ cifs_dbg(VFS, "NTLMSSP requires Unicode support\n");
+ return -ENOSYS;
}
- kfree(str_area);
- kfree(ntlmsspblob);
- ntlmsspblob = NULL;
- if (resp_buf_type == CIFS_SMALL_BUFFER) {
- cifs_dbg(FYI, "ssetup freeing small buf %p\n", iov[0].iov_base);
- cifs_small_buf_release(iov[0].iov_base);
- } else if (resp_buf_type == CIFS_LARGE_BUFFER)
- cifs_buf_release(iov[0].iov_base);
- /* if ntlmssp, and negotiate succeeded, proceed to authenticate phase */
- if ((phase == NtLmChallenge) && (rc == 0))
- goto ssetup_ntlmssp_authenticate;
+ pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
+ capabilities |= CAP_EXTENDED_SECURITY;
+ pSMB->req.Capabilities |= cpu_to_le32(capabilities);
+
+ bcc_ptr = sess_data->iov[2].iov_base;
+ /* unicode strings must be word aligned */
+ if ((sess_data->iov[0].iov_len + sess_data->iov[1].iov_len) % 2) {
+ *bcc_ptr = 0;
+ bcc_ptr++;
+ }
+ unicode_oslm_strings(&bcc_ptr, sess_data->nls_cp);
+
+ sess_data->iov[2].iov_len = (long) bcc_ptr -
+ (long) sess_data->iov[2].iov_base;
+
+ return 0;
+}
+
+static void
+sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data);
+
+static void
+sess_auth_rawntlmssp_negotiate(struct sess_data *sess_data)
+{
+ int rc;
+ struct smb_hdr *smb_buf;
+ SESSION_SETUP_ANDX *pSMB;
+ struct cifs_ses *ses = sess_data->ses;
+ __u16 bytes_remaining;
+ char *bcc_ptr;
+ u16 blob_len;
+
+ cifs_dbg(FYI, "rawntlmssp session setup negotiate phase\n");
+
+ /*
+	 * If the memory allocation is successful, the caller of this
+	 * function is responsible for freeing it.
+ */
+ ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
+ if (!ses->ntlmssp) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ ses->ntlmssp->sesskey_per_smbsess = false;
+
+ /* wct = 12 */
+ rc = sess_alloc_buffer(sess_data, 12);
+ if (rc)
+ goto out;
+
+ pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
+
+ /* Build security blob before we assemble the request */
+ build_ntlmssp_negotiate_blob(pSMB->req.SecurityBlob, ses);
+ sess_data->iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE);
+ sess_data->iov[1].iov_base = pSMB->req.SecurityBlob;
+ pSMB->req.SecurityBlobLength = cpu_to_le16(sizeof(NEGOTIATE_MESSAGE));
+
+ rc = _sess_auth_rawntlmssp_assemble_req(sess_data);
+ if (rc)
+ goto out;
+
+ rc = sess_sendreceive(sess_data);
+
+ pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
+ smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base;
+
+ /* If true, rc here is expected and not an error */
+ if (sess_data->buf0_type != CIFS_NO_BUFFER &&
+ smb_buf->Status.CifsError ==
+ cpu_to_le32(NT_STATUS_MORE_PROCESSING_REQUIRED))
+ rc = 0;
+
+ if (rc)
+ goto out;
+
+ cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");
+
+ if (smb_buf->WordCount != 4) {
+ rc = -EIO;
+ cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount);
+ goto out;
+ }
+
+ ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */
+ cifs_dbg(FYI, "UID = %llu\n", ses->Suid);
+
+ bytes_remaining = get_bcc(smb_buf);
+ bcc_ptr = pByteArea(smb_buf);
+
+ blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength);
+ if (blob_len > bytes_remaining) {
+ cifs_dbg(VFS, "bad security blob length %d\n",
+ blob_len);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = decode_ntlmssp_challenge(bcc_ptr, blob_len, ses);
+out:
+ sess_free_buffer(sess_data);
if (!rc) {
- mutex_lock(&ses->server->srv_mutex);
- if (!ses->server->session_estab) {
- if (ses->server->sign) {
- ses->server->session_key.response =
- kmemdup(ses->auth_key.response,
- ses->auth_key.len, GFP_KERNEL);
- if (!ses->server->session_key.response) {
- rc = -ENOMEM;
- mutex_unlock(&ses->server->srv_mutex);
- goto keycp_exit;
- }
- ses->server->session_key.len =
- ses->auth_key.len;
- }
- ses->server->sequence_number = 0x2;
- ses->server->session_estab = true;
- }
- mutex_unlock(&ses->server->srv_mutex);
+ sess_data->func = sess_auth_rawntlmssp_authenticate;
+ return;
+ }
+
+ /* Else error. Cleanup */
+ kfree(ses->auth_key.response);
+ ses->auth_key.response = NULL;
+ kfree(ses->ntlmssp);
+ ses->ntlmssp = NULL;
+
+ sess_data->func = NULL;
+ sess_data->result = rc;
+}
- cifs_dbg(FYI, "CIFS session established successfully\n");
- spin_lock(&GlobalMid_Lock);
- ses->status = CifsGood;
- ses->need_reconnect = false;
- spin_unlock(&GlobalMid_Lock);
+static void
+sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
+{
+ int rc;
+ struct smb_hdr *smb_buf;
+ SESSION_SETUP_ANDX *pSMB;
+ struct cifs_ses *ses = sess_data->ses;
+ __u16 bytes_remaining;
+ char *bcc_ptr;
+ char *ntlmsspblob = NULL;
+ u16 blob_len;
+
+ cifs_dbg(FYI, "rawntlmssp session setup authenticate phase\n");
+
+ /* wct = 12 */
+ rc = sess_alloc_buffer(sess_data, 12);
+ if (rc)
+ goto out;
+
+ /* Build security blob before we assemble the request */
+ pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
+ smb_buf = (struct smb_hdr *)pSMB;
+ /*
+ * 5 is an empirical value, large enough to hold
+	 * authenticate message plus max 10 of av pairs,
+ * domain, user, workstation names, flags, etc.
+ */
+ ntlmsspblob = kzalloc(5*sizeof(struct _AUTHENTICATE_MESSAGE),
+ GFP_KERNEL);
+ if (!ntlmsspblob) {
+ rc = -ENOMEM;
+ goto out;
}
-keycp_exit:
+ rc = build_ntlmssp_auth_blob(ntlmsspblob,
+ &blob_len, ses, sess_data->nls_cp);
+ if (rc)
+ goto out_free_ntlmsspblob;
+ sess_data->iov[1].iov_len = blob_len;
+ sess_data->iov[1].iov_base = ntlmsspblob;
+ pSMB->req.SecurityBlobLength = cpu_to_le16(blob_len);
+ /*
+ * Make sure that we tell the server that we are using
+ * the uid that it just gave us back on the response
+ * (challenge)
+ */
+ smb_buf->Uid = ses->Suid;
+
+ rc = _sess_auth_rawntlmssp_assemble_req(sess_data);
+ if (rc)
+ goto out_free_ntlmsspblob;
+
+ rc = sess_sendreceive(sess_data);
+ if (rc)
+ goto out_free_ntlmsspblob;
+
+ pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
+ smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base;
+ if (smb_buf->WordCount != 4) {
+ rc = -EIO;
+ cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount);
+ goto out_free_ntlmsspblob;
+ }
+
+ if (le16_to_cpu(pSMB->resp.Action) & GUEST_LOGIN)
+ cifs_dbg(FYI, "Guest login\n"); /* BB mark SesInfo struct? */
+
+ bytes_remaining = get_bcc(smb_buf);
+ bcc_ptr = pByteArea(smb_buf);
+ blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength);
+ if (blob_len > bytes_remaining) {
+ cifs_dbg(VFS, "bad security blob length %d\n",
+ blob_len);
+ rc = -EINVAL;
+ goto out_free_ntlmsspblob;
+ }
+ bcc_ptr += blob_len;
+ bytes_remaining -= blob_len;
+
+ /* BB check if Unicode and decode strings */
+ if (bytes_remaining == 0) {
+ /* no string area to decode, do nothing */
+ } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) {
+ /* unicode string area must be word-aligned */
+ if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) {
+ ++bcc_ptr;
+ --bytes_remaining;
+ }
+ decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses,
+ sess_data->nls_cp);
+ } else {
+ decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses,
+ sess_data->nls_cp);
+ }
+
+out_free_ntlmsspblob:
+ kfree(ntlmsspblob);
+out:
+ sess_free_buffer(sess_data);
+
+ if (!rc)
+ rc = sess_establish_session(sess_data);
+
+ /* Cleanup */
kfree(ses->auth_key.response);
ses->auth_key.response = NULL;
kfree(ses->ntlmssp);
+ ses->ntlmssp = NULL;
+
+ sess_data->func = NULL;
+ sess_data->result = rc;
+}
+
+static int select_sec(struct cifs_ses *ses, struct sess_data *sess_data)
+{
+ int type;
+
+ type = select_sectype(ses->server, ses->sectype);
+ cifs_dbg(FYI, "sess setup type %d\n", type);
+ if (type == Unspecified) {
+ cifs_dbg(VFS,
+ "Unable to select appropriate authentication method!");
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case LANMAN:
+		/* LANMAN and plaintext are less secure and off by default.
+		 * They must be explicitly enabled in kconfig (at build time)
+		 * and turned on at runtime (changed from the default) via
+		 * /proc/fs/cifs or a mount parm. Unfortunately this is still
+		 * needed for old Win (e.g. Win95), some obscure NAS and OS/2. */
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+ sess_data->func = sess_auth_lanman;
+ break;
+#else
+ return -EOPNOTSUPP;
+#endif
+ case NTLM:
+ sess_data->func = sess_auth_ntlm;
+ break;
+ case NTLMv2:
+ sess_data->func = sess_auth_ntlmv2;
+ break;
+ case Kerberos:
+#ifdef CONFIG_CIFS_UPCALL
+ sess_data->func = sess_auth_kerberos;
+ break;
+#else
+ cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n");
+ return -ENOSYS;
+ break;
+#endif /* CONFIG_CIFS_UPCALL */
+ case RawNTLMSSP:
+ sess_data->func = sess_auth_rawntlmssp_negotiate;
+ break;
+ default:
+ cifs_dbg(VFS, "secType %d not supported!\n", type);
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
+int CIFS_SessSetup(const unsigned int xid, struct cifs_ses *ses,
+ const struct nls_table *nls_cp)
+{
+ int rc = 0;
+ struct sess_data *sess_data;
+
+ if (ses == NULL) {
+ WARN(1, "%s: ses == NULL!", __func__);
+ return -EINVAL;
+ }
+
+ sess_data = kzalloc(sizeof(struct sess_data), GFP_KERNEL);
+ if (!sess_data)
+ return -ENOMEM;
+
+ rc = select_sec(ses, sess_data);
+ if (rc)
+ goto out;
+
+ sess_data->xid = xid;
+ sess_data->ses = ses;
+ sess_data->buf0_type = CIFS_NO_BUFFER;
+ sess_data->nls_cp = (struct nls_table *) nls_cp;
+
+ while (sess_data->func)
+ sess_data->func(sess_data);
+
+ /* Store result before we free sess_data */
+ rc = sess_data->result;
+out:
+ kfree(sess_data);
return rc;
}
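
The rewritten session setup above is driven by a small state machine: CIFS_SessSetup() picks an initial handler through select_sec() and then loops while sess_data->func is non-NULL, so a multi-leg exchange such as RawNTLMSSP simply points func at its next phase before returning. A minimal, self-contained sketch of that dispatch pattern follows; the struct and handler names (demo_sess, demo_negotiate, demo_authenticate) are invented for illustration and only model the control flow, not the real CIFS structures.

#include <stdio.h>

/* Hypothetical stand-in for struct sess_data: a result code plus a
 * "next phase" function pointer that each handler updates before it
 * returns, exactly like sess_data->func in the patch above. */
struct demo_sess {
	int result;
	void (*func)(struct demo_sess *);
};

static void demo_authenticate(struct demo_sess *sd)
{
	printf("authenticate phase\n");
	sd->result = 0;
	sd->func = NULL;		/* no further phases: driver loop stops */
}

static void demo_negotiate(struct demo_sess *sd)
{
	printf("negotiate phase\n");
	sd->func = demo_authenticate;	/* chain to the next leg of the exchange */
}

int main(void)
{
	struct demo_sess sd = { .result = -1, .func = demo_negotiate };

	/* Same driver shape as the while loop in CIFS_SessSetup(). */
	while (sd.func)
		sd.func(&sd);

	printf("result = %d\n", sd.result);
	return sd.result;
}
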
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index d1fdfa848703..5e8c22d6c7b9 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -1009,6 +1009,12 @@ cifs_is_read_op(__u32 oplock)
return oplock == OPLOCK_READ;
}
+static unsigned int
+cifs_wp_retry_size(struct inode *inode)
+{
+ return CIFS_SB(inode->i_sb)->wsize;
+}
+
struct smb_version_operations smb1_operations = {
.send_cancel = send_nt_cancel,
.compare_fids = cifs_compare_fids,
@@ -1019,6 +1025,7 @@ struct smb_version_operations smb1_operations = {
.set_credits = cifs_set_credits,
.get_credits_field = cifs_get_credits_field,
.get_credits = cifs_get_credits,
+ .wait_mtu_credits = cifs_wait_mtu_credits,
.get_next_mid = cifs_get_next_mid,
.read_data_offset = cifs_read_data_offset,
.read_data_length = cifs_read_data_length,
@@ -1078,6 +1085,7 @@ struct smb_version_operations smb1_operations = {
.query_mf_symlink = cifs_query_mf_symlink,
.create_mf_symlink = cifs_create_mf_symlink,
.is_read_op = cifs_is_read_op,
+ .wp_retry_size = cifs_wp_retry_size,
#ifdef CONFIG_CIFS_XATTR
.query_all_EAs = CIFSSMBQAllEAs,
.set_EA = CIFSSMBSetEA,
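
The wp_retry_size operation added here lets the generic writeback retry path ask each dialect how big a chunk it may resend in one request: the SMB1 version simply returns the mount's wsize, while the SMB2 version (in smb2ops.c below) additionally caps it at SMB2_MAX_BUFFER_SIZE. A rough standalone sketch of that per-dialect hook, with invented names and an assumed 64 KiB buffer size:

#include <stdio.h>

#define DEMO_SMB2_MAX_BUFFER_SIZE (64 * 1024)	/* assumed 64 KiB per credit */

/* Hypothetical per-dialect hook mirroring .wp_retry_size: how many
 * bytes a writepages retry may resend at once. */
struct demo_ops {
	unsigned int (*wp_retry_size)(unsigned int wsize);
};

static unsigned int demo_smb1_retry(unsigned int wsize)
{
	return wsize;			/* SMB1: the negotiated wsize as-is */
}

static unsigned int demo_smb2_retry(unsigned int wsize)
{
	/* SMB2: never more than one maximum-size buffer */
	return wsize < DEMO_SMB2_MAX_BUFFER_SIZE ?
	       wsize : DEMO_SMB2_MAX_BUFFER_SIZE;
}

int main(void)
{
	struct demo_ops smb1 = { .wp_retry_size = demo_smb1_retry };
	struct demo_ops smb2 = { .wp_retry_size = demo_smb2_retry };
	unsigned int wsize = 1024 * 1024;	/* 1 MiB mount wsize */

	printf("smb1 retry chunk: %u bytes\n", smb1.wp_retry_size(wsize));
	printf("smb2 retry chunk: %u bytes\n", smb2.wp_retry_size(wsize));
	return 0;
}
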
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index 84c012a6aba0..0150182a4494 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -91,7 +91,7 @@ smb2_open_op_close(const unsigned int xid, struct cifs_tcon *tcon,
case SMB2_OP_SET_EOF:
tmprc = SMB2_set_eof(xid, tcon, fid.persistent_fid,
fid.volatile_fid, current->tgid,
- (__le64 *)data);
+ (__le64 *)data, false);
break;
case SMB2_OP_SET_INFO:
tmprc = SMB2_set_info(xid, tcon, fid.persistent_fid,
diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
index 94bd4fbb13d3..e31a9dfdcd39 100644
--- a/fs/cifs/smb2maperror.c
+++ b/fs/cifs/smb2maperror.c
@@ -605,7 +605,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
{STATUS_MAPPED_FILE_SIZE_ZERO, -EIO, "STATUS_MAPPED_FILE_SIZE_ZERO"},
{STATUS_TOO_MANY_OPENED_FILES, -EMFILE, "STATUS_TOO_MANY_OPENED_FILES"},
{STATUS_CANCELLED, -EIO, "STATUS_CANCELLED"},
- {STATUS_CANNOT_DELETE, -EIO, "STATUS_CANNOT_DELETE"},
+ {STATUS_CANNOT_DELETE, -EACCES, "STATUS_CANNOT_DELETE"},
{STATUS_INVALID_COMPUTER_NAME, -EIO, "STATUS_INVALID_COMPUTER_NAME"},
{STATUS_FILE_DELETED, -EIO, "STATUS_FILE_DELETED"},
{STATUS_SPECIAL_ACCOUNT, -EIO, "STATUS_SPECIAL_ACCOUNT"},
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index b8021fde987d..f2e6ac29a8d6 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -437,7 +437,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
continue;
cifs_dbg(FYI, "found in the open list\n");
- cifs_dbg(FYI, "lease key match, lease break 0x%d\n",
+ cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
le32_to_cpu(rsp->NewLeaseState));
server->ops->set_oplock_level(cinode, lease_state, 0, NULL);
@@ -467,7 +467,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
}
cifs_dbg(FYI, "found in the pending open list\n");
- cifs_dbg(FYI, "lease key match, lease break 0x%d\n",
+ cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
le32_to_cpu(rsp->NewLeaseState));
open->oplock = lease_state;
@@ -546,7 +546,7 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
return false;
}
- cifs_dbg(FYI, "oplock level 0x%d\n", rsp->OplockLevel);
+ cifs_dbg(FYI, "oplock level 0x%x\n", rsp->OplockLevel);
/* look up tcon based on tid & uid */
spin_lock(&cifs_tcp_ses_lock);
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 787844bde384..77f8aeb9c2fc 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -19,6 +19,7 @@
#include <linux/pagemap.h>
#include <linux/vfs.h>
+#include <linux/falloc.h>
#include "cifsglob.h"
#include "smb2pdu.h"
#include "smb2proto.h"
@@ -112,6 +113,53 @@ smb2_get_credits(struct mid_q_entry *mid)
return le16_to_cpu(((struct smb2_hdr *)mid->resp_buf)->CreditRequest);
}
+static int
+smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
+ unsigned int *num, unsigned int *credits)
+{
+ int rc = 0;
+ unsigned int scredits;
+
+ spin_lock(&server->req_lock);
+ while (1) {
+ if (server->credits <= 0) {
+ spin_unlock(&server->req_lock);
+ cifs_num_waiters_inc(server);
+ rc = wait_event_killable(server->request_q,
+ has_credits(server, &server->credits));
+ cifs_num_waiters_dec(server);
+ if (rc)
+ return rc;
+ spin_lock(&server->req_lock);
+ } else {
+ if (server->tcpStatus == CifsExiting) {
+ spin_unlock(&server->req_lock);
+ return -ENOENT;
+ }
+
+ scredits = server->credits;
+ /* can deadlock with reopen */
+ if (scredits == 1) {
+ *num = SMB2_MAX_BUFFER_SIZE;
+ *credits = 0;
+ break;
+ }
+
+ /* leave one credit for a possible reopen */
+ scredits--;
+ *num = min_t(unsigned int, size,
+ scredits * SMB2_MAX_BUFFER_SIZE);
+
+ *credits = DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE);
+ server->credits -= *credits;
+ server->in_flight++;
+ break;
+ }
+ }
+ spin_unlock(&server->req_lock);
+ return rc;
+}
+
static __u64
smb2_get_next_mid(struct TCP_Server_Info *server)
{
@@ -182,8 +230,9 @@ smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
/* start with specified wsize, or default */
wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
wsize = min_t(unsigned int, wsize, server->max_write);
- /* set it to the maximum buffer size value we can send with 1 credit */
- wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
+
+ if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
+ wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
return wsize;
}
@@ -197,8 +246,9 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
/* start with specified rsize, or default */
rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
rsize = min_t(unsigned int, rsize, server->max_read);
- /* set it to the maximum buffer size value we can send with 1 credit */
- rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
+
+ if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
+ rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
return rsize;
}
@@ -687,7 +737,7 @@ smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
{
__le64 eof = cpu_to_le64(size);
return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
- cfile->fid.volatile_fid, cfile->pid, &eof);
+ cfile->fid.volatile_fid, cfile->pid, &eof, false);
}
static int
@@ -1104,6 +1154,13 @@ smb3_parse_lease_buf(void *buf, unsigned int *epoch)
return le32_to_cpu(lc->lcontext.LeaseState);
}
+static unsigned int
+smb2_wp_retry_size(struct inode *inode)
+{
+ return min_t(unsigned int, CIFS_SB(inode->i_sb)->wsize,
+ SMB2_MAX_BUFFER_SIZE);
+}
+
struct smb_version_operations smb20_operations = {
.compare_fids = smb2_compare_fids,
.setup_request = smb2_setup_request,
@@ -1113,6 +1170,7 @@ struct smb_version_operations smb20_operations = {
.set_credits = smb2_set_credits,
.get_credits_field = smb2_get_credits_field,
.get_credits = smb2_get_credits,
+ .wait_mtu_credits = cifs_wait_mtu_credits,
.get_next_mid = smb2_get_next_mid,
.read_data_offset = smb2_read_data_offset,
.read_data_length = smb2_read_data_length,
@@ -1177,6 +1235,7 @@ struct smb_version_operations smb20_operations = {
.create_lease_buf = smb2_create_lease_buf,
.parse_lease_buf = smb2_parse_lease_buf,
.clone_range = smb2_clone_range,
+ .wp_retry_size = smb2_wp_retry_size,
};
struct smb_version_operations smb21_operations = {
@@ -1188,6 +1247,7 @@ struct smb_version_operations smb21_operations = {
.set_credits = smb2_set_credits,
.get_credits_field = smb2_get_credits_field,
.get_credits = smb2_get_credits,
+ .wait_mtu_credits = smb2_wait_mtu_credits,
.get_next_mid = smb2_get_next_mid,
.read_data_offset = smb2_read_data_offset,
.read_data_length = smb2_read_data_length,
@@ -1252,6 +1312,7 @@ struct smb_version_operations smb21_operations = {
.create_lease_buf = smb2_create_lease_buf,
.parse_lease_buf = smb2_parse_lease_buf,
.clone_range = smb2_clone_range,
+ .wp_retry_size = smb2_wp_retry_size,
};
struct smb_version_operations smb30_operations = {
@@ -1263,6 +1324,7 @@ struct smb_version_operations smb30_operations = {
.set_credits = smb2_set_credits,
.get_credits_field = smb2_get_credits_field,
.get_credits = smb2_get_credits,
+ .wait_mtu_credits = smb2_wait_mtu_credits,
.get_next_mid = smb2_get_next_mid,
.read_data_offset = smb2_read_data_offset,
.read_data_length = smb2_read_data_length,
@@ -1330,6 +1392,7 @@ struct smb_version_operations smb30_operations = {
.parse_lease_buf = smb3_parse_lease_buf,
.clone_range = smb2_clone_range,
.validate_negotiate = smb3_validate_negotiate,
+ .wp_retry_size = smb2_wp_retry_size,
};
struct smb_version_values smb20_values = {
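
smb2_wait_mtu_credits() above turns a requested I/O size into a credit reservation: it clamps the request to what the available credits cover (one credit pays for up to SMB2_MAX_BUFFER_SIZE bytes), keeps one credit back so a reopen can never be starved, and charges DIV_ROUND_UP(num, SMB2_MAX_BUFFER_SIZE) credits for what remains. The standalone sketch below models only that size/credit split, assuming the usual 64 KiB per-credit buffer; the locking and the wait on request_q are deliberately left out.

#include <stdio.h>

#define DEMO_SMB2_MAX_BUFFER_SIZE	(64 * 1024)	/* assumed 64 KiB per credit */
#define DEMO_DIV_ROUND_UP(n, d)		(((n) + (d) - 1) / (d))

/* Given the requested byte count and the credits currently granted by
 * the server, decide how many bytes go out now and how many credits
 * that consumes, following the arithmetic of smb2_wait_mtu_credits(). */
static void demo_split_request(unsigned int size, unsigned int granted,
			       unsigned int *num, unsigned int *credits)
{
	if (granted <= 1) {
		/* last credit: use it for one buffer but charge nothing,
		 * so a possible reopen is not deadlocked */
		*num = DEMO_SMB2_MAX_BUFFER_SIZE;
		*credits = 0;
		return;
	}

	granted--;			/* keep one credit for a possible reopen */
	*num = size < granted * DEMO_SMB2_MAX_BUFFER_SIZE ?
	       size : granted * DEMO_SMB2_MAX_BUFFER_SIZE;
	*credits = DEMO_DIV_ROUND_UP(*num, DEMO_SMB2_MAX_BUFFER_SIZE);
}

int main(void)
{
	unsigned int num, credits;

	demo_split_request(1024 * 1024, 20, &num, &credits);
	printf("1 MiB, 20 credits -> %u bytes for %u credits\n", num, credits);

	demo_split_request(1024 * 1024, 5, &num, &credits);
	printf("1 MiB,  5 credits -> %u bytes for %u credits\n", num, credits);
	return 0;
}
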
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index b0b260dbb19d..42ebc1a8be6c 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -108,7 +108,6 @@ smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ ,
if (!tcon)
goto out;
- /* BB FIXME when we do write > 64K add +1 for every 64K in req or rsp */
/* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
/* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
if ((tcon->ses) &&
@@ -245,10 +244,6 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
if (rc)
goto out;
atomic_inc(&tconInfoReconnectCount);
- /*
- * BB FIXME add code to check if wsize needs update due to negotiated
- * smb buffer size shrinking.
- */
out:
/*
* Check if handle based operation so we know whether we can continue
@@ -309,16 +304,6 @@ small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon,
return rc;
}
-static void
-free_rsp_buf(int resp_buftype, void *rsp)
-{
- if (resp_buftype == CIFS_SMALL_BUFFER)
- cifs_small_buf_release(rsp);
- else if (resp_buftype == CIFS_LARGE_BUFFER)
- cifs_buf_release(rsp);
-}
-
-
/*
*
* SMB2 Worker functions follow:
@@ -1738,12 +1723,18 @@ smb2_readv_callback(struct mid_q_entry *mid)
rc);
}
/* FIXME: should this be counted toward the initiating task? */
- task_io_account_read(rdata->bytes);
- cifs_stats_bytes_read(tcon, rdata->bytes);
+ task_io_account_read(rdata->got_bytes);
+ cifs_stats_bytes_read(tcon, rdata->got_bytes);
break;
case MID_REQUEST_SUBMITTED:
case MID_RETRY_NEEDED:
rdata->result = -EAGAIN;
+ if (server->sign && rdata->got_bytes)
+			/* reset byte count since we cannot verify the signature */
+ rdata->got_bytes = 0;
+ /* FIXME: should this be counted toward the initiating task? */
+ task_io_account_read(rdata->got_bytes);
+ cifs_stats_bytes_read(tcon, rdata->got_bytes);
break;
default:
if (rdata->result != -ENODATA)
@@ -1762,11 +1753,12 @@ smb2_readv_callback(struct mid_q_entry *mid)
int
smb2_async_readv(struct cifs_readdata *rdata)
{
- int rc;
+ int rc, flags = 0;
struct smb2_hdr *buf;
struct cifs_io_parms io_parms;
struct smb_rqst rqst = { .rq_iov = &rdata->iov,
.rq_nvec = 1 };
+ struct TCP_Server_Info *server;
cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
__func__, rdata->offset, rdata->bytes);
@@ -1777,18 +1769,41 @@ smb2_async_readv(struct cifs_readdata *rdata)
io_parms.persistent_fid = rdata->cfile->fid.persistent_fid;
io_parms.volatile_fid = rdata->cfile->fid.volatile_fid;
io_parms.pid = rdata->pid;
+
+ server = io_parms.tcon->ses->server;
+
rc = smb2_new_read_req(&rdata->iov, &io_parms, 0, 0);
- if (rc)
+ if (rc) {
+ if (rc == -EAGAIN && rdata->credits) {
+ /* credits was reset by reconnect */
+ rdata->credits = 0;
+ /* reduce in_flight value since we won't send the req */
+ spin_lock(&server->req_lock);
+ server->in_flight--;
+ spin_unlock(&server->req_lock);
+ }
return rc;
+ }
buf = (struct smb2_hdr *)rdata->iov.iov_base;
/* 4 for rfc1002 length field */
rdata->iov.iov_len = get_rfc1002_length(rdata->iov.iov_base) + 4;
+ if (rdata->credits) {
+ buf->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
+ SMB2_MAX_BUFFER_SIZE));
+ spin_lock(&server->req_lock);
+ server->credits += rdata->credits -
+ le16_to_cpu(buf->CreditCharge);
+ spin_unlock(&server->req_lock);
+ wake_up(&server->request_q);
+ flags = CIFS_HAS_CREDITS;
+ }
+
kref_get(&rdata->refcount);
rc = cifs_call_async(io_parms.tcon->ses->server, &rqst,
cifs_readv_receive, smb2_readv_callback,
- rdata, 0);
+ rdata, flags);
if (rc) {
kref_put(&rdata->refcount, cifs_readdata_release);
cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
@@ -1906,15 +1921,25 @@ int
smb2_async_writev(struct cifs_writedata *wdata,
void (*release)(struct kref *kref))
{
- int rc = -EACCES;
+ int rc = -EACCES, flags = 0;
struct smb2_write_req *req = NULL;
struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
+ struct TCP_Server_Info *server = tcon->ses->server;
struct kvec iov;
struct smb_rqst rqst;
rc = small_smb2_init(SMB2_WRITE, tcon, (void **) &req);
- if (rc)
+ if (rc) {
+ if (rc == -EAGAIN && wdata->credits) {
+ /* credits was reset by reconnect */
+ wdata->credits = 0;
+ /* reduce in_flight value since we won't send the req */
+ spin_lock(&server->req_lock);
+ server->in_flight--;
+ spin_unlock(&server->req_lock);
+ }
goto async_writev_out;
+ }
req->hdr.ProcessId = cpu_to_le32(wdata->cfile->pid);
@@ -1947,9 +1972,20 @@ smb2_async_writev(struct cifs_writedata *wdata,
inc_rfc1001_len(&req->hdr, wdata->bytes - 1 /* Buffer */);
+ if (wdata->credits) {
+ req->hdr.CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
+ SMB2_MAX_BUFFER_SIZE));
+ spin_lock(&server->req_lock);
+ server->credits += wdata->credits -
+ le16_to_cpu(req->hdr.CreditCharge);
+ spin_unlock(&server->req_lock);
+ wake_up(&server->request_q);
+ flags = CIFS_HAS_CREDITS;
+ }
+
kref_get(&wdata->refcount);
- rc = cifs_call_async(tcon->ses->server, &rqst, NULL,
- smb2_writev_callback, wdata, 0);
+ rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, wdata,
+ flags);
if (rc) {
kref_put(&wdata->refcount, release);
@@ -2325,7 +2361,7 @@ SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
int
SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
- u64 volatile_fid, u32 pid, __le64 *eof)
+ u64 volatile_fid, u32 pid, __le64 *eof, bool is_falloc)
{
struct smb2_file_eof_info info;
void *data;
@@ -2336,8 +2372,12 @@ SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
data = &info;
size = sizeof(struct smb2_file_eof_info);
- return send_set_info(xid, tcon, persistent_fid, volatile_fid, pid,
- FILE_END_OF_FILE_INFORMATION, 1, &data, &size);
+ if (is_falloc)
+ return send_set_info(xid, tcon, persistent_fid, volatile_fid,
+ pid, FILE_ALLOCATION_INFORMATION, 1, &data, &size);
+ else
+ return send_set_info(xid, tcon, persistent_fid, volatile_fid,
+ pid, FILE_END_OF_FILE_INFORMATION, 1, &data, &size);
}
int
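
For the multi-credit reads and writes above, the caller has already reserved rdata->credits or wdata->credits through the wait_mtu_credits hook; smb2_async_readv() and smb2_async_writev() then stamp the header with CreditCharge = DIV_ROUND_UP(bytes, SMB2_MAX_BUFFER_SIZE), hand any surplus back to server->credits, and pass CIFS_HAS_CREDITS so the transport does not take another credit. The sketch below covers only that surplus arithmetic for a request that ended up smaller than its reservation; the names and sizes are invented, and 64 KiB per credit is assumed.

#include <stdio.h>

#define DEMO_SMB2_MAX_BUFFER_SIZE	(64 * 1024)	/* assumed 64 KiB per credit */
#define DEMO_DIV_ROUND_UP(n, d)		(((n) + (d) - 1) / (d))

/* CreditCharge for a request of the given size, as set on the wire
 * before the call to cifs_call_async(). */
static unsigned int demo_credit_charge(unsigned int bytes)
{
	return DEMO_DIV_ROUND_UP(bytes, DEMO_SMB2_MAX_BUFFER_SIZE);
}

int main(void)
{
	unsigned int reserved = 16;		/* credits taken up front */
	unsigned int bytes = 900 * 1024;	/* request shrank to 900 KiB */
	unsigned int charge = demo_credit_charge(bytes);

	/* the surplus flows back into server->credits and wakes waiters */
	printf("CreditCharge = %u, returned to the pool = %u\n",
	       charge, reserved - charge);
	return 0;
}
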
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 0ce48db20a65..67e8ce8055de 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -139,7 +139,7 @@ extern int SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
__le16 *target_file);
extern int SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, u32 pid,
- __le64 *eof);
+ __le64 *eof, bool is_fallocate);
extern int SMB2_set_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid,
FILE_BASIC_INFO *buf);
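
The SMB2_set_eof() prototype now carries a bool so one helper can serve both truncate-style size changes and fallocate-style preallocation: with is_fallocate set it sends FILE_ALLOCATION_INFORMATION, otherwise FILE_END_OF_FILE_INFORMATION (see the smb2pdu.c hunk above). The tiny sketch below only illustrates that selection; the numeric info-class values are assumptions taken from MS-FSCC, not from this patch.

#include <stdio.h>
#include <stdbool.h>

/* Assumed MS-FSCC info-class numbers, shown purely for illustration. */
enum demo_info_class {
	DEMO_FILE_ALLOCATION_INFORMATION	= 19,
	DEMO_FILE_END_OF_FILE_INFORMATION	= 20,
};

/* Mirrors the choice the reworked SMB2_set_eof() makes on is_falloc. */
static enum demo_info_class demo_pick_info_class(bool is_falloc)
{
	return is_falloc ? DEMO_FILE_ALLOCATION_INFORMATION
			 : DEMO_FILE_END_OF_FILE_INFORMATION;
}

int main(void)
{
	printf("truncate path  -> info class %d\n", demo_pick_info_class(false));
	printf("fallocate path -> info class %d\n", demo_pick_info_class(true));
	return 0;
}
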
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 59c748ce872f..5111e7272db6 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -466,7 +466,12 @@ smb2_verify_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
static inline void
smb2_seq_num_into_buf(struct TCP_Server_Info *server, struct smb2_hdr *hdr)
{
+ unsigned int i, num = le16_to_cpu(hdr->CreditCharge);
+
hdr->MessageId = get_next_mid64(server);
+ /* skip message numbers according to CreditCharge field */
+ for (i = 1; i < num; i++)
+ get_next_mid(server);
}
static struct mid_q_entry *
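
Because a request charged N credits also owns N message IDs, smb2_seq_num_into_buf() now assigns MessageId and then burns CreditCharge - 1 further mids, keeping the client's sequence numbers aligned with what the server expects for multi-credit requests. A toy sketch of that block reservation, with a plain counter standing in for the server's mid allocator:

#include <stdio.h>

static unsigned long long demo_next_mid;

/* Stand-in for get_next_mid(): sequential message IDs. */
static unsigned long long demo_get_next_mid(void)
{
	return demo_next_mid++;
}

/* A request charged N credits owns a block of N consecutive MessageIds,
 * so skip the extra N - 1, as smb2_seq_num_into_buf() now does. */
static unsigned long long demo_assign_message_id(unsigned int credit_charge)
{
	unsigned long long mid = demo_get_next_mid();
	unsigned int i;

	for (i = 1; i < credit_charge; i++)
		demo_get_next_mid();
	return mid;
}

int main(void)
{
	printf("16-credit write gets MessageId %llu\n", demo_assign_message_id(16));
	printf("next 1-credit request gets MessageId %llu\n", demo_assign_message_id(1));
	return 0;
}
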
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 18cd5650a5fc..9d087f4e7d4e 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -448,6 +448,15 @@ wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
return wait_for_free_credits(server, timeout, val);
}
+int
+cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
+ unsigned int *num, unsigned int *credits)
+{
+ *num = size;
+ *credits = 0;
+ return 0;
+}
+
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
struct mid_q_entry **ppmidQ)
{
@@ -531,20 +540,23 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
{
int rc, timeout, optype;
struct mid_q_entry *mid;
+ unsigned int credits = 0;
timeout = flags & CIFS_TIMEOUT_MASK;
optype = flags & CIFS_OP_MASK;
- rc = wait_for_free_request(server, timeout, optype);
- if (rc)
- return rc;
+ if ((flags & CIFS_HAS_CREDITS) == 0) {
+ rc = wait_for_free_request(server, timeout, optype);
+ if (rc)
+ return rc;
+ credits = 1;
+ }
mutex_lock(&server->srv_mutex);
mid = server->ops->setup_async_request(server, rqst);
if (IS_ERR(mid)) {
mutex_unlock(&server->srv_mutex);
- add_credits(server, 1, optype);
- wake_up(&server->request_q);
+ add_credits_and_wake_if(server, credits, optype);
return PTR_ERR(mid);
}
@@ -572,8 +584,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
return 0;
cifs_delete_mid(mid);
- add_credits(server, 1, optype);
- wake_up(&server->request_q);
+ add_credits_and_wake_if(server, credits, optype);
return rc;
}
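
cifs_call_async() now distinguishes pre-charged requests: with CIFS_HAS_CREDITS set the caller has already paid for the send, so the function neither waits for a free request slot nor, on failure, hands back a credit it never took (its local credits stays 0, making add_credits_and_wake_if() a no-op). Below is a compact sketch of that error-path accounting with invented helpers; the real code also handles timeouts, optype and the mid queue, which are omitted here.

#include <stdio.h>
#include <stdbool.h>

static int demo_pool = 10;		/* stand-in for server->credits */

static void demo_take_credit(void)		{ demo_pool--; }
static void demo_add_credits_and_wake_if(int n)	{ demo_pool += n; }

/* Only take (and, on error, give back) a credit when the caller did
 * not bring its own, following the CIFS_HAS_CREDITS path above. */
static int demo_call_async(bool has_credits, bool fail_setup)
{
	int credits = 0;

	if (!has_credits) {
		demo_take_credit();	/* wait_for_free_request() in the real code */
		credits = 1;
	}

	if (fail_setup) {
		demo_add_credits_and_wake_if(credits);	/* 0 or 1, never more */
		return -1;
	}
	return 0;
}

int main(void)
{
	demo_call_async(true, true);	/* pre-charged request that fails early */
	printf("pool after pre-charged failure: %d\n", demo_pool);

	demo_call_async(false, true);	/* ordinary request that fails early */
	printf("pool after ordinary failure: %d\n", demo_pool);
	return 0;
}
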