Diffstat (limited to 'fs/gfs2')
-rw-r--r--  fs/gfs2/acl.c          234
-rw-r--r--  fs/gfs2/acl.h            4
-rw-r--r--  fs/gfs2/aops.c          49
-rw-r--r--  fs/gfs2/dir.c           90
-rw-r--r--  fs/gfs2/dir.h           19
-rw-r--r--  fs/gfs2/glock.c         31
-rw-r--r--  fs/gfs2/glock.h          2
-rw-r--r--  fs/gfs2/glops.c         36
-rw-r--r--  fs/gfs2/incore.h        23
-rw-r--r--  fs/gfs2/inode.c        152
-rw-r--r--  fs/gfs2/log.c            4
-rw-r--r--  fs/gfs2/lops.c           7
-rw-r--r--  fs/gfs2/main.c           1
-rw-r--r--  fs/gfs2/meta_io.c        8
-rw-r--r--  fs/gfs2/ops_fstype.c    72
-rw-r--r--  fs/gfs2/quota.c        342
-rw-r--r--  fs/gfs2/quota.h          1
-rw-r--r--  fs/gfs2/rgrp.c         113
-rw-r--r--  fs/gfs2/rgrp.h           2
-rw-r--r--  fs/gfs2/super.c         43
-rw-r--r--  fs/gfs2/xattr.c          4
21 files changed, 660 insertions, 577 deletions
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index f69ac0af5496..ba9456685f47 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -49,10 +49,6 @@ struct posix_acl *gfs2_get_acl(struct inode *inode, int type)
if (!ip->i_eattr)
return NULL;
- acl = get_cached_acl(&ip->i_inode, type);
- if (acl != ACL_NOT_CACHED)
- return acl;
-
name = gfs2_acl_name(type);
if (name == NULL)
return ERR_PTR(-EINVAL);
@@ -80,7 +76,7 @@ static int gfs2_set_mode(struct inode *inode, umode_t mode)
return error;
}
-static int gfs2_acl_set(struct inode *inode, int type, struct posix_acl *acl)
+int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
int error;
int len;
@@ -88,219 +84,49 @@ static int gfs2_acl_set(struct inode *inode, int type, struct posix_acl *acl)
const char *name = gfs2_acl_name(type);
BUG_ON(name == NULL);
- len = posix_acl_to_xattr(&init_user_ns, acl, NULL, 0);
- if (len == 0)
- return 0;
- data = kmalloc(len, GFP_NOFS);
- if (data == NULL)
- return -ENOMEM;
- error = posix_acl_to_xattr(&init_user_ns, acl, data, len);
- if (error < 0)
- goto out;
- error = __gfs2_xattr_set(inode, name, data, len, 0, GFS2_EATYPE_SYS);
- if (!error)
- set_cached_acl(inode, type, acl);
-out:
- kfree(data);
- return error;
-}
-
-int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode)
-{
- struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
- struct posix_acl *acl;
- umode_t mode = inode->i_mode;
- int error = 0;
-
- if (!sdp->sd_args.ar_posix_acl)
- return 0;
- if (S_ISLNK(inode->i_mode))
- return 0;
-
- acl = gfs2_get_acl(&dip->i_inode, ACL_TYPE_DEFAULT);
- if (IS_ERR(acl))
- return PTR_ERR(acl);
- if (!acl) {
- mode &= ~current_umask();
- return gfs2_set_mode(inode, mode);
- }
-
- if (S_ISDIR(inode->i_mode)) {
- error = gfs2_acl_set(inode, ACL_TYPE_DEFAULT, acl);
- if (error)
- goto out;
- }
-
- error = posix_acl_create(&acl, GFP_NOFS, &mode);
- if (error < 0)
- return error;
- if (error == 0)
- goto munge;
-
- error = gfs2_acl_set(inode, ACL_TYPE_ACCESS, acl);
- if (error)
- goto out;
-munge:
- error = gfs2_set_mode(inode, mode);
-out:
- posix_acl_release(acl);
- return error;
-}
-
-int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr)
-{
- struct inode *inode = &ip->i_inode;
- struct posix_acl *acl;
- char *data;
- unsigned int len;
- int error;
-
- acl = gfs2_get_acl(&ip->i_inode, ACL_TYPE_ACCESS);
- if (IS_ERR(acl))
- return PTR_ERR(acl);
- if (!acl)
- return gfs2_setattr_simple(inode, attr);
-
- error = posix_acl_chmod(&acl, GFP_NOFS, attr->ia_mode);
- if (error)
- return error;
-
- len = posix_acl_to_xattr(&init_user_ns, acl, NULL, 0);
- data = kmalloc(len, GFP_NOFS);
- error = -ENOMEM;
- if (data == NULL)
- goto out;
- posix_acl_to_xattr(&init_user_ns, acl, data, len);
- error = gfs2_xattr_acl_chmod(ip, attr, data);
- kfree(data);
- set_cached_acl(&ip->i_inode, ACL_TYPE_ACCESS, acl);
-
-out:
- posix_acl_release(acl);
- return error;
-}
-
-static int gfs2_acl_type(const char *name)
-{
- if (strcmp(name, GFS2_POSIX_ACL_ACCESS) == 0)
- return ACL_TYPE_ACCESS;
- if (strcmp(name, GFS2_POSIX_ACL_DEFAULT) == 0)
- return ACL_TYPE_DEFAULT;
- return -EINVAL;
-}
-
-static int gfs2_xattr_system_get(struct dentry *dentry, const char *name,
- void *buffer, size_t size, int xtype)
-{
- struct inode *inode = dentry->d_inode;
- struct gfs2_sbd *sdp = GFS2_SB(inode);
- struct posix_acl *acl;
- int type;
- int error;
-
- if (!sdp->sd_args.ar_posix_acl)
- return -EOPNOTSUPP;
-
- type = gfs2_acl_type(name);
- if (type < 0)
- return type;
-
- acl = gfs2_get_acl(inode, type);
- if (IS_ERR(acl))
- return PTR_ERR(acl);
- if (acl == NULL)
- return -ENODATA;
-
- error = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
- posix_acl_release(acl);
-
- return error;
-}
-
-static int gfs2_xattr_system_set(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags,
- int xtype)
-{
- struct inode *inode = dentry->d_inode;
- struct gfs2_sbd *sdp = GFS2_SB(inode);
- struct posix_acl *acl = NULL;
- int error = 0, type;
-
- if (!sdp->sd_args.ar_posix_acl)
- return -EOPNOTSUPP;
-
- type = gfs2_acl_type(name);
- if (type < 0)
- return type;
- if (flags & XATTR_CREATE)
- return -EINVAL;
- if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
- return value ? -EACCES : 0;
- if (!uid_eq(current_fsuid(), inode->i_uid) && !capable(CAP_FOWNER))
- return -EPERM;
- if (S_ISLNK(inode->i_mode))
- return -EOPNOTSUPP;
-
- if (!value)
- goto set_acl;
-
- acl = posix_acl_from_xattr(&init_user_ns, value, size);
- if (!acl) {
- /*
- * acl_set_file(3) may request that we set default ACLs with
- * zero length -- defend (gracefully) against that here.
- */
- goto out;
- }
- if (IS_ERR(acl)) {
- error = PTR_ERR(acl);
- goto out;
- }
-
- error = posix_acl_valid(acl);
- if (error)
- goto out_release;
-
- error = -EINVAL;
if (acl->a_count > GFS2_ACL_MAX_ENTRIES)
- goto out_release;
+ return -EINVAL;
if (type == ACL_TYPE_ACCESS) {
umode_t mode = inode->i_mode;
+
error = posix_acl_equiv_mode(acl, &mode);
+ if (error < 0)
+ return error;
- if (error <= 0) {
- posix_acl_release(acl);
+ if (error == 0)
acl = NULL;
- if (error < 0)
- return error;
- }
-
error = gfs2_set_mode(inode, mode);
if (error)
- goto out_release;
+ return error;
}
-set_acl:
- error = __gfs2_xattr_set(inode, name, value, size, 0, GFS2_EATYPE_SYS);
- if (!error) {
- if (acl)
- set_cached_acl(inode, type, acl);
- else
- forget_cached_acl(inode, type);
+ if (acl) {
+ len = posix_acl_to_xattr(&init_user_ns, acl, NULL, 0);
+ if (len == 0)
+ return 0;
+ data = kmalloc(len, GFP_NOFS);
+ if (data == NULL)
+ return -ENOMEM;
+ error = posix_acl_to_xattr(&init_user_ns, acl, data, len);
+ if (error < 0)
+ goto out;
+ } else {
+ data = NULL;
+ len = 0;
}
-out_release:
- posix_acl_release(acl);
+
+ error = __gfs2_xattr_set(inode, name, data, len, 0, GFS2_EATYPE_SYS);
+ if (error)
+ goto out;
+
+ if (acl)
+ set_cached_acl(inode, type, acl);
+ else
+ forget_cached_acl(inode, type);
out:
+ kfree(data);
return error;
}
-
-const struct xattr_handler gfs2_xattr_system_handler = {
- .prefix = XATTR_SYSTEM_PREFIX,
- .flags = GFS2_EATYPE_SYS,
- .get = gfs2_xattr_system_get,
- .set = gfs2_xattr_system_set,
-};
-
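With the private system.* xattr handler gone, ACL updates now flow through the generic ->set_acl path, and gfs2_set_acl() simply serialises the ACL with posix_acl_to_xattr() and stores it via __gfs2_xattr_set(). The resulting blob is still visible from userspace under the usual name; a minimal sketch (the mount point and file name below are placeholders, not part of the patch):

/* Inspect the raw access-ACL xattr that gfs2_set_acl() stores. */
#include <stdio.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/mnt/gfs2/somefile"; /* placeholder */
	char buf[512];
	ssize_t len = getxattr(path, "system.posix_acl_access", buf, sizeof(buf));

	if (len < 0)
		perror("getxattr");	/* ENODATA just means no access ACL is set */
	else
		printf("access ACL xattr is %zd bytes\n", len);
	return 0;
}
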
diff --git a/fs/gfs2/acl.h b/fs/gfs2/acl.h
index 0da38dc7efec..301260c999ba 100644
--- a/fs/gfs2/acl.h
+++ b/fs/gfs2/acl.h
@@ -17,8 +17,6 @@
#define GFS2_ACL_MAX_ENTRIES 25
extern struct posix_acl *gfs2_get_acl(struct inode *inode, int type);
-extern int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode);
-extern int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
-extern const struct xattr_handler gfs2_xattr_system_handler;
+extern int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type);
#endif /* __ACL_DOT_H__ */
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index b7fc035a6943..49436fa7cd4f 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -986,6 +986,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
+ struct address_space *mapping = inode->i_mapping;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder gh;
int rv;
@@ -1006,6 +1007,36 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
if (rv != 1)
goto out; /* dio not valid, fall back to buffered i/o */
+ /*
+ * Now since we are holding a deferred (CW) lock at this point, you
+ * might be wondering why this is ever needed. There is a case however
+ * where we've granted a deferred local lock against a cached exclusive
+ * glock. That is ok provided all granted local locks are deferred, but
+ * it also means that it is possible to encounter pages which are
+ * cached and possibly also mapped. So here we check for that and sort
+ * them out ahead of the dio. The glock state machine will take care of
+ * everything else.
+ *
+ * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
+ * the first place, mapping->nrpages will always be zero.
+ */
+ if (mapping->nrpages) {
+ loff_t lstart = offset & (PAGE_CACHE_SIZE - 1);
+ loff_t len = iov_length(iov, nr_segs);
+ loff_t end = PAGE_ALIGN(offset + len) - 1;
+
+ rv = 0;
+ if (len == 0)
+ goto out;
+ if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
+ unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len);
+ rv = filemap_write_and_wait_range(mapping, lstart, end);
+ if (rv)
+ goto out;
+ if (rw == WRITE)
+ truncate_inode_pages_range(mapping, lstart, end);
+ }
+
rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
offset, nr_segs, gfs2_get_block_direct,
NULL, NULL, 0);
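
The hunk above boils down to: write back any cached pages over the DIO range, and for a direct write also drop them, so the direct I/O never races with stale pagecache left behind by a cached exclusive glock. A condensed sketch of that pattern, reusing the kernel helpers named in the hunk (a restatement for readability, not a standalone buildable unit):

/* Flush, and for writes invalidate, cached pages over [lstart, end]. */
static int flush_range_before_dio(struct address_space *mapping, int rw,
				  loff_t lstart, loff_t end)
{
	int ret = filemap_write_and_wait_range(mapping, lstart, end);

	if (ret)
		return ret;
	if (rw == WRITE)
		truncate_inode_pages_range(mapping, lstart, end);
	return 0;
}
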
@@ -1050,30 +1081,22 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
bh = bh->b_this_page;
} while(bh != head);
spin_unlock(&sdp->sd_ail_lock);
- gfs2_log_unlock(sdp);
head = bh = page_buffers(page);
do {
- gfs2_log_lock(sdp);
bd = bh->b_private;
if (bd) {
gfs2_assert_warn(sdp, bd->bd_bh == bh);
- if (!list_empty(&bd->bd_list)) {
- if (!buffer_pinned(bh))
- list_del_init(&bd->bd_list);
- else
- bd = NULL;
- }
- if (bd)
- bd->bd_bh = NULL;
+ if (!list_empty(&bd->bd_list))
+ list_del_init(&bd->bd_list);
+ bd->bd_bh = NULL;
bh->b_private = NULL;
- }
- gfs2_log_unlock(sdp);
- if (bd)
kmem_cache_free(gfs2_bufdata_cachep, bd);
+ }
bh = bh->b_this_page;
} while (bh != head);
+ gfs2_log_unlock(sdp);
return try_to_free_buffers(page);
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 2e5fc268d324..fa32655449c8 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -834,6 +834,7 @@ static struct gfs2_leaf *new_leaf(struct inode *inode, struct buffer_head **pbh,
struct gfs2_leaf *leaf;
struct gfs2_dirent *dent;
struct qstr name = { .name = "" };
+ struct timespec tv = CURRENT_TIME;
error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
if (error)
@@ -850,7 +851,11 @@ static struct gfs2_leaf *new_leaf(struct inode *inode, struct buffer_head **pbh,
leaf->lf_entries = 0;
leaf->lf_dirent_format = cpu_to_be32(GFS2_FORMAT_DE);
leaf->lf_next = 0;
- memset(leaf->lf_reserved, 0, sizeof(leaf->lf_reserved));
+ leaf->lf_inode = cpu_to_be64(ip->i_no_addr);
+ leaf->lf_dist = cpu_to_be32(1);
+ leaf->lf_nsec = cpu_to_be32(tv.tv_nsec);
+ leaf->lf_sec = cpu_to_be64(tv.tv_sec);
+ memset(leaf->lf_reserved2, 0, sizeof(leaf->lf_reserved2));
dent = (struct gfs2_dirent *)(leaf+1);
gfs2_qstr2dirent(&name, bh->b_size - sizeof(struct gfs2_leaf), dent);
*pbh = bh;
@@ -1612,11 +1617,31 @@ out:
return ret;
}
+/**
+ * dir_new_leaf - Add a new leaf onto hash chain
+ * @inode: The directory
+ * @name: The name we are adding
+ *
+ * This adds a new dir leaf onto an existing leaf when there is not
+ * enough space to add a new dir entry. This is a last resort after
+ * we've expanded the hash table to max size and also split existing
+ * leaf blocks, so it will only occur for very large directories.
+ *
+ * The dist parameter is set to 1 for leaf blocks directly attached
+ * to the hash table, 2 for one layer of indirection, 3 for two layers
+ * etc. We are thus able to tell the difference between an old leaf
+ * with dist set to zero (i.e. "don't know") and a new one where we
+ * set this information for debug/fsck purposes.
+ *
+ * Returns: 0 on success, or -ve on error
+ */
+
static int dir_new_leaf(struct inode *inode, const struct qstr *name)
{
struct buffer_head *bh, *obh;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_leaf *leaf, *oleaf;
+ u32 dist = 1;
int error;
u32 index;
u64 bn;
@@ -1626,6 +1651,7 @@ static int dir_new_leaf(struct inode *inode, const struct qstr *name)
if (error)
return error;
do {
+ dist++;
oleaf = (struct gfs2_leaf *)obh->b_data;
bn = be64_to_cpu(oleaf->lf_next);
if (!bn)
@@ -1643,6 +1669,7 @@ static int dir_new_leaf(struct inode *inode, const struct qstr *name)
brelse(obh);
return -ENOSPC;
}
+ leaf->lf_dist = cpu_to_be32(dist);
oleaf->lf_next = cpu_to_be64(bh->b_blocknr);
brelse(bh);
brelse(obh);
@@ -1659,39 +1686,53 @@ static int dir_new_leaf(struct inode *inode, const struct qstr *name)
/**
* gfs2_dir_add - Add new filename into directory
- * @dip: The GFS2 inode
- * @filename: The new name
- * @inode: The inode number of the entry
- * @type: The type of the entry
+ * @inode: The directory inode
+ * @name: The new name
+ * @nip: The GFS2 inode to be linked in to the directory
+ * @da: The directory addition info
+ *
+ * If the call to gfs2_diradd_alloc_required resulted in there being
+ * no need to allocate any new directory blocks, then it will contain
+ * a pointer to the directory entry and the bh in which it resides. We
+ * can use that without having to repeat the search. If there was no
+ * free space, then we must now create more space.
*
* Returns: 0 on success, error code on failure
*/
int gfs2_dir_add(struct inode *inode, const struct qstr *name,
- const struct gfs2_inode *nip)
+ const struct gfs2_inode *nip, struct gfs2_diradd *da)
{
struct gfs2_inode *ip = GFS2_I(inode);
- struct buffer_head *bh;
- struct gfs2_dirent *dent;
+ struct buffer_head *bh = da->bh;
+ struct gfs2_dirent *dent = da->dent;
+ struct timespec tv;
struct gfs2_leaf *leaf;
int error;
while(1) {
- dent = gfs2_dirent_search(inode, name, gfs2_dirent_find_space,
- &bh);
+ if (da->bh == NULL) {
+ dent = gfs2_dirent_search(inode, name,
+ gfs2_dirent_find_space, &bh);
+ }
if (dent) {
if (IS_ERR(dent))
return PTR_ERR(dent);
dent = gfs2_init_dirent(inode, dent, name, bh);
gfs2_inum_out(nip, dent);
dent->de_type = cpu_to_be16(IF2DT(nip->i_inode.i_mode));
+ tv = CURRENT_TIME;
if (ip->i_diskflags & GFS2_DIF_EXHASH) {
leaf = (struct gfs2_leaf *)bh->b_data;
be16_add_cpu(&leaf->lf_entries, 1);
+ leaf->lf_nsec = cpu_to_be32(tv.tv_nsec);
+ leaf->lf_sec = cpu_to_be64(tv.tv_sec);
}
+ da->dent = NULL;
+ da->bh = NULL;
brelse(bh);
ip->i_entries++;
- ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
+ ip->i_inode.i_mtime = ip->i_inode.i_ctime = tv;
if (S_ISDIR(nip->i_inode.i_mode))
inc_nlink(&ip->i_inode);
mark_inode_dirty(inode);
@@ -1742,6 +1783,7 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry)
const struct qstr *name = &dentry->d_name;
struct gfs2_dirent *dent, *prev = NULL;
struct buffer_head *bh;
+ struct timespec tv = CURRENT_TIME;
/* Returns _either_ the entry (if its first in block) or the
previous entry otherwise */
@@ -1767,13 +1809,15 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry)
if (!entries)
gfs2_consist_inode(dip);
leaf->lf_entries = cpu_to_be16(--entries);
+ leaf->lf_nsec = cpu_to_be32(tv.tv_nsec);
+ leaf->lf_sec = cpu_to_be64(tv.tv_sec);
}
brelse(bh);
if (!dip->i_entries)
gfs2_consist_inode(dip);
dip->i_entries--;
- dip->i_inode.i_mtime = dip->i_inode.i_ctime = CURRENT_TIME;
+ dip->i_inode.i_mtime = dip->i_inode.i_ctime = tv;
if (S_ISDIR(dentry->d_inode->i_mode))
drop_nlink(&dip->i_inode);
mark_inode_dirty(&dip->i_inode);
@@ -2017,22 +2061,36 @@ out:
* gfs2_diradd_alloc_required - find if adding entry will require an allocation
* @ip: the file being written to
 * @filename: the filename that's going to be added
+ * @da: The structure to return dir alloc info
*
- * Returns: 1 if alloc required, 0 if not, -ve on error
+ * Returns: 0 if ok, -ve on error
*/
-int gfs2_diradd_alloc_required(struct inode *inode, const struct qstr *name)
+int gfs2_diradd_alloc_required(struct inode *inode, const struct qstr *name,
+ struct gfs2_diradd *da)
{
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+ const unsigned int extra = sizeof(struct gfs2_dinode) - sizeof(struct gfs2_leaf);
struct gfs2_dirent *dent;
struct buffer_head *bh;
+ da->nr_blocks = 0;
+ da->bh = NULL;
+ da->dent = NULL;
+
dent = gfs2_dirent_search(inode, name, gfs2_dirent_find_space, &bh);
if (!dent) {
- return 1;
+ da->nr_blocks = sdp->sd_max_dirres;
+ if (!(ip->i_diskflags & GFS2_DIF_EXHASH) &&
+ (GFS2_DIRENT_SIZE(name->len) < extra))
+ da->nr_blocks = 1;
+ return 0;
}
if (IS_ERR(dent))
return PTR_ERR(dent);
- brelse(bh);
+ da->bh = bh;
+ da->dent = dent;
return 0;
}
diff --git a/fs/gfs2/dir.h b/fs/gfs2/dir.h
index 4f03bbd1873f..126c65dda028 100644
--- a/fs/gfs2/dir.h
+++ b/fs/gfs2/dir.h
@@ -16,6 +16,14 @@
struct inode;
struct gfs2_inode;
struct gfs2_inum;
+struct buffer_head;
+struct gfs2_dirent;
+
+struct gfs2_diradd {
+ unsigned nr_blocks;
+ struct gfs2_dirent *dent;
+ struct buffer_head *bh;
+};
extern struct inode *gfs2_dir_search(struct inode *dir,
const struct qstr *filename,
@@ -23,7 +31,13 @@ extern struct inode *gfs2_dir_search(struct inode *dir,
extern int gfs2_dir_check(struct inode *dir, const struct qstr *filename,
const struct gfs2_inode *ip);
extern int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
- const struct gfs2_inode *ip);
+ const struct gfs2_inode *ip, struct gfs2_diradd *da);
+static inline void gfs2_dir_no_add(struct gfs2_diradd *da)
+{
+ if (da->bh)
+ brelse(da->bh);
+ da->bh = NULL;
+}
extern int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry);
extern int gfs2_dir_read(struct inode *inode, struct dir_context *ctx,
struct file_ra_state *f_ra);
@@ -33,7 +47,8 @@ extern int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
extern int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip);
extern int gfs2_diradd_alloc_required(struct inode *dir,
- const struct qstr *filename);
+ const struct qstr *filename,
+ struct gfs2_diradd *da);
extern int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
struct buffer_head **bhp);
extern void gfs2_dir_hash_inval(struct gfs2_inode *ip);
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index c8420f7e4db6..ca0be6c69a26 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1552,13 +1552,11 @@ void gfs2_glock_thaw(struct gfs2_sbd *sdp)
glock_hash_walk(thaw_glock, sdp);
}
-static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
+static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
- int ret;
spin_lock(&gl->gl_spin);
- ret = gfs2_dump_glock(seq, gl);
+ gfs2_dump_glock(seq, gl);
spin_unlock(&gl->gl_spin);
- return ret;
}
static void dump_glock_func(struct gfs2_glock *gl)
@@ -1647,14 +1645,14 @@ static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
* @seq: the seq_file struct
* @gh: the glock holder
*
- * Returns: 0 on success, -ENOBUFS when we run out of space
*/
-static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
+static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
struct task_struct *gh_owner = NULL;
char flags_buf[32];
+ rcu_read_lock();
if (gh->gh_owner_pid)
gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
@@ -1664,7 +1662,7 @@ static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
gh_owner ? gh_owner->comm : "(ended)",
(void *)gh->gh_ip);
- return 0;
+ rcu_read_unlock();
}
static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
@@ -1719,16 +1717,14 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
 * example. The fields are n = number (id of the object), f = flags,
* t = type, s = state, r = refcount, e = error, p = pid.
*
- * Returns: 0 on success, -ENOBUFS when we run out of space
*/
-int gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
+void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
const struct gfs2_glock_operations *glops = gl->gl_ops;
unsigned long long dtime;
const struct gfs2_holder *gh;
char gflags_buf[32];
- int error = 0;
dtime = jiffies - gl->gl_demote_time;
dtime *= 1000000/HZ; /* demote time in uSec */
@@ -1745,15 +1741,11 @@ int gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
atomic_read(&gl->gl_revokes),
(int)gl->gl_lockref.count, gl->gl_hold_time);
- list_for_each_entry(gh, &gl->gl_holders, gh_list) {
- error = dump_holder(seq, gh);
- if (error)
- goto out;
- }
+ list_for_each_entry(gh, &gl->gl_holders, gh_list)
+ dump_holder(seq, gh);
+
if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
- error = glops->go_dump(seq, gl);
-out:
- return error;
+ glops->go_dump(seq, gl);
}
static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
@@ -1951,7 +1943,8 @@ static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
- return dump_glock(seq, iter_ptr);
+ dump_glock(seq, iter_ptr);
+ return 0;
}
static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 6647d77366ba..32572f71f027 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -199,7 +199,7 @@ extern int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
struct gfs2_holder *gh);
extern int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
-extern int gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
+extern void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { gfs2_dump_glock(NULL, gl); BUG(); } } while(0)
extern __printf(2, 3)
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index db908f697139..3bf0631b5d56 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -133,7 +133,8 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
static void rgrp_go_sync(struct gfs2_glock *gl)
{
- struct address_space *metamapping = gfs2_glock2aspace(gl);
+ struct gfs2_sbd *sdp = gl->gl_sbd;
+ struct address_space *mapping = &sdp->sd_aspace;
struct gfs2_rgrpd *rgd;
int error;
@@ -141,10 +142,10 @@ static void rgrp_go_sync(struct gfs2_glock *gl)
return;
GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
- gfs2_log_flush(gl->gl_sbd, gl);
- filemap_fdatawrite(metamapping);
- error = filemap_fdatawait(metamapping);
- mapping_set_error(metamapping, error);
+ gfs2_log_flush(sdp, gl);
+ filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
+ error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
+ mapping_set_error(mapping, error);
gfs2_ail_empty_gl(gl);
spin_lock(&gl->gl_spin);
@@ -166,11 +167,12 @@ static void rgrp_go_sync(struct gfs2_glock *gl)
static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
- struct address_space *mapping = gfs2_glock2aspace(gl);
+ struct gfs2_sbd *sdp = gl->gl_sbd;
+ struct address_space *mapping = &sdp->sd_aspace;
WARN_ON_ONCE(!(flags & DIO_METADATA));
- gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
- truncate_inode_pages(mapping, 0);
+ gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
+ truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
if (gl->gl_object) {
struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object;
@@ -192,8 +194,11 @@ static void inode_go_sync(struct gfs2_glock *gl)
if (ip && !S_ISREG(ip->i_inode.i_mode))
ip = NULL;
- if (ip && test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
- unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
+ if (ip) {
+ if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
+ unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
+ inode_dio_wait(&ip->i_inode);
+ }
if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
return;
@@ -410,6 +415,9 @@ static int inode_go_lock(struct gfs2_holder *gh)
return error;
}
+ if (gh->gh_state != LM_ST_DEFERRED)
+ inode_dio_wait(&ip->i_inode);
+
if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
(gl->gl_state == LM_ST_EXCLUSIVE) &&
(gh->gh_state == LM_ST_EXCLUSIVE)) {
@@ -429,21 +437,19 @@ static int inode_go_lock(struct gfs2_holder *gh)
* @seq: The iterator
* @ip: the inode
*
- * Returns: 0 on success, -ENOBUFS when we run out of space
*/
-static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
+static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
const struct gfs2_inode *ip = gl->gl_object;
if (ip == NULL)
- return 0;
+ return;
gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
(unsigned long long)ip->i_no_formal_ino,
(unsigned long long)ip->i_no_addr,
IF2DT(ip->i_inode.i_mode), ip->i_flags,
(unsigned int)ip->i_diskflags,
(unsigned long long)i_size_read(&ip->i_inode));
- return 0;
}
/**
@@ -552,7 +558,7 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
.go_unlock = gfs2_rgrp_go_unlock,
.go_dump = gfs2_rgrp_dump,
.go_type = LM_TYPE_RGRP,
- .go_flags = GLOF_ASPACE | GLOF_LVB,
+ .go_flags = GLOF_LVB,
};
const struct gfs2_glock_operations gfs2_trans_glops = {
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index ba1ea67f4eeb..cf0e34400f71 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -93,6 +93,7 @@ struct gfs2_rgrpd {
struct gfs2_rgrp_lvb *rd_rgl;
u32 rd_last_alloc;
u32 rd_flags;
+ u32 rd_extfail_pt; /* extent failure point */
#define GFS2_RDF_CHECK 0x10000000 /* check for unlinked inodes */
#define GFS2_RDF_UPTODATE 0x20000000 /* rg is up to date */
#define GFS2_RDF_ERROR 0x40000000 /* error in rg */
@@ -217,7 +218,7 @@ struct gfs2_glock_operations {
int (*go_demote_ok) (const struct gfs2_glock *gl);
int (*go_lock) (struct gfs2_holder *gh);
void (*go_unlock) (struct gfs2_holder *gh);
- int (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
+ void (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
void (*go_callback)(struct gfs2_glock *gl, bool remote);
const int go_type;
const unsigned long go_flags;
@@ -350,7 +351,15 @@ struct gfs2_glock {
atomic_t gl_ail_count;
atomic_t gl_revokes;
struct delayed_work gl_work;
- struct work_struct gl_delete;
+ union {
+ /* For inode and iopen glocks only */
+ struct work_struct gl_delete;
+ /* For rgrp glocks only */
+ struct {
+ loff_t start;
+ loff_t end;
+ } gl_vm;
+ };
struct rcu_head gl_rcu;
};
@@ -419,10 +428,13 @@ enum {
};
struct gfs2_quota_data {
+ struct hlist_bl_node qd_hlist;
struct list_head qd_list;
struct kqid qd_id;
+ struct gfs2_sbd *qd_sbd;
struct lockref qd_lockref;
struct list_head qd_lru;
+ unsigned qd_hash;
unsigned long qd_flags; /* QDF_... */
@@ -441,6 +453,7 @@ struct gfs2_quota_data {
u64 qd_sync_gen;
unsigned long qd_last_warn;
+ struct rcu_head qd_rcu;
};
struct gfs2_trans {
@@ -720,13 +733,15 @@ struct gfs2_sbd {
spinlock_t sd_trunc_lock;
unsigned int sd_quota_slots;
- unsigned int sd_quota_chunks;
- unsigned char **sd_quota_bitmap;
+ unsigned long *sd_quota_bitmap;
+ spinlock_t sd_bitmap_lock;
u64 sd_quota_sync_gen;
/* Log stuff */
+ struct address_space sd_aspace;
+
spinlock_t sd_log_lock;
struct gfs2_trans *sd_log_tr;
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 7119504159f1..5c524180c98e 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -149,7 +149,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
ip = GFS2_I(inode);
if (!inode)
- return ERR_PTR(-ENOBUFS);
+ return ERR_PTR(-ENOMEM);
if (inode->i_state & I_NEW) {
struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -469,14 +469,36 @@ static void init_dinode(struct gfs2_inode *dip, struct gfs2_inode *ip,
brelse(dibh);
}
+/**
+ * gfs2_trans_da_blocks - Calculate number of blocks to link inode
+ * @dip: The directory we are linking into
+ * @da: The dir add information
+ * @nr_inodes: The number of inodes involved
+ *
+ * This calculates the number of blocks we need to reserve in a
+ * transaction to link @nr_inodes into a directory. In most cases
+ * @nr_inodes will be 2 (the directory plus the inode being linked in)
+ * but in case of rename, 4 may be required.
+ *
+ * Returns: Number of blocks
+ */
+
+static unsigned gfs2_trans_da_blks(const struct gfs2_inode *dip,
+ const struct gfs2_diradd *da,
+ unsigned nr_inodes)
+{
+ return da->nr_blocks + gfs2_rg_blocks(dip, da->nr_blocks) +
+ (nr_inodes * RES_DINODE) + RES_QUOTA + RES_STATFS;
+}
+
static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
- struct gfs2_inode *ip, int arq)
+ struct gfs2_inode *ip, struct gfs2_diradd *da)
{
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
- struct gfs2_alloc_parms ap = { .target = sdp->sd_max_dirres, };
+ struct gfs2_alloc_parms ap = { .target = da->nr_blocks, };
int error;
- if (arq) {
+ if (da->nr_blocks) {
error = gfs2_quota_lock_check(dip);
if (error)
goto fail_quota_locks;
@@ -485,10 +507,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
if (error)
goto fail_quota_locks;
- error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
- dip->i_rgd->rd_length +
- 2 * RES_DINODE +
- RES_STATFS + RES_QUOTA, 0);
+ error = gfs2_trans_begin(sdp, gfs2_trans_da_blks(dip, da, 2), 0);
if (error)
goto fail_ipreserv;
} else {
@@ -497,7 +516,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
goto fail_quota_locks;
}
- error = gfs2_dir_add(&dip->i_inode, name, ip);
+ error = gfs2_dir_add(&dip->i_inode, name, ip, da);
if (error)
goto fail_end_trans;
@@ -552,6 +571,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
unsigned int size, int excl, int *opened)
{
const struct qstr *name = &dentry->d_name;
+ struct posix_acl *default_acl, *acl;
struct gfs2_holder ghs[2];
struct inode *inode = NULL;
struct gfs2_inode *dip = GFS2_I(dir), *ip;
@@ -560,7 +580,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
struct dentry *d;
int error;
u32 aflags = 0;
- int arq;
+ struct gfs2_diradd da = { .bh = NULL, };
if (!name->len || name->len > GFS2_FNAMESIZE)
return -ENAMETOOLONG;
@@ -585,6 +605,9 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
error = PTR_ERR(inode);
if (!IS_ERR(inode)) {
d = d_splice_alias(inode, dentry);
+ error = PTR_ERR(d);
+ if (IS_ERR(d))
+ goto fail_gunlock;
error = 0;
if (file) {
if (S_ISREG(inode->i_mode)) {
@@ -602,7 +625,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
goto fail_gunlock;
}
- arq = error = gfs2_diradd_alloc_required(dir, name);
+ error = gfs2_diradd_alloc_required(dir, name, &da);
if (error < 0)
goto fail_gunlock;
@@ -611,10 +634,14 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
if (!inode)
goto fail_gunlock;
+ error = posix_acl_create(dir, &mode, &default_acl, &acl);
+ if (error)
+ goto fail_free_vfs_inode;
+
ip = GFS2_I(inode);
error = gfs2_rs_alloc(ip);
if (error)
- goto fail_free_inode;
+ goto fail_free_acls;
inode->i_mode = mode;
set_nlink(inode, S_ISDIR(mode) ? 2 : 1);
@@ -682,7 +709,16 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
gfs2_set_iop(inode);
insert_inode_hash(inode);
- error = gfs2_acl_create(dip, inode);
+ if (default_acl) {
+ error = gfs2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+ posix_acl_release(default_acl);
+ }
+ if (acl) {
+ if (!error)
+ error = gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
+ posix_acl_release(acl);
+ }
+
if (error)
goto fail_gunlock3;
@@ -690,7 +726,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
if (error)
goto fail_gunlock3;
- error = link_dinode(dip, name, ip, arq);
+ error = link_dinode(dip, name, ip, &da);
if (error)
goto fail_gunlock3;
@@ -716,9 +752,16 @@ fail_free_inode:
if (ip->i_gl)
gfs2_glock_put(ip->i_gl);
gfs2_rs_delete(ip, NULL);
+fail_free_acls:
+ if (default_acl)
+ posix_acl_release(default_acl);
+ if (acl)
+ posix_acl_release(acl);
+fail_free_vfs_inode:
free_inode_nonrcu(inode);
inode = NULL;
fail_gunlock:
+ gfs2_dir_no_add(&da);
gfs2_glock_dq_uninit(ghs);
if (inode && !IS_ERR(inode)) {
clear_nlink(inode);
@@ -779,6 +822,11 @@ static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
}
d = d_splice_alias(inode, dentry);
+ if (IS_ERR(d)) {
+ iput(inode);
+ gfs2_glock_dq_uninit(&gh);
+ return d;
+ }
if (file && S_ISREG(inode->i_mode))
error = finish_open(file, dentry, gfs2_open_common, opened);
@@ -817,7 +865,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder ghs[2];
struct buffer_head *dibh;
- int alloc_required;
+ struct gfs2_diradd da = { .bh = NULL, };
int error;
if (S_ISDIR(inode->i_mode))
@@ -872,13 +920,12 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
if (ip->i_inode.i_nlink == (u32)-1)
goto out_gunlock;
- alloc_required = error = gfs2_diradd_alloc_required(dir, &dentry->d_name);
+ error = gfs2_diradd_alloc_required(dir, &dentry->d_name, &da);
if (error < 0)
goto out_gunlock;
- error = 0;
- if (alloc_required) {
- struct gfs2_alloc_parms ap = { .target = sdp->sd_max_dirres, };
+ if (da.nr_blocks) {
+ struct gfs2_alloc_parms ap = { .target = da.nr_blocks, };
error = gfs2_quota_lock_check(dip);
if (error)
goto out_gunlock;
@@ -887,10 +934,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
if (error)
goto out_gunlock_q;
- error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
- gfs2_rg_blocks(dip, sdp->sd_max_dirres) +
- 2 * RES_DINODE + RES_STATFS +
- RES_QUOTA, 0);
+ error = gfs2_trans_begin(sdp, gfs2_trans_da_blks(dip, &da, 2), 0);
if (error)
goto out_ipres;
} else {
@@ -903,7 +947,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
if (error)
goto out_end_trans;
- error = gfs2_dir_add(dir, &dentry->d_name, ip);
+ error = gfs2_dir_add(dir, &dentry->d_name, ip, &da);
if (error)
goto out_brelse;
@@ -919,12 +963,13 @@ out_brelse:
out_end_trans:
gfs2_trans_end(sdp);
out_ipres:
- if (alloc_required)
+ if (da.nr_blocks)
gfs2_inplace_release(dip);
out_gunlock_q:
- if (alloc_required)
+ if (da.nr_blocks)
gfs2_quota_unlock(dip);
out_gunlock:
+ gfs2_dir_no_add(&da);
gfs2_glock_dq(ghs + 1);
out_child:
gfs2_glock_dq(ghs);
@@ -1254,7 +1299,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
struct gfs2_rgrpd *nrgd;
unsigned int num_gh;
int dir_rename = 0;
- int alloc_required = 0;
+ struct gfs2_diradd da = { .nr_blocks = 0, };
unsigned int x;
int error;
@@ -1388,14 +1433,14 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
goto out_gunlock;
}
- if (nip == NULL)
- alloc_required = gfs2_diradd_alloc_required(ndir, &ndentry->d_name);
- error = alloc_required;
- if (error < 0)
- goto out_gunlock;
+ if (nip == NULL) {
+ error = gfs2_diradd_alloc_required(ndir, &ndentry->d_name, &da);
+ if (error)
+ goto out_gunlock;
+ }
- if (alloc_required) {
- struct gfs2_alloc_parms ap = { .target = sdp->sd_max_dirres, };
+ if (da.nr_blocks) {
+ struct gfs2_alloc_parms ap = { .target = da.nr_blocks, };
error = gfs2_quota_lock_check(ndip);
if (error)
goto out_gunlock;
@@ -1404,10 +1449,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
if (error)
goto out_gunlock_q;
- error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
- gfs2_rg_blocks(ndip, sdp->sd_max_dirres) +
- 4 * RES_DINODE + 4 * RES_LEAF +
- RES_STATFS + RES_QUOTA + 4, 0);
+ error = gfs2_trans_begin(sdp, gfs2_trans_da_blks(ndip, &da, 4) +
+ 4 * RES_LEAF + 4, 0);
if (error)
goto out_ipreserv;
} else {
@@ -1441,19 +1484,20 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
if (error)
goto out_end_trans;
- error = gfs2_dir_add(ndir, &ndentry->d_name, ip);
+ error = gfs2_dir_add(ndir, &ndentry->d_name, ip, &da);
if (error)
goto out_end_trans;
out_end_trans:
gfs2_trans_end(sdp);
out_ipreserv:
- if (alloc_required)
+ if (da.nr_blocks)
gfs2_inplace_release(ndip);
out_gunlock_q:
- if (alloc_required)
+ if (da.nr_blocks)
gfs2_quota_unlock(ndip);
out_gunlock:
+ gfs2_dir_no_add(&da);
while (x--) {
gfs2_glock_dq(ghs + x);
gfs2_holder_uninit(ghs + x);
@@ -1607,10 +1651,22 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
if (!(attr->ia_valid & ATTR_GID) || gid_eq(ogid, ngid))
ogid = ngid = NO_GID_QUOTA_CHANGE;
- error = gfs2_quota_lock(ip, nuid, ngid);
+ error = get_write_access(inode);
if (error)
return error;
+ error = gfs2_rs_alloc(ip);
+ if (error)
+ goto out;
+
+ error = gfs2_rindex_update(sdp);
+ if (error)
+ goto out;
+
+ error = gfs2_quota_lock(ip, nuid, ngid);
+ if (error)
+ goto out;
+
if (!uid_eq(ouid, NO_UID_QUOTA_CHANGE) ||
!gid_eq(ogid, NO_GID_QUOTA_CHANGE)) {
error = gfs2_quota_check(ip, nuid, ngid);
@@ -1637,6 +1693,8 @@ out_end_trans:
gfs2_trans_end(sdp);
out_gunlock_q:
gfs2_quota_unlock(ip);
+out:
+ put_write_access(inode);
return error;
}
@@ -1678,10 +1736,11 @@ static int gfs2_setattr(struct dentry *dentry, struct iattr *attr)
error = gfs2_setattr_size(inode, attr->ia_size);
else if (attr->ia_valid & (ATTR_UID | ATTR_GID))
error = setattr_chown(inode, attr);
- else if ((attr->ia_valid & ATTR_MODE) && IS_POSIXACL(inode))
- error = gfs2_acl_chmod(ip, attr);
- else
+ else {
error = gfs2_setattr_simple(inode, attr);
+ if (!error && attr->ia_valid & ATTR_MODE)
+ error = posix_acl_chmod(inode, inode->i_mode);
+ }
out:
if (!error)
@@ -1841,6 +1900,7 @@ const struct inode_operations gfs2_file_iops = {
.removexattr = gfs2_removexattr,
.fiemap = gfs2_fiemap,
.get_acl = gfs2_get_acl,
+ .set_acl = gfs2_set_acl,
};
const struct inode_operations gfs2_dir_iops = {
@@ -1862,6 +1922,7 @@ const struct inode_operations gfs2_dir_iops = {
.removexattr = gfs2_removexattr,
.fiemap = gfs2_fiemap,
.get_acl = gfs2_get_acl,
+ .set_acl = gfs2_set_acl,
.atomic_open = gfs2_atomic_open,
};
@@ -1877,6 +1938,5 @@ const struct inode_operations gfs2_symlink_iops = {
.listxattr = gfs2_listxattr,
.removexattr = gfs2_removexattr,
.fiemap = gfs2_fiemap,
- .get_acl = gfs2_get_acl,
};
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 610613fb65b5..9dcb9777a5f8 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -551,10 +551,10 @@ void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
struct buffer_head *bh = bd->bd_bh;
struct gfs2_glock *gl = bd->bd_gl;
- gfs2_remove_from_ail(bd);
- bd->bd_bh = NULL;
bh->b_private = NULL;
bd->bd_blkno = bh->b_blocknr;
+ gfs2_remove_from_ail(bd); /* drops ref on bh */
+ bd->bd_bh = NULL;
bd->bd_ops = &gfs2_revoke_lops;
sdp->sd_log_num_revoke++;
atomic_inc(&gl->gl_revokes);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 010b9fb9fec6..76693793cedd 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -83,6 +83,7 @@ static void maybe_release_space(struct gfs2_bufdata *bd)
bd->bd_bh->b_data + bi->bi_offset, bi->bi_len);
clear_bit(GBF_FULL, &bi->bi_flags);
rgd->rd_free_clone = rgd->rd_free;
+ rgd->rd_extfail_pt = rgd->rd_free;
}
/**
@@ -272,7 +273,7 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
nrvecs = max(nrvecs/2, 1U);
}
- bio->bi_sector = blkno * (sb->s_blocksize >> 9);
+ bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9);
bio->bi_bdev = sb->s_bdev;
bio->bi_end_io = gfs2_end_log_write;
bio->bi_private = sdp;
@@ -588,8 +589,12 @@ static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
static void gfs2_meta_sync(struct gfs2_glock *gl)
{
struct address_space *mapping = gfs2_glock2aspace(gl);
+ struct gfs2_sbd *sdp = gl->gl_sbd;
int error;
+ if (mapping == NULL)
+ mapping = &sdp->sd_aspace;
+
filemap_fdatawrite(mapping);
error = filemap_fdatawait(mapping);
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 0650db2541ef..c272e73063de 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -76,6 +76,7 @@ static int __init init_gfs2_fs(void)
gfs2_str2qstr(&gfs2_qdot, ".");
gfs2_str2qstr(&gfs2_qdotdot, "..");
+ gfs2_quota_hash_init();
error = gfs2_sys_init();
if (error)
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 932415050540..c7f24690ed05 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -116,6 +116,9 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
unsigned long index;
unsigned int bufnum;
+ if (mapping == NULL)
+ mapping = &sdp->sd_aspace;
+
shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift;
index = blkno >> shift; /* convert block to page */
bufnum = blkno - (index << shift); /* block buf index within page */
@@ -258,6 +261,7 @@ void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int
struct address_space *mapping = bh->b_page->mapping;
struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
struct gfs2_bufdata *bd = bh->b_private;
+ int was_pinned = 0;
if (test_clear_buffer_pinned(bh)) {
trace_gfs2_pin(bd, 0);
@@ -273,12 +277,16 @@ void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int
tr->tr_num_databuf_rm++;
}
tr->tr_touched = 1;
+ was_pinned = 1;
brelse(bh);
}
if (bd) {
spin_lock(&sdp->sd_ail_lock);
if (bd->bd_tr) {
gfs2_trans_add_revoke(sdp, bd);
+ } else if (was_pinned) {
+ bh->b_private = NULL;
+ kmem_cache_free(gfs2_bufdata_cachep, bd);
}
spin_unlock(&sdp->sd_ail_lock);
}
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 82303b474958..c6872d09561a 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -36,6 +36,7 @@
#include "log.h"
#include "quota.h"
#include "dir.h"
+#include "meta_io.h"
#include "trace_gfs2.h"
#define DO 0
@@ -62,6 +63,7 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
static struct gfs2_sbd *init_sbd(struct super_block *sb)
{
struct gfs2_sbd *sdp;
+ struct address_space *mapping;
sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL);
if (!sdp)
@@ -97,6 +99,18 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
init_waitqueue_head(&sdp->sd_quota_wait);
INIT_LIST_HEAD(&sdp->sd_trunc_list);
spin_lock_init(&sdp->sd_trunc_lock);
+ spin_lock_init(&sdp->sd_bitmap_lock);
+
+ mapping = &sdp->sd_aspace;
+
+ address_space_init_once(mapping);
+ mapping->a_ops = &gfs2_meta_aops;
+ mapping->host = sb->s_bdev->bd_inode;
+ mapping->flags = 0;
+ mapping_set_gfp_mask(mapping, GFP_NOFS);
+ mapping->private_data = NULL;
+ mapping->backing_dev_info = sb->s_bdi;
+ mapping->writeback_index = 0;
spin_lock_init(&sdp->sd_log_lock);
atomic_set(&sdp->sd_log_pinned, 0);
@@ -217,14 +231,14 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
page = alloc_page(GFP_NOFS);
if (unlikely(!page))
- return -ENOBUFS;
+ return -ENOMEM;
ClearPageUptodate(page);
ClearPageDirty(page);
lock_page(page);
bio = bio_alloc(GFP_NOFS, 1);
- bio->bi_sector = sector * (sb->s_blocksize >> 9);
+ bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
bio->bi_bdev = sb->s_bdev;
bio_add_page(bio, page, PAGE_SIZE, 0);
@@ -956,40 +970,6 @@ fail:
return error;
}
-static int init_threads(struct gfs2_sbd *sdp, int undo)
-{
- struct task_struct *p;
- int error = 0;
-
- if (undo)
- goto fail_quotad;
-
- p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
- if (IS_ERR(p)) {
- error = PTR_ERR(p);
- fs_err(sdp, "can't start logd thread: %d\n", error);
- return error;
- }
- sdp->sd_logd_process = p;
-
- p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
- if (IS_ERR(p)) {
- error = PTR_ERR(p);
- fs_err(sdp, "can't start quotad thread: %d\n", error);
- goto fail;
- }
- sdp->sd_quotad_process = p;
-
- return 0;
-
-
-fail_quotad:
- kthread_stop(sdp->sd_quotad_process);
-fail:
- kthread_stop(sdp->sd_logd_process);
- return error;
-}
-
static const match_table_t nolock_tokens = {
{ Opt_jid, "jid=%d\n", },
{ Opt_err, NULL },
@@ -1254,15 +1234,11 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent
goto fail_per_node;
}
- error = init_threads(sdp, DO);
- if (error)
- goto fail_per_node;
-
if (!(sb->s_flags & MS_RDONLY)) {
error = gfs2_make_fs_rw(sdp);
if (error) {
fs_err(sdp, "can't make FS RW: %d\n", error);
- goto fail_threads;
+ goto fail_per_node;
}
}
@@ -1270,8 +1246,6 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent
gfs2_online_uevent(sdp);
return 0;
-fail_threads:
- init_threads(sdp, UNDO);
fail_per_node:
init_per_node(sdp, UNDO);
fail_inodes:
@@ -1366,8 +1340,18 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
if (IS_ERR(s))
goto error_bdev;
- if (s->s_root)
+ if (s->s_root) {
+ /*
+ * s_umount nests inside bd_mutex during
+ * __invalidate_device(). blkdev_put() acquires
+ * bd_mutex and can't be called under s_umount. Drop
+ * s_umount temporarily. This is safe as we're
+ * holding an active reference.
+ */
+ up_write(&s->s_umount);
blkdev_put(bdev, mode);
+ down_write(&s->s_umount);
+ }
memset(&args, 0, sizeof(args));
args.ar_quota = GFS2_QUOTA_DEFAULT;
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 98236d0df3ca..8bec0e3192dd 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -52,6 +52,11 @@
#include <linux/dqblk_xfs.h>
#include <linux/lockref.h>
#include <linux/list_lru.h>
+#include <linux/rcupdate.h>
+#include <linux/rculist_bl.h>
+#include <linux/bit_spinlock.h>
+#include <linux/jhash.h>
+#include <linux/vmalloc.h>
#include "gfs2.h"
#include "incore.h"
@@ -67,16 +72,44 @@
#include "inode.h"
#include "util.h"
-struct gfs2_quota_change_host {
- u64 qc_change;
- u32 qc_flags; /* GFS2_QCF_... */
- struct kqid qc_id;
-};
+#define GFS2_QD_HASH_SHIFT 12
+#define GFS2_QD_HASH_SIZE (1 << GFS2_QD_HASH_SHIFT)
+#define GFS2_QD_HASH_MASK (GFS2_QD_HASH_SIZE - 1)
-/* Lock order: qd_lock -> qd->lockref.lock -> lru lock */
+/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
+/* -> sd_bitmap_lock */
static DEFINE_SPINLOCK(qd_lock);
struct list_lru gfs2_qd_lru;
+static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];
+
+static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
+ const struct kqid qid)
+{
+ unsigned int h;
+
+ h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
+ h = jhash(&qid, sizeof(struct kqid), h);
+
+ return h & GFS2_QD_HASH_MASK;
+}
+
+static inline void spin_lock_bucket(unsigned int hash)
+{
+ hlist_bl_lock(&qd_hash_table[hash]);
+}
+
+static inline void spin_unlock_bucket(unsigned int hash)
+{
+ hlist_bl_unlock(&qd_hash_table[hash]);
+}
+
+static void gfs2_qd_dealloc(struct rcu_head *rcu)
+{
+ struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
+ kmem_cache_free(gfs2_quotad_cachep, qd);
+}
+
static void gfs2_qd_dispose(struct list_head *list)
{
struct gfs2_quota_data *qd;
@@ -93,6 +126,10 @@ static void gfs2_qd_dispose(struct list_head *list)
list_del(&qd->qd_list);
spin_unlock(&qd_lock);
+ spin_lock_bucket(qd->qd_hash);
+ hlist_bl_del_rcu(&qd->qd_hlist);
+ spin_unlock_bucket(qd->qd_hash);
+
gfs2_assert_warn(sdp, !qd->qd_change);
gfs2_assert_warn(sdp, !qd->qd_slot_count);
gfs2_assert_warn(sdp, !qd->qd_bh_count);
@@ -101,7 +138,7 @@ static void gfs2_qd_dispose(struct list_head *list)
atomic_dec(&sdp->sd_quota_count);
/* Delete it from the common reclaim list */
- kmem_cache_free(gfs2_quotad_cachep, qd);
+ call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
}
}
@@ -171,83 +208,95 @@ static u64 qd2offset(struct gfs2_quota_data *qd)
return offset;
}
-static int qd_alloc(struct gfs2_sbd *sdp, struct kqid qid,
- struct gfs2_quota_data **qdp)
+static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
{
struct gfs2_quota_data *qd;
int error;
qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
if (!qd)
- return -ENOMEM;
+ return NULL;
+ qd->qd_sbd = sdp;
qd->qd_lockref.count = 1;
spin_lock_init(&qd->qd_lockref.lock);
qd->qd_id = qid;
qd->qd_slot = -1;
INIT_LIST_HEAD(&qd->qd_lru);
+ qd->qd_hash = hash;
error = gfs2_glock_get(sdp, qd2index(qd),
&gfs2_quota_glops, CREATE, &qd->qd_gl);
if (error)
goto fail;
- *qdp = qd;
-
- return 0;
+ return qd;
fail:
kmem_cache_free(gfs2_quotad_cachep, qd);
- return error;
+ return NULL;
}
-static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
- struct gfs2_quota_data **qdp)
+static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
+ const struct gfs2_sbd *sdp,
+ struct kqid qid)
{
- struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
- int error, found;
-
- *qdp = NULL;
+ struct gfs2_quota_data *qd;
+ struct hlist_bl_node *h;
- for (;;) {
- found = 0;
- spin_lock(&qd_lock);
- list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
- if (qid_eq(qd->qd_id, qid) &&
- lockref_get_not_dead(&qd->qd_lockref)) {
- list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
- found = 1;
- break;
- }
+ hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
+ if (!qid_eq(qd->qd_id, qid))
+ continue;
+ if (qd->qd_sbd != sdp)
+ continue;
+ if (lockref_get_not_dead(&qd->qd_lockref)) {
+ list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
+ return qd;
}
+ }
- if (!found)
- qd = NULL;
+ return NULL;
+}
- if (!qd && new_qd) {
- qd = new_qd;
- list_add(&qd->qd_list, &sdp->sd_quota_list);
- atomic_inc(&sdp->sd_quota_count);
- new_qd = NULL;
- }
- spin_unlock(&qd_lock);
+static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
+ struct gfs2_quota_data **qdp)
+{
+ struct gfs2_quota_data *qd, *new_qd;
+ unsigned int hash = gfs2_qd_hash(sdp, qid);
- if (qd) {
- if (new_qd) {
- gfs2_glock_put(new_qd->qd_gl);
- kmem_cache_free(gfs2_quotad_cachep, new_qd);
- }
- *qdp = qd;
- return 0;
- }
+ rcu_read_lock();
+ *qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
+ rcu_read_unlock();
- error = qd_alloc(sdp, qid, &new_qd);
- if (error)
- return error;
+ if (qd)
+ return 0;
+
+ new_qd = qd_alloc(hash, sdp, qid);
+ if (!new_qd)
+ return -ENOMEM;
+
+ spin_lock(&qd_lock);
+ spin_lock_bucket(hash);
+ *qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
+ if (qd == NULL) {
+ *qdp = new_qd;
+ list_add(&new_qd->qd_list, &sdp->sd_quota_list);
+ hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
+ atomic_inc(&sdp->sd_quota_count);
}
+ spin_unlock_bucket(hash);
+ spin_unlock(&qd_lock);
+
+ if (qd) {
+ gfs2_glock_put(new_qd->qd_gl);
+ kmem_cache_free(gfs2_quotad_cachep, new_qd);
+ }
+
+ return 0;
}
+
static void qd_hold(struct gfs2_quota_data *qd)
{
struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
@@ -268,88 +317,48 @@ static void qd_put(struct gfs2_quota_data *qd)
static int slot_get(struct gfs2_quota_data *qd)
{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
- unsigned int c, o = 0, b;
- unsigned char byte = 0;
+ struct gfs2_sbd *sdp = qd->qd_sbd;
+ unsigned int bit;
+ int error = 0;
- spin_lock(&qd_lock);
+ spin_lock(&sdp->sd_bitmap_lock);
+ if (qd->qd_slot_count != 0)
+ goto out;
- if (qd->qd_slot_count++) {
- spin_unlock(&qd_lock);
- return 0;
+ error = -ENOSPC;
+ bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
+ if (bit < sdp->sd_quota_slots) {
+ set_bit(bit, sdp->sd_quota_bitmap);
+ qd->qd_slot = bit;
+out:
+ qd->qd_slot_count++;
}
+ spin_unlock(&sdp->sd_bitmap_lock);
- for (c = 0; c < sdp->sd_quota_chunks; c++)
- for (o = 0; o < PAGE_SIZE; o++) {
- byte = sdp->sd_quota_bitmap[c][o];
- if (byte != 0xFF)
- goto found;
- }
-
- goto fail;
-
-found:
- for (b = 0; b < 8; b++)
- if (!(byte & (1 << b)))
- break;
- qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;
-
- if (qd->qd_slot >= sdp->sd_quota_slots)
- goto fail;
-
- sdp->sd_quota_bitmap[c][o] |= 1 << b;
-
- spin_unlock(&qd_lock);
-
- return 0;
-
-fail:
- qd->qd_slot_count--;
- spin_unlock(&qd_lock);
- return -ENOSPC;
+ return error;
}
static void slot_hold(struct gfs2_quota_data *qd)
{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
+ struct gfs2_sbd *sdp = qd->qd_sbd;
- spin_lock(&qd_lock);
+ spin_lock(&sdp->sd_bitmap_lock);
gfs2_assert(sdp, qd->qd_slot_count);
qd->qd_slot_count++;
- spin_unlock(&qd_lock);
-}
-
-static void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
- unsigned int bit, int new_value)
-{
- unsigned int c, o, b = bit;
- int old_value;
-
- c = b / (8 * PAGE_SIZE);
- b %= 8 * PAGE_SIZE;
- o = b / 8;
- b %= 8;
-
- old_value = (bitmap[c][o] & (1 << b));
- gfs2_assert_withdraw(sdp, !old_value != !new_value);
-
- if (new_value)
- bitmap[c][o] |= 1 << b;
- else
- bitmap[c][o] &= ~(1 << b);
+ spin_unlock(&sdp->sd_bitmap_lock);
}
static void slot_put(struct gfs2_quota_data *qd)
{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
+ struct gfs2_sbd *sdp = qd->qd_sbd;
- spin_lock(&qd_lock);
+ spin_lock(&sdp->sd_bitmap_lock);
gfs2_assert(sdp, qd->qd_slot_count);
if (!--qd->qd_slot_count) {
- gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
+ BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
qd->qd_slot = -1;
}
- spin_unlock(&qd_lock);
+ spin_unlock(&sdp->sd_bitmap_lock);
}
static int bh_get(struct gfs2_quota_data *qd)
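
slot_get()/slot_put() now treat the quota slots as one flat bitmap guarded by sd_bitmap_lock, using find_first_zero_bit()/set_bit()/clear_bit() instead of the old per-chunk byte scanning. A minimal userspace sketch of the same scheme (the slot count and function names are illustrative, not taken from the patch):

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define SLOTS 96

static unsigned long bitmap[(SLOTS + BITS_PER_LONG - 1) / BITS_PER_LONG];

static int slot_get(void)
{
	unsigned int bit;

	for (bit = 0; bit < SLOTS; bit++) {	/* find_first_zero_bit() */
		unsigned long mask = 1UL << (bit % BITS_PER_LONG);

		if (!(bitmap[bit / BITS_PER_LONG] & mask)) {
			bitmap[bit / BITS_PER_LONG] |= mask;	/* set_bit() */
			return bit;
		}
	}
	return -1;	/* no free slot: -ENOSPC in the kernel version */
}

static void slot_put(unsigned int bit)
{
	bitmap[bit / BITS_PER_LONG] &= ~(1UL << (bit % BITS_PER_LONG));	/* clear_bit() */
}

int main(void)
{
	int a = slot_get(), b = slot_get();

	printf("got slots %d and %d\n", a, b);
	slot_put(a);
	printf("slot %d is free again and gets reused: %d\n", a, slot_get());
	return 0;
}
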
@@ -427,8 +436,7 @@ static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
set_bit(QDF_LOCKED, &qd->qd_flags);
qd->qd_change_sync = qd->qd_change;
- gfs2_assert_warn(sdp, qd->qd_slot_count);
- qd->qd_slot_count++;
+ slot_hold(qd);
return 1;
}
@@ -1214,17 +1222,6 @@ int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
return error;
}
-static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
-{
- const struct gfs2_quota_change *str = buf;
-
- qc->qc_change = be64_to_cpu(str->qc_change);
- qc->qc_flags = be32_to_cpu(str->qc_flags);
- qc->qc_id = make_kqid(&init_user_ns,
- (qc->qc_flags & GFS2_QCF_USER)?USRQUOTA:GRPQUOTA,
- be32_to_cpu(str->qc_id));
-}
-
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
@@ -1232,6 +1229,8 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
unsigned int x, slot = 0;
unsigned int found = 0;
+ unsigned int hash;
+ unsigned int bm_size;
u64 dblock;
u32 extlen = 0;
int error;
@@ -1240,23 +1239,20 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
return -EIO;
sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
- sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);
-
+ bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
+ bm_size *= sizeof(unsigned long);
error = -ENOMEM;
-
- sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
- sizeof(unsigned char *), GFP_NOFS);
+ sdp->sd_quota_bitmap = kmalloc(bm_size, GFP_NOFS|__GFP_NOWARN);
+ if (sdp->sd_quota_bitmap == NULL)
+ sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS, PAGE_KERNEL);
if (!sdp->sd_quota_bitmap)
return error;
- for (x = 0; x < sdp->sd_quota_chunks; x++) {
- sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
- if (!sdp->sd_quota_bitmap[x])
- goto fail;
- }
+ memset(sdp->sd_quota_bitmap, 0, bm_size);
for (x = 0; x < blocks; x++) {
struct buffer_head *bh;
+ const struct gfs2_quota_change *qc;
unsigned int y;
if (!extlen) {
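
The slot bitmap is now sized in whole unsigned longs and allocated in a single buffer, with vmalloc only as a fallback when kmalloc fails. For illustration, assuming 2440 quota slots on a 64-bit machine (an assumed figure, not from the patch), the sizing above gives a 312-byte allocation:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int slots = 2440;		/* assumed sd_quota_slots */
	unsigned int bm_size = DIV_ROUND_UP(slots, 8 * sizeof(unsigned long));

	bm_size *= sizeof(unsigned long);
	printf("%u slots -> %u byte bitmap\n", slots, bm_size);	/* 312 on 64-bit */
	return 0;
}
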
@@ -1274,34 +1270,42 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
goto fail;
}
+ qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
y++, slot++) {
- struct gfs2_quota_change_host qc;
struct gfs2_quota_data *qd;
-
- gfs2_quota_change_in(&qc, bh->b_data +
- sizeof(struct gfs2_meta_header) +
- y * sizeof(struct gfs2_quota_change));
- if (!qc.qc_change)
+ s64 qc_change = be64_to_cpu(qc->qc_change);
+ u32 qc_flags = be32_to_cpu(qc->qc_flags);
+ enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
+ USRQUOTA : GRPQUOTA;
+ struct kqid qc_id = make_kqid(&init_user_ns, qtype,
+ be32_to_cpu(qc->qc_id));
+ qc++;
+ if (!qc_change)
continue;
- error = qd_alloc(sdp, qc.qc_id, &qd);
- if (error) {
+ hash = gfs2_qd_hash(sdp, qc_id);
+ qd = qd_alloc(hash, sdp, qc_id);
+ if (qd == NULL) {
brelse(bh);
goto fail;
}
set_bit(QDF_CHANGE, &qd->qd_flags);
- qd->qd_change = qc.qc_change;
+ qd->qd_change = qc_change;
qd->qd_slot = slot;
qd->qd_slot_count = 1;
spin_lock(&qd_lock);
- gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
+ BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
list_add(&qd->qd_list, &sdp->sd_quota_list);
atomic_inc(&sdp->sd_quota_count);
spin_unlock(&qd_lock);
+ spin_lock_bucket(hash);
+ hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
+ spin_unlock_bucket(hash);
+
found++;
}
@@ -1324,44 +1328,28 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
struct list_head *head = &sdp->sd_quota_list;
struct gfs2_quota_data *qd;
- unsigned int x;
spin_lock(&qd_lock);
while (!list_empty(head)) {
qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);
- /*
- * To be removed in due course... we should be able to
- * ensure that all refs to the qd have done by this point
- * so that this rather odd test is not required
- */
- spin_lock(&qd->qd_lockref.lock);
- if (qd->qd_lockref.count > 1 ||
- (qd->qd_lockref.count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
- spin_unlock(&qd->qd_lockref.lock);
- list_move(&qd->qd_list, head);
- spin_unlock(&qd_lock);
- schedule();
- spin_lock(&qd_lock);
- continue;
- }
- spin_unlock(&qd->qd_lockref.lock);
-
list_del(&qd->qd_list);
+
/* Also remove if this qd exists in the reclaim list */
list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
atomic_dec(&sdp->sd_quota_count);
spin_unlock(&qd_lock);
- if (!qd->qd_lockref.count) {
- gfs2_assert_warn(sdp, !qd->qd_change);
- gfs2_assert_warn(sdp, !qd->qd_slot_count);
- } else
- gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
+ spin_lock_bucket(qd->qd_hash);
+ hlist_bl_del_rcu(&qd->qd_hlist);
+ spin_unlock_bucket(qd->qd_hash);
+
+ gfs2_assert_warn(sdp, !qd->qd_change);
+ gfs2_assert_warn(sdp, !qd->qd_slot_count);
gfs2_assert_warn(sdp, !qd->qd_bh_count);
gfs2_glock_put(qd->qd_gl);
- kmem_cache_free(gfs2_quotad_cachep, qd);
+ call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
spin_lock(&qd_lock);
}
@@ -1370,9 +1358,11 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
if (sdp->sd_quota_bitmap) {
- for (x = 0; x < sdp->sd_quota_chunks; x++)
- kfree(sdp->sd_quota_bitmap[x]);
- kfree(sdp->sd_quota_bitmap);
+ if (is_vmalloc_addr(sdp->sd_quota_bitmap))
+ vfree(sdp->sd_quota_bitmap);
+ else
+ kfree(sdp->sd_quota_bitmap);
+ sdp->sd_quota_bitmap = NULL;
}
}
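
[Editor's note] gfs2_quota_cleanup() above now drains sd_quota_list with a simpler loop: pop one entry at a time under qd_lock, then do the teardown (ultimately a call_rcu() free) with the lock dropped. A small userspace sketch of that drain pattern follows, using a pthread mutex and an illustrative doubly-linked list in place of the kernel primitives; build with -pthread.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *prev, *next;
	int id;
};

static struct node head = { &head, &head, -1 };
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void node_del(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

/* Pop entries from the tail while holding the lock, but do the (possibly
 * slow) teardown with the lock dropped, as the cleanup loop above does. */
static void drain_list(void)
{
	pthread_mutex_lock(&list_lock);
	while (head.prev != &head) {
		struct node *n = head.prev;	/* oldest entry, like head->prev */

		node_del(n);
		pthread_mutex_unlock(&list_lock);

		/* teardown outside the lock; the kernel defers the actual
		 * free to RCU via call_rcu() */
		printf("freeing node %d\n", n->id);
		free(n);

		pthread_mutex_lock(&list_lock);
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->id = i;
		n->next = head.next;
		n->prev = &head;
		head.next->prev = n;
		head.next = n;
	}
	drain_list();
	return 0;
}
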
@@ -1656,3 +1646,11 @@ const struct quotactl_ops gfs2_quotactl_ops = {
.get_dqblk = gfs2_get_dqblk,
.set_dqblk = gfs2_set_dqblk,
};
+
+void __init gfs2_quota_hash_init(void)
+{
+ unsigned i;
+
+ for(i = 0; i < GFS2_QD_HASH_SIZE; i++)
+ INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
+}
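
[Editor's note] gfs2_quota_hash_init() above initialises the new qd_hash_table, an array of hlist_bl heads indexed by gfs2_qd_hash() and protected by per-bucket bit-spinlocks plus RCU. The sketch below shows only the table-of-buckets shape in plain userspace C; the hash function, bucket count and (absent) locking are illustrative stand-ins, not the kernel's jhash-based scheme.

#include <stdio.h>
#include <stdlib.h>

#define QD_HASH_SHIFT	12		/* illustrative size only */
#define QD_HASH_SIZE	(1UL << QD_HASH_SHIFT)
#define QD_HASH_MASK	(QD_HASH_SIZE - 1)

struct qd_entry {
	struct qd_entry *next;
	unsigned int id;
};

/* one head per bucket, like qd_hash_table[GFS2_QD_HASH_SIZE] */
static struct qd_entry *qd_hash_table[QD_HASH_SIZE];

/* Toy multiplicative hash for illustration; the patch hashes the
 * superblock pointer and the kqid with jhash(). */
static unsigned int qd_hash(unsigned int id)
{
	return (id * 2654435761u) & QD_HASH_MASK;
}

static void qd_hash_init(void)
{
	for (unsigned long i = 0; i < QD_HASH_SIZE; i++)
		qd_hash_table[i] = NULL;
}

static void qd_insert(struct qd_entry *qd)
{
	unsigned int h = qd_hash(qd->id);

	qd->next = qd_hash_table[h];
	qd_hash_table[h] = qd;
}

static struct qd_entry *qd_find(unsigned int id)
{
	for (struct qd_entry *qd = qd_hash_table[qd_hash(id)]; qd; qd = qd->next)
		if (qd->id == id)
			return qd;
	return NULL;
}

int main(void)
{
	struct qd_entry a = { NULL, 42 };

	qd_hash_init();
	qd_insert(&a);
	printf("found id 42: %s\n", qd_find(42) ? "yes" : "no");
	return 0;
}
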
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index 96e4f34a03b0..55d506eb3c4a 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -57,5 +57,6 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
extern const struct quotactl_ops gfs2_quotactl_ops;
extern struct shrinker gfs2_qd_shrinker;
extern struct list_lru gfs2_qd_lru;
+extern void __init gfs2_quota_hash_init(void);
#endif /* __QUOTA_DOT_H__ */
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index c8d6161bd682..a1da21349235 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -57,6 +57,11 @@
* 3 = Used (metadata)
*/
+struct gfs2_extent {
+ struct gfs2_rbm rbm;
+ u32 len;
+};
+
static const char valid_change[16] = {
/* current */
/* n */ 0, 1, 1, 1,
@@ -65,8 +70,9 @@ static const char valid_change[16] = {
1, 0, 0, 0
};
-static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
- const struct gfs2_inode *ip, bool nowrap);
+static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
+ const struct gfs2_inode *ip, bool nowrap,
+ const struct gfs2_alloc_parms *ap);
/**
@@ -635,9 +641,13 @@ static void __rs_deltree(struct gfs2_blkreserv *rs)
/* return reserved blocks to the rgrp */
BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
+ /* The rgrp extent failure point is likely not to increase;
+ it will only do so if the freed blocks are somehow
+ contiguous with a span of free blocks that follows. Still,
+ it will force the number to be recalculated later. */
+ rgd->rd_extfail_pt += rs->rs_free;
rs->rs_free = 0;
clear_bit(GBF_FULL, &bi->bi_flags);
- smp_mb__after_clear_bit();
}
}
@@ -876,6 +886,7 @@ static int rgd_insert(struct gfs2_rgrpd *rgd)
static int read_rindex_entry(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+ const unsigned bsize = sdp->sd_sb.sb_bsize;
loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
struct gfs2_rindex buf;
int error;
@@ -913,6 +924,8 @@ static int read_rindex_entry(struct gfs2_inode *ip)
goto fail;
rgd->rd_gl->gl_object = rgd;
+ rgd->rd_gl->gl_vm.start = rgd->rd_addr * bsize;
+ rgd->rd_gl->gl_vm.end = rgd->rd_gl->gl_vm.start + (rgd->rd_length * bsize) - 1;
rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
if (rgd->rd_data > sdp->sd_max_rg_data)
@@ -1126,6 +1139,8 @@ int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
rgd->rd_free_clone = rgd->rd_free;
+ /* max out the rgrp allocation failure point */
+ rgd->rd_extfail_pt = rgd->rd_free;
}
if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
@@ -1184,7 +1199,7 @@ int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
return 0;
- return gfs2_rgrp_bh_get((struct gfs2_rgrpd *)gh->gh_gl->gl_object);
+ return gfs2_rgrp_bh_get(rgd);
}
/**
@@ -1455,7 +1470,7 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
if (WARN_ON(gfs2_rbm_from_block(&rbm, goal)))
return;
- ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, extlen, ip, true);
+ ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, ip, true, ap);
if (ret == 0) {
rs->rs_rbm = rbm;
rs->rs_free = extlen;
@@ -1520,6 +1535,7 @@ static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
* @rbm: The current position in the resource group
* @ip: The inode for which we are searching for blocks
* @minext: The minimum extent length
+ * @maxext: A pointer to the maximum extent structure
*
* This checks the current position in the rgrp to see whether there is
* a reservation covering this block. If not then this function is a
@@ -1532,7 +1548,8 @@ static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
const struct gfs2_inode *ip,
- u32 minext)
+ u32 minext,
+ struct gfs2_extent *maxext)
{
u64 block = gfs2_rbm_to_block(rbm);
u32 extlen = 1;
@@ -1545,8 +1562,7 @@ static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
*/
if (minext) {
extlen = gfs2_free_extlen(rbm, minext);
- nblock = block + extlen;
- if (extlen < minext)
+ if (extlen <= maxext->len)
goto fail;
}
@@ -1555,9 +1571,17 @@ static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
* and skip if parts of it are already reserved
*/
nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip);
- if (nblock == block)
- return 0;
+ if (nblock == block) {
+ if (!minext || extlen >= minext)
+ return 0;
+
+ if (extlen > maxext->len) {
+ maxext->len = extlen;
+ maxext->rbm = *rbm;
+ }
fail:
+ nblock = block + extlen;
+ }
ret = gfs2_rbm_from_block(rbm, nblock);
if (ret < 0)
return ret;
@@ -1568,30 +1592,38 @@ fail:
* gfs2_rbm_find - Look for blocks of a particular state
* @rbm: Value/result starting position and final position
* @state: The state which we want to find
- * @minext: The requested extent length (0 for a single block)
+ * @minext: Pointer to the requested extent length (NULL for a single block)
+ * This is updated to be the actual reservation size.
* @ip: If set, check for reservations
* @nowrap: Stop looking at the end of the rgrp, rather than wrapping
* around until we've reached the starting point.
+ * @ap: the allocation parameters
*
* Side effects:
* - If looking for free blocks, we set GBF_FULL on each bitmap which
* has no free blocks in it.
+ * - If looking for free blocks, we set rd_extfail_pt on each rgrp which
+ * has come up short on a free block search.
*
* Returns: 0 on success, -ENOSPC if there is no block of the requested state
*/
-static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
- const struct gfs2_inode *ip, bool nowrap)
+static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
+ const struct gfs2_inode *ip, bool nowrap,
+ const struct gfs2_alloc_parms *ap)
{
struct buffer_head *bh;
int initial_bii;
u32 initial_offset;
+ int first_bii = rbm->bii;
+ u32 first_offset = rbm->offset;
u32 offset;
u8 *buffer;
int n = 0;
int iters = rbm->rgd->rd_length;
int ret;
struct gfs2_bitmap *bi;
+ struct gfs2_extent maxext = { .rbm.rgd = rbm->rgd, };
/* If we are not starting at the beginning of a bitmap, then we
* need to add one to the bitmap count to ensure that we search
@@ -1620,7 +1652,9 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
return 0;
initial_bii = rbm->bii;
- ret = gfs2_reservation_check_and_update(rbm, ip, minext);
+ ret = gfs2_reservation_check_and_update(rbm, ip,
+ minext ? *minext : 0,
+ &maxext);
if (ret == 0)
return 0;
if (ret > 0) {
@@ -1655,6 +1689,24 @@ next_iter:
break;
}
+ if (minext == NULL || state != GFS2_BLKST_FREE)
+ return -ENOSPC;
+
+ /* If the extent was too small, and it's smaller than the smallest
+ to have failed before, remember for future reference that it's
+ useless to search this rgrp again for this amount or more. */
+ if ((first_offset == 0) && (first_bii == 0) &&
+ (*minext < rbm->rgd->rd_extfail_pt))
+ rbm->rgd->rd_extfail_pt = *minext;
+
+ /* If the maximum extent we found is big enough to fulfill the
+ minimum requirements, use it anyway. */
+ if (maxext.len) {
+ *rbm = maxext.rbm;
+ *minext = maxext.len;
+ return 0;
+ }
+
return -ENOSPC;
}
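
[Editor's note] The block above is the heart of the change to gfs2_rbm_find(): the search now remembers the largest free extent it saw (maxext), records a failed full-rgrp search in rd_extfail_pt, and, when nothing of the requested size exists, falls back to the largest extent and rewrites *minext to its length. Below is a simplified, self-contained userspace version of that "best extent or bust" scan over an array of block states; the map layout and return values are illustrative, not the rbm/bitmap machinery.

#include <stdio.h>

#define BLKST_FREE 0

/* Find a run of at least *minext free blocks in map[0..len).  On success
 * *start and *minext describe the run actually chosen; if no run of the
 * requested size exists, the largest run found is used instead and
 * *minext is shrunk to its length (mirroring how the patch copies
 * maxext.len back through *minext).  Returns 0 on success, -1 when
 * nothing is free at all (the -ENOSPC case). */
static int find_extent(const unsigned char *map, unsigned int len,
		       unsigned int *start, unsigned int *minext)
{
	unsigned int best_start = 0, best_len = 0;
	unsigned int run_start = 0, run_len = 0;

	for (unsigned int i = 0; i <= len; i++) {
		if (i < len && map[i] == BLKST_FREE) {
			if (run_len == 0)
				run_start = i;
			run_len++;
			if (run_len >= *minext) {
				*start = run_start;
				*minext = run_len;
				return 0;
			}
		} else {
			if (run_len > best_len) {
				best_len = run_len;
				best_start = run_start;
			}
			run_len = 0;
		}
	}
	if (best_len == 0)
		return -1;
	*start = best_start;
	*minext = best_len;	/* fall back to the largest extent found */
	return 0;
}

int main(void)
{
	unsigned char map[] = { 1, 0, 0, 1, 0, 0, 0, 1 };
	unsigned int start, want = 5;

	if (find_extent(map, sizeof(map), &start, &want) == 0)
		printf("got %u blocks at offset %u (wanted 5)\n", want, start);
	return 0;
}
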
@@ -1680,7 +1732,8 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
while (1) {
down_write(&sdp->sd_log_flush_lock);
- error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, 0, NULL, true);
+ error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, NULL, NULL,
+ true, NULL);
up_write(&sdp->sd_log_flush_lock);
if (error == -ENOSPC)
break;
@@ -1891,7 +1944,9 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *a
}
/* Skip unuseable resource groups */
- if (rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR))
+ if ((rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC |
+ GFS2_RDF_ERROR)) ||
+ (ap->target > rs->rs_rbm.rgd->rd_extfail_pt))
goto skip_rgrp;
if (sdp->sd_args.ar_rgrplvb)
@@ -1911,15 +1966,16 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *a
return 0;
}
- /* Drop reservation, if we couldn't use reserved rgrp */
- if (gfs2_rs_active(rs))
- gfs2_rs_deltree(rs);
check_rgrp:
/* Check for unlinked inodes which can be reclaimed */
if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK)
try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
ip->i_no_addr);
skip_rgrp:
+ /* Drop reservation, if we couldn't use reserved rgrp */
+ if (gfs2_rs_active(rs))
+ gfs2_rs_deltree(rs);
+
/* Unlock rgrp if required */
if (!rg_locked)
gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
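
[Editor's note] With rd_extfail_pt in place, gfs2_inplace_reserve() above can skip a resource group outright when the requested allocation is larger than the biggest extent a previous search already failed to find there. A tiny sketch of that gatekeeping test, with simplified stand-in types and flag values rather than the real GFS2 definitions:

#include <stdio.h>

#define RGF_NOALLOC	0x1	/* illustrative flag values, not GFS2's */
#define RDF_ERROR	0x2

struct rgrp {
	unsigned int flags;
	unsigned int extfail_pt;	/* recorded failure point for searches */
};

static int rgrp_usable(const struct rgrp *rgd, unsigned int target)
{
	if (rgd->flags & (RGF_NOALLOC | RDF_ERROR))
		return 0;
	if (target > rgd->extfail_pt)
		return 0;	/* larger than the recorded failure point: skip */
	return 1;
}

int main(void)
{
	struct rgrp rgd = { .flags = 0, .extfail_pt = 16 };

	printf("want 8:  %s\n", rgrp_usable(&rgd, 8)  ? "search" : "skip");
	printf("want 32: %s\n", rgrp_usable(&rgd, 32) ? "search" : "skip");
	return 0;
}
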
@@ -2064,25 +2120,24 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
*
*/
-int gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl)
+void gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
struct gfs2_rgrpd *rgd = gl->gl_object;
struct gfs2_blkreserv *trs;
const struct rb_node *n;
if (rgd == NULL)
- return 0;
- gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u r:%u\n",
+ return;
+ gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u r:%u e:%u\n",
(unsigned long long)rgd->rd_addr, rgd->rd_flags,
rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
- rgd->rd_reserved);
+ rgd->rd_reserved, rgd->rd_extfail_pt);
spin_lock(&rgd->rd_rsspin);
for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
dump_rs(seq, trs);
}
spin_unlock(&rgd->rd_rsspin);
- return 0;
}
static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
@@ -2184,18 +2239,20 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
int error;
gfs2_set_alloc_start(&rbm, ip, dinode);
- error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, ip, false);
+ error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, ip, false, NULL);
if (error == -ENOSPC) {
gfs2_set_alloc_start(&rbm, ip, dinode);
- error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, NULL, false);
+ error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, NULL, false,
+ NULL);
}
/* Since all blocks are reserved in advance, this shouldn't happen */
if (error) {
- fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d\n",
+ fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d fail_pt=%d\n",
(unsigned long long)ip->i_no_addr, error, *nblocks,
- test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags));
+ test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags),
+ rbm.rgd->rd_extfail_pt);
goto rgrp_error;
}
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index 3a10d2ffbbe7..463ab2e95d1c 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -68,7 +68,7 @@ extern void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
extern void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state);
extern void gfs2_rlist_free(struct gfs2_rgrp_list *rlist);
extern u64 gfs2_ri_total(struct gfs2_sbd *sdp);
-extern int gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl);
+extern void gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl);
extern int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
struct buffer_head *bh,
const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed);
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 35da5b19c0de..60f60f6181f3 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -369,6 +369,33 @@ int gfs2_jdesc_check(struct gfs2_jdesc *jd)
return 0;
}
+static int init_threads(struct gfs2_sbd *sdp)
+{
+ struct task_struct *p;
+ int error = 0;
+
+ p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
+ if (IS_ERR(p)) {
+ error = PTR_ERR(p);
+ fs_err(sdp, "can't start logd thread: %d\n", error);
+ return error;
+ }
+ sdp->sd_logd_process = p;
+
+ p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
+ if (IS_ERR(p)) {
+ error = PTR_ERR(p);
+ fs_err(sdp, "can't start quotad thread: %d\n", error);
+ goto fail;
+ }
+ sdp->sd_quotad_process = p;
+ return 0;
+
+fail:
+ kthread_stop(sdp->sd_logd_process);
+ return error;
+}
+
/**
* gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
* @sdp: the filesystem
@@ -384,10 +411,14 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
struct gfs2_log_header_host head;
int error;
- error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &t_gh);
+ error = init_threads(sdp);
if (error)
return error;
+ error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &t_gh);
+ if (error)
+ goto fail_threads;
+
j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
error = gfs2_find_jhead(sdp->sd_jdesc, &head);
@@ -417,7 +448,9 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
fail:
t_gh.gh_flags |= GL_NOCACHE;
gfs2_glock_dq_uninit(&t_gh);
-
+fail_threads:
+ kthread_stop(sdp->sd_quotad_process);
+ kthread_stop(sdp->sd_logd_process);
return error;
}
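
[Editor's note] The init_threads() helper added above starts logd and quotad and, if the second kthread_run() fails, stops the first before returning; gfs2_make_fs_rw() then undoes both on its own failure path. Below is a userspace sketch of the same start-two-workers-and-roll-back pattern using pthreads in place of kthreads; the worker bodies and the stop mechanism are placeholders. Build with -pthread.

#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <stdatomic.h>

static atomic_int stop_flag;

static void *worker(void *name)
{
	while (!atomic_load(&stop_flag))
		;			/* real workers would sleep and do work */
	printf("%s stopping\n", (const char *)name);
	return NULL;
}

/* Start worker A, start worker B, and if B fails undo A before returning,
 * just as init_threads() stops logd when quotad cannot be started. */
static int init_threads(pthread_t *logd, pthread_t *quotad)
{
	int err;

	err = pthread_create(logd, NULL, worker, "logd");
	if (err) {
		fprintf(stderr, "can't start logd thread: %s\n", strerror(err));
		return err;
	}

	err = pthread_create(quotad, NULL, worker, "quotad");
	if (err) {
		fprintf(stderr, "can't start quotad thread: %s\n", strerror(err));
		atomic_store(&stop_flag, 1);	/* kthread_stop() analogue */
		pthread_join(*logd, NULL);
		return err;
	}
	return 0;
}

int main(void)
{
	pthread_t logd, quotad;

	if (init_threads(&logd, &quotad))
		return 1;
	atomic_store(&stop_flag, 1);
	pthread_join(logd, NULL);
	pthread_join(quotad, NULL);
	return 0;
}
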
@@ -800,6 +833,9 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
struct gfs2_holder t_gh;
int error;
+ kthread_stop(sdp->sd_quotad_process);
+ kthread_stop(sdp->sd_logd_process);
+
flush_workqueue(gfs2_delete_workqueue);
gfs2_quota_sync(sdp->sd_vfs, 0);
gfs2_statfs_sync(sdp->sd_vfs, 0);
@@ -857,9 +893,6 @@ restart:
}
spin_unlock(&sdp->sd_jindex_spin);
- kthread_stop(sdp->sd_quotad_process);
- kthread_stop(sdp->sd_logd_process);
-
if (!(sb->s_flags & MS_RDONLY)) {
error = gfs2_make_fs_ro(sdp);
if (error)
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 8c6a6f6bdba9..0b81f783f787 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -13,6 +13,7 @@
#include <linux/buffer_head.h>
#include <linux/xattr.h>
#include <linux/gfs2_ondisk.h>
+#include <linux/posix_acl_xattr.h>
#include <asm/uaccess.h>
#include "gfs2.h"
@@ -1500,7 +1501,8 @@ static const struct xattr_handler gfs2_xattr_security_handler = {
const struct xattr_handler *gfs2_xattr_handlers[] = {
&gfs2_xattr_user_handler,
&gfs2_xattr_security_handler,
- &gfs2_xattr_system_handler,
+ &posix_acl_access_xattr_handler,
+ &posix_acl_default_xattr_handler,
NULL,
};
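
[Editor's note] The xattr change above drops GFS2's own system-namespace handler in favour of the generic posix_acl_access/default handlers in the NULL-terminated gfs2_xattr_handlers[] table. The sketch below shows only that sentinel-terminated handler-table pattern in plain C; the struct layout, prefixes and dispatch loop are illustrative, not the kernel's struct xattr_handler API.

#include <stdio.h>
#include <string.h>

struct xattr_handler {
	const char *prefix;
	int (*get)(const char *name);
};

static int user_get(const char *name)
{
	printf("user get %s\n", name);
	return 0;
}

static int security_get(const char *name)
{
	printf("security get %s\n", name);
	return 0;
}

static const struct xattr_handler user_handler     = { "user.",     user_get };
static const struct xattr_handler security_handler = { "security.", security_get };

static const struct xattr_handler *handlers[] = {
	&user_handler,
	&security_handler,
	NULL,				/* sentinel, as in gfs2_xattr_handlers[] */
};

/* Walk the table until the NULL sentinel and dispatch by prefix. */
static int dispatch_get(const char *name)
{
	for (const struct xattr_handler **h = handlers; *h; h++) {
		size_t plen = strlen((*h)->prefix);

		if (strncmp(name, (*h)->prefix, plen) == 0)
			return (*h)->get(name + plen);
	}
	return -1;			/* no handler for this namespace */
}

int main(void)
{
	return dispatch_get("user.test");
}
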