dect / linux-2.6 (archived)

Merge branch 'nfs-for-3.3' of git://git.linux-nfs.org/projects/trondmy/linux-nfs

* 'nfs-for-3.3' of git://git.linux-nfs.org/projects/trondmy/linux-nfs:
  NFSv4: Change the default setting of the nfs4_disable_idmapping parameter
  NFSv4: Save the owner/group name string when doing open
  NFS: Remove pNFS bloat from the generic write path
  pnfs-obj: Must return layout on IO error
  pnfs-obj: pNFS errors are communicated on iodata->pnfs_error
  NFS: Cache state owners after files are closed
  NFS: Clean up nfs4_find_state_owners_locked()
  NFSv4: include bitmap in nfsv4 get acl data
  nfs: fix a minor do_div portability issue
  NFSv4.1: cleanup comment and debug printk
  NFSv4.1: change nfs4_free_slot parameters for dynamic slots
  NFSv4.1: cleanup init and reset of session slot tables
  NFSv4.1: fix backchannel slotid off-by-one bug
  nfs: fix regression in handling of context= option in NFSv4
  NFS - fix recent breakage to NFS error handling.
  NFS: Retry mounting NFSROOT
  SUNRPC: Clean up the RPCSEC_GSS service ticket requests
Linus Torvalds 2012-01-10 14:57:40 -08:00
commit 57eccf1c2a
29 changed files with 525 additions and 271 deletions

View File

@ -1634,12 +1634,17 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
The default is to return 64-bit inode numbers.
nfs.nfs4_disable_idmapping=
[NFSv4] When set, this option disables the NFSv4
idmapper on the client, but only if the mount
is using the 'sec=sys' security flavour. This may
make migration from legacy NFSv2/v3 systems easier
provided that the server has the appropriate support.
The default is to always enable NFSv4 idmapping.
[NFSv4] When set to the default of '1', this option
ensures that both the RPC level authentication
scheme and the NFS level operations agree to use
numeric uids/gids if the mount is using the
'sec=sys' security flavour. In effect it is
disabling idmapping, which can make migration from
legacy NFSv2/v3 systems to NFSv4 easier.
Servers that do not support this mode of operation
will be autodetected by the client, and it will fall
back to using the idmapper.
To turn off this behaviour, set the value to '0'.
nmi_debug= [KNL,AVR32,SH] Specify one or more actions to take
when a NMI is triggered.
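
Note: as a hedged illustration of how the nfs.nfs4_disable_idmapping entry above is exposed (a sketch, not the exact kernel source), a boolean module parameter of this kind is typically wired up in the NFS client roughly as follows, which is also what makes it settable on the kernel command line as nfs.nfs4_disable_idmapping= and, when given a non-zero permission mode, visible under /sys/module/nfs/parameters/:

#include <linux/module.h>

/* Sketch only: a parameter that appears as nfs.nfs4_disable_idmapping
 * on the kernel command line. */
static bool nfs4_disable_idmapping = true;	/* '1' is the new default */
module_param(nfs4_disable_idmapping, bool, 0644);
MODULE_PARM_DESC(nfs4_disable_idmapping,
		 "Turn off NFSv4 idmapping when using 'sec=sys'");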

View File

@ -339,7 +339,7 @@ validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args)
dprintk("%s enter. slotid %d seqid %d\n",
__func__, args->csa_slotid, args->csa_sequenceid);
if (args->csa_slotid > NFS41_BC_MAX_CALLBACKS)
if (args->csa_slotid >= NFS41_BC_MAX_CALLBACKS)
return htonl(NFS4ERR_BADSLOT);
slot = tbl->slots + args->csa_slotid;

View File

@ -84,7 +84,7 @@ retry:
/*
* Turn off NFSv4 uid/gid mapping when using AUTH_SYS
*/
static int nfs4_disable_idmapping = 0;
static int nfs4_disable_idmapping = 1;
/*
* RPC cruft for NFS
@ -185,7 +185,7 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
clp->cl_minorversion = cl_init->minorversion;
clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
#endif
cred = rpc_lookup_machine_cred();
cred = rpc_lookup_machine_cred("*");
if (!IS_ERR(cred))
clp->cl_machine_cred = cred;
nfs_fscache_get_client_cookie(clp);
@ -250,6 +250,11 @@ static void pnfs_init_server(struct nfs_server *server)
rpc_init_wait_queue(&server->roc_rpcwaitq, "pNFS ROC");
}
static void nfs4_destroy_server(struct nfs_server *server)
{
nfs4_purge_state_owners(server);
}
#else
static void nfs4_shutdown_client(struct nfs_client *clp)
{
@ -1065,6 +1070,7 @@ static struct nfs_server *nfs_alloc_server(void)
INIT_LIST_HEAD(&server->master_link);
INIT_LIST_HEAD(&server->delegations);
INIT_LIST_HEAD(&server->layouts);
INIT_LIST_HEAD(&server->state_owners_lru);
atomic_set(&server->active, 0);
@ -1538,6 +1544,7 @@ static int nfs4_server_common_setup(struct nfs_server *server,
nfs_server_insert_lists(server);
server->mount_time = jiffies;
server->destroy = nfs4_destroy_server;
out:
nfs_free_fattr(fattr);
return error;
@ -1719,6 +1726,7 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
/* Copy data from the source */
server->nfs_client = source->nfs_client;
server->destroy = source->destroy;
atomic_inc(&server->nfs_client->cl_count);
nfs_server_copy_userdata(server, source);

View File

@ -272,13 +272,13 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
datasync);
ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
if (ret)
return ret;
mutex_lock(&inode->i_mutex);
nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
have_error = test_and_clear_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
status = nfs_commit_inode(inode, FLUSH_SYNC);
if (status >= 0 && ret < 0)
status = ret;
have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
if (have_error)
ret = xchg(&ctx->error, 0);

View File

@ -38,6 +38,89 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/nfs_idmap.h>
#include <linux/nfs_fs.h>
/**
* nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields
* @fattr: fully initialised struct nfs_fattr
* @owner_name: owner name string cache
* @group_name: group name string cache
*/
void nfs_fattr_init_names(struct nfs_fattr *fattr,
struct nfs4_string *owner_name,
struct nfs4_string *group_name)
{
fattr->owner_name = owner_name;
fattr->group_name = group_name;
}
static void nfs_fattr_free_owner_name(struct nfs_fattr *fattr)
{
fattr->valid &= ~NFS_ATTR_FATTR_OWNER_NAME;
kfree(fattr->owner_name->data);
}
static void nfs_fattr_free_group_name(struct nfs_fattr *fattr)
{
fattr->valid &= ~NFS_ATTR_FATTR_GROUP_NAME;
kfree(fattr->group_name->data);
}
static bool nfs_fattr_map_owner_name(struct nfs_server *server, struct nfs_fattr *fattr)
{
struct nfs4_string *owner = fattr->owner_name;
__u32 uid;
if (!(fattr->valid & NFS_ATTR_FATTR_OWNER_NAME))
return false;
if (nfs_map_name_to_uid(server, owner->data, owner->len, &uid) == 0) {
fattr->uid = uid;
fattr->valid |= NFS_ATTR_FATTR_OWNER;
}
return true;
}
static bool nfs_fattr_map_group_name(struct nfs_server *server, struct nfs_fattr *fattr)
{
struct nfs4_string *group = fattr->group_name;
__u32 gid;
if (!(fattr->valid & NFS_ATTR_FATTR_GROUP_NAME))
return false;
if (nfs_map_group_to_gid(server, group->data, group->len, &gid) == 0) {
fattr->gid = gid;
fattr->valid |= NFS_ATTR_FATTR_GROUP;
}
return true;
}
/**
* nfs_fattr_free_names - free up the NFSv4 owner and group strings
* @fattr: a fully initialised nfs_fattr structure
*/
void nfs_fattr_free_names(struct nfs_fattr *fattr)
{
if (fattr->valid & NFS_ATTR_FATTR_OWNER_NAME)
nfs_fattr_free_owner_name(fattr);
if (fattr->valid & NFS_ATTR_FATTR_GROUP_NAME)
nfs_fattr_free_group_name(fattr);
}
/**
* nfs_fattr_map_and_free_names - map owner/group strings into uid/gid and free
* @server: pointer to the filesystem nfs_server structure
* @fattr: a fully initialised nfs_fattr structure
*
* This helper maps the cached NFSv4 owner/group strings in fattr into
* their numeric uid/gid equivalents, and then frees the cached strings.
*/
void nfs_fattr_map_and_free_names(struct nfs_server *server, struct nfs_fattr *fattr)
{
if (nfs_fattr_map_owner_name(server, fattr))
nfs_fattr_free_owner_name(fattr);
if (nfs_fattr_map_group_name(server, fattr))
nfs_fattr_free_group_name(fattr);
}
static int nfs_map_string_to_numeric(const char *name, size_t namelen, __u32 *res)
{

View File

@ -1020,6 +1020,8 @@ void nfs_fattr_init(struct nfs_fattr *fattr)
fattr->valid = 0;
fattr->time_start = jiffies;
fattr->gencount = nfs_inc_attr_generation_counter();
fattr->owner_name = NULL;
fattr->group_name = NULL;
}
struct nfs_fattr *nfs_alloc_fattr(void)

View File

@ -307,6 +307,8 @@ extern void nfs_readdata_release(struct nfs_read_data *rdata);
/* write.c */
extern int nfs_generic_flush(struct nfs_pageio_descriptor *desc,
struct list_head *head);
extern void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
struct inode *inode, int ioflags);
extern void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio);
extern void nfs_writedata_release(struct nfs_write_data *wdata);
extern void nfs_commit_free(struct nfs_write_data *p);

View File

@ -94,6 +94,8 @@ struct nfs_unique_id {
struct nfs4_state_owner {
struct nfs_unique_id so_owner_id;
struct nfs_server *so_server;
struct list_head so_lru;
unsigned long so_expires;
struct rb_node so_server_node;
struct rpc_cred *so_cred; /* Associated cred */
@ -319,6 +321,7 @@ static inline void nfs4_schedule_session_recovery(struct nfs4_session *session)
extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *);
extern void nfs4_put_state_owner(struct nfs4_state_owner *);
extern void nfs4_purge_state_owners(struct nfs_server *);
extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
extern void nfs4_put_open_state(struct nfs4_state *);
extern void nfs4_close_state(struct nfs4_state *, fmode_t);

View File

@ -49,13 +49,14 @@ filelayout_get_dense_offset(struct nfs4_filelayout_segment *flseg,
loff_t offset)
{
u32 stripe_width = flseg->stripe_unit * flseg->dsaddr->stripe_count;
u64 tmp;
u64 stripe_no;
u32 rem;
offset -= flseg->pattern_offset;
tmp = offset;
do_div(tmp, stripe_width);
stripe_no = div_u64(offset, stripe_width);
div_u64_rem(offset, flseg->stripe_unit, &rem);
return tmp * flseg->stripe_unit + do_div(offset, flseg->stripe_unit);
return stripe_no * flseg->stripe_unit + rem;
}
/* This function is used by the layout driver to calculate the
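
Note on the do_div() portability point behind the hunk above (an illustrative sketch, not the driver's exact code): do_div(n, base) overwrites its 64-bit first argument with the quotient and evaluates to the remainder, which is awkward when the dividend is a signed loff_t or when the quotient and a remainder by a different divisor are both needed. div_u64() and div_u64_rem() from <linux/math64.h> express the same arithmetic without mutating the dividend:

#include <linux/math64.h>
#include <linux/types.h>

/* Same arithmetic as filelayout_get_dense_offset() after the fix:
 * quotient by the stripe width, remainder by the stripe unit. */
static u64 dense_offset(u64 offset, u32 stripe_unit, u32 stripe_count)
{
	u32 stripe_width = stripe_unit * stripe_count;
	u32 rem;
	u64 stripe_no = div_u64(offset, stripe_width);	/* quotient only */

	div_u64_rem(offset, stripe_unit, &rem);		/* remainder only */
	return stripe_no * stripe_unit + rem;
}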

View File

@ -52,6 +52,7 @@
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/nfs_idmap.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
@ -364,9 +365,8 @@ static void renew_lease(const struct nfs_server *server, unsigned long timestamp
* Must be called while holding tbl->slot_tbl_lock
*/
static void
nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *free_slot)
nfs4_free_slot(struct nfs4_slot_table *tbl, u8 free_slotid)
{
int free_slotid = free_slot - tbl->slots;
int slotid = free_slotid;
BUG_ON(slotid < 0 || slotid >= NFS4_MAX_SLOT_TABLE);
@ -431,7 +431,7 @@ static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
}
spin_lock(&tbl->slot_tbl_lock);
nfs4_free_slot(tbl, res->sr_slot);
nfs4_free_slot(tbl, res->sr_slot - tbl->slots);
nfs4_check_drain_fc_complete(res->sr_session);
spin_unlock(&tbl->slot_tbl_lock);
res->sr_slot = NULL;
@ -554,13 +554,10 @@ int nfs41_setup_sequence(struct nfs4_session *session,
spin_lock(&tbl->slot_tbl_lock);
if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
!rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
/*
* The state manager will wait until the slot table is empty.
* Schedule the reset thread
*/
/* The state manager will wait until the slot table is empty */
rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
spin_unlock(&tbl->slot_tbl_lock);
dprintk("%s Schedule Session Reset\n", __func__);
dprintk("%s session is draining\n", __func__);
return -EAGAIN;
}
@ -765,6 +762,8 @@ struct nfs4_opendata {
struct nfs_openres o_res;
struct nfs_open_confirmargs c_arg;
struct nfs_open_confirmres c_res;
struct nfs4_string owner_name;
struct nfs4_string group_name;
struct nfs_fattr f_attr;
struct nfs_fattr dir_attr;
struct dentry *dir;
@ -788,6 +787,7 @@ static void nfs4_init_opendata_res(struct nfs4_opendata *p)
p->o_res.server = p->o_arg.server;
nfs_fattr_init(&p->f_attr);
nfs_fattr_init(&p->dir_attr);
nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}
static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
@ -819,6 +819,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
p->o_arg.name = &dentry->d_name;
p->o_arg.server = server;
p->o_arg.bitmask = server->attr_bitmask;
p->o_arg.dir_bitmask = server->cache_consistency_bitmask;
p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
if (flags & O_CREAT) {
u32 *s;
@ -855,6 +856,7 @@ static void nfs4_opendata_free(struct kref *kref)
dput(p->dir);
dput(p->dentry);
nfs_sb_deactive(sb);
nfs_fattr_free_names(&p->f_attr);
kfree(p);
}
@ -1579,6 +1581,8 @@ static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
if (status != 0 || !data->rpc_done)
return status;
nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
nfs_refresh_inode(dir, o_res->dir_attr);
if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
@ -1611,6 +1615,8 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
return status;
}
nfs_fattr_map_and_free_names(server, &data->f_attr);
if (o_arg->open_flags & O_CREAT) {
update_changeattr(dir, &o_res->cinfo);
nfs_post_op_update_inode(dir, o_res->dir_attr);
@ -3431,19 +3437,6 @@ static inline int nfs4_server_supports_acls(struct nfs_server *server)
*/
#define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
static void buf_to_pages(const void *buf, size_t buflen,
struct page **pages, unsigned int *pgbase)
{
const void *p = buf;
*pgbase = offset_in_page(buf);
p -= *pgbase;
while (p < buf + buflen) {
*(pages++) = virt_to_page(p);
p += PAGE_CACHE_SIZE;
}
}
static int buf_to_pages_noslab(const void *buf, size_t buflen,
struct page **pages, unsigned int *pgbase)
{
@ -3540,9 +3533,19 @@ out:
nfs4_set_cached_acl(inode, acl);
}
/*
* The getxattr API returns the required buffer length when called with a
* NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
* the required buf. On a NULL buf, we request a page of data from the
* server, guessing that the ACL request can be serviced by a page. If so,
* we cache up to the page of ACL data, and the 2nd call to getxattr is
* serviced by the cache. If not, we throw away the page, and cache the
* required length. The next getxattr call will then produce another round
* trip to
* the server, this time with the input buf of the required size.
*/
static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
struct page *pages[NFS4ACL_MAXPAGES];
struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
struct nfs_getaclargs args = {
.fh = NFS_FH(inode),
.acl_pages = pages,
@ -3557,41 +3560,60 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
.rpc_argp = &args,
.rpc_resp = &res,
};
struct page *localpage = NULL;
int ret;
int ret = -ENOMEM, npages, i, acl_len = 0;
if (buflen < PAGE_SIZE) {
/* As long as we're doing a round trip to the server anyway,
* let's be prepared for a page of acl data. */
localpage = alloc_page(GFP_KERNEL);
resp_buf = page_address(localpage);
if (localpage == NULL)
return -ENOMEM;
args.acl_pages[0] = localpage;
args.acl_pgbase = 0;
args.acl_len = PAGE_SIZE;
} else {
resp_buf = buf;
buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* As long as we're doing a round trip to the server anyway,
* let's be prepared for a page of acl data. */
if (npages == 0)
npages = 1;
for (i = 0; i < npages; i++) {
pages[i] = alloc_page(GFP_KERNEL);
if (!pages[i])
goto out_free;
}
ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
if (npages > 1) {
/* for decoding across pages */
args.acl_scratch = alloc_page(GFP_KERNEL);
if (!args.acl_scratch)
goto out_free;
}
args.acl_len = npages * PAGE_SIZE;
args.acl_pgbase = 0;
/* Let decode_getacl know not to fail if the ACL data is larger than
* the page we send as a guess */
if (buf == NULL)
res.acl_flags |= NFS4_ACL_LEN_REQUEST;
resp_buf = page_address(pages[0]);
dprintk("%s buf %p buflen %ld npages %d args.acl_len %ld\n",
__func__, buf, buflen, npages, args.acl_len);
ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
&msg, &args.seq_args, &res.seq_res, 0);
if (ret)
goto out_free;
if (res.acl_len > args.acl_len)
nfs4_write_cached_acl(inode, NULL, res.acl_len);
acl_len = res.acl_len - res.acl_data_offset;
if (acl_len > args.acl_len)
nfs4_write_cached_acl(inode, NULL, acl_len);
else
nfs4_write_cached_acl(inode, resp_buf, res.acl_len);
nfs4_write_cached_acl(inode, resp_buf + res.acl_data_offset,
acl_len);
if (buf) {
ret = -ERANGE;
if (res.acl_len > buflen)
if (acl_len > buflen)
goto out_free;
if (localpage)
memcpy(buf, resp_buf, res.acl_len);
_copy_from_pages(buf, pages, res.acl_data_offset,
res.acl_len);
}
ret = res.acl_len;
ret = acl_len;
out_free:
if (localpage)
__free_page(localpage);
for (i = 0; i < npages; i++)
if (pages[i])
__free_page(pages[i]);
if (args.acl_scratch)
__free_page(args.acl_scratch);
return ret;
}
@ -3622,6 +3644,8 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
nfs_zap_acl_cache(inode);
ret = nfs4_read_cached_acl(inode, buf, buflen);
if (ret != -ENOENT)
/* -ENOENT is returned if there is no ACL or if there is an ACL
* but no cached acl data, just the acl length */
return ret;
return nfs4_get_acl_uncached(inode, buf, buflen);
}
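
Note: the comment above __nfs4_get_acl_uncached(), earlier in this file, describes the userspace side of this exchange: getxattr() is first called with a NULL buffer to learn the ACL length, then called again with a buffer of that size, and the second call is expected to be answered from the cache filled by the first. A minimal userspace sketch of that two-call pattern (the attribute name and the error handling are shown only as an illustration):

#include <stdlib.h>
#include <sys/types.h>
#include <sys/xattr.h>

/* Two-call getxattr pattern: probe for the length, then fetch. */
static void *fetch_nfs4_acl(const char *path, ssize_t *lenp)
{
	ssize_t len = getxattr(path, "system.nfs4_acl", NULL, 0);
	void *buf;

	if (len < 0)
		return NULL;
	buf = malloc(len);
	if (buf == NULL)
		return NULL;
	len = getxattr(path, "system.nfs4_acl", buf, len);
	if (len < 0) {
		free(buf);
		return NULL;
	}
	*lenp = len;
	return buf;
}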
@ -5022,23 +5046,6 @@ out:
return ret;
}
/*
* Reset the forechannel and backchannel slot tables
*/
static int nfs4_reset_slot_tables(struct nfs4_session *session)
{
int status;
status = nfs4_reset_slot_table(&session->fc_slot_table,
session->fc_attrs.max_reqs, 1);
if (status)
return status;
status = nfs4_reset_slot_table(&session->bc_slot_table,
session->bc_attrs.max_reqs, 0);
return status;
}
/* Destroy the slot table */
static void nfs4_destroy_slot_tables(struct nfs4_session *session)
{
@ -5084,29 +5091,35 @@ out:
}
/*
* Initialize the forechannel and backchannel tables
* Initialize or reset the forechannel and backchannel tables
*/
static int nfs4_init_slot_tables(struct nfs4_session *session)
static int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
{
struct nfs4_slot_table *tbl;
int status = 0;
int status;
tbl = &session->fc_slot_table;
dprintk("--> %s\n", __func__);
/* Fore channel */
tbl = &ses->fc_slot_table;
if (tbl->slots == NULL) {
status = nfs4_init_slot_table(tbl,
session->fc_attrs.max_reqs, 1);
status = nfs4_init_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
if (status) /* -ENOMEM */
return status;
} else {
status = nfs4_reset_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
if (status)
return status;
}
tbl = &session->bc_slot_table;
/* Back channel */
tbl = &ses->bc_slot_table;
if (tbl->slots == NULL) {
status = nfs4_init_slot_table(tbl,
session->bc_attrs.max_reqs, 0);
status = nfs4_init_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
if (status)
nfs4_destroy_slot_tables(session);
}
/* Fore and back channel share a connection so get
* both slot tables or neither */
nfs4_destroy_slot_tables(ses);
} else
status = nfs4_reset_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
return status;
}
@ -5294,13 +5307,9 @@ int nfs4_proc_create_session(struct nfs_client *clp)
if (status)
goto out;
/* Init and reset the fore channel */
status = nfs4_init_slot_tables(session);
dprintk("slot table initialization returned %d\n", status);
if (status)
goto out;
status = nfs4_reset_slot_tables(session);
dprintk("slot table reset returned %d\n", status);
/* Init or reset the session slot tables */
status = nfs4_setup_session_slot_tables(session);
dprintk("slot table setup returned %d\n", status);
if (status)
goto out;

View File

@ -49,6 +49,7 @@
#include <linux/ratelimit.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
#include "nfs4_fs.h"
#include "callback.h"
@ -377,31 +378,24 @@ nfs4_find_state_owner_locked(struct nfs_server *server, struct rpc_cred *cred)
{
struct rb_node **p = &server->state_owners.rb_node,
*parent = NULL;
struct nfs4_state_owner *sp, *res = NULL;
struct nfs4_state_owner *sp;
while (*p != NULL) {
parent = *p;
sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
if (server < sp->so_server) {
p = &parent->rb_left;
continue;
}
if (server > sp->so_server) {
p = &parent->rb_right;
continue;
}
if (cred < sp->so_cred)
p = &parent->rb_left;
else if (cred > sp->so_cred)
p = &parent->rb_right;
else {
if (!list_empty(&sp->so_lru))
list_del_init(&sp->so_lru);
atomic_inc(&sp->so_count);
res = sp;
break;
return sp;
}
}
return res;
return NULL;
}
static struct nfs4_state_owner *
@ -421,6 +415,8 @@ nfs4_insert_state_owner_locked(struct nfs4_state_owner *new)
else if (new->so_cred > sp->so_cred)
p = &parent->rb_right;
else {
if (!list_empty(&sp->so_lru))
list_del_init(&sp->so_lru);
atomic_inc(&sp->so_count);
return sp;
}
@ -462,6 +458,7 @@ nfs4_alloc_state_owner(void)
spin_lock_init(&sp->so_sequence.lock);
INIT_LIST_HEAD(&sp->so_sequence.list);
atomic_set(&sp->so_count, 1);
INIT_LIST_HEAD(&sp->so_lru);
return sp;
}
@ -479,6 +476,38 @@ nfs4_drop_state_owner(struct nfs4_state_owner *sp)
}
}
static void nfs4_free_state_owner(struct nfs4_state_owner *sp)
{
rpc_destroy_wait_queue(&sp->so_sequence.wait);
put_rpccred(sp->so_cred);
kfree(sp);
}
static void nfs4_gc_state_owners(struct nfs_server *server)
{
struct nfs_client *clp = server->nfs_client;
struct nfs4_state_owner *sp, *tmp;
unsigned long time_min, time_max;
LIST_HEAD(doomed);
spin_lock(&clp->cl_lock);
time_max = jiffies;
time_min = (long)time_max - (long)clp->cl_lease_time;
list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
/* NB: LRU is sorted so that oldest is at the head */
if (time_in_range(sp->so_expires, time_min, time_max))
break;
list_move(&sp->so_lru, &doomed);
nfs4_remove_state_owner_locked(sp);
}
spin_unlock(&clp->cl_lock);
list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
list_del(&sp->so_lru);
nfs4_free_state_owner(sp);
}
}
/**
* nfs4_get_state_owner - Look up a state owner given a credential
* @server: nfs_server to search
@ -496,10 +525,10 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server,
sp = nfs4_find_state_owner_locked(server, cred);
spin_unlock(&clp->cl_lock);
if (sp != NULL)
return sp;
goto out;
new = nfs4_alloc_state_owner();
if (new == NULL)
return NULL;
goto out;
new->so_server = server;
new->so_cred = cred;
spin_lock(&clp->cl_lock);
@ -511,26 +540,58 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server,
rpc_destroy_wait_queue(&new->so_sequence.wait);
kfree(new);
}
out:
nfs4_gc_state_owners(server);
return sp;
}
/**
* nfs4_put_state_owner - Release a nfs4_state_owner
* @sp: state owner data to release
*
*/
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
struct nfs_client *clp = sp->so_server->nfs_client;
struct rpc_cred *cred = sp->so_cred;
struct nfs_server *server = sp->so_server;
struct nfs_client *clp = server->nfs_client;
if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
return;
nfs4_remove_state_owner_locked(sp);
if (!RB_EMPTY_NODE(&sp->so_server_node)) {
sp->so_expires = jiffies;
list_add_tail(&sp->so_lru, &server->state_owners_lru);
spin_unlock(&clp->cl_lock);
} else {
nfs4_remove_state_owner_locked(sp);
spin_unlock(&clp->cl_lock);
nfs4_free_state_owner(sp);
}
}
/**
* nfs4_purge_state_owners - Release all cached state owners
* @server: nfs_server with cached state owners to release
*
* Called at umount time. Remaining state owners will be on
* the LRU with ref count of zero.
*/
void nfs4_purge_state_owners(struct nfs_server *server)
{
struct nfs_client *clp = server->nfs_client;
struct nfs4_state_owner *sp, *tmp;
LIST_HEAD(doomed);
spin_lock(&clp->cl_lock);
list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
list_move(&sp->so_lru, &doomed);
nfs4_remove_state_owner_locked(sp);
}
spin_unlock(&clp->cl_lock);
rpc_destroy_wait_queue(&sp->so_sequence.wait);
put_rpccred(cred);
kfree(sp);
list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
list_del(&sp->so_lru);
nfs4_free_state_owner(sp);
}
}
static struct nfs4_state *
@ -1402,6 +1463,7 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
restart:
rcu_read_lock();
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
nfs4_purge_state_owners(server);
spin_lock(&clp->cl_lock);
for (pos = rb_first(&server->state_owners);
pos != NULL;
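
Note: taken together, the hunks above implement a familiar caching pattern: when the last reference to a state owner is dropped it is timestamped and appended to a per-server LRU instead of being freed, lookups rescue entries off the LRU, and a garbage collector frees everything that has been idle for longer than one lease period, oldest first. A self-contained userspace sketch of the release/expire half of that pattern (names, locking and the rescue-on-lookup path are omitted or invented for illustration):

#include <stdlib.h>
#include <time.h>

struct owner {
	struct owner *next;	/* LRU kept oldest-first, like so_lru */
	time_t expires;		/* stamped when the last reference is put */
	/* ... per-owner state ... */
};

static struct owner *lru_head;
static struct owner **lru_tail = &lru_head;

/* Called instead of free() when the refcount hits zero. */
static void owner_release(struct owner *o)
{
	o->expires = time(NULL);
	o->next = NULL;
	*lru_tail = o;			/* appending keeps the list sorted */
	lru_tail = &o->next;
}

/* Free everything that has been idle for longer than lease_time seconds. */
static void owner_gc(time_t lease_time)
{
	time_t cutoff = time(NULL) - lease_time;

	while (lru_head != NULL && lru_head->expires <= cutoff) {
		struct owner *o = lru_head;

		lru_head = o->next;
		if (lru_head == NULL)
			lru_tail = &lru_head;
		free(o);
	}
}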

View File

@ -2298,7 +2298,7 @@ static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_getfh(xdr, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
encode_restorefh(xdr, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
encode_getfattr(xdr, args->dir_bitmask, &hdr);
encode_nops(&hdr);
}
@ -2517,11 +2517,13 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
replen = hdr.replen + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz + 1;
replen = hdr.replen + op_decode_hdr_maxsz + 1;
encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr);
xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
args->acl_pages, args->acl_pgbase, args->acl_len);
xdr_set_scratch_buffer(xdr, page_address(args->acl_scratch), PAGE_SIZE);
encode_nops(&hdr);
}
@ -3790,7 +3792,8 @@ out_overflow:
}
static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap,
const struct nfs_server *server, uint32_t *uid, int may_sleep)
const struct nfs_server *server, uint32_t *uid,
struct nfs4_string *owner_name)
{
uint32_t len;
__be32 *p;
@ -3807,8 +3810,12 @@ static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap,
p = xdr_inline_decode(xdr, len);
if (unlikely(!p))
goto out_overflow;
if (!may_sleep) {
/* do nothing */
if (owner_name != NULL) {
owner_name->data = kmemdup(p, len, GFP_NOWAIT);
if (owner_name->data != NULL) {
owner_name->len = len;
ret = NFS_ATTR_FATTR_OWNER_NAME;
}
} else if (len < XDR_MAX_NETOBJ) {
if (nfs_map_name_to_uid(server, (char *)p, len, uid) == 0)
ret = NFS_ATTR_FATTR_OWNER;
@ -3828,7 +3835,8 @@ out_overflow:
}
static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap,
const struct nfs_server *server, uint32_t *gid, int may_sleep)
const struct nfs_server *server, uint32_t *gid,
struct nfs4_string *group_name)
{
uint32_t len;
__be32 *p;
@ -3845,8 +3853,12 @@ static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap,
p = xdr_inline_decode(xdr, len);
if (unlikely(!p))
goto out_overflow;
if (!may_sleep) {
/* do nothing */
if (group_name != NULL) {
group_name->data = kmemdup(p, len, GFP_NOWAIT);
if (group_name->data != NULL) {
group_name->len = len;
ret = NFS_ATTR_FATTR_GROUP_NAME;
}
} else if (len < XDR_MAX_NETOBJ) {
if (nfs_map_group_to_gid(server, (char *)p, len, gid) == 0)
ret = NFS_ATTR_FATTR_GROUP;
@ -4283,7 +4295,7 @@ xdr_error:
static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
struct nfs_fattr *fattr, struct nfs_fh *fh,
const struct nfs_server *server, int may_sleep)
const struct nfs_server *server)
{
int status;
umode_t fmode = 0;
@ -4350,12 +4362,12 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
goto xdr_error;
fattr->valid |= status;
status = decode_attr_owner(xdr, bitmap, server, &fattr->uid, may_sleep);
status = decode_attr_owner(xdr, bitmap, server, &fattr->uid, fattr->owner_name);
if (status < 0)
goto xdr_error;
fattr->valid |= status;
status = decode_attr_group(xdr, bitmap, server, &fattr->gid, may_sleep);
status = decode_attr_group(xdr, bitmap, server, &fattr->gid, fattr->group_name);
if (status < 0)
goto xdr_error;
fattr->valid |= status;
@ -4396,7 +4408,7 @@ xdr_error:
}
static int decode_getfattr_generic(struct xdr_stream *xdr, struct nfs_fattr *fattr,
struct nfs_fh *fh, const struct nfs_server *server, int may_sleep)
struct nfs_fh *fh, const struct nfs_server *server)
{
__be32 *savep;
uint32_t attrlen,
@ -4415,7 +4427,7 @@ static int decode_getfattr_generic(struct xdr_stream *xdr, struct nfs_fattr *fat
if (status < 0)
goto xdr_error;
status = decode_getfattr_attrs(xdr, bitmap, fattr, fh, server, may_sleep);
status = decode_getfattr_attrs(xdr, bitmap, fattr, fh, server);
if (status < 0)
goto xdr_error;
@ -4426,9 +4438,9 @@ xdr_error:
}
static int decode_getfattr(struct xdr_stream *xdr, struct nfs_fattr *fattr,
const struct nfs_server *server, int may_sleep)
const struct nfs_server *server)
{
return decode_getfattr_generic(xdr, fattr, NULL, server, may_sleep);
return decode_getfattr_generic(xdr, fattr, NULL, server);
}
/*
@ -4957,17 +4969,18 @@ decode_restorefh(struct xdr_stream *xdr)
}
static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
size_t *acl_len)
struct nfs_getaclres *res)
{
__be32 *savep;
__be32 *savep, *bm_p;
uint32_t attrlen,
bitmap[3] = {0};
struct kvec *iov = req->rq_rcv_buf.head;
int status;
*acl_len = 0;
res->acl_len = 0;
if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
goto out;
bm_p = xdr->p;
if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
goto out;
if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
@ -4979,18 +4992,30 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
size_t hdrlen;
u32 recvd;
/* The bitmap (xdr len + bitmaps) and the attr xdr len words
* are stored with the acl data to handle the problem of
* variable length bitmaps.*/
xdr->p = bm_p;
res->acl_data_offset = be32_to_cpup(bm_p) + 2;
res->acl_data_offset <<= 2;
/* We ignore &savep and don't do consistency checks on
* the attr length. Let userspace figure it out.... */
hdrlen = (u8 *)xdr->p - (u8 *)iov->iov_base;
attrlen += res->acl_data_offset;
recvd = req->rq_rcv_buf.len - hdrlen;
if (attrlen > recvd) {
dprintk("NFS: server cheating in getattr"
" acl reply: attrlen %u > recvd %u\n",
if (res->acl_flags & NFS4_ACL_LEN_REQUEST) {
/* getxattr interface called with a NULL buf */
res->acl_len = attrlen;
goto out;
}
dprintk("NFS: acl reply: attrlen %u > recvd %u\n",
attrlen, recvd);
return -EINVAL;
}
xdr_read_pages(xdr, attrlen);
*acl_len = attrlen;
res->acl_len = attrlen;
} else
status = -EOPNOTSUPP;
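
Note, a worked reading of the offset computation above: bm_p points at the XDR word holding the bitmap word count, so with, say, a two-word attribute bitmap, be32_to_cpup(bm_p) is 2 and acl_data_offset becomes (2 + 2) << 2 = 16 bytes, covering the bitmap-count word, the two bitmap words and the attribute-length word that sit in front of the ACL data.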
@ -5696,8 +5721,7 @@ static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp,
status = decode_open_downgrade(xdr, res);
if (status != 0)
goto out;
decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
@ -5723,8 +5747,7 @@ static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_access(xdr, res);
if (status != 0)
goto out;
decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
@ -5753,8 +5776,7 @@ static int nfs4_xdr_dec_lookup(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_getfh(xdr, res->fh);
if (status)
goto out;
status = decode_getfattr(xdr, res->fattr, res->server
,!RPC_IS_ASYNC(rqstp->rq_task));
status = decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
@ -5780,8 +5802,7 @@ static int nfs4_xdr_dec_lookup_root(struct rpc_rqst *rqstp,
goto out;
status = decode_getfh(xdr, res->fh);
if (status == 0)
status = decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
status = decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
@ -5807,8 +5828,7 @@ static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_remove(xdr, &res->cinfo);
if (status)
goto out;
decode_getfattr(xdr, res->dir_attr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
decode_getfattr(xdr, res->dir_attr, res->server);
out:
return status;
}
@ -5841,14 +5861,12 @@ static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
if (status)
goto out;
/* Current FH is target directory */
if (decode_getfattr(xdr, res->new_fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task)) != 0)
if (decode_getfattr(xdr, res->new_fattr, res->server))
goto out;
status = decode_restorefh(xdr);
if (status)
goto out;
decode_getfattr(xdr, res->old_fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
decode_getfattr(xdr, res->old_fattr, res->server);
out:
return status;
}
@ -5884,14 +5902,12 @@ static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
* Note order: OP_LINK leaves the directory as the current
* filehandle.
*/
if (decode_getfattr(xdr, res->dir_attr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task)) != 0)
if (decode_getfattr(xdr, res->dir_attr, res->server))
goto out;
status = decode_restorefh(xdr);
if (status)
goto out;
decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
@ -5923,14 +5939,12 @@ static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_getfh(xdr, res->fh);
if (status)
goto out;
if (decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task)) != 0)
if (decode_getfattr(xdr, res->fattr, res->server))
goto out;
status = decode_restorefh(xdr);
if (status)
goto out;
decode_getfattr(xdr, res->dir_fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
decode_getfattr(xdr, res->dir_fattr, res->server);
out:
return status;
}
@ -5962,8 +5976,7 @@ static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
status = decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
@ -6028,7 +6041,7 @@ nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_getacl(xdr, rqstp, &res->acl_len);
status = decode_getacl(xdr, rqstp, res);
out:
return status;
@ -6061,8 +6074,7 @@ static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
* an ESTALE error. Shouldn't be a problem,
* though, since fattr->valid will remain unset.
*/
decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
@ -6093,13 +6105,11 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
goto out;
if (decode_getfh(xdr, &res->fh) != 0)
goto out;
if (decode_getfattr(xdr, res->f_attr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task)) != 0)
if (decode_getfattr(xdr, res->f_attr, res->server) != 0)
goto out;
if (decode_restorefh(xdr) != 0)
goto out;
decode_getfattr(xdr, res->dir_attr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
decode_getfattr(xdr, res->dir_attr, res->server);
out:
return status;
}
@ -6147,8 +6157,7 @@ static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp,
status = decode_open(xdr, res);
if (status)
goto out;
decode_getfattr(xdr, res->f_attr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
decode_getfattr(xdr, res->f_attr, res->server);
out:
return status;
}
@ -6175,8 +6184,7 @@ static int nfs4_xdr_dec_setattr(struct rpc_rqst *rqstp,
status = decode_setattr(xdr);
if (status)
goto out;
decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
@ -6356,8 +6364,7 @@ static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
if (status)
goto out;
if (res->fattr)
decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
decode_getfattr(xdr, res->fattr, res->server);
if (!status)
status = res->count;
out:
@ -6386,8 +6393,7 @@ static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
if (status)
goto out;
if (res->fattr)
decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
@ -6546,8 +6552,7 @@ static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp,
status = decode_delegreturn(xdr);
if (status != 0)
goto out;
decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
@ -6576,8 +6581,7 @@ static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req,
goto out;
xdr_enter_page(xdr, PAGE_SIZE);
status = decode_getfattr(xdr, &res->fs_locations->fattr,
res->fs_locations->server,
!RPC_IS_ASYNC(req->rq_task));
res->fs_locations->server);
out:
return status;
}
@ -6826,8 +6830,7 @@ static int nfs4_xdr_dec_layoutcommit(struct rpc_rqst *rqstp,
status = decode_layoutcommit(xdr, rqstp, res);
if (status)
goto out;
decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
@ -6958,7 +6961,7 @@ int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
goto out_overflow;
if (decode_getfattr_attrs(xdr, bitmap, entry->fattr, entry->fh,
entry->server, 1) < 0)
entry->server) < 0)
goto out_overflow;
if (entry->fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
entry->ino = entry->fattr->mounted_on_fileid;

View File

@ -551,7 +551,8 @@ static const struct nfs_pageio_ops objio_pg_write_ops = {
static struct pnfs_layoutdriver_type objlayout_type = {
.id = LAYOUT_OSD2_OBJECTS,
.name = "LAYOUT_OSD2_OBJECTS",
.flags = PNFS_LAYOUTRET_ON_SETATTR,
.flags = PNFS_LAYOUTRET_ON_SETATTR |
PNFS_LAYOUTRET_ON_ERROR,
.alloc_layout_hdr = objlayout_alloc_layout_hdr,
.free_layout_hdr = objlayout_free_layout_hdr,

View File

@ -254,6 +254,8 @@ objlayout_read_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
oir->status = rdata->task.tk_status = status;
if (status >= 0)
rdata->res.count = status;
else
rdata->pnfs_error = status;
objlayout_iodone(oir);
/* must not use oir after this point */
@ -334,6 +336,8 @@ objlayout_write_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
if (status >= 0) {
wdata->res.count = status;
wdata->verf.committed = oir->committed;
} else {
wdata->pnfs_error = status;
}
objlayout_iodone(oir);
/* must not use oir after this point */

View File

@ -1166,6 +1166,33 @@ pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
static int pnfs_write_done_resend_to_mds(struct inode *inode, struct list_head *head)
{
struct nfs_pageio_descriptor pgio;
LIST_HEAD(failed);
/* Resend all requests through the MDS */
nfs_pageio_init_write_mds(&pgio, inode, FLUSH_STABLE);
while (!list_empty(head)) {
struct nfs_page *req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
if (!nfs_pageio_add_request(&pgio, req))
nfs_list_add_request(req, &failed);
}
nfs_pageio_complete(&pgio);
if (!list_empty(&failed)) {
/* For some reason our attempt to resend pages failed. Mark the
* overall send request as having failed, and let
* nfs_writeback_release_full deal with the error.
*/
list_move(&failed, head);
return -EIO;
}
return 0;
}
/*
* Called by non rpc-based layout drivers
*/
@ -1175,9 +1202,17 @@ void pnfs_ld_write_done(struct nfs_write_data *data)
pnfs_set_layoutcommit(data);
data->mds_ops->rpc_call_done(&data->task, data);
} else {
put_lseg(data->lseg);
data->lseg = NULL;
dprintk("pnfs write error = %d\n", data->pnfs_error);
if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
PNFS_LAYOUTRET_ON_ERROR) {
/* Don't lo_commit on error; the server will need to
* perform a file recovery.
*/
clear_bit(NFS_INO_LAYOUTCOMMIT,
&NFS_I(data->inode)->flags);
pnfs_return_layout(data->inode);
}
data->task.tk_status = pnfs_write_done_resend_to_mds(data->inode, &data->pages);
}
data->mds_ops->rpc_release(data);
}
@ -1267,6 +1302,9 @@ static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
put_lseg(data->lseg);
data->lseg = NULL;
dprintk("pnfs write error = %d\n", data->pnfs_error);
if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
PNFS_LAYOUTRET_ON_ERROR)
pnfs_return_layout(data->inode);
nfs_pageio_init_read_mds(&pgio, data->inode);

View File

@ -68,6 +68,7 @@ enum {
enum layoutdriver_policy_flags {
/* Should the pNFS client commit and return the layout upon a setattr */
PNFS_LAYOUTRET_ON_SETATTR = 1 << 0,
PNFS_LAYOUTRET_ON_ERROR = 1 << 1,
};
struct nfs4_deviceid_node;

View File

@ -908,10 +908,24 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int ve
data->auth_flavor_len = 1;
data->version = version;
data->minorversion = 0;
security_init_mnt_opts(&data->lsm_opts);
}
return data;
}
static void nfs_free_parsed_mount_data(struct nfs_parsed_mount_data *data)
{
if (data) {
kfree(data->client_address);
kfree(data->mount_server.hostname);
kfree(data->nfs_server.export_path);
kfree(data->nfs_server.hostname);
kfree(data->fscache_uniq);
security_free_mnt_opts(&data->lsm_opts);
kfree(data);
}
}
/*
* Sanity-check a server address provided by the mount command.
*
@ -2219,9 +2233,7 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
data = nfs_alloc_parsed_mount_data(NFS_DEFAULT_VERSION);
mntfh = nfs_alloc_fhandle();
if (data == NULL || mntfh == NULL)
goto out_free_fh;
security_init_mnt_opts(&data->lsm_opts);
goto out;
/* Validate the mount data */
error = nfs_validate_mount_data(raw_data, data, mntfh, dev_name);
@ -2233,8 +2245,6 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
#ifdef CONFIG_NFS_V4
if (data->version == 4) {
mntroot = nfs4_try_mount(flags, dev_name, data);
kfree(data->client_address);
kfree(data->nfs_server.export_path);
goto out;
}
#endif /* CONFIG_NFS_V4 */
@ -2289,13 +2299,8 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
s->s_flags |= MS_ACTIVE;
out:
kfree(data->nfs_server.hostname);
kfree(data->mount_server.hostname);
kfree(data->fscache_uniq);
security_free_mnt_opts(&data->lsm_opts);
out_free_fh:
nfs_free_parsed_mount_data(data);
nfs_free_fhandle(mntfh);
kfree(data);
return mntroot;
out_err_nosb:
@ -2622,9 +2627,7 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,
mntfh = nfs_alloc_fhandle();
if (data == NULL || mntfh == NULL)
goto out_free_fh;
security_init_mnt_opts(&data->lsm_opts);
goto out;
/* Get a volume representation */
server = nfs4_create_server(data, mntfh);
@ -2676,13 +2679,10 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,
s->s_flags |= MS_ACTIVE;
security_free_mnt_opts(&data->lsm_opts);
nfs_free_fhandle(mntfh);
return mntroot;
out:
security_free_mnt_opts(&data->lsm_opts);
out_free_fh:
nfs_free_fhandle(mntfh);
return ERR_PTR(error);
@ -2839,7 +2839,7 @@ static struct dentry *nfs4_mount(struct file_system_type *fs_type,
data = nfs_alloc_parsed_mount_data(4);
if (data == NULL)
goto out_free_data;
goto out;
/* Validate the mount data */
error = nfs4_validate_mount_data(raw_data, data, dev_name);
@ -2853,12 +2853,7 @@ static struct dentry *nfs4_mount(struct file_system_type *fs_type,
error = PTR_ERR(res);
out:
kfree(data->client_address);
kfree(data->nfs_server.export_path);
kfree(data->nfs_server.hostname);
kfree(data->fscache_uniq);
out_free_data:
kfree(data);
nfs_free_parsed_mount_data(data);
dprintk("<-- nfs4_mount() = %d%s\n", error,
error != 0 ? " [error]" : "");
return res;

View File

@ -1052,7 +1052,7 @@ static const struct nfs_pageio_ops nfs_pageio_write_ops = {
.pg_doio = nfs_generic_pg_writepages,
};
static void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
struct inode *inode, int ioflags)
{
nfs_pageio_init(pgio, inode, &nfs_pageio_write_ops,
@ -1166,13 +1166,7 @@ static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
static void nfs_writeback_release_full(void *calldata)
{
struct nfs_write_data *data = calldata;
int ret, status = data->task.tk_status;
struct nfs_pageio_descriptor pgio;
if (data->pnfs_error) {
nfs_pageio_init_write_mds(&pgio, data->inode, FLUSH_STABLE);
pgio.pg_recoalesce = 1;
}
int status = data->task.tk_status;
/* Update attributes as result of writeback. */
while (!list_empty(&data->pages)) {
@ -1188,11 +1182,6 @@ static void nfs_writeback_release_full(void *calldata)
req->wb_bytes,
(long long)req_offset(req));
if (data->pnfs_error) {
dprintk(", pnfs error = %d\n", data->pnfs_error);
goto next;
}
if (status < 0) {
nfs_set_pageerror(page);
nfs_context_set_write_error(req->wb_context, status);
@ -1212,19 +1201,7 @@ remove_request:
next:
nfs_clear_page_tag_locked(req);
nfs_end_page_writeback(page);
if (data->pnfs_error) {
lock_page(page);
nfs_pageio_cond_complete(&pgio, page->index);
ret = nfs_page_async_flush(&pgio, page, 0);
if (ret) {
nfs_set_pageerror(page);
dprintk("rewrite to MDS error = %d\n", ret);
}
unlock_page(page);
}
}
if (data->pnfs_error)
nfs_pageio_complete(&pgio);
nfs_writedata_release(calldata);
}

View File

@ -718,7 +718,7 @@ int set_callback_cred(void)
{
if (callback_cred)
return 0;
callback_cred = rpc_lookup_machine_cred();
callback_cred = rpc_lookup_machine_cred("nfs");
if (!callback_cred)
return -ENOMEM;
return 0;

View File

@ -153,6 +153,7 @@ struct nfs_server {
struct rb_root openowner_id;
struct rb_root lockowner_id;
#endif
struct list_head state_owners_lru;
struct list_head layouts;
struct list_head delegations;
void (*destroy)(struct nfs_server *);

View File

@ -66,6 +66,8 @@ struct idmap_msg {
/* Forward declaration to make this header independent of others */
struct nfs_client;
struct nfs_server;
struct nfs_fattr;
struct nfs4_string;
#ifdef CONFIG_NFS_USE_NEW_IDMAPPER
@ -97,6 +99,12 @@ void nfs_idmap_delete(struct nfs_client *);
#endif /* CONFIG_NFS_USE_NEW_IDMAPPER */
void nfs_fattr_init_names(struct nfs_fattr *fattr,
struct nfs4_string *owner_name,
struct nfs4_string *group_name);
void nfs_fattr_free_names(struct nfs_fattr *);
void nfs_fattr_map_and_free_names(struct nfs_server *, struct nfs_fattr *);
int nfs_map_name_to_uid(const struct nfs_server *, const char *, size_t, __u32 *);
int nfs_map_group_to_gid(const struct nfs_server *, const char *, size_t, __u32 *);
int nfs_map_uid_to_name(const struct nfs_server *, __u32, char *, size_t);

View File

@ -18,6 +18,11 @@
/* Forward declaration for NFS v3 */
struct nfs4_secinfo_flavors;
struct nfs4_string {
unsigned int len;
char *data;
};
struct nfs_fsid {
uint64_t major;
uint64_t minor;
@ -61,6 +66,8 @@ struct nfs_fattr {
struct timespec pre_ctime; /* pre_op_attr.ctime */
unsigned long time_start;
unsigned long gencount;
struct nfs4_string *owner_name;
struct nfs4_string *group_name;
};
#define NFS_ATTR_FATTR_TYPE (1U << 0)
@ -85,6 +92,8 @@ struct nfs_fattr {
#define NFS_ATTR_FATTR_V4_REFERRAL (1U << 19) /* NFSv4 referral */
#define NFS_ATTR_FATTR_MOUNTPOINT (1U << 20) /* Treat as mountpoint */
#define NFS_ATTR_FATTR_MOUNTED_ON_FILEID (1U << 21)
#define NFS_ATTR_FATTR_OWNER_NAME (1U << 22)
#define NFS_ATTR_FATTR_GROUP_NAME (1U << 23)
#define NFS_ATTR_FATTR (NFS_ATTR_FATTR_TYPE \
| NFS_ATTR_FATTR_MODE \
@ -324,6 +333,7 @@ struct nfs_openargs {
const struct qstr * name;
const struct nfs_server *server; /* Needed for ID mapping */
const u32 * bitmask;
const u32 * dir_bitmask;
__u32 claim;
struct nfs4_sequence_args seq_args;
};
@ -342,6 +352,8 @@ struct nfs_openres {
__u32 do_recall;
__u64 maxsize;
__u32 attrset[NFS4_BITMAP_SIZE];
struct nfs4_string *owner;
struct nfs4_string *group_owner;
struct nfs4_sequence_res seq_res;
};
@ -602,11 +614,16 @@ struct nfs_getaclargs {
size_t acl_len;
unsigned int acl_pgbase;
struct page ** acl_pages;
struct page * acl_scratch;
struct nfs4_sequence_args seq_args;
};
/* getxattr ACL interface flags */
#define NFS4_ACL_LEN_REQUEST 0x0001 /* zero length getxattr buffer */
struct nfs_getaclres {
size_t acl_len;
size_t acl_data_offset;
int acl_flags;
struct nfs4_sequence_res seq_res;
};
@ -773,11 +790,6 @@ struct nfs3_getaclres {
struct posix_acl * acl_default;
};
struct nfs4_string {
unsigned int len;
char *data;
};
#ifdef CONFIG_NFS_V4
typedef u64 clientid4;

View File

@ -26,6 +26,7 @@ struct auth_cred {
uid_t uid;
gid_t gid;
struct group_info *group_info;
const char *principal;
unsigned char machine_cred : 1;
};
@ -127,7 +128,7 @@ void rpc_destroy_generic_auth(void);
void rpc_destroy_authunix(void);
struct rpc_cred * rpc_lookup_cred(void);
struct rpc_cred * rpc_lookup_machine_cred(void);
struct rpc_cred * rpc_lookup_machine_cred(const char *service_name);
int rpcauth_register(const struct rpc_authops *);
int rpcauth_unregister(const struct rpc_authops *);
struct rpc_auth * rpcauth_create(rpc_authflavor_t, struct rpc_clnt *);

View File

@ -82,8 +82,8 @@ struct gss_cred {
enum rpc_gss_svc gc_service;
struct gss_cl_ctx __rcu *gc_ctx;
struct gss_upcall_msg *gc_upcall;
const char *gc_principal;
unsigned long gc_upcall_timestamp;
unsigned char gc_machine_cred : 1;
};
#endif /* __KERNEL__ */

View File

@ -191,6 +191,8 @@ extern int xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
struct xdr_array2_desc *desc);
extern int xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
struct xdr_array2_desc *desc);
extern void _copy_from_pages(char *p, struct page **pages, size_t pgbase,
size_t len);
/*
* Provide some simple tools for XDR buffer overflow-checking etc.

View File

@ -400,15 +400,42 @@ out:
}
#ifdef CONFIG_ROOT_NFS
#define NFSROOT_TIMEOUT_MIN 5
#define NFSROOT_TIMEOUT_MAX 30
#define NFSROOT_RETRY_MAX 5
static int __init mount_nfs_root(void)
{
char *root_dev, *root_data;
unsigned int timeout;
int try, err;
if (nfs_root_data(&root_dev, &root_data) != 0)
err = nfs_root_data(&root_dev, &root_data);
if (err != 0)
return 0;
if (do_mount_root(root_dev, "nfs", root_mountflags, root_data) != 0)
return 0;
return 1;
/*
* The server or network may not be ready, so try several
* times. Stop after a few tries in case the client wants
* to fall back to other boot methods.
*/
timeout = NFSROOT_TIMEOUT_MIN;
for (try = 1; ; try++) {
err = do_mount_root(root_dev, "nfs",
root_mountflags, root_data);
if (err == 0)
return 1;
if (try > NFSROOT_RETRY_MAX)
break;
/* Wait, in case the server refused us immediately */
ssleep(timeout);
timeout <<= 1;
if (timeout > NFSROOT_TIMEOUT_MAX)
timeout = NFSROOT_TIMEOUT_MAX;
}
return 0;
}
#endif
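
Note: with NFSROOT_TIMEOUT_MIN of 5, NFSROOT_TIMEOUT_MAX of 30 and NFSROOT_RETRY_MAX of 5, the loop above makes six mount attempts in total, sleeping 5, 10, 20, 30 and 30 seconds between them, so an NFS root server gets roughly 95 seconds to become reachable before the client gives up and falls back to other boot methods.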

View File

@ -41,15 +41,17 @@ EXPORT_SYMBOL_GPL(rpc_lookup_cred);
/*
* Public call interface for looking up machine creds.
*/
struct rpc_cred *rpc_lookup_machine_cred(void)
struct rpc_cred *rpc_lookup_machine_cred(const char *service_name)
{
struct auth_cred acred = {
.uid = RPC_MACHINE_CRED_USERID,
.gid = RPC_MACHINE_CRED_GROUPID,
.principal = service_name,
.machine_cred = 1,
};
dprintk("RPC: looking up machine cred\n");
dprintk("RPC: looking up machine cred for service %s\n",
service_name);
return generic_auth.au_ops->lookup_cred(&generic_auth, &acred, 0);
}
EXPORT_SYMBOL_GPL(rpc_lookup_machine_cred);

View File

@ -392,7 +392,8 @@ static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg)
}
static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
struct rpc_clnt *clnt, int machine_cred)
struct rpc_clnt *clnt,
const char *service_name)
{
struct gss_api_mech *mech = gss_msg->auth->mech;
char *p = gss_msg->databuf;
@ -407,12 +408,8 @@ static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
p += len;
gss_msg->msg.len += len;
}
if (machine_cred) {
len = sprintf(p, "service=* ");
p += len;
gss_msg->msg.len += len;
} else if (!strcmp(clnt->cl_program->name, "nfs4_cb")) {
len = sprintf(p, "service=nfs ");
if (service_name != NULL) {
len = sprintf(p, "service=%s ", service_name);
p += len;
gss_msg->msg.len += len;
}
@ -429,17 +426,18 @@ static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
}
static void gss_encode_msg(struct gss_upcall_msg *gss_msg,
struct rpc_clnt *clnt, int machine_cred)
struct rpc_clnt *clnt,
const char *service_name)
{
if (pipe_version == 0)
gss_encode_v0_msg(gss_msg);
else /* pipe_version == 1 */
gss_encode_v1_msg(gss_msg, clnt, machine_cred);
gss_encode_v1_msg(gss_msg, clnt, service_name);
}
static inline struct gss_upcall_msg *
gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid, struct rpc_clnt *clnt,
int machine_cred)
static struct gss_upcall_msg *
gss_alloc_msg(struct gss_auth *gss_auth, struct rpc_clnt *clnt,
uid_t uid, const char *service_name)
{
struct gss_upcall_msg *gss_msg;
int vers;
@ -459,7 +457,7 @@ gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid, struct rpc_clnt *clnt,
atomic_set(&gss_msg->count, 1);
gss_msg->uid = uid;
gss_msg->auth = gss_auth;
gss_encode_msg(gss_msg, clnt, machine_cred);
gss_encode_msg(gss_msg, clnt, service_name);
return gss_msg;
}
@ -471,7 +469,7 @@ gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cr
struct gss_upcall_msg *gss_new, *gss_msg;
uid_t uid = cred->cr_uid;
gss_new = gss_alloc_msg(gss_auth, uid, clnt, gss_cred->gc_machine_cred);
gss_new = gss_alloc_msg(gss_auth, clnt, uid, gss_cred->gc_principal);
if (IS_ERR(gss_new))
return gss_new;
gss_msg = gss_add_msg(gss_new);
@ -995,7 +993,9 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
*/
cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
cred->gc_service = gss_auth->service;
cred->gc_machine_cred = acred->machine_cred;
cred->gc_principal = NULL;
if (acred->machine_cred)
cred->gc_principal = acred->principal;
kref_get(&gss_auth->kref);
return &cred->gc_base;
@ -1030,7 +1030,12 @@ gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
if (!test_bit(RPCAUTH_CRED_UPTODATE, &rc->cr_flags))
return 0;
out:
if (acred->machine_cred != gss_cred->gc_machine_cred)
if (acred->principal != NULL) {
if (gss_cred->gc_principal == NULL)
return 0;
return strcmp(acred->principal, gss_cred->gc_principal) == 0;
}
if (gss_cred->gc_principal != NULL)
return 0;
return rc->cr_uid == acred->uid;
}
@ -1104,7 +1109,8 @@ static int gss_renew_cred(struct rpc_task *task)
struct rpc_auth *auth = oldcred->cr_auth;
struct auth_cred acred = {
.uid = oldcred->cr_uid,
.machine_cred = gss_cred->gc_machine_cred,
.principal = gss_cred->gc_principal,
.machine_cred = (gss_cred->gc_principal != NULL ? 1 : 0),
};
struct rpc_cred *new;

View File

@ -296,7 +296,7 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
* Copies data into an arbitrary memory location from an array of pages
* The copy is assumed to be non-overlapping.
*/
static void
void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
struct page **pgfrom;
@ -324,6 +324,7 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
} while ((len -= copy) != 0);
}
EXPORT_SYMBOL_GPL(_copy_from_pages);
/*
* xdr_shrink_bufhead
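
Note: since only the signature change of _copy_from_pages() is visible in this hunk, here is a hedged userspace sketch of what the routine does: gather len bytes that begin pgbase bytes into an array of page-sized buffers into one flat destination (the real kernel version operates on struct page pointers with kmap_atomic, which this sketch ignores):

#include <stddef.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Copy len bytes starting at pgbase within the page array into p. */
static void copy_from_pages(char *p, char **pages, size_t pgbase, size_t len)
{
	char **pgfrom = pages + pgbase / PAGE_SIZE;

	pgbase %= PAGE_SIZE;
	while (len != 0) {
		size_t copy = PAGE_SIZE - pgbase;

		if (copy > len)
			copy = len;
		memcpy(p, *pgfrom + pgbase, copy);

		p += copy;
		len -= copy;
		pgbase = 0;	/* subsequent pages are read from offset 0 */
		pgfrom++;
	}
}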