142 lines
3.5 KiB
Diff
Patch-mainline: submitted 04aug2009
|
|
References: bnc#498708
|
|
From: NeilBrown <neilb@suse.de>
|
|
Date: Tue, 4 Aug 2009 15:06:39 +1000
|
|
Subject: [PATCH 09/12] nfsd/idmap: drop special request deferral in favour of improved default.
|
|
|
|
The idmap code manages request deferral by waiting for a reply from
|
|
userspace rather than putting the NFS request on a queue to be retried
|
|
from the start.
|
|
Now that the common deferral code does this there is no need for the
|
|
special code in idmap.
|
|
|
|
Signed-off-by: NeilBrown <neilb@suse.de>
|
|
|
|
---
|
|
fs/nfsd/nfs4idmap.c | 105 +++++-----------------------------------------------
|
|
1 file changed, 11 insertions(+), 94 deletions(-)
|
|
|
|
--- a/fs/nfsd/nfs4idmap.c
|
|
+++ b/fs/nfsd/nfs4idmap.c
|
|
@@ -481,109 +481,26 @@ nfsd_idmap_shutdown(void)
|
|
cache_unregister(&nametoid_cache);
|
|
}
|
|
|
|
-/*
|
|
- * Deferred request handling
|
|
- */
|
|
-
|
|
-struct idmap_defer_req {
|
|
- struct cache_req req;
|
|
- struct cache_deferred_req deferred_req;
|
|
- wait_queue_head_t waitq;
|
|
- atomic_t count;
|
|
-};
|
|
-
|
|
-static inline void
|
|
-put_mdr(struct idmap_defer_req *mdr)
|
|
-{
|
|
- if (atomic_dec_and_test(&mdr->count))
|
|
- kfree(mdr);
|
|
-}
|
|
-
|
|
-static inline void
|
|
-get_mdr(struct idmap_defer_req *mdr)
|
|
-{
|
|
- atomic_inc(&mdr->count);
|
|
-}
|
|
-
|
|
-static void
|
|
-idmap_revisit(struct cache_deferred_req *dreq, int toomany)
|
|
-{
|
|
- struct idmap_defer_req *mdr =
|
|
- container_of(dreq, struct idmap_defer_req, deferred_req);
|
|
-
|
|
- wake_up(&mdr->waitq);
|
|
- put_mdr(mdr);
|
|
-}
|
|
-
|
|
-static struct cache_deferred_req *
|
|
-idmap_defer(struct cache_req *req)
|
|
-{
|
|
- struct idmap_defer_req *mdr =
|
|
- container_of(req, struct idmap_defer_req, req);
|
|
-
|
|
- mdr->deferred_req.revisit = idmap_revisit;
|
|
- get_mdr(mdr);
|
|
- return (&mdr->deferred_req);
|
|
-}
|
|
-
|
|
-static inline int
|
|
-do_idmap_lookup(struct ent *(*lookup_fn)(struct ent *), struct ent *key,
|
|
- struct cache_detail *detail, struct ent **item,
|
|
- struct idmap_defer_req *mdr)
|
|
-{
|
|
- *item = lookup_fn(key);
|
|
- if (!*item)
|
|
- return -ENOMEM;
|
|
- return cache_check(detail, &(*item)->h, &mdr->req);
|
|
-}
|
|
-
|
|
-static inline int
|
|
-do_idmap_lookup_nowait(struct ent *(*lookup_fn)(struct ent *),
|
|
- struct ent *key, struct cache_detail *detail,
|
|
- struct ent **item)
|
|
-{
|
|
- int ret = -ENOMEM;
|
|
-
|
|
- *item = lookup_fn(key);
|
|
- if (!*item)
|
|
- goto out_err;
|
|
- ret = -ETIMEDOUT;
|
|
- if (!test_bit(CACHE_VALID, &(*item)->h.flags)
|
|
- || (*item)->h.expiry_time < get_seconds()
|
|
- || detail->flush_time > (*item)->h.last_refresh)
|
|
- goto out_put;
|
|
- ret = -ENOENT;
|
|
- if (test_bit(CACHE_NEGATIVE, &(*item)->h.flags))
|
|
- goto out_put;
|
|
- return 0;
|
|
-out_put:
|
|
- cache_put(&(*item)->h, detail);
|
|
-out_err:
|
|
- *item = NULL;
|
|
- return ret;
|
|
-}
|
|
-
|
|
static int
|
|
idmap_lookup(struct svc_rqst *rqstp,
|
|
struct ent *(*lookup_fn)(struct ent *), struct ent *key,
|
|
struct cache_detail *detail, struct ent **item)
|
|
{
|
|
- struct idmap_defer_req *mdr;
|
|
int ret;
|
|
|
|
- mdr = kzalloc(sizeof(*mdr), GFP_KERNEL);
|
|
- if (!mdr)
|
|
+ *item = lookup_fn(key);
|
|
+ if (!*item)
|
|
return -ENOMEM;
|
|
- atomic_set(&mdr->count, 1);
|
|
- init_waitqueue_head(&mdr->waitq);
|
|
- mdr->req.defer = idmap_defer;
|
|
- ret = do_idmap_lookup(lookup_fn, key, detail, item, mdr);
|
|
- if (ret == -EAGAIN) {
|
|
- wait_event_interruptible_timeout(mdr->waitq,
|
|
- test_bit(CACHE_VALID, &(*item)->h.flags), 1 * HZ);
|
|
- ret = do_idmap_lookup_nowait(lookup_fn, key, detail, item);
|
|
+ retry:
|
|
+ ret = cache_check(detail, &(*item)->h, &rqstp->rq_chandle);
|
|
+
|
|
+ if (ret == -ETIMEDOUT) {
|
|
+ struct ent *prev_item = *item;
|
|
+ *item = lookup_fn(key);
|
|
+ if (*item != prev_item)
|
|
+ goto retry;
|
|
+ cache_put(&(*item)->h, detail);
|
|
}
|
|
- put_mdr(mdr);
|
|
return ret;
|
|
}
|
|
|