From: jbeulich@novell.com
Subject: cleanup to blkback and blktap
Patch-mainline: n/a

Remove unused/unnecessary fields of their pending_req_t structures,
and reduce the width of those structures' nr_pages field.
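
For illustration, the slimmed-down blkback structure ends up roughly as
below (a sketch assembled from the hunks that follow, not a verbatim
copy of the tree; nr_pages only ever holds a segment count bounded by
BLKIF_MAX_SEGMENTS_PER_REQUEST, so unsigned short is ample and packs
next to the existing short field):

	typedef struct {
		blkif_t *blkif;
		u64 id;
		atomic_t pendcnt;
		unsigned short nr_pages;	/* narrowed from int */
		unsigned short operation;
		struct list_head free_list;
	} pending_req_t;

The status field can go away because completion status only matters once
the last outstanding sub-request finishes; __end_block_io_op() now keeps
it in a local variable instead.
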
Move the loop-invariant grant table flags calculation out of the loops
(also in scsiback); the sketch below shows the shape of the change.
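
Sketched here with the blkback names: the grant mapping flags depend
only on the request's direction, never on the segment index, so they
can be computed once.

	/* before: flags recomputed on every iteration */
	for (i = 0; i < nseg; i++) {
		uint32_t flags = GNTMAP_host_map;

		if (operation != READ)
			flags |= GNTMAP_readonly;
		gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
				  req->seg[i].gref, blkif->domid);
	}

	/* after: computed once, loop body shrinks */
	flags = GNTMAP_host_map;
	if (operation != READ)
		flags |= GNTMAP_readonly;

	for (i = 0; i < nseg; i++)
		gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
				  req->seg[i].gref, blkif->domid);

In blktap the invariant part is reused for the PTE mapping as well, with
GNTMAP_application_map | GNTMAP_contains_pte OR'ed in at that call site.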
--- head-2011-02-17.orig/drivers/xen/blkback/blkback.c	2011-02-01 15:09:47.000000000 +0100
+++ head-2011-02-17/drivers/xen/blkback/blkback.c	2011-02-28 14:15:32.000000000 +0100
@@ -73,10 +73,9 @@ module_param(debug_lvl, int, 0644);
 typedef struct {
 	blkif_t *blkif;
 	u64 id;
-	int nr_pages;
 	atomic_t pendcnt;
+	unsigned short nr_pages;
 	unsigned short operation;
-	int status;
 	struct list_head free_list;
 } pending_req_t;
 
@@ -255,22 +254,24 @@ int blkif_schedule(void *arg)
 
 static void __end_block_io_op(pending_req_t *pending_req, int error)
 {
+	int status = BLKIF_RSP_OKAY;
+
 	/* An error fails the entire request. */
 	if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
 	    (error == -EOPNOTSUPP)) {
 		DPRINTK("blkback: write barrier op failed, not supported\n");
 		blkback_barrier(XBT_NIL, pending_req->blkif->be, 0);
-		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
+		status = BLKIF_RSP_EOPNOTSUPP;
 	} else if (error) {
 		DPRINTK("Buffer not up-to-date at end of operation, "
			"error=%d\n", error);
-		pending_req->status = BLKIF_RSP_ERROR;
+		status = BLKIF_RSP_ERROR;
 	}
 
 	if (atomic_dec_and_test(&pending_req->pendcnt)) {
 		fast_flush_area(pending_req);
 		make_response(pending_req->blkif, pending_req->id,
-			      pending_req->operation, pending_req->status);
+			      pending_req->operation, status);
 		blkif_put(pending_req->blkif);
 		free_req(pending_req);
 	}
@@ -387,7 +388,6 @@ static void dispatch_rw_block_io(blkif_t
 				 blkif_request_t *req,
 				 pending_req_t *pending_req)
 {
-	extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
 	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	struct phys_req preq;
 	struct {
@@ -395,6 +395,7 @@ static void dispatch_rw_block_io(blkif_t
 	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	unsigned int nseg;
 	struct bio *bio = NULL;
+	uint32_t flags;
 	int ret, i;
 	int operation;
 
@@ -428,12 +429,13 @@ static void dispatch_rw_block_io(blkif_t
 	pending_req->blkif = blkif;
 	pending_req->id = req->id;
 	pending_req->operation = req->operation;
-	pending_req->status = BLKIF_RSP_OKAY;
 	pending_req->nr_pages = nseg;
 
-	for (i = 0; i < nseg; i++) {
-		uint32_t flags;
+	flags = GNTMAP_host_map;
+	if (operation != READ)
+		flags |= GNTMAP_readonly;
 
+	for (i = 0; i < nseg; i++) {
 		seg[i].nsec = req->seg[i].last_sect -
			req->seg[i].first_sect + 1;
 
@@ -442,9 +444,6 @@ static void dispatch_rw_block_io(blkif_t
 			goto fail_response;
 		preq.nr_sects += seg[i].nsec;
 
-		flags = GNTMAP_host_map;
-		if (operation != READ)
-			flags |= GNTMAP_readonly;
 		gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
 				  req->seg[i].gref, blkif->domid);
 	}
--- head-2011-02-17.orig/drivers/xen/blktap/blktap.c	2011-02-17 10:19:26.000000000 +0100
+++ head-2011-02-17/drivers/xen/blktap/blktap.c	2011-02-28 14:15:27.000000000 +0100
@@ -134,20 +134,14 @@ module_param(debug_lvl, int, 0644);
 
 /*
  * Each outstanding request that we've passed to the lower device layers has a
- * 'pending_req' allocated to it. Each buffer_head that completes decrements
- * the pendcnt towards zero. When it hits zero, the specified domain has a
- * response queued for it, with the saved 'id' passed back.
+ * 'pending_req' allocated to it.
  */
 typedef struct {
 	blkif_t *blkif;
 	u64 id;
 	unsigned short mem_idx;
-	int nr_pages;
-	atomic_t pendcnt;
-	unsigned short operation;
-	int status;
+	unsigned short nr_pages;
 	struct list_head free_list;
-	int inuse;
 } pending_req_t;
 
 static pending_req_t *pending_reqs[MAX_PENDING_REQS];
@@ -994,10 +988,8 @@ static pending_req_t* alloc_req(void)
 		list_del(&req->free_list);
 	}
 
-	if (req) {
-		req->inuse = 1;
+	if (req)
 		alloc_pending_reqs++;
-	}
 	spin_unlock_irqrestore(&pending_free_lock, flags);
 
 	return req;
@@ -1011,7 +1003,6 @@ static void free_req(pending_req_t *req)
 	spin_lock_irqsave(&pending_free_lock, flags);
 
 	alloc_pending_reqs--;
-	req->inuse = 0;
 	if (mmap_lock && (req->mem_idx == mmap_alloc-1)) {
 		mmap_inuse--;
 		if (mmap_inuse == 0) mmap_req_del(mmap_alloc-1);
@@ -1413,16 +1404,15 @@ static void dispatch_rw_block_io(blkif_t
 				 blkif_request_t *req,
 				 pending_req_t *pending_req)
 {
-	extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
-	int op, operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
 	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
 	unsigned int nseg;
-	int ret, i, nr_sects = 0;
+	int ret, i, op, nr_sects = 0;
 	tap_blkif_t *info;
 	blkif_request_t *target;
 	unsigned int mmap_idx = pending_req->mem_idx;
 	unsigned int pending_idx = RTN_PEND_IDX(pending_req, mmap_idx);
 	unsigned int usr_idx;
+	uint32_t flags;
 	struct mm_struct *mm;
 	struct vm_area_struct *vma = NULL;
 
@@ -1465,9 +1455,11 @@ static void dispatch_rw_block_io(blkif_t
 
 	pending_req->blkif = blkif;
 	pending_req->id = req->id;
-	pending_req->operation = operation;
-	pending_req->status = BLKIF_RSP_OKAY;
 	pending_req->nr_pages = nseg;
+
+	flags = GNTMAP_host_map;
+	if (req->operation == BLKIF_OP_WRITE)
+		flags |= GNTMAP_readonly;
 	op = 0;
 	mm = info->mm;
 	if (!xen_feature(XENFEAT_auto_translated_physmap))
@@ -1476,14 +1468,10 @@ static void dispatch_rw_block_io(blkif_t
 		unsigned long uvaddr;
 		unsigned long kvaddr;
 		uint64_t ptep;
-		uint32_t flags;
 
 		uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i);
 		kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
 
-		flags = GNTMAP_host_map;
-		if (operation == WRITE)
-			flags |= GNTMAP_readonly;
 		gnttab_set_map_op(&map[op], kvaddr, flags,
 				  req->seg[i].gref, blkif->domid);
 		op++;
@@ -1497,11 +1485,9 @@ static void dispatch_rw_block_io(blkif_t
 			goto fail_flush;
 		}
 
-		flags = GNTMAP_host_map | GNTMAP_application_map
-			| GNTMAP_contains_pte;
-		if (operation == WRITE)
-			flags |= GNTMAP_readonly;
-		gnttab_set_map_op(&map[op], ptep, flags,
+		gnttab_set_map_op(&map[op], ptep,
+				  flags | GNTMAP_application_map
+				  | GNTMAP_contains_pte,
 				  req->seg[i].gref, blkif->domid);
 		op++;
 	}
@@ -1631,10 +1617,14 @@ static void dispatch_rw_block_io(blkif_t
 	wmb(); /* blktap_poll() reads req_prod_pvt asynchronously */
 	info->ufe_ring.req_prod_pvt++;
 
-	if (operation == READ)
+	switch (req->operation) {
+	case BLKIF_OP_READ:
 		blkif->st_rd_sect += nr_sects;
-	else if (operation == WRITE)
+		break;
+	case BLKIF_OP_WRITE:
 		blkif->st_wr_sect += nr_sects;
+		break;
+	}
 
 	return;
 
--- head-2011-02-17.orig/drivers/xen/scsiback/scsiback.c	2011-02-01 15:04:27.000000000 +0100
+++ head-2011-02-17/drivers/xen/scsiback/scsiback.c	2011-02-28 14:51:23.000000000 +0100
@@ -272,14 +272,14 @@ static int scsiback_gnttab_data_map(vscs
 
 		sg_init_table(pending_req->sgl, nr_segments);
 
-		for (i = 0; i < nr_segments; i++) {
-			flags = GNTMAP_host_map;
-			if (write)
-				flags |= GNTMAP_readonly;
+		flags = GNTMAP_host_map;
+		if (write)
+			flags |= GNTMAP_readonly;
+
+		for (i = 0; i < nr_segments; i++)
 			gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
					  ring_req->seg[i].gref,
					  info->domid);
-		}
 
 		err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nr_segments);
 		BUG_ON(err);