From: Nick Piggin <piggin@cyberone.com.au>

Large TCQ depths can potentially keep many or even all requests tied up,
which can cause merging and batching to stop working completely. The
patch simply ensures nr_requests is >= 2 * the TCQ depth.
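
For illustration, here is a minimal userspace sketch of the invariant
(the helper name adjust_nr_requests is made up for this example; it just
mirrors the adjustment done in init_tag_map() below, and the default of
128 requests is assumed):

	#include <stdio.h>

	/*
	 * Illustrative only: enforce nr_requests >= 2 * TCQ depth so that
	 * tagged commands can never pin every request in the queue and
	 * starve merging/batching.
	 */
	static unsigned long adjust_nr_requests(unsigned long nr_requests, int depth)
	{
		if (depth > nr_requests / 2)
			nr_requests = depth * 2;
		return nr_requests;
	}

	int main(void)
	{
		/* e.g. a default of 128 requests with a TCQ depth of 128 */
		printf("%lu\n", adjust_nr_requests(128, 128));	/* prints 256 */
		return 0;
	}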



 25-akpm/drivers/block/ll_rw_blk.c |    8 ++++----
 1 files changed, 4 insertions(+), 4 deletions(-)

diff -puN drivers/block/ll_rw_blk.c~scale-nr_requests drivers/block/ll_rw_blk.c
--- 25/drivers/block/ll_rw_blk.c~scale-nr_requests	Mon Nov  3 11:47:28 2003
+++ 25-akpm/drivers/block/ll_rw_blk.c	Mon Nov  3 11:47:28 2003
@@ -517,10 +517,10 @@ init_tag_map(request_queue_t *q, struct 
 {
 	int bits, i;
 
-	if (depth > q->nr_requests * 2) {
-		depth = q->nr_requests * 2;
-		printk(KERN_ERR "%s: adjusted depth to %d\n",
-				__FUNCTION__, depth);
+	if (depth > q->nr_requests / 2) {
+		q->nr_requests = depth * 2;
+		printk(KERN_INFO "%s: large TCQ depth: adjusted nr_requests "
+				 "to %lu\n", __FUNCTION__, q->nr_requests);
 	}
 
 	tags->tag_index = kmalloc(depth * sizeof(struct request *), GFP_ATOMIC);

_