From: Oliver Xymoron <oxymoron@waste.org>

This patch adds locking for SMP. Apparently Willy never managed to
revive his laptop with his version, so I revived mine.

The batch pool is copied out as a single block to avoid long lock
hold times while its contents are mixed into the primary pool.
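
In outline, the consumer side does this (condensed from
batch_entropy_process() in the patch below; the primary/secondary
pool switch and reader wakeup are omitted):

	unsigned head, tail;

	spin_lock_irq(&batch_lock);
	memcpy(batch_entropy_copy, batch_entropy_pool,
	       batch_max*sizeof(struct sample));
	head = batch_head;
	tail = batch_tail;
	batch_tail = batch_head;	/* mark the whole batch consumed */
	spin_unlock_irq(&batch_lock);

	/* the expensive mixing runs on the copy, after batch_lock
	 * has been dropped */
	while (head != tail) {
		add_entropy_words(r, batch_entropy_copy[tail].data, 2);
		credit_entropy_store(r, batch_entropy_copy[tail].credit);
		tail = (tail+1) & (batch_max-1);
	}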

Two locks are added:

global batch_lock
   batch_entropy_store can be called from any context, and typically from
   interrupts -> spin_lock_irqsave

   batch_entropy_process is called via schedule_delayed_work and runs
   in process context -> spin_lock_irq

entropy_store.lock
   the mixing process is too expensive to run from interrupt context,
   and the basic worker function extract_entropy can sleep, so all of
   this can live under a normal spin_lock (see the sketch below)
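
For reference, the usual pairing of these primitives (standard
2.6-era kernel locking idioms, not code from this patch):

	static spinlock_t lock = SPIN_LOCK_UNLOCKED;
	unsigned long flags;

	/* may be entered from interrupt context:
	 * save and restore the caller's irq state */
	spin_lock_irqsave(&lock, flags);
	/* ... touch shared data ... */
	spin_unlock_irqrestore(&lock, flags);

	/* process context only, interrupts known to be enabled */
	spin_lock_irq(&lock);
	/* ... */
	spin_unlock_irq(&lock);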

 drivers/char/random.c |  123 ++++++++++++++++++++++++++++++++++++++------------
 1 files changed, 94 insertions(+), 29 deletions(-)

diff -puN drivers/char/random.c~random-locking-fixes drivers/char/random.c
--- 25/drivers/char/random.c~random-locking-fixes	2003-08-02 15:20:40.000000000 -0700
+++ 25-akpm/drivers/char/random.c	2003-08-02 15:20:40.000000000 -0700
@@ -486,6 +486,7 @@ struct entropy_store {
 	int		extract_count;
 	struct poolinfo poolinfo;
 	__u32		*pool;
+	spinlock_t lock;
 };
 
 /*
@@ -502,7 +503,7 @@ static int create_entropy_store(int size
 
 	poolwords = (size + 3) / 4; /* Convert bytes->words */
 	/* The pool size must be a multiple of 16 32-bit words */
-	poolwords = ((poolwords + 15) / 16) * 16; 
+	poolwords = ((poolwords + 15) / 16) * 16;
 
 	for (p = poolinfo_table; p->poolwords; p++) {
 		if (poolwords == p->poolwords)
@@ -524,6 +525,7 @@ static int create_entropy_store(int size
 		return -ENOMEM;
 	}
 	memset(r->pool, 0, POOLBYTES);
+	r->lock = SPIN_LOCK_UNLOCKED;
 	*ret_bucket = r;
 	return 0;
 }
@@ -565,6 +567,9 @@ static void add_entropy_words(struct ent
 	int new_rotate;
 	int wordmask = r->poolinfo.poolwords - 1;
 	__u32 w;
+	unsigned long flags;
+
+	spin_lock_irqsave(&r->lock, flags);
 
 	while (nwords--) {
 		w = rotate_left(r->input_rotate, *in++);
@@ -589,6 +594,8 @@ static void add_entropy_words(struct ent
 		w ^= r->pool[i];
 		r->pool[i] = (w >> 3) ^ twist_table[w & 7];
 	}
+
+	spin_unlock_irqrestore(&r->lock, flags);
 }
 
 /*
@@ -596,6 +603,10 @@ static void add_entropy_words(struct ent
  */
 static void credit_entropy_store(struct entropy_store *r, int nbits)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&r->lock, flags);
+
 	if (r->entropy_count + nbits < 0) {
 		DEBUG_ENT("negative entropy/overflow (%d+%d)\n",
 			  r->entropy_count, nbits);
@@ -610,6 +621,8 @@ static void credit_entropy_store(struct 
 				  r == random_state ? "primary" : "unknown",
 				  nbits, r->entropy_count);
 	}
+
+	spin_unlock_irqrestore(&r->lock, flags);
 }
 
 /**********************************************************************
@@ -620,27 +633,33 @@ static void credit_entropy_store(struct 
  *
  **********************************************************************/
 
-static __u32	*batch_entropy_pool;
-static int	*batch_entropy_credit;
-static int	batch_max;
+struct sample {
+	__u32 data[2];
+	int credit;
+};
+
+static struct sample *batch_entropy_pool, *batch_entropy_copy;
 static int	batch_head, batch_tail;
+static spinlock_t batch_lock = SPIN_LOCK_UNLOCKED;
+
+static int	batch_max;
 static void batch_entropy_process(void *private_);
 static DECLARE_WORK(batch_work, batch_entropy_process, NULL);
 
 /* note: the size must be a power of 2 */
 static int __init batch_entropy_init(int size, struct entropy_store *r)
 {
-	batch_entropy_pool = kmalloc(2*size*sizeof(__u32), GFP_KERNEL);
+	batch_entropy_pool = kmalloc(size*sizeof(struct sample), GFP_KERNEL);
 	if (!batch_entropy_pool)
 		return -1;
-	batch_entropy_credit =kmalloc(size*sizeof(int), GFP_KERNEL);
-	if (!batch_entropy_credit) {
+	batch_entropy_copy = kmalloc(size*sizeof(struct sample), GFP_KERNEL);
+	if (!batch_entropy_copy) {
 		kfree(batch_entropy_pool);
 		return -1;
 	}
 	batch_head = batch_tail = 0;
-	batch_max = size;
 	batch_work.data = r;
+	batch_max = size;
 	return 0;
 }
 
@@ -652,27 +671,33 @@ static int __init batch_entropy_init(int
  */
 void batch_entropy_store(u32 a, u32 b, int num)
 {
-	int	new;
+	int new;
+	unsigned long flags;
 
 	if (!batch_max)
 		return;
-	
-	batch_entropy_pool[2*batch_head] = a;
-	batch_entropy_pool[(2*batch_head) + 1] = b;
-	batch_entropy_credit[batch_head] = num;
 
-	new = (batch_head+1) & (batch_max-1);
-	if ((unsigned)(new - batch_tail) >= (unsigned)(batch_max / 2)) {
+	spin_lock_irqsave(&batch_lock, flags);
+
+	batch_entropy_pool[batch_head].data[0] = a;
+	batch_entropy_pool[batch_head].data[1] = b;
+	batch_entropy_pool[batch_head].credit = num;
+
+	if (((batch_head - batch_tail) & (batch_max-1)) >= (batch_max / 2)) {
 		/*
 		 * Schedule it for the next timer tick:
 		 */
 		schedule_delayed_work(&batch_work, 1);
-		batch_head = new;
-	} else if (new == batch_tail) {
+	}
+
+	new = (batch_head+1) & (batch_max-1);
+	if (new == batch_tail) {
 		DEBUG_ENT("batch entropy buffer full\n");
 	} else {
 		batch_head = new;
 	}
+
+	spin_unlock_irqrestore(&batch_lock, flags);
 }
 
 /*
@@ -684,20 +709,34 @@ static void batch_entropy_process(void *
 {
 	struct entropy_store *r	= (struct entropy_store *) private_, *p;
 	int max_entropy = r->poolinfo.POOLBITS;
+	unsigned head, tail;
 
-	if (!batch_max)
-		return;
+	/* Mixing into the pool is expensive, so copy over the batch
+	 * data and release the batch lock. The pool is at least half
+	 * full, so don't worry too much about copying only the used
+	 * part.
+	 */
+	spin_lock_irq(&batch_lock);
+
+	memcpy(batch_entropy_copy, batch_entropy_pool,
+	       batch_max*sizeof(struct sample));
+
+	head = batch_head;
+	tail = batch_tail;
+	batch_tail = batch_head;
+
+	spin_unlock_irq(&batch_lock);
 
 	p = r;
-	while (batch_head != batch_tail) {
+	while (head != tail) {
 		if (r->entropy_count >= max_entropy) {
 			r = (r == sec_random_state) ?	random_state :
 							sec_random_state;
 			max_entropy = r->poolinfo.POOLBITS;
 		}
-		add_entropy_words(r, batch_entropy_pool + 2*batch_tail, 2);
-		credit_entropy_store(r, batch_entropy_credit[batch_tail]);
-		batch_tail = (batch_tail+1) & (batch_max-1);
+		add_entropy_words(r, batch_entropy_copy[tail].data, 2);
+		credit_entropy_store(r, batch_entropy_copy[tail].credit);
+		tail = (tail+1) & (batch_max-1);
 	}
 	if (p->entropy_count >= random_read_wakeup_thresh)
 		wake_up_interruptible(&random_read_wait);
@@ -1276,6 +1315,7 @@ static ssize_t extract_entropy(struct en
 	ssize_t ret, i;
 	__u32 tmp[TMP_BUF_SIZE];
 	__u32 x;
+	unsigned long cpuflags;
 
 	add_timer_randomness(&extract_timer_state, nbytes);
 
@@ -1286,6 +1326,9 @@ static ssize_t extract_entropy(struct en
 	if (flags & EXTRACT_ENTROPY_SECONDARY)
 		xfer_secondary_pool(r, nbytes, tmp);
 
+	/* Hold lock while accounting */
+	spin_lock_irqsave(&r->lock, cpuflags);
+
 	DEBUG_ENT("%s has %d bits, want %d bits\n",
 		  r == sec_random_state ? "secondary" :
 		  r == random_state ? "primary" : "unknown",
@@ -1300,6 +1343,8 @@ static ssize_t extract_entropy(struct en
 		wake_up_interruptible(&random_write_wait);
 
 	r->extract_count += nbytes;
+
+	spin_unlock_irqrestore(&r->lock, cpuflags);
 	
 	ret = 0;
 	while (nbytes) {
@@ -1595,8 +1640,9 @@ static int
 random_ioctl(struct inode * inode, struct file * file,
 	     unsigned int cmd, unsigned long arg)
 {
-	int *p, size, ent_count;
+	int *p, *tmp, size, ent_count;
 	int retval;
+	unsigned long flags;
 	
 	switch (cmd) {
 	case RNDGETENTCNT:
@@ -1621,17 +1667,36 @@ random_ioctl(struct inode * inode, struc
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
 		p = (int *) arg;
-		ent_count = random_state->entropy_count;
-		if (put_user(ent_count, p++) ||
-		    get_user(size, p) ||
+		if (get_user(size, ++p) ||
 		    put_user(random_state->poolinfo.poolwords, p++))
 			return -EFAULT;
 		if (size < 0)
-			return -EINVAL;
+			return -EFAULT;
 		if (size > random_state->poolinfo.poolwords)
 			size = random_state->poolinfo.poolwords;
-		if (copy_to_user(p, random_state->pool, size * sizeof(__u32)))
+
+		/* prepare to atomically snapshot pool */
+
+		tmp = kmalloc(size * sizeof(__u32), GFP_KERNEL);
+
+		if (!tmp)
+			return -ENOMEM;
+
+		spin_lock_irqsave(&random_state->lock, flags);
+		ent_count = random_state->entropy_count;
+		memcpy(tmp, random_state->pool, size * sizeof(__u32));
+		spin_unlock_irqrestore(&random_state->lock, flags);
+
+		if (copy_to_user(p, tmp, size * sizeof(__u32))) {
+			kfree(tmp);
 			return -EFAULT;
+		}
+
+		kfree(tmp);
+
+		if (put_user(ent_count, (int *) arg))
+			return -EFAULT;
+
 		return 0;
 	case RNDADDENTROPY:
 		if (!capable(CAP_SYS_ADMIN))

_