

Go through all b_next_transaction instances and implement the locking rules.

Hit a lock-ranking snag in __journal_refile_buffer.  Fix it later.


 fs/jbd/commit.c      |    0 
 fs/jbd/transaction.c |    6 +++++-
 2 files changed, 5 insertions(+), 1 deletion(-)

diff -puN fs/jbd/commit.c~jbd-080-b_next_transaction-locking fs/jbd/commit.c
diff -puN fs/jbd/transaction.c~jbd-080-b_next_transaction-locking fs/jbd/transaction.c
--- 25/fs/jbd/transaction.c~jbd-080-b_next_transaction-locking	2003-05-24 22:02:28.000000000 -0700
+++ 25-akpm/fs/jbd/transaction.c	2003-05-24 22:02:28.000000000 -0700
@@ -1982,14 +1982,18 @@ void journal_file_buffer(struct journal_
  * dropping it from its current transaction entirely.  If the buffer has
  * already started to be used by a subsequent transaction, refile the
  * buffer on that transaction's metadata list.
+ *
+ * Called under journal_datalist_lock
+ *
+ * Called under jbd_lock_bh_state(jh2bh(jh))
  */
-
 void __journal_refile_buffer(struct journal_head *jh)
 {
 	int was_dirty;
 	struct buffer_head *bh = jh2bh(jh);
 
 	assert_spin_locked(&journal_datalist_lock);
+	WARN_ON(!buffer_state(bh));
 
 	/* If the buffer is now unused, just drop it. */
 	if (jh->b_next_transaction == NULL) {

_
