From: Hugh Dickins <hugh@veritas.com>

Why is mapping->truncate_count atomic?  It's incremented inside i_mmap_lock
(and i_sem), and the reads don't need it to be atomic.
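
For context, the write side (unmap_mapping_range, as it stands after this
patch) reduces to the sketch below (simplified, not the exact kernel code).
Every increment happens with i_mmap_lock held, so a plain unsigned int can
never lose an update and atomic_inc buys nothing:

	spin_lock(&mapping->i_mmap_lock);
	mapping->truncate_count++;	/* serialized by i_mmap_lock (and i_sem) */
	/* ... unmap the affected range from every vma ... */
	spin_unlock(&mapping->i_mmap_lock);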

And why smp_rmb() before the call to ->nopage?  The compiler cannot reorder the
initial assignment of sequence after the call to ->nopage, and no CPU (yet!)
can read from the future, which is all that matters there.
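
For reference, the read side in do_no_page after this patch has the shape
sketched below (simplified, not the full function).  The snapshot of sequence
precedes the indirect ->nopage call in program order, and the later recheck
under page_table_lock is what catches a racing unmap_mapping_range, so the
barrier was never buying anything:

	sequence = mapping->truncate_count;	/* snapshot before ->nopage */
retry:
	new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
	/* ... error handling and COW setup elided ... */
	spin_lock(&mm->page_table_lock);
	if (mapping && unlikely(sequence != mapping->truncate_count)) {
		/* truncate ran meanwhile: drop the page and retry */
		sequence = mapping->truncate_count;
		spin_unlock(&mm->page_table_lock);
		page_cache_release(new_page);
		goto retry;
	}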

And delete the totally bogus reset of truncate_count from blkmtd's add_device.
truncate_count is all about detecting i_size changes: i_size does not change
there; and if it did, the count should be incremented, not reset.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 25-akpm/drivers/mtd/devices/blkmtd.c |    1 -
 25-akpm/fs/inode.c                   |    1 -
 25-akpm/include/linux/fs.h           |    2 +-
 25-akpm/mm/memory.c                  |   12 +++++-------
 4 files changed, 6 insertions(+), 10 deletions(-)

diff -puN drivers/mtd/devices/blkmtd.c~vmtrunc-truncate_count-not-atomic drivers/mtd/devices/blkmtd.c
--- 25/drivers/mtd/devices/blkmtd.c~vmtrunc-truncate_count-not-atomic	2004-10-21 14:54:35.449534344 -0700
+++ 25-akpm/drivers/mtd/devices/blkmtd.c	2004-10-21 14:54:35.457533128 -0700
@@ -661,7 +661,6 @@ static struct blkmtd_dev *add_device(cha
 
 	memset(dev, 0, sizeof(struct blkmtd_dev));
 	dev->blkdev = bdev;
-	atomic_set(&(dev->blkdev->bd_inode->i_mapping->truncate_count), 0);
 	if(!readonly) {
 		init_MUTEX(&dev->wrbuf_mutex);
 	}
diff -puN fs/inode.c~vmtrunc-truncate_count-not-atomic fs/inode.c
--- 25/fs/inode.c~vmtrunc-truncate_count-not-atomic	2004-10-21 14:54:35.450534192 -0700
+++ 25-akpm/fs/inode.c	2004-10-21 14:54:35.458532976 -0700
@@ -198,7 +198,6 @@ void inode_init_once(struct inode *inode
 	INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
 	rwlock_init(&inode->i_data.tree_lock);
 	spin_lock_init(&inode->i_data.i_mmap_lock);
-	atomic_set(&inode->i_data.truncate_count, 0);
 	INIT_LIST_HEAD(&inode->i_data.private_list);
 	spin_lock_init(&inode->i_data.private_lock);
 	INIT_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
diff -puN include/linux/fs.h~vmtrunc-truncate_count-not-atomic include/linux/fs.h
--- 25/include/linux/fs.h~vmtrunc-truncate_count-not-atomic	2004-10-21 14:54:35.452533888 -0700
+++ 25-akpm/include/linux/fs.h	2004-10-21 14:54:35.460532672 -0700
@@ -339,7 +339,7 @@ struct address_space {
 	struct prio_tree_root	i_mmap;		/* tree of private and shared mappings */
 	struct list_head	i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
 	spinlock_t		i_mmap_lock;	/* protect tree, count, list */
-	atomic_t		truncate_count;	/* Cover race condition with truncate */
+	unsigned int		truncate_count;	/* Cover race condition with truncate */
 	unsigned long		nrpages;	/* number of total pages */
 	pgoff_t			writeback_index;/* writeback starts here */
 	struct address_space_operations *a_ops;	/* methods */
diff -puN mm/memory.c~vmtrunc-truncate_count-not-atomic mm/memory.c
--- 25/mm/memory.c~vmtrunc-truncate_count-not-atomic	2004-10-21 14:54:35.454533584 -0700
+++ 25-akpm/mm/memory.c	2004-10-21 14:54:35.461532520 -0700
@@ -1217,7 +1217,7 @@ void unmap_mapping_range(struct address_
 
 	spin_lock(&mapping->i_mmap_lock);
 	/* Protect against page fault */
-	atomic_inc(&mapping->truncate_count);
+	mapping->truncate_count++;
 
 	if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
 		unmap_mapping_range_list(&mapping->i_mmap, &details);
@@ -1515,7 +1515,7 @@ do_no_page(struct mm_struct *mm, struct 
 	struct page * new_page;
 	struct address_space *mapping = NULL;
 	pte_t entry;
-	int sequence = 0;
+	unsigned int sequence = 0;
 	int ret = VM_FAULT_MINOR;
 	int anon = 0;
 
@@ -1527,9 +1527,8 @@ do_no_page(struct mm_struct *mm, struct 
 
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
-		sequence = atomic_read(&mapping->truncate_count);
+		sequence = mapping->truncate_count;
 	}
-	smp_rmb();  /* Prevent CPU from reordering lock-free ->nopage() */
 retry:
 	cond_resched();
 	new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
@@ -1563,9 +1562,8 @@ retry:
 	 * invalidated this page.  If unmap_mapping_range got called,
 	 * retry getting the page.
 	 */
-	if (mapping &&
-	      (unlikely(sequence != atomic_read(&mapping->truncate_count)))) {
-		sequence = atomic_read(&mapping->truncate_count);
+	if (mapping && unlikely(sequence != mapping->truncate_count)) {
+		sequence = mapping->truncate_count;
 		spin_unlock(&mm->page_table_lock);
 		page_cache_release(new_page);
 		goto retry;
_