[pve-devel] [PATCH kernel] cherry-pick fix for CVE-2016-4794
Fabian Grünbichler
f.gruenbichler@proxmox.com
Wed Jul 13 10:23:09 CEST 2016
---
...synchronization-between-chunk-map_extend_.patch | 162 +++++++++++++++++++++
...synchronization-between-synchronous-map-e.patch | 113 ++++++++++++++
Makefile | 2 +
3 files changed, 277 insertions(+)
create mode 100644 CVE-2016-4794-1-percpu-fix-synchronization-between-chunk-map_extend_.patch
create mode 100644 CVE-2016-4794-2-percpu-fix-synchronization-between-synchronous-map-e.patch
diff --git a/CVE-2016-4794-1-percpu-fix-synchronization-between-chunk-map_extend_.patch b/CVE-2016-4794-1-percpu-fix-synchronization-between-chunk-map_extend_.patch
new file mode 100644
index 0000000..e028219
--- /dev/null
+++ b/CVE-2016-4794-1-percpu-fix-synchronization-between-chunk-map_extend_.patch
@@ -0,0 +1,162 @@
+From 120f27d6c4ff44d31052fc74438efa64b361980a Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Tue, 12 Jul 2016 17:03:15 +0100
+Subject: [PATCH 1/2] percpu: fix synchronization between
+ chunk->map_extend_work and chunk destruction
+
+Atomic allocations can trigger async map extensions which is serviced
+by chunk->map_extend_work. pcpu_balance_work which is responsible for
+destroying idle chunks wasn't synchronizing properly against
+chunk->map_extend_work and may end up freeing the chunk while the work
+item is still in flight.
+
+This patch fixes the bug by rolling async map extension operations
+into pcpu_balance_work.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-and-tested-by: Alexei Starovoitov <alexei.starovoitov@gmail.com>
+Reported-by: Vlastimil Babka <vbabka@suse.cz>
+Reported-by: Sasha Levin <sasha.levin@oracle.com>
+Cc: stable@vger.kernel.org # v3.18+
+Fixes: 9c824b6a172c ("percpu: make sure chunk->map array has available space")
+(cherry picked from commit 4f996e234dad488e5d9ba0858bc1bae12eff82c3)
+CVE-2016-4794
+BugLink: https://bugs.launchpad.net/bugs/1581871
+Signed-off-by: Luis Henriques <luis.henriques@canonical.com>
+Acked-by: Christopher Arges <chris.j.arges@canonical.com>
+Signed-off-by: Kamal Mostafa <kamal@canonical.com>
+---
+ mm/percpu.c | 57 ++++++++++++++++++++++++++++++++++++---------------------
+ 1 file changed, 36 insertions(+), 21 deletions(-)
+
+diff --git a/mm/percpu.c b/mm/percpu.c
+index 8a943b9..58b0149 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -110,7 +110,7 @@ struct pcpu_chunk {
+ int map_used; /* # of map entries used before the sentry */
+ int map_alloc; /* # of map entries allocated */
+ int *map; /* allocation map */
+- struct work_struct map_extend_work;/* async ->map[] extension */
++ struct list_head map_extend_list;/* on pcpu_map_extend_chunks */
+
+ void *data; /* chunk data */
+ int first_free; /* no free below this */
+@@ -164,6 +164,9 @@ static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop */
+
+ static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
+
++/* chunks which need their map areas extended, protected by pcpu_lock */
++static LIST_HEAD(pcpu_map_extend_chunks);
++
+ /*
+ * The number of empty populated pages, protected by pcpu_lock. The
+ * reserved chunk doesn't contribute to the count.
+@@ -397,13 +400,19 @@ static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
+ {
+ int margin, new_alloc;
+
++ lockdep_assert_held(&pcpu_lock);
++
+ if (is_atomic) {
+ margin = 3;
+
+ if (chunk->map_alloc <
+- chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW &&
+- pcpu_async_enabled)
+- schedule_work(&chunk->map_extend_work);
++ chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
++ if (list_empty(&chunk->map_extend_list)) {
++ list_add_tail(&chunk->map_extend_list,
++ &pcpu_map_extend_chunks);
++ pcpu_schedule_balance_work();
++ }
++ }
+ } else {
+ margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
+ }
+@@ -469,20 +478,6 @@ out_unlock:
+ return 0;
+ }
+
+-static void pcpu_map_extend_workfn(struct work_struct *work)
+-{
+- struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk,
+- map_extend_work);
+- int new_alloc;
+-
+- spin_lock_irq(&pcpu_lock);
+- new_alloc = pcpu_need_to_extend(chunk, false);
+- spin_unlock_irq(&pcpu_lock);
+-
+- if (new_alloc)
+- pcpu_extend_area_map(chunk, new_alloc);
+-}
+-
+ /**
+ * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
+ * @chunk: chunk the candidate area belongs to
+@@ -742,7 +737,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
+ chunk->map_used = 1;
+
+ INIT_LIST_HEAD(&chunk->list);
+- INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn);
++ INIT_LIST_HEAD(&chunk->map_extend_list);
+ chunk->free_size = pcpu_unit_size;
+ chunk->contig_hint = pcpu_unit_size;
+
+@@ -1131,6 +1126,7 @@ static void pcpu_balance_workfn(struct work_struct *work)
+ if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
+ continue;
+
++ list_del_init(&chunk->map_extend_list);
+ list_move(&chunk->list, &to_free);
+ }
+
+@@ -1148,6 +1144,25 @@ static void pcpu_balance_workfn(struct work_struct *work)
+ pcpu_destroy_chunk(chunk);
+ }
+
++ /* service chunks which requested async area map extension */
++ do {
++ int new_alloc = 0;
++
++ spin_lock_irq(&pcpu_lock);
++
++ chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
++ struct pcpu_chunk, map_extend_list);
++ if (chunk) {
++ list_del_init(&chunk->map_extend_list);
++ new_alloc = pcpu_need_to_extend(chunk, false);
++ }
++
++ spin_unlock_irq(&pcpu_lock);
++
++ if (new_alloc)
++ pcpu_extend_area_map(chunk, new_alloc);
++ } while (chunk);
++
+ /*
+ * Ensure there are certain number of free populated pages for
+ * atomic allocs. Fill up from the most packed so that atomic
+@@ -1646,7 +1661,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
+ */
+ schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
+ INIT_LIST_HEAD(&schunk->list);
+- INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn);
++ INIT_LIST_HEAD(&schunk->map_extend_list);
+ schunk->base_addr = base_addr;
+ schunk->map = smap;
+ schunk->map_alloc = ARRAY_SIZE(smap);
+@@ -1675,7 +1690,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
+ if (dyn_size) {
+ dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
+ INIT_LIST_HEAD(&dchunk->list);
+- INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn);
++ INIT_LIST_HEAD(&dchunk->map_extend_list);
+ dchunk->base_addr = base_addr;
+ dchunk->map = dmap;
+ dchunk->map_alloc = ARRAY_SIZE(dmap);
+--
+2.1.4
+
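The race fixed by the first patch has a familiar shape: a deferred callback keeps a raw pointer to an object that another path may free first. As a rough userspace analogy (pthreads standing in for the workqueue; struct chunk, extend_worker and the timing below are invented for illustration, not kernel API), the following program exhibits the same use-after-free:

    #include <pthread.h>
    #include <stdlib.h>
    #include <unistd.h>

    struct chunk { int map_alloc; };

    /* Deferred "map extension": still holds a pointer to the chunk. */
    static void *extend_worker(void *arg)
    {
            struct chunk *c = arg;

            usleep(1000);           /* window during which the chunk can die */
            c->map_alloc += 16;     /* use-after-free if the chunk was freed */
            return NULL;
    }

    int main(void)
    {
            struct chunk *c = malloc(sizeof(*c));
            pthread_t t;

            c->map_alloc = 16;
            pthread_create(&t, NULL, extend_worker, c); /* async extension */
            free(c);                /* concurrent destruction: the race */
            pthread_join(t, NULL);
            return 0;
    }

The patch removes the standalone work item entirely and instead queues needy chunks on pcpu_map_extend_chunks, which pcpu_balance_workfn() drains, so extension and destruction are serviced by the same worker and can no longer overlap.
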
diff --git a/CVE-2016-4794-2-percpu-fix-synchronization-between-synchronous-map-e.patch b/CVE-2016-4794-2-percpu-fix-synchronization-between-synchronous-map-e.patch
new file mode 100644
index 0000000..0910d45
--- /dev/null
+++ b/CVE-2016-4794-2-percpu-fix-synchronization-between-synchronous-map-e.patch
@@ -0,0 +1,113 @@
+From b79d7d28463cd1988fa43b3a8bb5279471d837f7 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Tue, 12 Jul 2016 17:03:16 +0100
+Subject: [PATCH 2/2] percpu: fix synchronization between synchronous map
+ extension and chunk destruction
+
+For non-atomic allocations, pcpu_alloc() can try to extend the area
+map synchronously after dropping pcpu_lock; however, the extension
+wasn't synchronized against chunk destruction and the chunk might get
+freed while extension is in progress.
+
+This patch fixes the bug by putting most of non-atomic allocations
+under pcpu_alloc_mutex to synchronize against pcpu_balance_work which
+is responsible for async chunk management including destruction.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-and-tested-by: Alexei Starovoitov <alexei.starovoitov@gmail.com>
+Reported-by: Vlastimil Babka <vbabka@suse.cz>
+Reported-by: Sasha Levin <sasha.levin@oracle.com>
+Cc: stable@vger.kernel.org # v3.18+
+Fixes: 1a4d76076cda ("percpu: implement asynchronous chunk population")
+(cherry picked from commit 6710e594f71ccaad8101bc64321152af7cd9ea28)
+CVE-2016-4794
+BugLink: https://bugs.launchpad.net/bugs/1581871
+Signed-off-by: Luis Henriques <luis.henriques@canonical.com>
+Acked-by: Christopher Arges <chris.j.arges@canonical.com>
+Signed-off-by: Kamal Mostafa <kamal@canonical.com>
+---
+ mm/percpu.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/mm/percpu.c b/mm/percpu.c
+index 58b0149..1f376bc 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -160,7 +160,7 @@ static struct pcpu_chunk *pcpu_reserved_chunk;
+ static int pcpu_reserved_chunk_limit;
+
+ static DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */
+-static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop */
++static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */
+
+ static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
+
+@@ -446,6 +446,8 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
+ size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
+ unsigned long flags;
+
++ lockdep_assert_held(&pcpu_alloc_mutex);
++
+ new = pcpu_mem_zalloc(new_size);
+ if (!new)
+ return -ENOMEM;
+@@ -892,6 +894,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
+ return NULL;
+ }
+
++ if (!is_atomic)
++ mutex_lock(&pcpu_alloc_mutex);
++
+ spin_lock_irqsave(&pcpu_lock, flags);
+
+ /* serve reserved allocations from the reserved chunk if available */
+@@ -964,12 +969,9 @@ restart:
+ if (is_atomic)
+ goto fail;
+
+- mutex_lock(&pcpu_alloc_mutex);
+-
+ if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
+ chunk = pcpu_create_chunk();
+ if (!chunk) {
+- mutex_unlock(&pcpu_alloc_mutex);
+ err = "failed to allocate new chunk";
+ goto fail;
+ }
+@@ -980,7 +982,6 @@ restart:
+ spin_lock_irqsave(&pcpu_lock, flags);
+ }
+
+- mutex_unlock(&pcpu_alloc_mutex);
+ goto restart;
+
+ area_found:
+@@ -990,8 +991,6 @@ area_found:
+ if (!is_atomic) {
+ int page_start, page_end, rs, re;
+
+- mutex_lock(&pcpu_alloc_mutex);
+-
+ page_start = PFN_DOWN(off);
+ page_end = PFN_UP(off + size);
+
+@@ -1002,7 +1001,6 @@ area_found:
+
+ spin_lock_irqsave(&pcpu_lock, flags);
+ if (ret) {
+- mutex_unlock(&pcpu_alloc_mutex);
+ pcpu_free_area(chunk, off, &occ_pages);
+ err = "failed to populate";
+ goto fail_unlock;
+@@ -1042,6 +1040,8 @@ fail:
+ /* see the flag handling in pcpu_blance_workfn() */
+ pcpu_atomic_alloc_failed = true;
+ pcpu_schedule_balance_work();
++ } else {
++ mutex_unlock(&pcpu_alloc_mutex);
+ }
+ return NULL;
+ }
+--
+2.1.4
+
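The second patch closes the remaining window with plain mutual exclusion instead: pcpu_alloc() now takes pcpu_alloc_mutex up front for non-atomic allocations, so a synchronous pcpu_extend_area_map() can no longer overlap the chunk destruction performed by pcpu_balance_workfn(), which runs under the same mutex. A compact sketch of the pattern (again a userspace analogy with invented names, not the kernel implementation):

    #include <pthread.h>
    #include <stdlib.h>

    struct chunk { int *map; int map_alloc; };

    static pthread_mutex_t alloc_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Non-atomic slow path: may grow chunk->map, so the whole
     * extension happens with the mutex held. */
    static void slow_alloc_path(struct chunk *c)
    {
            pthread_mutex_lock(&alloc_mutex);
            c->map_alloc *= 2;
            c->map = realloc(c->map, c->map_alloc * sizeof(int));
            pthread_mutex_unlock(&alloc_mutex);
    }

    /* Teardown path: takes the same mutex, so it cannot free the
     * chunk while an extension is mid-flight. */
    static void destroy_chunk(struct chunk *c)
    {
            pthread_mutex_lock(&alloc_mutex);
            free(c->map);
            free(c);
            pthread_mutex_unlock(&alloc_mutex);
    }

    int main(void)
    {
            struct chunk *c = calloc(1, sizeof(*c));

            c->map_alloc = 16;
            c->map = calloc(c->map_alloc, sizeof(int));
            slow_alloc_path(c);
            destroy_chunk(c);
            return 0;
    }
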
diff --git a/Makefile b/Makefile
index 5c3df76..7936548 100644
--- a/Makefile
+++ b/Makefile
@@ -255,6 +255,8 @@ ${KERNEL_SRC}/README ${KERNEL_CFG_ORG}: ${KERNELSRCTAR}
cd ${KERNEL_SRC}; patch -p1 < ../981-1-PCI-Reverse-standard-ACS-vs-device-specific-ACS-enabling.patch
cd ${KERNEL_SRC}; patch -p1 < ../981-2-PCI-Quirk-PCH-root-port-ACS-for-Sunrise-Point.patch
cd ${KERNEL_SRC}; patch -p1 < ../kvm-dynamic-halt-polling-disable-default.patch
+ cd ${KERNEL_SRC}; patch -p1 < ../CVE-2016-4794-1-percpu-fix-synchronization-between-chunk-map_extend_.patch
+ cd ${KERNEL_SRC}; patch -p1 < ../CVE-2016-4794-2-percpu-fix-synchronization-between-synchronous-map-e.patch
sed -i ${KERNEL_SRC}/Makefile -e 's/^EXTRAVERSION.*$$/EXTRAVERSION=${EXTRAVERSION}/'
touch $@
--
2.1.4