[pve-devel] [PATCH kernel 2/2] likely fix #6746: cherry-pick fix for md issue during shutdown
Fiona Ebner <f.ebner at proxmox.com>
Wed Oct 22 16:57:14 CEST 2025

The same commit is already present in Ubuntu's 6.14 kernel as
c1cf81e4153b ("md: fix mddev uaf while iterating all_mddevs list") as
well as in upstream stable branches, e.g. in 6.6.x as d69a23d8e925
("md: fix mddev uaf while iterating all_mddevs list").
The commit was identified by Roland in a bugzilla comment.

Signed-off-by: Fiona Ebner <f.ebner at proxmox.com>
---
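Illustration (not part of the patch itself): the commit message below
traces a two-thread race on the all_mddevs list. A minimal user-space
model of the broken pattern (a pthread mutex standing in for
all_mddevs_lock, a plain int standing in for the atomic refcount; the
names only mirror the kernel's) would look roughly like this:

#include <pthread.h>
#include <stddef.h>

struct mddev {
	struct mddev *next;	/* stand-in for the all_mddevs linkage */
	int active;		/* stand-in for the atomic refcount */
};

static pthread_mutex_t all_mddevs_lock = PTHREAD_MUTEX_INITIALIZER;
static struct mddev *all_mddevs;

/* t1: a list_for_each_entry_safe()-style walk caching the next entry */
static void broken_walk(void)
{
	struct mddev *mddev, *n;

	pthread_mutex_lock(&all_mddevs_lock);
	for (mddev = all_mddevs, n = mddev ? mddev->next : NULL;
	     mddev;
	     mddev = n, n = n ? n->next : NULL) {
		mddev->active++;	/* mddev_get(mddev1) */
		pthread_mutex_unlock(&all_mddevs_lock);

		/* work on mddev with the lock dropped; here t2 can take
		 * the lock, unlink the cached next entry (mddev2) and
		 * kfree() it */

		mddev->active--;	/* mddev_put(mddev1), atomic in the kernel */
		pthread_mutex_lock(&all_mddevs_lock);
		/* UAF: the walk now advances through n->next, but t2
		 * may already have freed that memory */
	}
	pthread_mutex_unlock(&all_mddevs_lock);
}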
 ...-uaf-while-iterating-all_mddevs-list.patch | 136 ++++++++++++++++++
 1 file changed, 136 insertions(+)
 create mode 100644 patches/kernel/0016-md-fix-mddev-uaf-while-iterating-all_mddevs-list.patch
diff --git a/patches/kernel/0016-md-fix-mddev-uaf-while-iterating-all_mddevs-list.patch b/patches/kernel/0016-md-fix-mddev-uaf-while-iterating-all_mddevs-list.patch
new file mode 100644
index 0000000..9886cc1
--- /dev/null
+++ b/patches/kernel/0016-md-fix-mddev-uaf-while-iterating-all_mddevs-list.patch
@@ -0,0 +1,136 @@
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Yu Kuai <yukuai3 at huawei.com>
+Date: Thu, 20 Feb 2025 20:43:48 +0800
+Subject: [PATCH] md: fix mddev uaf while iterating all_mddevs list
+
+BugLink: https://bugs.launchpad.net/bugs/2107212
+
+[ Upstream commit 8542870237c3a48ff049b6c5df5f50c8728284fa ]
+
+While iterating the all_mddevs list from md_notify_reboot() and md_exit(),
+list_for_each_entry_safe is used, and this can race with deleting the
+next mddev, causing UAF:
+
+t1:
+spin_lock
+//list_for_each_entry_safe(mddev, n, ...)
+ mddev_get(mddev1)
+ // assume mddev2 is the next entry
+ spin_unlock
+            t2:
+            //remove mddev2
+            ...
+            mddev_free
+            spin_lock
+            list_del
+            spin_unlock
+            kfree(mddev2)
+ mddev_put(mddev1)
+ spin_lock
+ //continue dereference mddev2->all_mddevs
+
+The old helper for_each_mddev() actually grabs the reference of mddev2
+while holding the lock, to prevent it from being freed. This problem could
+be fixed the same way; however, the code would be complex.
+
+Hence switch to list_for_each_entry; in this case mddev_put() could free
+mddev1 itself, which is not safe either. Following md_seq_show(), factor
+out a helper mddev_put_locked() to fix this problem.
+
+Cc: Christoph Hellwig <hch at lst.de>
+Link: https://lore.kernel.org/linux-raid/20250220124348.845222-1-yukuai1@huaweicloud.com
+Fixes: f26514342255 ("md: stop using for_each_mddev in md_notify_reboot")
+Fixes: 16648bac862f ("md: stop using for_each_mddev in md_exit")
+Reported-and-tested-by: Guillaume Morin <guillaume at morinfr.org>
+Closes: https://lore.kernel.org/all/Z7Y0SURoA8xwg7vn@bender.morinfr.org/
+Signed-off-by: Yu Kuai <yukuai3 at huawei.com>
+Reviewed-by: Christoph Hellwig <hch at lst.de>
+Signed-off-by: Sasha Levin <sashal at kernel.org>
+Signed-off-by: Manuel Diewald <manuel.diewald at canonical.com>
+Signed-off-by: Timo Aaltonen <timo.aaltonen at canonical.com>
+(cherry picked from commit c1cf81e4153b46ab94188c72e615014e7f9ae547)
+Signed-off-by: Fiona Ebner <f.ebner at proxmox.com>
+---
+ drivers/md/md.c | 22 +++++++++++++---------
+ 1 file changed, 13 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 260abee6dbcc587873e0127b94f237429319ee47..3a5d8fe64999a254e4acb108ef26a3afc0a33988 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -689,6 +689,12 @@ static void __mddev_put(struct mddev *mddev)
+ 	queue_work(md_misc_wq, &mddev->del_work);
+ }
+ 
++static void mddev_put_locked(struct mddev *mddev)
++{
++	if (atomic_dec_and_test(&mddev->active))
++		__mddev_put(mddev);
++}
++
+ void mddev_put(struct mddev *mddev)
+ {
+ 	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
+@@ -8455,9 +8461,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
+ 	if (mddev == list_last_entry(&all_mddevs, struct mddev, all_mddevs))
+ 		status_unused(seq);
+ 
+-	if (atomic_dec_and_test(&mddev->active))
+-		__mddev_put(mddev);
+-
++	mddev_put_locked(mddev);
+ 	return 0;
+ }
+ 
+@@ -9862,11 +9866,11 @@ EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
+ static int md_notify_reboot(struct notifier_block *this,
+ 			    unsigned long code, void *x)
+ {
+-	struct mddev *mddev, *n;
++	struct mddev *mddev;
+ 	int need_delay = 0;
+ 
+ 	spin_lock(&all_mddevs_lock);
+-	list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
++	list_for_each_entry(mddev, &all_mddevs, all_mddevs) {
+ 		if (!mddev_get(mddev))
+ 			continue;
+ 		spin_unlock(&all_mddevs_lock);
+@@ -9878,8 +9882,8 @@ static int md_notify_reboot(struct notifier_block *this,
+ 			mddev_unlock(mddev);
+ 		}
+ 		need_delay = 1;
+-		mddev_put(mddev);
+ 		spin_lock(&all_mddevs_lock);
++		mddev_put_locked(mddev);
+ 	}
+ 	spin_unlock(&all_mddevs_lock);
+ 
+@@ -10202,7 +10206,7 @@ void md_autostart_arrays(int part)
+ 
+ static __exit void md_exit(void)
+ {
+-	struct mddev *mddev, *n;
++	struct mddev *mddev;
+ 	int delay = 1;
+ 
+ 	unregister_blkdev(MD_MAJOR,"md");
+@@ -10223,7 +10227,7 @@ static __exit void md_exit(void)
+ 	remove_proc_entry("mdstat", NULL);
+ 
+ 	spin_lock(&all_mddevs_lock);
+-	list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
++	list_for_each_entry(mddev, &all_mddevs, all_mddevs) {
+ 		if (!mddev_get(mddev))
+ 			continue;
+ 		spin_unlock(&all_mddevs_lock);
+@@ -10235,8 +10239,8 @@ static __exit void md_exit(void)
+ 		 * the mddev for destruction by a workqueue, and the
+ 		 * destroy_workqueue() below will wait for that to complete.
+ 		 */
+-		mddev_put(mddev);
+ 		spin_lock(&all_mddevs_lock);
++		mddev_put_locked(mddev);
+ 	}
+ 	spin_unlock(&all_mddevs_lock);
+ 
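For reference, the fixed walk under the same toy model (reusing the
struct and lock from the sketch near the top of this mail; again an
illustration, not the kernel code): drop the cached next pointer and
release the reference while still holding the lock, which is what the
patch's list_for_each_entry() plus mddev_put_locked() combination does.

static void fixed_walk(void)
{
	struct mddev *mddev;

	pthread_mutex_lock(&all_mddevs_lock);
	for (mddev = all_mddevs; mddev; mddev = mddev->next) {
		mddev->active++;	/* mddev_get() */
		pthread_mutex_unlock(&all_mddevs_lock);

		/* work on mddev with the lock dropped */

		pthread_mutex_lock(&all_mddevs_lock);
		mddev->active--;	/* mddev_put_locked() */
		/* mddev->next is read on the next iteration while the
		 * lock is still held, so a concurrent unlink-and-free,
		 * which must itself take all_mddevs_lock, cannot slip
		 * in between the put and the list advance */
	}
	pthread_mutex_unlock(&all_mddevs_lock);
}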
-- 
2.47.3