Date: Fri, 15 Jul 2016 14:44:25 -0700
From: Kees Cook <keescook@...omium.org>
To: linux-kernel@...r.kernel.org
Cc: Kees Cook <keescook@...omium.org>,
	Balbir Singh <bsingharora@...il.com>,
	Daniel Micay <danielmicay@...il.com>,
	Josh Poimboeuf <jpoimboe@...hat.com>,
	Rik van Riel <riel@...hat.com>,
	Casey Schaufler <casey@...aufler-ca.com>,
	PaX Team <pageexec@...email.hu>,
	Brad Spengler <spender@...ecurity.net>,
	Russell King <linux@...linux.org.uk>,
	Catalin Marinas <catalin.marinas@....com>,
	Will Deacon <will.deacon@....com>,
	Ard Biesheuvel <ard.biesheuvel@...aro.org>,
	Benjamin Herrenschmidt <benh@...nel.crashing.org>,
	Michael Ellerman <mpe@...erman.id.au>,
	Tony Luck <tony.luck@...el.com>,
	Fenghua Yu <fenghua.yu@...el.com>,
	"David S. Miller" <davem@...emloft.net>,
	x86@...nel.org,
	Christoph Lameter <cl@...ux.com>,
	Pekka Enberg <penberg@...nel.org>,
	David Rientjes <rientjes@...gle.com>,
	Joonsoo Kim <iamjoonsoo.kim@....com>,
	Andrew Morton <akpm@...ux-foundation.org>,
	Andy Lutomirski <luto@...nel.org>,
	Borislav Petkov <bp@...e.de>,
	Mathias Krause <minipli@...glemail.com>,
	Jan Kara <jack@...e.cz>,
	Vitaly Wool <vitalywool@...il.com>,
	Andrea Arcangeli <aarcange@...hat.com>,
	Dmitry Vyukov <dvyukov@...gle.com>,
	Laura Abbott <labbott@...oraproject.org>,
	linux-arm-kernel@...ts.infradead.org,
	linux-ia64@...r.kernel.org,
	linuxppc-dev@...ts.ozlabs.org,
	sparclinux@...r.kernel.org,
	linux-arch@...r.kernel.org,
	linux-mm@...ck.org,
	kernel-hardening@...ts.openwall.com
Subject: [PATCH v3 11/11] mm: SLUB hardened usercopy support

Under CONFIG_HARDENED_USERCOPY, this adds object size checking to the
SLUB allocator to catch any copies that may span objects. It also
includes a redzone handling fix discovered by Michael Ellerman.

Based on code from PaX and grsecurity.
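
For illustration (a hypothetical driver bug, not code from this patch):
with the arch hooks added earlier in this series, copy_to_user() and
copy_from_user() call check_object_size(), which reaches the
__check_heap_object() hook below whenever the kernel pointer points
into slab memory. A copy like the following, where "len" comes from
userspace unchecked, is then detected and reported instead of silently
leaking the heap objects adjacent to "buf":

	/* Hypothetical bug: "len" is attacker-controlled. */
	char *buf = kmalloc(64, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* With len > 64, the hardened check rejects this copy. */
	if (copy_to_user(ubuf, buf, len))
		return -EFAULT;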

Signed-off-by: Kees Cook <keescook@...omium.org>
Tested-by: Michael Ellerman <mpe@...erman.id.au>
---
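Note for reviewers (below the fold, so not part of the commit): a
worked example of the offset math, using hypothetical cache geometry.
Suppose a 64-byte kmalloc cache with SLAB_RED_ZONE enabled has
s->red_left_pad == 16 and s->size == 96. For a pointer 116 bytes into
the slab page, offset = 116 % 96 = 20; that is not inside the left
redzone (20 >= 16), and subtracting red_left_pad leaves offset 4 within
the object proper. A copy of up to object_size - 4 = 60 bytes passes,
while a longer copy, or any pointer whose offset lands below 16,
returns s->name and triggers the usercopy report.
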
 init/Kconfig |  1 +
 mm/slub.c    | 36 ++++++++++++++++++++++++++++++++++++
 2 files changed, 37 insertions(+)

diff --git a/init/Kconfig b/init/Kconfig
index 798c2020ee7c..1c4711819dfd 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1765,6 +1765,7 @@ config SLAB
 
 config SLUB
 	bool "SLUB (Unqueued Allocator)"
+	select HAVE_HARDENED_USERCOPY_ALLOCATOR
 	help
 	   SLUB is a slab allocator that minimizes cache line usage
 	   instead of managing queues of cached objects (SLAB approach).
diff --git a/mm/slub.c b/mm/slub.c
index 825ff4505336..7dee3d9a5843 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3614,6 +3614,42 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
 
+#ifdef CONFIG_HARDENED_USERCOPY
+/*
+ * Rejects copies that span outside a single object or hit a redzone.
+ *
+ * Returns NULL if the check passes; otherwise returns the name of the
+ * cache to indicate an error.
+ */
+const char *__check_heap_object(const void *ptr, unsigned long n,
+				struct page *page)
+{
+	struct kmem_cache *s;
+	unsigned long offset;
+	size_t object_size;
+
+	/* Find object and usable object size. */
+	s = page->slab_cache;
+	object_size = slab_ksize(s);
+
+	/* Find offset within object. */
+	offset = (ptr - page_address(page)) % s->size;
+
+	/* Adjust for redzone and reject if within the redzone. */
+	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
+		if (offset < s->red_left_pad)
+			return s->name;
+		offset -= s->red_left_pad;
+	}
+
+	/* Allow address range falling entirely within object size. */
+	if (offset <= object_size && n <= object_size - offset)
+		return NULL;
+
+	return s->name;
+}
+#endif /* CONFIG_HARDENED_USERCOPY */
+
 static size_t __ksize(const void *object)
 {
 	struct page *page;
-- 
2.7.4
