Date: Mon,  1 Aug 2016 10:07:59 -0700
From: Thomas Garnier <thgarnie@...gle.com>
To: Thomas Gleixner <tglx@...utronix.de>,
	Ingo Molnar <mingo@...hat.com>,
	"H . Peter Anvin" <hpa@...or.com>,
	Kees Cook <keescook@...omium.org>,
	Thomas Garnier <thgarnie@...gle.com>,
	Yinghai Lu <yinghai@...nel.org>,
	"Rafael J . Wysocki" <rjw@...ysocki.net>,
	Pavel Machek <pavel@....cz>
Cc: x86@...nel.org,
	linux-kernel@...r.kernel.org,
	linux-pm@...r.kernel.org,
	kernel-hardening@...ts.openwall.com
Subject: [PATCH v1 1/2] x86/power/64: Support unaligned addresses for temporary mapping

Correctly set up the temporary mapping for hibernation. The previous
implementation assumed the address was aligned on the PGD level. With
KASLR memory randomization enabled, the address is randomized at the PUD
level. This change supports addresses unaligned up to the PMD level.

Signed-off-by: Thomas Garnier <thgarnie@...gle.com>
---
 arch/x86/mm/ident_map.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)
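
Note (not part of the patch itself): below is a minimal, standalone
sketch of the index arithmetic the patch relies on. When
info->kernel_mapping is set, pud_index(__PAGE_OFFSET) and
pmd_index(__PAGE_OFFSET) are added as offsets so the range is installed
at the direct-mapping position rather than the identity position. The
paging constants mirror x86_64 4-level paging, and the __PAGE_OFFSET
value stands in for a KASLR-randomized base; both are illustrative
assumptions, not values taken from the patch.

/*
 * Standalone userspace illustration of the index math in
 * ident_pud_init()/ident_pmd_init(); compile with any C compiler.
 */
#include <stdio.h>

#define PMD_SHIFT	21
#define PUD_SHIFT	30
#define PTRS_PER_PMD	512UL
#define PTRS_PER_PUD	512UL
#define __PAGE_OFFSET	0xffffa4c000000000UL	/* example randomized base */

static unsigned long pmd_index(unsigned long addr)
{
	return (addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

static unsigned long pud_index(unsigned long addr)
{
	return (addr >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

int main(void)
{
	unsigned long addr = 0x40000000UL;	/* physical address to map */
	/* Offsets applied only when building the kernel (direct) mapping. */
	unsigned long pud_off = pud_index(__PAGE_OFFSET);
	unsigned long pmd_off = pmd_index(__PAGE_OFFSET);

	printf("identity mapping: pud index %lu, pmd index %lu\n",
	       pud_index(addr), pmd_index(addr));
	printf("kernel mapping:   pud index %lu, pmd index %lu\n",
	       pud_index(addr) + pud_off, pmd_index(addr) + pmd_off);
	return 0;
}

With the example base above, pud_index(__PAGE_OFFSET) is non-zero while
pmd_index(__PAGE_OFFSET) is zero, matching randomization at the PUD level.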

diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
index ec21796..ea1ebf1 100644
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -3,15 +3,16 @@
  * included by both the compressed kernel and the regular kernel.
  */
 
-static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
+static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
 			   unsigned long addr, unsigned long end)
 {
-	addr &= PMD_MASK;
-	for (; addr < end; addr += PMD_SIZE) {
-		pmd_t *pmd = pmd_page + pmd_index(addr);
+	int off = info->kernel_mapping ? pmd_index(__PAGE_OFFSET) : 0;
+
+	for (addr &= PMD_MASK; addr < end; addr += PMD_SIZE) {
+		pmd_t *pmd = pmd_page + pmd_index(addr) + off;
 
 		if (!pmd_present(*pmd))
-			set_pmd(pmd, __pmd(addr | pmd_flag));
+			set_pmd(pmd, __pmd(addr | info->pmd_flag));
 	}
 }
 
@@ -19,9 +20,10 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 			  unsigned long addr, unsigned long end)
 {
 	unsigned long next;
+	int off = info->kernel_mapping ? pud_index(__PAGE_OFFSET) : 0;
 
 	for (; addr < end; addr = next) {
-		pud_t *pud = pud_page + pud_index(addr);
+		pud_t *pud = pud_page + pud_index(addr) + off;
 		pmd_t *pmd;
 
 		next = (addr & PUD_MASK) + PUD_SIZE;
@@ -30,13 +32,13 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 
 		if (pud_present(*pud)) {
 			pmd = pmd_offset(pud, 0);
-			ident_pmd_init(info->pmd_flag, pmd, addr, next);
+			ident_pmd_init(info, pmd, addr, next);
 			continue;
 		}
 		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
 		if (!pmd)
 			return -ENOMEM;
-		ident_pmd_init(info->pmd_flag, pmd, addr, next);
+		ident_pmd_init(info, pmd, addr, next);
 		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
 	}
 
-- 
2.8.0.rc3.226.g39d4020
