|
|
Message-ID: <691dfbc3-c07a-4fbf-968d-60302e072967@foss.arm.com>
Date: Sun, 16 Nov 2025 04:58:31 -0600
From: Bill Roberts <bill.roberts@...s.arm.com>
To: musl@...ts.openwall.com
Subject: Re: [PATCH 1/3] dso/aarch64: add PROT_BTI support for mem maps
<snip>
>
> you can place the hook+macro in arch/aarch64/reloc.h
> then no new file is needed.
>
>> static void *map_library(int fd, struct dso *dso)
>> {
>> Ehdr buf[(896+sizeof(Ehdr))/sizeof(Ehdr)];
>> @@ -693,7 +708,7 @@ static void *map_library(int fd, struct dso *dso)
>> off_t off_start;
>> Ehdr *eh;
>> Phdr *ph, *ph0;
>> - unsigned prot;
>> + unsigned prot, arch_prot, arch_mask;
>> unsigned char *map=MAP_FAILED, *base;
>> size_t dyn=0;
>> size_t tls_image=0;
>> @@ -720,6 +735,16 @@ static void *map_library(int fd, struct dso *dso)
>> } else {
>> ph = ph0 = (void *)((char *)buf + eh->e_phoff);
>> }
>> +
>> + /*
>> + * some architectures may have additional protection flags embedded in the
>> + * ELF section somewhere. Start with those and OR in the extras. Do this
>> + * before we need to map any sections.
>> + */
>> + if(dl_add_protections(ph, eh, fd, &arch_prot, &arch_mask))
>> + goto error;
>> +
>> +
>> for (i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
>> if (ph->p_type == PT_DYNAMIC) {
>> dyn = ph->p_vaddr;
>> @@ -746,6 +771,7 @@ static void *map_library(int fd, struct dso *dso)
>> prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
>> ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
>> ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
>> + prot |= prot&arch_mask ? arch_prot : 0;
>> }
>> if (ph->p_vaddr+ph->p_memsz > addr_max) {
>> addr_max = ph->p_vaddr+ph->p_memsz;
>> @@ -762,6 +788,7 @@ static void *map_library(int fd, struct dso *dso)
>> prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
>> ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
>> ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
>> + prot |= prot&arch_mask ? arch_prot : 0;
>> map = mmap(0, ph->p_memsz + (ph->p_vaddr & PAGE_SIZE-1),
>> prot, MAP_PRIVATE,
>> fd, ph->p_offset & -PAGE_SIZE);
>> @@ -780,6 +807,7 @@ static void *map_library(int fd, struct dso *dso)
>> size_t pgbrk = brk + PAGE_SIZE-1 & -PAGE_SIZE;
>> size_t pgend = brk + ph->p_memsz - ph->p_filesz
>> + PAGE_SIZE-1 & -PAGE_SIZE;
>> + /* arch_prot has already been added */
>> if (pgend > pgbrk && mmap_fixed(map+pgbrk,
>> pgend-pgbrk, prot,
>> MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS,
>> @@ -801,11 +829,16 @@ static void *map_library(int fd, struct dso *dso)
>> * the length of the file. This is okay because we will not
>> * use the invalid part; we just need to reserve the right
>> * amount of virtual address space to map over later. */
>> - map = DL_NOMMU_SUPPORT
>> - ? mmap((void *)addr_min, map_len, PROT_READ|PROT_WRITE|PROT_EXEC,
>> - MAP_PRIVATE|MAP_ANONYMOUS, -1, 0)
>> - : mmap((void *)addr_min, map_len, prot,
>> - MAP_PRIVATE, fd, off_start);
>> + if (map == DL_NOMMU_SUPPORT) {
>> + prot = PROT_READ|PROT_WRITE|PROT_EXEC;
>> + prot |= prot&arch_mask ? arch_prot : 0;
>> + map = mmap((void *)addr_min, map_len, prot,
>> + MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
>> + } else {
>> + prot = prot & arch_mask ? prot | arch_prot : prot;
>> + map = mmap((void *)addr_min, map_len, prot,
>> + MAP_PRIVATE, fd, off_start);
>> + }
>
> i'd leave this code alone, prot should be
> set up already with arch_prot.
> the nommu case does not need bti support.
>
>> if (map==MAP_FAILED) goto error;
>> dso->map = map;
>> dso->map_len = map_len;
>> @@ -835,6 +868,7 @@ static void *map_library(int fd, struct dso *dso)
>> prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
>> ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
>> ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
>> + prot |= prot&arch_mask ? arch_prot : 0;
>> /* Reuse the existing mapping for the lowest-address LOAD */
>> if ((ph->p_vaddr & -PAGE_SIZE) != addr_min || DL_NOMMU_SUPPORT)
>> if (mmap_fixed(base+this_min, this_max-this_min, prot, MAP_PRIVATE|MAP_FIXED, fd, off_start) == MAP_FAILED)
>> @@ -849,7 +883,9 @@ static void *map_library(int fd, struct dso *dso)
>> }
>> for (i=0; ((size_t *)(base+dyn))[i]; i+=2)
>> if (((size_t *)(base+dyn))[i]==DT_TEXTREL) {
>> - if (mprotect(map, map_len, PROT_READ|PROT_WRITE|PROT_EXEC)
>> + prot = PROT_READ|PROT_WRITE|PROT_EXEC;
>> + prot |= prot&arch_mask ? arch_prot : 0;
>> + if (mprotect(map, map_len, prot)
>> && errno != ENOSYS)
>> goto error;
>
> i'd leave this alone too: DT_TEXTREL is a
> much bigger security hole than what bti
> is trying to cover here.
>
> i don't think PROT_BTI generalizes to other targets
Oh, I forgot to elaborate on this. While for aarch64 we are adding
PROT_BTI, other arches that wish to use the hook to propagate whatever
flags they need would potentially need this everywhere. With that said,
I'm amenable to limiting the patch to just the aarch64 use case; if
other arches need it, they can amend whatever spots they need.
Powered by blists - more mailing lists
Confused about mailing lists and their use? Read about mailing lists on Wikipedia and check out these guidelines on proper formatting of your messages.