Openwall GNU/*/Linux - a small security-enhanced Linux distro for servers
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date: Wed, 3 Jan 2018 14:17:12 +0100
From: Jens Gustedt <Jens.Gustedt@...ia.fr>
To: musl@...ts.openwall.com
Subject: [PATCH 5/7] use the new lock algorithm for malloc

Malloc used a specialized lock implementation in many places. Now that we
have a generic lock that has the desired properties, we should just use
it, instead of the multitude of very similar lock mechanisms.
---
 src/malloc/malloc.c | 38 +++++++++++++-------------------------
 1 file changed, 13 insertions(+), 25 deletions(-)

diff --git a/src/malloc/malloc.c b/src/malloc/malloc.c
index 9e05e1d6..6c667a5a 100644
--- a/src/malloc/malloc.c
+++ b/src/malloc/malloc.c
@@ -13,6 +13,8 @@
 #define inline inline __attribute__((always_inline))
 #endif
 
+#include "__lock.h"
+
 void *__mmap(void *, size_t, int, int, int, off_t);
 int __munmap(void *, size_t);
 void *__mremap(void *, size_t, size_t, int, ...);
@@ -24,7 +26,7 @@ struct chunk {
 };
 
 struct bin {
-	volatile int lock[2];
+	volatile int lock[1];
 	struct chunk *head;
 	struct chunk *tail;
 };
@@ -32,7 +34,7 @@ struct bin {
 static struct {
 	volatile uint64_t binmap;
 	struct bin bins[64];
-	volatile int free_lock[2];
+	volatile int free_lock[1];
 } mal;
 
 
@@ -58,30 +60,16 @@ static struct {
 
 /* Synchronization tools */
 
-static inline void lock(volatile int *lk)
-{
-	if (libc.threads_minus_1)
-		while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
-}
-
-static inline void unlock(volatile int *lk)
-{
-	if (lk[0]) {
-		a_store(lk, 0);
-		if (lk[1]) __wake(lk, 1, 1);
-	}
-}
-
 static inline void lock_bin(int i)
 {
-	lock(mal.bins[i].lock);
+	__lock_fast(mal.bins[i].lock);
 	if (!mal.bins[i].head)
 		mal.bins[i].head = mal.bins[i].tail = BIN_TO_CHUNK(i);
 }
 
 static inline void unlock_bin(int i)
 {
-	unlock(mal.bins[i].lock);
+	__unlock_fast(mal.bins[i].lock);
 }
 
 static int first_set(uint64_t x)
@@ -161,7 +149,7 @@ void *__expand_heap(size_t *);
 
 static struct chunk *expand_heap(size_t n)
 {
-	static int heap_lock[2];
+	static volatile int heap_lock[1];
 	static void *end;
 	void *p;
 	struct chunk *w;
@@ -171,11 +159,11 @@ static struct chunk *expand_heap(size_t n)
 	 * we need room for an extra zero-sized sentinel chunk. */
 	n += SIZE_ALIGN;
 
-	lock(heap_lock);
+	__lock_fast(heap_lock);
 
 	p = __expand_heap(&n);
 	if (!p) {
-		unlock(heap_lock);
+		__unlock_fast(heap_lock);
 		return 0;
 	}
 
@@ -200,7 +188,7 @@ static struct chunk *expand_heap(size_t n)
 	w = MEM_TO_CHUNK(p);
 	w->csize = n | C_INUSE;
 
-	unlock(heap_lock);
+	__unlock_fast(heap_lock);
 
 	return w;
 }
@@ -481,10 +469,10 @@ void free(void *p)
 			next->psize = final_size | C_INUSE;
 			i = bin_index(final_size);
 			lock_bin(i);
-			lock(mal.free_lock);
+			__lock_fast(mal.free_lock);
 			if (self->psize & next->csize & C_INUSE)
 				break;
-			unlock(mal.free_lock);
+			__unlock_fast(mal.free_lock);
 			unlock_bin(i);
 		}
 
@@ -510,7 +498,7 @@ void free(void *p)
 
 	self->csize = final_size;
 	next->psize = final_size;
-	unlock(mal.free_lock);
+	__unlock_fast(mal.free_lock);
 
 	self->next = BIN_TO_CHUNK(i);
 	self->prev = mal.bins[i].tail;
-- 
2.15.1

Powered by blists - more mailing lists

Your e-mail address:

Powered by Openwall GNU/*/Linux - Powered by OpenVZ