diff --git a/src/aio/aio.c b/src/aio/aio.c
index a1a3e791..a097de94 100644
--- a/src/aio/aio.c
+++ b/src/aio/aio.c
@@ -8,6 +8,7 @@
 #include <sys/auxv.h>
 #include "syscall.h"
 #include "atomic.h"
+#include "lock.h"
 #include "pthread_impl.h"
 #include "aio_impl.h"
 
@@ -71,6 +72,7 @@ struct aio_args {
 	sem_t sem;
 };
 
+static volatile int map_update_lock[1];
 static pthread_rwlock_t maplock = PTHREAD_RWLOCK_INITIALIZER;
 static struct aio_queue *****map;
 static volatile int aio_fd_cnt;
@@ -86,40 +88,62 @@ static struct aio_queue *__aio_get_queue(int fd, int need)
 		errno = EBADF;
 		return 0;
 	}
+	sigset_t allmask, origmask;
 	int a=fd>>24;
 	unsigned char b=fd>>16, c=fd>>8, d=fd;
-	struct aio_queue *q = 0;
+	struct aio_queue *****m = 0, ****ma = 0, ***mb = 0, **mc = 0, *q = 0;
+	int n = 0;
+
 	pthread_rwlock_rdlock(&maplock);
-	if ((!map || !map[a] || !map[a][b] || !map[a][b][c] || !(q=map[a][b][c][d])) && need) {
-		pthread_rwlock_unlock(&maplock);
-		if (fcntl(fd, F_GETFD) < 0) return 0;
-		pthread_rwlock_wrlock(&maplock);
+	if (map && map[a] && map[a][b] && map[a][b][c] && (q=map[a][b][c][d]))
+		pthread_mutex_lock(&q->lock);
+	pthread_rwlock_unlock(&maplock);
+
+	if (q || !need)
+		return q;
+	if (fcntl(fd, F_GETFD) < 0)
+		return 0;
+
+	LOCK(map_update_lock);
+	if (!map || !(n++, map[a]) || !(n++, map[a][b]) || !(n++, map[a][b][c]) || !(n++, q=map[a][b][c][d])) {
 		if (!io_thread_stack_size) {
 			unsigned long val = __getauxval(AT_MINSIGSTKSZ);
 			io_thread_stack_size = MAX(MINSIGSTKSZ+2048, val+512);
 		}
-		if (!map) map = calloc(sizeof *map, (-1U/2+1)>>24);
-		if (!map) goto out;
-		if (!map[a]) map[a] = calloc(sizeof **map, 256);
-		if (!map[a]) goto out;
-		if (!map[a][b]) map[a][b] = calloc(sizeof ***map, 256);
-		if (!map[a][b]) goto out;
-		if (!map[a][b][c]) map[a][b][c] = calloc(sizeof ****map, 256);
-		if (!map[a][b][c]) goto out;
-		if (!(q = map[a][b][c][d])) {
-			map[a][b][c][d] = q = calloc(sizeof *****map, 1);
-			if (q) {
-				q->fd = fd;
-				pthread_mutex_init(&q->lock, 0);
-				pthread_cond_init(&q->cond, 0);
-				a_inc(&aio_fd_cnt);
-			}
+		switch (n) {
+		case 0: if (!(m = calloc(sizeof *m, (-1U/2+1)>>24))) goto fail;
+		case 1: if (!(ma = calloc(sizeof *ma, 256))) goto fail;
+		case 2: if (!(mb = calloc(sizeof *mb, 256))) goto fail;
+		case 3: if (!(mc = calloc(sizeof *mc, 256))) goto fail;
+		case 4: if (!(q = calloc(sizeof *q, 1))) goto fail;
 		}
+		q->fd = fd;
+		pthread_mutex_init(&q->lock, 0);
+		pthread_cond_init(&q->cond, 0);
+		a_inc(&aio_fd_cnt);
 	}
-	if (q) pthread_mutex_lock(&q->lock);
-out:
+	sigfillset(&allmask);
+	pthread_sigmask(SIG_BLOCK, &allmask, &origmask);
+	pthread_rwlock_wrlock(&maplock);
+	switch (n) {
+	case 0: map = m;
+	case 1: map[a] = ma;
+	case 2: map[a][b] = mb;
+	case 3: map[a][b][c] = mc;
+	case 4: map[a][b][c][d] = q;
+	}
+	pthread_mutex_lock(&q->lock);
 	pthread_rwlock_unlock(&maplock);
+	pthread_sigmask(SIG_SETMASK, &origmask, 0);
+	UNLOCK(map_update_lock);
 	return q;
+fail:
+	UNLOCK(map_update_lock);
+	free(mc);
+	free(mb);
+	free(ma);
+	free(m);
+	return 0;
 }
 
 static void __aio_unref_queue(struct aio_queue *q)
@@ -134,6 +158,7 @@ static void __aio_unref_queue(struct aio_queue *q)
 	 * may arrive since we cannot free the queue object without first
 	 * taking the maplock, which requires releasing the queue lock. */
 	pthread_mutex_unlock(&q->lock);
+	LOCK(map_update_lock);
 	pthread_rwlock_wrlock(&maplock);
 	pthread_mutex_lock(&q->lock);
 	if (q->ref == 1) {
@@ -144,11 +169,13 @@ static void __aio_unref_queue(struct aio_queue *q)
 		a_dec(&aio_fd_cnt);
 		pthread_rwlock_unlock(&maplock);
 		pthread_mutex_unlock(&q->lock);
+		UNLOCK(map_update_lock);
 		free(q);
 	} else {
 		q->ref--;
 		pthread_rwlock_unlock(&maplock);
 		pthread_mutex_unlock(&q->lock);
+		UNLOCK(map_update_lock);
 	}
 }
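For reviewers, a minimal standalone sketch of the allocation pattern the rewritten __aio_get_queue relies on. This is not musl code: the two-level map, the names top/get_slot, and the 256-entry level size are invented for illustration, and the locking is omitted. The idea is to probe the radix map once, counting in n how many levels already exist, pre-allocate every missing level outside the critical section, and then splice the new levels in with a fall-through switch that performs no operation that can fail.

	#include <stdlib.h>

	/* Toy two-level radix map: top[a][b] -> entry. The patch uses five
	 * levels indexed by the bytes of the fd, but the shape is the same. */
	static void ***top;

	void *get_slot(unsigned char a, unsigned char b)
	{
		void ***t = 0, **mid = 0, *val = 0;
		int n = 0;

		/* Probe: n counts how many levels already exist. The comma
		 * operator bumps n only after the previous level was found,
		 * mirroring the patch's !(n++, map[a]) idiom. */
		if (!top || !(n++, top[a]) || !(n++, val = top[a][b])) {
			/* Pre-allocate all missing levels; the fall-through is
			 * deliberate, since every deeper level is missing too. */
			switch (n) {
			case 0: if (!(t = calloc(256, sizeof *t))) goto fail;
			case 1: if (!(mid = calloc(256, sizeof *mid))) goto fail;
			case 2: if (!(val = calloc(1, sizeof (int)))) goto fail;
			}
		}

		/* Splice: in the patch this runs under the write-locked maplock
		 * with all signals blocked; nothing here can fail, so the lock
		 * is held only across a few stores. */
		switch (n) {
		case 0: top = t;
		case 1: top[a] = mid;
		case 2: top[a][b] = val;
		}
		return val;

	fail:	/* Free whatever was precommitted; free(0) is a no-op. */
		free(mid);
		free(t);
		return 0;
	}

Compared with the old code, which called calloc while holding the write lock and needed the out: label to back out of partial failures, this keeps every fallible call outside the signal-blocked, write-locked window, with map_update_lock serializing concurrent updaters.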