|
2 | 2 |
|
3 | 3 | #ifdef WITH_PYMALLOC |
4 | 4 |
|
5 | | -#ifdef HAVE_MALLOPT_MMAP_THRESHOLD |
6 | | - #include <malloc.h> |
| 5 | +#ifdef HAVE_MMAP |
| 6 | + #include <sys/mman.h> |
| 7 | + #ifdef MAP_ANONYMOUS |
| 8 | + #define ARENAS_USE_MMAP |
| 9 | + #endif |
7 | 10 | #endif |
8 | 11 |
|
9 | 12 | #ifdef WITH_VALGRIND |
@@ -183,15 +186,15 @@ static int running_on_valgrind = -1; |
183 | 186 | /* |
184 | 187 | * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned |
185 | 188 | * on a page boundary. This is a reserved virtual address space for the |
186 | | - * current process (obtained through a malloc call). In no way this means |
187 | | - * that the memory arenas will be used entirely. A malloc(<Big>) is usually |
188 | | - * an address range reservation for <Big> bytes, unless all pages within this |
189 | | - * space are referenced subsequently. So malloc'ing big blocks and not using |
190 | | - * them does not mean "wasting memory". It's an addressable range wastage... |
| 189 | + * current process (obtained through a malloc()/mmap() call). In no way does |
| 190 | + * this mean that the memory arenas will be used entirely. A malloc(<Big>) is |
| 191 | + * usually an address range reservation for <Big> bytes, unless all pages within |
| 192 | + * this space are referenced subsequently. So malloc'ing big blocks and not |
| 193 | + * using them does not mean "wasting memory". It's an addressable range |
| 194 | + * wastage... |
191 | 195 | * |
192 | | - * Therefore, allocating arenas with malloc is not optimal, because there is |
193 | | - * some address space wastage, but this is the most portable way to request |
194 | | - * memory from the system across various platforms. |
| 196 | + * Arenas are allocated with mmap() on systems supporting anonymous memory |
| 197 | + * mappings to reduce heap fragmentation. |
195 | 198 | */ |
196 | 199 | #define ARENA_SIZE (256 << 10) /* 256KB */ |
197 | 200 |
|
@@ -556,11 +559,6 @@ new_arena(void) |
556 | 559 | #if SIZEOF_SIZE_T <= SIZEOF_INT |
557 | 560 | if (numarenas > PY_SIZE_MAX / sizeof(*arenas)) |
558 | 561 | return NULL; /* overflow */ |
559 | | -#endif |
560 | | -#ifdef HAVE_MALLOPT_MMAP_THRESHOLD |
561 | | - /* Ensure arenas are allocated by mmap to avoid heap fragmentation. */ |
562 | | - if (numarenas == INITIAL_ARENA_OBJECTS) |
563 | | - mallopt(M_MMAP_THRESHOLD, ARENA_SIZE); |
564 | 562 | #endif |
565 | 563 | nbytes = numarenas * sizeof(*arenas); |
566 | 564 | arenaobj = (struct arena_object *)realloc(arenas, nbytes); |
@@ -594,7 +592,12 @@ new_arena(void) |
594 | 592 | arenaobj = unused_arena_objects; |
595 | 593 | unused_arena_objects = arenaobj->nextarena; |
596 | 594 | assert(arenaobj->address == 0); |
| 595 | +#ifdef ARENAS_USE_MMAP |
| 596 | + arenaobj->address = (uptr)mmap(NULL, ARENA_SIZE, PROT_READ|PROT_WRITE, |
| 597 | + MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); |
| 598 | +#else |
597 | 599 | arenaobj->address = (uptr)malloc(ARENA_SIZE); |
| 600 | +#endif |
598 | 601 | if (arenaobj->address == 0) { |
599 | 602 | /* The allocation failed: return NULL after putting the |
600 | 603 | * arenaobj back. |
@@ -1071,7 +1074,11 @@ PyObject_Free(void *p) |
1071 | 1074 | unused_arena_objects = ao; |
1072 | 1075 |
|
1073 | 1076 | /* Free the entire arena. */ |
| 1077 | +#ifdef ARENAS_USE_MMAP |
| 1078 | + munmap((void *)ao->address, ARENA_SIZE); |
| 1079 | +#else |
1074 | 1080 | free((void *)ao->address); |
| 1081 | +#endif |
1075 | 1082 | ao->address = 0; /* mark unassociated */ |
1076 | 1083 | --narenas_currently_allocated; |
1077 | 1084 |
|
|
0 commit comments