Thanks for visiting codestin.com
Credit goes to doxygen.postgresql.org

PostgreSQL Source Code git master
memutils_internal.h File Reference
#include "utils/memutils.h"
Include dependency graph for memutils_internal.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Macros

#define PallocAlignedExtraBytes(alignto)    ((alignto) + (sizeof(MemoryChunk) - MAXIMUM_ALIGNOF))
 
#define MEMORY_CONTEXT_METHODID_BITS   4
 
#define MEMORY_CONTEXT_METHODID_MASK    ((((uint64) 1) << MEMORY_CONTEXT_METHODID_BITS) - 1)
 

Typedefs

typedef enum MemoryContextMethodID MemoryContextMethodID
 

Enumerations

enum  MemoryContextMethodID {
  MCTX_0_RESERVED_UNUSEDMEM_ID , MCTX_1_RESERVED_GLIBC_ID , MCTX_2_RESERVED_GLIBC_ID , MCTX_ASET_ID ,
  MCTX_GENERATION_ID , MCTX_SLAB_ID , MCTX_ALIGNED_REDIRECT_ID , MCTX_BUMP_ID ,
  MCTX_8_UNUSED_ID , MCTX_9_UNUSED_ID , MCTX_10_UNUSED_ID , MCTX_11_UNUSED_ID ,
  MCTX_12_UNUSED_ID , MCTX_13_UNUSED_ID , MCTX_14_UNUSED_ID , MCTX_15_RESERVED_WIPEDMEM_ID
}
 

Functions

void * AllocSetAlloc (MemoryContext context, Size size, int flags)
 
void AllocSetFree (void *pointer)
 
void * AllocSetRealloc (void *pointer, Size size, int flags)
 
void AllocSetReset (MemoryContext context)
 
void AllocSetDelete (MemoryContext context)
 
MemoryContext AllocSetGetChunkContext (void *pointer)
 
Size AllocSetGetChunkSpace (void *pointer)
 
bool AllocSetIsEmpty (MemoryContext context)
 
void AllocSetStats (MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
 
void * GenerationAlloc (MemoryContext context, Size size, int flags)
 
void GenerationFree (void *pointer)
 
void * GenerationRealloc (void *pointer, Size size, int flags)
 
void GenerationReset (MemoryContext context)
 
void GenerationDelete (MemoryContext context)
 
MemoryContext GenerationGetChunkContext (void *pointer)
 
Size GenerationGetChunkSpace (void *pointer)
 
bool GenerationIsEmpty (MemoryContext context)
 
void GenerationStats (MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
 
void * SlabAlloc (MemoryContext context, Size size, int flags)
 
void SlabFree (void *pointer)
 
void * SlabRealloc (void *pointer, Size size, int flags)
 
void SlabReset (MemoryContext context)
 
void SlabDelete (MemoryContext context)
 
MemoryContext SlabGetChunkContext (void *pointer)
 
Size SlabGetChunkSpace (void *pointer)
 
bool SlabIsEmpty (MemoryContext context)
 
void SlabStats (MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
 
void AlignedAllocFree (void *pointer)
 
void * AlignedAllocRealloc (void *pointer, Size size, int flags)
 
MemoryContext AlignedAllocGetChunkContext (void *pointer)
 
Size AlignedAllocGetChunkSpace (void *pointer)
 
void * BumpAlloc (MemoryContext context, Size size, int flags)
 
void BumpFree (void *pointer)
 
void * BumpRealloc (void *pointer, Size size, int flags)
 
void BumpReset (MemoryContext context)
 
void BumpDelete (MemoryContext context)
 
MemoryContext BumpGetChunkContext (void *pointer)
 
Size BumpGetChunkSpace (void *pointer)
 
bool BumpIsEmpty (MemoryContext context)
 
void BumpStats (MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
 
void MemoryContextCreate (MemoryContext node, NodeTag tag, MemoryContextMethodID method_id, MemoryContext parent, const char *name)
 
void * MemoryContextAllocationFailure (MemoryContext context, Size size, int flags)
 
pg_noreturn void MemoryContextSizeFailure (MemoryContext context, Size size, int flags)
 
static void MemoryContextCheckSize (MemoryContext context, Size size, int flags)
 

Macro Definition Documentation

◆ MEMORY_CONTEXT_METHODID_BITS

#define MEMORY_CONTEXT_METHODID_BITS   4

Definition at line 145 of file memutils_internal.h.

◆ MEMORY_CONTEXT_METHODID_MASK

#define MEMORY_CONTEXT_METHODID_MASK    ((((uint64) 1) << MEMORY_CONTEXT_METHODID_BITS) - 1)

Definition at line 146 of file memutils_internal.h.

◆ PallocAlignedExtraBytes

#define PallocAlignedExtraBytes (   alignto)     ((alignto) + (sizeof(MemoryChunk) - MAXIMUM_ALIGNOF))

Definition at line 104 of file memutils_internal.h.

Typedef Documentation

◆ MemoryContextMethodID

Enumeration Type Documentation

◆ MemoryContextMethodID

Enumerator
MCTX_0_RESERVED_UNUSEDMEM_ID 
MCTX_1_RESERVED_GLIBC_ID 
MCTX_2_RESERVED_GLIBC_ID 
MCTX_ASET_ID 
MCTX_GENERATION_ID 
MCTX_SLAB_ID 
MCTX_ALIGNED_REDIRECT_ID 
MCTX_BUMP_ID 
MCTX_8_UNUSED_ID 
MCTX_9_UNUSED_ID 
MCTX_10_UNUSED_ID 
MCTX_11_UNUSED_ID 
MCTX_12_UNUSED_ID 
MCTX_13_UNUSED_ID 
MCTX_14_UNUSED_ID 
MCTX_15_RESERVED_WIPEDMEM_ID 

Definition at line 121 of file memutils_internal.h.

122{
123 MCTX_0_RESERVED_UNUSEDMEM_ID, /* 0000 occurs in never-used memory */
124 MCTX_1_RESERVED_GLIBC_ID, /* glibc malloc'd chunks usually match 0001 */
125 MCTX_2_RESERVED_GLIBC_ID, /* glibc malloc'd chunks > 128kB match 0010 */
138 MCTX_15_RESERVED_WIPEDMEM_ID /* 1111 occurs in wipe_mem'd memory */
MemoryContextMethodID
@ MCTX_15_RESERVED_WIPEDMEM_ID
@ MCTX_GENERATION_ID
@ MCTX_14_UNUSED_ID
@ MCTX_12_UNUSED_ID
@ MCTX_10_UNUSED_ID
@ MCTX_BUMP_ID
@ MCTX_11_UNUSED_ID
@ MCTX_8_UNUSED_ID
@ MCTX_1_RESERVED_GLIBC_ID
@ MCTX_SLAB_ID
@ MCTX_9_UNUSED_ID
@ MCTX_0_RESERVED_UNUSEDMEM_ID
@ MCTX_ASET_ID
@ MCTX_2_RESERVED_GLIBC_ID
@ MCTX_ALIGNED_REDIRECT_ID
@ MCTX_13_UNUSED_ID

Function Documentation

◆ AlignedAllocFree()

void AlignedAllocFree ( void *  pointer)

Definition at line 29 of file alignedalloc.c.

30{
31 MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
32 void *unaligned;
33
35
37
38 /* obtain the original (unaligned) allocated pointer */
39 unaligned = MemoryChunkGetBlock(chunk);
40
41#ifdef MEMORY_CONTEXT_CHECKING
42 /* Test for someone scribbling on unused space in chunk */
43 if (!sentinel_ok(pointer, chunk->requested_size))
44 elog(WARNING, "detected write past chunk end in %s %p",
45 GetMemoryChunkContext(unaligned)->name, chunk);
46#endif
47
48 /*
49 * Create a dummy vchunk covering the start of the unaligned chunk, but
50 * not overlapping the aligned chunk. This will be freed while pfree'ing
51 * the unaligned chunk, keeping Valgrind happy. Then when we return to
52 * the outer pfree, that will clean up the vchunk for the aligned chunk.
53 */
55 (char *) pointer - (char *) unaligned);
56
57 /* Recursively pfree the unaligned chunk */
58 pfree(unaligned);
59}
#define WARNING
Definition: elog.h:36
#define elog(elevel,...)
Definition: elog.h:226
Assert(PointerIsAligned(start, uint64))
void pfree(void *pointer)
Definition: mcxt.c:1594
MemoryContext GetMemoryChunkContext(void *pointer)
Definition: mcxt.c:753
#define VALGRIND_MAKE_MEM_DEFINED(addr, size)
Definition: memdebug.h:26
#define VALGRIND_MEMPOOL_ALLOC(context, addr, size)
Definition: memdebug.h:29
static bool MemoryChunkIsExternal(MemoryChunk *chunk)
static void * MemoryChunkGetBlock(MemoryChunk *chunk)
#define PointerGetMemoryChunk(p)
const char * name

References Assert(), elog, GetMemoryChunkContext(), MemoryChunkGetBlock(), MemoryChunkIsExternal(), name, pfree(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MEMPOOL_ALLOC, and WARNING.

◆ AlignedAllocGetChunkContext()

MemoryContext AlignedAllocGetChunkContext ( void *  pointer)

Definition at line 154 of file alignedalloc.c.

155{
156 MemoryChunk *redirchunk = PointerGetMemoryChunk(pointer);
157 MemoryContext cxt;
158
159 VALGRIND_MAKE_MEM_DEFINED(redirchunk, sizeof(MemoryChunk));
160
161 Assert(!MemoryChunkIsExternal(redirchunk));
162
164
165 VALGRIND_MAKE_MEM_NOACCESS(redirchunk, sizeof(MemoryChunk));
166
167 return cxt;
168}
#define VALGRIND_MAKE_MEM_NOACCESS(addr, size)
Definition: memdebug.h:27

References Assert(), GetMemoryChunkContext(), MemoryChunkGetBlock(), MemoryChunkIsExternal(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ AlignedAllocGetChunkSpace()

Size AlignedAllocGetChunkSpace ( void *  pointer)

Definition at line 176 of file alignedalloc.c.

177{
178 MemoryChunk *redirchunk = PointerGetMemoryChunk(pointer);
179 void *unaligned;
180 Size space;
181
182 VALGRIND_MAKE_MEM_DEFINED(redirchunk, sizeof(MemoryChunk));
183
184 unaligned = MemoryChunkGetBlock(redirchunk);
185 space = GetMemoryChunkSpace(unaligned);
186
187 VALGRIND_MAKE_MEM_NOACCESS(redirchunk, sizeof(MemoryChunk));
188
189 return space;
190}
size_t Size
Definition: c.h:610
Size GetMemoryChunkSpace(void *pointer)
Definition: mcxt.c:767

References GetMemoryChunkSpace(), MemoryChunkGetBlock(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ AlignedAllocRealloc()

void * AlignedAllocRealloc ( void *  pointer,
Size  size,
int  flags 
)

Definition at line 70 of file alignedalloc.c.

71{
72 MemoryChunk *redirchunk = PointerGetMemoryChunk(pointer);
73 Size alignto;
74 void *unaligned;
75 MemoryContext ctx;
76 Size old_size;
77 void *newptr;
78
79 VALGRIND_MAKE_MEM_DEFINED(redirchunk, sizeof(MemoryChunk));
80
81 alignto = MemoryChunkGetValue(redirchunk);
82 unaligned = MemoryChunkGetBlock(redirchunk);
83
84 /* sanity check this is a power of 2 value */
85 Assert((alignto & (alignto - 1)) == 0);
86
87 /*
88 * Determine the size of the original allocation. We can't determine this
89 * exactly as GetMemoryChunkSpace() returns the total space used for the
90 * allocation, which for contexts like aset includes rounding up to the
91 * next power of 2. However, this value is just used to memcpy() the old
92 * data into the new allocation, so we only need to concern ourselves with
93 * not reading beyond the end of the original allocation's memory. The
94 * drawback here is that we may copy more bytes than we need to, which
95 * only amounts to wasted effort. We can safely subtract the extra bytes
96 * that we requested to allow us to align the pointer. We must also
97 * subtract the space for the unaligned pointer's MemoryChunk since
98 * GetMemoryChunkSpace should have included that. This does assume that
99 * all context types use MemoryChunk as a chunk header.
100 */
101 old_size = GetMemoryChunkSpace(unaligned) -
102 PallocAlignedExtraBytes(alignto) - sizeof(MemoryChunk);
103
104#ifdef MEMORY_CONTEXT_CHECKING
105 /* check that GetMemoryChunkSpace returned something realistic */
106 Assert(old_size >= redirchunk->requested_size);
107#endif
108
109 /*
110 * To keep things simple, we always allocate a new aligned chunk and copy
111 * data into it. Because of the above inaccuracy, this may end in copying
112 * more data than was in the original allocation request size, but that
113 * should be OK.
114 */
115 ctx = GetMemoryChunkContext(unaligned);
116 newptr = MemoryContextAllocAligned(ctx, size, alignto, flags);
117
118 /* Cope cleanly with OOM */
119 if (unlikely(newptr == NULL))
120 {
121 VALGRIND_MAKE_MEM_NOACCESS(redirchunk, sizeof(MemoryChunk));
122 return MemoryContextAllocationFailure(ctx, size, flags);
123 }
124
125 /*
126 * We may memcpy more than the original allocation request size, which
127 * would result in trying to copy trailing bytes that the original
128 * MemoryContextAllocAligned call marked NOACCESS. So we must mark the
129 * entire old_size as defined. That's slightly annoying, but probably not
130 * worth improving.
131 */
132 VALGRIND_MAKE_MEM_DEFINED(pointer, old_size);
133 memcpy(newptr, pointer, Min(size, old_size));
134
135 /*
136 * Create a dummy vchunk covering the start of the old unaligned chunk,
137 * but not overlapping the aligned chunk. This will be freed while
138 * pfree'ing the old unaligned chunk, keeping Valgrind happy. Then when
139 * we return to repalloc, it will move the vchunk for the aligned chunk.
140 */
141 VALGRIND_MEMPOOL_ALLOC(ctx, unaligned,
142 (char *) pointer - (char *) unaligned);
143
144 pfree(unaligned);
145
146 return newptr;
147}
#define Min(x, y)
Definition: c.h:1003
#define unlikely(x)
Definition: c.h:402
void * MemoryContextAllocAligned(MemoryContext context, Size size, Size alignto, int flags)
Definition: mcxt.c:1460
void * MemoryContextAllocationFailure(MemoryContext context, Size size, int flags)
Definition: mcxt.c:1195
#define PallocAlignedExtraBytes(alignto)
static Size MemoryChunkGetValue(MemoryChunk *chunk)
struct MemoryChunk MemoryChunk

References Assert(), GetMemoryChunkContext(), GetMemoryChunkSpace(), MemoryChunkGetBlock(), MemoryChunkGetValue(), MemoryContextAllocAligned(), MemoryContextAllocationFailure(), Min, PallocAlignedExtraBytes, pfree(), PointerGetMemoryChunk, unlikely, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MAKE_MEM_NOACCESS, and VALGRIND_MEMPOOL_ALLOC.

◆ AllocSetAlloc()

void * AllocSetAlloc ( MemoryContext  context,
Size  size,
int  flags 
)

Definition at line 1014 of file aset.c.

1015{
1016 AllocSet set = (AllocSet) context;
1017 AllocBlock block;
1018 MemoryChunk *chunk;
1019 int fidx;
1020 Size chunk_size;
1021 Size availspace;
1022
1023 Assert(AllocSetIsValid(set));
1024
1025 /* due to the keeper block set->blocks should never be NULL */
1026 Assert(set->blocks != NULL);
1027
1028 /*
1029 * If requested size exceeds maximum for chunks we hand the request off to
1030 * AllocSetAllocLarge().
1031 */
1032 if (size > set->allocChunkLimit)
1033 return AllocSetAllocLarge(context, size, flags);
1034
1035 /*
1036 * Request is small enough to be treated as a chunk. Look in the
1037 * corresponding free list to see if there is a free chunk we could reuse.
1038 * If one is found, remove it from the free list, make it again a member
1039 * of the alloc set and return its data address.
1040 *
1041 * Note that we don't attempt to ensure there's space for the sentinel
1042 * byte here. We expect a large proportion of allocations to be for sizes
1043 * which are already a power of 2. If we were to always make space for a
1044 * sentinel byte in MEMORY_CONTEXT_CHECKING builds, then we'd end up
1045 * doubling the memory requirements for such allocations.
1046 */
1047 fidx = AllocSetFreeIndex(size);
1048 chunk = set->freelist[fidx];
1049 if (chunk != NULL)
1050 {
1052
1053 /* Allow access to the chunk header. */
1055
1056 Assert(fidx == MemoryChunkGetValue(chunk));
1057
1058 /* pop this chunk off the freelist */
1060 set->freelist[fidx] = link->next;
1062
1063#ifdef MEMORY_CONTEXT_CHECKING
1064 chunk->requested_size = size;
1065 /* set mark to catch clobber of "unused" space */
1066 if (size < GetChunkSizeFromFreeListIdx(fidx))
1067 set_sentinel(MemoryChunkGetPointer(chunk), size);
1068#endif
1069#ifdef RANDOMIZE_ALLOCATED_MEMORY
1070 /* fill the allocated space with junk */
1071 randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
1072#endif
1073
1074 /* Ensure any padding bytes are marked NOACCESS. */
1076 GetChunkSizeFromFreeListIdx(fidx) - size);
1077
1078 /* Disallow access to the chunk header. */
1080
1081 return MemoryChunkGetPointer(chunk);
1082 }
1083
1084 /*
1085 * Choose the actual chunk size to allocate.
1086 */
1087 chunk_size = GetChunkSizeFromFreeListIdx(fidx);
1088 Assert(chunk_size >= size);
1089
1090 block = set->blocks;
1091 availspace = block->endptr - block->freeptr;
1092
1093 /*
1094 * If there is enough room in the active allocation block, we will put the
1095 * chunk into that block. Else must start a new one.
1096 */
1097 if (unlikely(availspace < (chunk_size + ALLOC_CHUNKHDRSZ)))
1098 return AllocSetAllocFromNewBlock(context, size, flags, fidx);
1099
1100 /* There's enough space on the current block, so allocate from that */
1101 return AllocSetAllocChunkFromBlock(context, block, size, chunk_size, fidx);
1102}
static pg_noinline void * AllocSetAllocFromNewBlock(MemoryContext context, Size size, int flags, int fidx)
Definition: aset.c:863
#define AllocSetIsValid(set)
Definition: aset.c:196
#define GetFreeListLink(chkptr)
Definition: aset.c:134
#define ALLOC_CHUNKHDRSZ
Definition: aset.c:105
#define GetChunkSizeFromFreeListIdx(fidx)
Definition: aset.c:142
static int AllocSetFreeIndex(Size size)
Definition: aset.c:273
static void * AllocSetAllocChunkFromBlock(MemoryContext context, AllocBlock block, Size size, Size chunk_size, int fidx)
Definition: aset.c:818
static pg_noinline void * AllocSetAllocLarge(MemoryContext context, Size size, int flags)
Definition: aset.c:737
AllocSetContext * AllocSet
Definition: aset.c:169
#define MemoryChunkGetPointer(c)
char * freeptr
Definition: aset.c:188
char * endptr
Definition: aset.c:189
uint32 allocChunkLimit
Definition: aset.c:164
AllocBlock blocks
Definition: aset.c:158
MemoryChunk * freelist[ALLOCSET_NUM_FREELISTS]
Definition: aset.c:159

References ALLOC_CHUNKHDRSZ, AllocSetContext::allocChunkLimit, AllocSetAllocChunkFromBlock(), AllocSetAllocFromNewBlock(), AllocSetAllocLarge(), AllocSetFreeIndex(), AllocSetIsValid, Assert(), AllocSetContext::blocks, AllocBlockData::endptr, AllocSetContext::freelist, AllocBlockData::freeptr, GetChunkSizeFromFreeListIdx, GetFreeListLink, MemoryChunkGetPointer, MemoryChunkGetValue(), unlikely, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

Referenced by AllocSetRealloc().

◆ AllocSetDelete()

void AllocSetDelete ( MemoryContext  context)

Definition at line 634 of file aset.c.

635{
636 AllocSet set = (AllocSet) context;
637 AllocBlock block = set->blocks;
639
641
642#ifdef MEMORY_CONTEXT_CHECKING
643 /* Check for corruption and leaks before freeing */
644 AllocSetCheck(context);
645#endif
646
647 /* Remember keeper block size for Assert below */
648 keepersize = KeeperBlock(set)->endptr - ((char *) set);
649
650 /*
651 * If the context is a candidate for a freelist, put it into that freelist
652 * instead of destroying it.
653 */
654 if (set->freeListIndex >= 0)
655 {
657
658 /*
659 * Reset the context, if it needs it, so that we aren't hanging on to
660 * more than the initial malloc chunk.
661 */
662 if (!context->isReset)
663 MemoryContextResetOnly(context);
664
665 /*
666 * If the freelist is full, just discard what's already in it. See
667 * comments with context_freelists[].
668 */
669 if (freelist->num_free >= MAX_FREE_CONTEXTS)
670 {
671 while (freelist->first_free != NULL)
672 {
673 AllocSetContext *oldset = freelist->first_free;
674
675 freelist->first_free = (AllocSetContext *) oldset->header.nextchild;
676 freelist->num_free--;
677
678 /* Destroy the context's vpool --- see notes below */
680
681 /* All that remains is to free the header/initial block */
682 free(oldset);
683 }
684 Assert(freelist->num_free == 0);
685 }
686
687 /* Now add the just-deleted context to the freelist. */
688 set->header.nextchild = (MemoryContext) freelist->first_free;
689 freelist->first_free = set;
690 freelist->num_free++;
691
692 return;
693 }
694
695 /* Free all blocks, except the keeper which is part of context header */
696 while (block != NULL)
697 {
698 AllocBlock next = block->next;
699
700 if (!IsKeeperBlock(set, block))
701 context->mem_allocated -= block->endptr - ((char *) block);
702
703#ifdef CLOBBER_FREED_MEMORY
704 wipe_mem(block, block->freeptr - ((char *) block));
705#endif
706
707 if (!IsKeeperBlock(set, block))
708 {
709 /* As in AllocSetReset, free block-header vchunks explicitly */
710 VALGRIND_MEMPOOL_FREE(set, block);
711 free(block);
712 }
713
714 block = next;
715 }
716
717 Assert(context->mem_allocated == keepersize);
718
719 /*
720 * Destroy the vpool. We don't seem to need to explicitly free the
721 * initial block's header vchunk, nor any user-data vchunks that Valgrind
722 * still knows about; they'll all go away automatically.
723 */
725
726 /* Finally, free the context header, including the keeper block */
727 free(set);
728}
#define IsKeeperBlock(set, block)
Definition: aset.c:244
#define KeeperBlock(set)
Definition: aset.c:240
#define MAX_FREE_CONTEXTS
Definition: aset.c:237
static AllocSetFreeList context_freelists[2]
Definition: aset.c:253
static int32 next
Definition: blutils.c:224
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:223
#define free(a)
Definition: header.h:65
void MemoryContextResetOnly(MemoryContext context)
Definition: mcxt.c:419
#define VALGRIND_DESTROY_MEMPOOL(context)
Definition: memdebug.h:25
#define VALGRIND_MEMPOOL_FREE(context, addr)
Definition: memdebug.h:30
struct MemoryContextData * MemoryContext
Definition: palloc.h:36
AllocBlock next
Definition: aset.c:187
MemoryContextData header
Definition: aset.c:156
int freeListIndex
Definition: aset.c:166
int num_free
Definition: aset.c:248
AllocSetContext * first_free
Definition: aset.c:249
MemoryContext nextchild
Definition: memnodes.h:130

References AllocSetIsValid, Assert(), AllocSetContext::blocks, context_freelists, AllocBlockData::endptr, AllocSetFreeList::first_free, free, AllocSetContext::freeListIndex, AllocBlockData::freeptr, AllocSetContext::header, IsKeeperBlock, MemoryContextData::isReset, KeeperBlock, MAX_FREE_CONTEXTS, MemoryContextData::mem_allocated, MemoryContextResetOnly(), next, AllocBlockData::next, MemoryContextData::nextchild, AllocSetFreeList::num_free, PG_USED_FOR_ASSERTS_ONLY, VALGRIND_DESTROY_MEMPOOL, and VALGRIND_MEMPOOL_FREE.

◆ AllocSetFree()

void AllocSetFree ( void *  pointer)

Definition at line 1109 of file aset.c.

1110{
1111 AllocSet set;
1112 MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1113
1114 /* Allow access to the chunk header. */
1116
1117 if (MemoryChunkIsExternal(chunk))
1118 {
1119 /* Release single-chunk block. */
1120 AllocBlock block = ExternalChunkGetBlock(chunk);
1121
1122 /*
1123 * Try to verify that we have a sane block pointer: the block header
1124 * should reference an aset and the freeptr should match the endptr.
1125 */
1126 if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
1127 elog(ERROR, "could not find block containing chunk %p", chunk);
1128
1129 set = block->aset;
1130
1131#ifdef MEMORY_CONTEXT_CHECKING
1132 {
1133 /* Test for someone scribbling on unused space in chunk */
1134 Assert(chunk->requested_size < (block->endptr - (char *) pointer));
1135 if (!sentinel_ok(pointer, chunk->requested_size))
1136 elog(WARNING, "detected write past chunk end in %s %p",
1137 set->header.name, chunk);
1138 }
1139#endif
1140
1141 /* OK, remove block from aset's list and free it */
1142 if (block->prev)
1143 block->prev->next = block->next;
1144 else
1145 set->blocks = block->next;
1146 if (block->next)
1147 block->next->prev = block->prev;
1148
1149 set->header.mem_allocated -= block->endptr - ((char *) block);
1150
1151#ifdef CLOBBER_FREED_MEMORY
1152 wipe_mem(block, block->freeptr - ((char *) block));
1153#endif
1154
1155 /* As in AllocSetReset, free block-header vchunks explicitly */
1156 VALGRIND_MEMPOOL_FREE(set, block);
1157
1158 free(block);
1159 }
1160 else
1161 {
1162 AllocBlock block = MemoryChunkGetBlock(chunk);
1163 int fidx;
1165
1166 /*
1167 * In this path, for speed reasons we just Assert that the referenced
1168 * block is good. We can also Assert that the value field is sane.
1169 * Future field experience may show that these Asserts had better
1170 * become regular runtime test-and-elog checks.
1171 */
1172 Assert(AllocBlockIsValid(block));
1173 set = block->aset;
1174
1175 fidx = MemoryChunkGetValue(chunk);
1177 link = GetFreeListLink(chunk);
1178
1179#ifdef MEMORY_CONTEXT_CHECKING
1180 /* Test for someone scribbling on unused space in chunk */
1181 if (chunk->requested_size < GetChunkSizeFromFreeListIdx(fidx))
1182 if (!sentinel_ok(pointer, chunk->requested_size))
1183 elog(WARNING, "detected write past chunk end in %s %p",
1184 set->header.name, chunk);
1185#endif
1186
1187#ifdef CLOBBER_FREED_MEMORY
1188 wipe_mem(pointer, GetChunkSizeFromFreeListIdx(fidx));
1189#endif
1190 /* push this chunk onto the top of the free list */
1192 link->next = set->freelist[fidx];
1194 set->freelist[fidx] = chunk;
1195
1196#ifdef MEMORY_CONTEXT_CHECKING
1197
1198 /*
1199 * Reset requested_size to InvalidAllocSize in chunks that are on free
1200 * list.
1201 */
1202 chunk->requested_size = InvalidAllocSize;
1203#endif
1204 }
1205}
#define AllocBlockIsValid(block)
Definition: aset.c:203
#define FreeListIdxIsValid(fidx)
Definition: aset.c:138
#define ExternalChunkGetBlock(chunk)
Definition: aset.c:211
#define ERROR
Definition: elog.h:39
#define InvalidAllocSize
Definition: memutils.h:47
AllocBlock prev
Definition: aset.c:186
AllocSet aset
Definition: aset.c:185
const char * name
Definition: memnodes.h:131

References ALLOC_CHUNKHDRSZ, AllocBlockIsValid, AllocBlockData::aset, Assert(), AllocSetContext::blocks, elog, AllocBlockData::endptr, ERROR, ExternalChunkGetBlock, free, AllocSetContext::freelist, FreeListIdxIsValid, AllocBlockData::freeptr, GetChunkSizeFromFreeListIdx, GetFreeListLink, AllocSetContext::header, InvalidAllocSize, link(), MemoryContextData::mem_allocated, MemoryChunkGetBlock(), MemoryChunkGetValue(), MemoryChunkIsExternal(), MemoryContextData::name, AllocBlockData::next, PointerGetMemoryChunk, AllocBlockData::prev, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MAKE_MEM_NOACCESS, VALGRIND_MEMPOOL_FREE, and WARNING.

Referenced by AllocSetRealloc().

◆ AllocSetGetChunkContext()

MemoryContext AllocSetGetChunkContext ( void *  pointer)

Definition at line 1492 of file aset.c.

1493{
1494 MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1495 AllocBlock block;
1496 AllocSet set;
1497
1498 /* Allow access to the chunk header. */
1500
1501 if (MemoryChunkIsExternal(chunk))
1502 block = ExternalChunkGetBlock(chunk);
1503 else
1504 block = (AllocBlock) MemoryChunkGetBlock(chunk);
1505
1506 /* Disallow access to the chunk header. */
1508
1509 Assert(AllocBlockIsValid(block));
1510 set = block->aset;
1511
1512 return &set->header;
1513}
struct AllocBlockData * AllocBlock
Definition: aset.c:109

References ALLOC_CHUNKHDRSZ, AllocBlockIsValid, AllocBlockData::aset, Assert(), ExternalChunkGetBlock, AllocSetContext::header, MemoryChunkGetBlock(), MemoryChunkIsExternal(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ AllocSetGetChunkSpace()

Size AllocSetGetChunkSpace ( void *  pointer)

Definition at line 1521 of file aset.c.

1522{
1523 MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1524 int fidx;
1525
1526 /* Allow access to the chunk header. */
1528
1529 if (MemoryChunkIsExternal(chunk))
1530 {
1531 AllocBlock block = ExternalChunkGetBlock(chunk);
1532
1533 /* Disallow access to the chunk header. */
1535
1536 Assert(AllocBlockIsValid(block));
1537
1538 return block->endptr - (char *) chunk;
1539 }
1540
1541 fidx = MemoryChunkGetValue(chunk);
1543
1544 /* Disallow access to the chunk header. */
1546
1548}

References ALLOC_CHUNKHDRSZ, AllocBlockIsValid, Assert(), AllocBlockData::endptr, ExternalChunkGetBlock, FreeListIdxIsValid, GetChunkSizeFromFreeListIdx, MemoryChunkGetValue(), MemoryChunkIsExternal(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ AllocSetIsEmpty()

bool AllocSetIsEmpty ( MemoryContext  context)

Definition at line 1555 of file aset.c.

1556{
1557 Assert(AllocSetIsValid(context));
1558
1559 /*
1560 * For now, we say "empty" only if the context is new or just reset. We
1561 * could examine the freelists to determine if all space has been freed,
1562 * but it's not really worth the trouble for present uses of this
1563 * functionality.
1564 */
1565 if (context->isReset)
1566 return true;
1567 return false;
1568}

References AllocSetIsValid, Assert(), and MemoryContextData::isReset.

◆ AllocSetRealloc()

void * AllocSetRealloc ( void *  pointer,
Size  size,
int  flags 
)

Definition at line 1220 of file aset.c.

1221{
1222 AllocBlock block;
1223 AllocSet set;
1224 MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1225 Size oldchksize;
1226 int fidx;
1227
1228 /* Allow access to the chunk header. */
1230
1231 if (MemoryChunkIsExternal(chunk))
1232 {
1233 /*
1234 * The chunk must have been allocated as a single-chunk block. Use
1235 * realloc() to make the containing block bigger, or smaller, with
1236 * minimum space wastage.
1237 */
1238 AllocBlock newblock;
1239 Size chksize;
1240 Size blksize;
1241 Size oldblksize;
1242
1243 block = ExternalChunkGetBlock(chunk);
1244
1245 /*
1246 * Try to verify that we have a sane block pointer: the block header
1247 * should reference an aset and the freeptr should match the endptr.
1248 */
1249 if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
1250 elog(ERROR, "could not find block containing chunk %p", chunk);
1251
1252 set = block->aset;
1253
1254 /* only check size in paths where the limits could be hit */
1255 MemoryContextCheckSize((MemoryContext) set, size, flags);
1256
1257 oldchksize = block->endptr - (char *) pointer;
1258
1259#ifdef MEMORY_CONTEXT_CHECKING
1260 /* Test for someone scribbling on unused space in chunk */
1261 Assert(chunk->requested_size < oldchksize);
1262 if (!sentinel_ok(pointer, chunk->requested_size))
1263 elog(WARNING, "detected write past chunk end in %s %p",
1264 set->header.name, chunk);
1265#endif
1266
1267#ifdef MEMORY_CONTEXT_CHECKING
1268 /* ensure there's always space for the sentinel byte */
1269 chksize = MAXALIGN(size + 1);
1270#else
1271 chksize = MAXALIGN(size);
1272#endif
1273
1274 /* Do the realloc */
1275 blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
1276 oldblksize = block->endptr - ((char *) block);
1277
1278 newblock = (AllocBlock) realloc(block, blksize);
1279 if (newblock == NULL)
1280 {
1281 /* Disallow access to the chunk header. */
1283 return MemoryContextAllocationFailure(&set->header, size, flags);
1284 }
1285
1286 /*
1287 * Move the block-header vchunk explicitly. (mcxt.c will take care of
1288 * moving the vchunk for the user data.)
1289 */
1290 VALGRIND_MEMPOOL_CHANGE(set, block, newblock, ALLOC_BLOCKHDRSZ);
1291 block = newblock;
1292
1293 /* updated separately, not to underflow when (oldblksize > blksize) */
1294 set->header.mem_allocated -= oldblksize;
1295 set->header.mem_allocated += blksize;
1296
1297 block->freeptr = block->endptr = ((char *) block) + blksize;
1298
1299 /* Update pointers since block has likely been moved */
1300 chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
1301 pointer = MemoryChunkGetPointer(chunk);
1302 if (block->prev)
1303 block->prev->next = block;
1304 else
1305 set->blocks = block;
1306 if (block->next)
1307 block->next->prev = block;
1308
1309#ifdef MEMORY_CONTEXT_CHECKING
1310#ifdef RANDOMIZE_ALLOCATED_MEMORY
1311
1312 /*
1313 * We can only randomize the extra space if we know the prior request.
1314 * When using Valgrind, randomize_mem() also marks memory UNDEFINED.
1315 */
1316 if (size > chunk->requested_size)
1317 randomize_mem((char *) pointer + chunk->requested_size,
1318 size - chunk->requested_size);
1319#else
1320
1321 /*
1322 * If this is an increase, realloc() will have marked any
1323 * newly-allocated part (from oldchksize to chksize) UNDEFINED, but we
1324 * also need to adjust trailing bytes from the old allocation (from
1325 * chunk->requested_size to oldchksize) as they are marked NOACCESS.
1326 * Make sure not to mark too many bytes in case chunk->requested_size
1327 * < size < oldchksize.
1328 */
1329#ifdef USE_VALGRIND
1330 if (Min(size, oldchksize) > chunk->requested_size)
1331 VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
1332 Min(size, oldchksize) - chunk->requested_size);
1333#endif
1334#endif
1335
1336 chunk->requested_size = size;
1337 /* set mark to catch clobber of "unused" space */
1338 Assert(size < chksize);
1339 set_sentinel(pointer, size);
1340#else /* !MEMORY_CONTEXT_CHECKING */
1341
1342 /*
1343 * We may need to adjust marking of bytes from the old allocation as
1344 * some of them may be marked NOACCESS. We don't know how much of the
1345 * old chunk size was the requested size; it could have been as small
1346 * as one byte. We have to be conservative and just mark the entire
1347 * old portion DEFINED. Make sure not to mark memory beyond the new
1348 * allocation in case it's smaller than the old one.
1349 */
1350 VALGRIND_MAKE_MEM_DEFINED(pointer, Min(size, oldchksize));
1351#endif
1352
1353 /* Ensure any padding bytes are marked NOACCESS. */
1354 VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
1355
1356 /* Disallow access to the chunk header. */
1358
1359 return pointer;
1360 }
1361
1362 block = MemoryChunkGetBlock(chunk);
1363
1364 /*
1365 * In this path, for speed reasons we just Assert that the referenced
1366 * block is good. We can also Assert that the value field is sane. Future
1367 * field experience may show that these Asserts had better become regular
1368 * runtime test-and-elog checks.
1369 */
1370 Assert(AllocBlockIsValid(block));
1371 set = block->aset;
1372
1373 fidx = MemoryChunkGetValue(chunk);
1375 oldchksize = GetChunkSizeFromFreeListIdx(fidx);
1376
1377#ifdef MEMORY_CONTEXT_CHECKING
1378 /* Test for someone scribbling on unused space in chunk */
1379 if (chunk->requested_size < oldchksize)
1380 if (!sentinel_ok(pointer, chunk->requested_size))
1381 elog(WARNING, "detected write past chunk end in %s %p",
1382 set->header.name, chunk);
1383#endif
1384
1385 /*
1386 * Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
1387 * allocated area already is >= the new size. (In particular, we will
1388 * fall out here if the requested size is a decrease.)
1389 */
1390 if (oldchksize >= size)
1391 {
1392#ifdef MEMORY_CONTEXT_CHECKING
1393 Size oldrequest = chunk->requested_size;
1394
1395#ifdef RANDOMIZE_ALLOCATED_MEMORY
1396 /* We can only fill the extra space if we know the prior request */
1397 if (size > oldrequest)
1398 randomize_mem((char *) pointer + oldrequest,
1399 size - oldrequest);
1400#endif
1401
1402 chunk->requested_size = size;
1403
1404 /*
1405 * If this is an increase, mark any newly-available part UNDEFINED.
1406 * Otherwise, mark the obsolete part NOACCESS.
1407 */
1408 if (size > oldrequest)
1409 VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
1410 size - oldrequest);
1411 else
1412 VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
1413 oldchksize - size);
1414
1415 /* set mark to catch clobber of "unused" space */
1416 if (size < oldchksize)
1417 set_sentinel(pointer, size);
1418#else /* !MEMORY_CONTEXT_CHECKING */
1419
1420 /*
1421 * We don't have the information to determine whether we're growing
1422 * the old request or shrinking it, so we conservatively mark the
1423 * entire new allocation DEFINED.
1424 */
1425 VALGRIND_MAKE_MEM_NOACCESS(pointer, oldchksize);
1426 VALGRIND_MAKE_MEM_DEFINED(pointer, size);
1427#endif
1428
1429 /* Disallow access to the chunk header. */
1431
1432 return pointer;
1433 }
1434 else
1435 {
1436 /*
1437 * Enlarge-a-small-chunk case. We just do this by brute force, ie,
1438 * allocate a new chunk and copy the data. Since we know the existing
1439 * data isn't huge, this won't involve any great memcpy expense, so
1440 * it's not worth being smarter. (At one time we tried to avoid
1441 * memcpy when it was possible to enlarge the chunk in-place, but that
1442 * turns out to misbehave unpleasantly for repeated cycles of
1443 * palloc/repalloc/pfree: the eventually freed chunks go into the
1444 * wrong freelist for the next initial palloc request, and so we leak
1445 * memory indefinitely. See pgsql-hackers archives for 2007-08-11.)
1446 */
1447 AllocPointer newPointer;
1448 Size oldsize;
1449
1450 /* allocate new chunk (this also checks size is valid) */
1451 newPointer = AllocSetAlloc((MemoryContext) set, size, flags);
1452
1453 /* leave immediately if request was not completed */
1454 if (newPointer == NULL)
1455 {
1456 /* Disallow access to the chunk header. */
1458 return MemoryContextAllocationFailure((MemoryContext) set, size, flags);
1459 }
1460
1461 /*
1462 * AllocSetAlloc() may have returned a region that is still NOACCESS.
1463 * Change it to UNDEFINED for the moment; memcpy() will then transfer
1464 * definedness from the old allocation to the new. If we know the old
1465 * allocation, copy just that much. Otherwise, make the entire old
1466 * chunk defined to avoid errors as we copy the currently-NOACCESS
1467 * trailing bytes.
1468 */
1469 VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
1470#ifdef MEMORY_CONTEXT_CHECKING
1471 oldsize = chunk->requested_size;
1472#else
1473 oldsize = oldchksize;
1474 VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
1475#endif
1476
1477 /* transfer existing data (certain to fit) */
1478 memcpy(newPointer, pointer, oldsize);
1479
1480 /* free old chunk */
1481 AllocSetFree(pointer);
1482
1483 return newPointer;
1484 }
1485}
#define ALLOC_BLOCKHDRSZ
Definition: aset.c:104
void * AllocSetAlloc(MemoryContext context, Size size, int flags)
Definition: aset.c:1014
void * AllocPointer
Definition: aset.c:115
void AllocSetFree(void *pointer)
Definition: aset.c:1109
#define MAXALIGN(LEN)
Definition: c.h:810
#define realloc(a, b)
Definition: header.h:60
#define VALGRIND_MEMPOOL_CHANGE(context, optr, nptr, size)
Definition: memdebug.h:31
#define VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
Definition: memdebug.h:28
static void MemoryContextCheckSize(MemoryContext context, Size size, int flags)

References ALLOC_BLOCKHDRSZ, ALLOC_CHUNKHDRSZ, AllocBlockIsValid, AllocSetAlloc(), AllocSetFree(), AllocBlockData::aset, Assert(), elog, AllocBlockData::endptr, ERROR, ExternalChunkGetBlock, FreeListIdxIsValid, AllocBlockData::freeptr, GetChunkSizeFromFreeListIdx, AllocSetContext::header, MAXALIGN, MemoryContextData::mem_allocated, MemoryChunkGetBlock(), MemoryChunkGetPointer, MemoryChunkGetValue(), MemoryChunkIsExternal(), MemoryContextAllocationFailure(), MemoryContextCheckSize(), Min, MemoryContextData::name, AllocBlockData::next, PointerGetMemoryChunk, AllocBlockData::prev, realloc, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MAKE_MEM_NOACCESS, VALGRIND_MAKE_MEM_UNDEFINED, VALGRIND_MEMPOOL_CHANGE, and WARNING.

◆ AllocSetReset()

void AllocSetReset ( MemoryContext  context)

Definition at line 548 of file aset.c.

549{
550 AllocSet set = (AllocSet) context;
551 AllocBlock block;
553
555
556#ifdef MEMORY_CONTEXT_CHECKING
557 /* Check for corruption and leaks before freeing */
558 AllocSetCheck(context);
559#endif
560
561 /* Remember keeper block size for Assert below */
562 keepersize = KeeperBlock(set)->endptr - ((char *) set);
563
564 /* Clear chunk freelists */
565 MemSetAligned(set->freelist, 0, sizeof(set->freelist));
566
567 block = set->blocks;
568
569 /* New blocks list will be just the keeper block */
570 set->blocks = KeeperBlock(set);
571
572 while (block != NULL)
573 {
574 AllocBlock next = block->next;
575
576 if (IsKeeperBlock(set, block))
577 {
578 /* Reset the block, but don't return it to malloc */
579 char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
580
581#ifdef CLOBBER_FREED_MEMORY
582 wipe_mem(datastart, block->freeptr - datastart);
583#else
584 /* wipe_mem() would have done this */
585 VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
586#endif
587 block->freeptr = datastart;
588 block->prev = NULL;
589 block->next = NULL;
590 }
591 else
592 {
593 /* Normal case, release the block */
594 context->mem_allocated -= block->endptr - ((char *) block);
595
596#ifdef CLOBBER_FREED_MEMORY
597 wipe_mem(block, block->freeptr - ((char *) block));
598#endif
599
600 /*
601 * We need to free the block header's vchunk explicitly, although
602 * the user-data vchunks within will go away in the TRIM below.
603 * Otherwise Valgrind complains about leaked allocations.
604 */
605 VALGRIND_MEMPOOL_FREE(set, block);
606
607 free(block);
608 }
609 block = next;
610 }
611
612 Assert(context->mem_allocated == keepersize);
613
614 /*
615 * Instruct Valgrind to throw away all the vchunks associated with this
616 * context, except for the one covering the AllocSetContext and
617 * keeper-block header. This gets rid of the vchunks for whatever user
618 * data is getting discarded by the context reset.
619 */
621
622 /* Reset block size allocation sequence, too */
623 set->nextBlockSize = set->initBlockSize;
624}
#define FIRST_BLOCKHDRSZ
Definition: aset.c:106
#define MemSetAligned(start, val, len)
Definition: c.h:1049
#define VALGRIND_MEMPOOL_TRIM(context, addr, size)
Definition: memdebug.h:32
uint32 initBlockSize
Definition: aset.c:161
uint32 nextBlockSize
Definition: aset.c:163

References ALLOC_BLOCKHDRSZ, AllocSetIsValid, Assert(), AllocSetContext::blocks, AllocBlockData::endptr, FIRST_BLOCKHDRSZ, free, AllocSetContext::freelist, AllocBlockData::freeptr, AllocSetContext::initBlockSize, IsKeeperBlock, KeeperBlock, MemoryContextData::mem_allocated, MemSetAligned, next, AllocBlockData::next, AllocSetContext::nextBlockSize, PG_USED_FOR_ASSERTS_ONLY, AllocBlockData::prev, VALGRIND_MAKE_MEM_NOACCESS, VALGRIND_MEMPOOL_FREE, and VALGRIND_MEMPOOL_TRIM.

◆ AllocSetStats()

void AllocSetStats ( MemoryContext  context,
MemoryStatsPrintFunc  printfunc,
void *  passthru,
MemoryContextCounters totals,
bool  print_to_stderr 
)

Definition at line 1580 of file aset.c.

1583{
1584 AllocSet set = (AllocSet) context;
1585 Size nblocks = 0;
1586 Size freechunks = 0;
1587 Size totalspace;
1588 Size freespace = 0;
1589 AllocBlock block;
1590 int fidx;
1591
1592 Assert(AllocSetIsValid(set));
1593
1594 /* Include context header in totalspace */
1595 totalspace = MAXALIGN(sizeof(AllocSetContext));
1596
1597 for (block = set->blocks; block != NULL; block = block->next)
1598 {
1599 nblocks++;
1600 totalspace += block->endptr - ((char *) block);
1601 freespace += block->endptr - block->freeptr;
1602 }
1603 for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
1604 {
1605 Size chksz = GetChunkSizeFromFreeListIdx(fidx);
1606 MemoryChunk *chunk = set->freelist[fidx];
1607
1608 while (chunk != NULL)
1609 {
1611
1612 /* Allow access to the chunk header. */
1614 Assert(MemoryChunkGetValue(chunk) == fidx);
1616
1617 freechunks++;
1618 freespace += chksz + ALLOC_CHUNKHDRSZ;
1619
1621 chunk = link->next;
1623 }
1624 }
1625
1626 if (printfunc)
1627 {
1628 char stats_string[200];
1629
1630 snprintf(stats_string, sizeof(stats_string),
1631 "%zu total in %zu blocks; %zu free (%zu chunks); %zu used",
1632 totalspace, nblocks, freespace, freechunks,
1633 totalspace - freespace);
1634 printfunc(context, passthru, stats_string, print_to_stderr);
1635 }
1636
1637 if (totals)
1638 {
1639 totals->nblocks += nblocks;
1640 totals->freechunks += freechunks;
1641 totals->totalspace += totalspace;
1642 totals->freespace += freespace;
1643 }
1644}
#define ALLOCSET_NUM_FREELISTS
Definition: aset.c:84
#define snprintf
Definition: port.h:239

References ALLOC_CHUNKHDRSZ, ALLOCSET_NUM_FREELISTS, AllocSetIsValid, Assert(), AllocSetContext::blocks, AllocBlockData::endptr, MemoryContextCounters::freechunks, AllocSetContext::freelist, AllocBlockData::freeptr, MemoryContextCounters::freespace, GetChunkSizeFromFreeListIdx, GetFreeListLink, MAXALIGN, MemoryChunkGetValue(), MemoryContextCounters::nblocks, AllocBlockData::next, snprintf, MemoryContextCounters::totalspace, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ BumpAlloc()

void * BumpAlloc ( MemoryContext  context,
Size  size,
int  flags 
)

Definition at line 517 of file bump.c.

518{
519 BumpContext *set = (BumpContext *) context;
520 BumpBlock *block;
521 Size chunk_size;
522 Size required_size;
523
524 Assert(BumpIsValid(set));
525
526#ifdef MEMORY_CONTEXT_CHECKING
527 /* ensure there's always space for the sentinel byte */
528 chunk_size = MAXALIGN(size + 1);
529#else
530 chunk_size = MAXALIGN(size);
531#endif
532
533 /*
534 * If requested size exceeds maximum for chunks we hand the request off to
535 * BumpAllocLarge().
536 */
537 if (chunk_size > set->allocChunkLimit)
538 return BumpAllocLarge(context, size, flags);
539
540 required_size = chunk_size + Bump_CHUNKHDRSZ;
541
542 /*
 543 * Not an oversized chunk. We first try to make use of the latest block,
 544 * but if there's not enough space in it we must allocate a new block.
545 */
546 block = dlist_container(BumpBlock, node, dlist_head_node(&set->blocks));
547
548 if (BumpBlockFreeBytes(block) < required_size)
549 return BumpAllocFromNewBlock(context, size, flags, chunk_size);
550
551 /* The current block has space, so just allocate chunk there. */
552 return BumpAllocChunkFromBlock(context, block, size, chunk_size);
553}
#define Bump_CHUNKHDRSZ
Definition: bump.c:56
static pg_noinline void * BumpAllocLarge(MemoryContext context, Size size, int flags)
Definition: bump.c:313
#define BumpIsValid(set)
Definition: bump.c:102
static pg_noinline void * BumpAllocFromNewBlock(MemoryContext context, Size size, int flags, Size chunk_size)
Definition: bump.c:453
static void * BumpAllocChunkFromBlock(MemoryContext context, BumpBlock *block, Size size, Size chunk_size)
Definition: bump.c:394
static Size BumpBlockFreeBytes(BumpBlock *block)
Definition: bump.c:611
static dlist_node * dlist_head_node(dlist_head *head)
Definition: ilist.h:565
#define dlist_container(type, membername, ptr)
Definition: ilist.h:593
Definition: bump.c:89
dlist_head blocks
Definition: bump.c:78
uint32 allocChunkLimit
Definition: bump.c:76

References BumpContext::allocChunkLimit, Assert(), BumpContext::blocks, Bump_CHUNKHDRSZ, BumpAllocChunkFromBlock(), BumpAllocFromNewBlock(), BumpAllocLarge(), BumpBlockFreeBytes(), BumpIsValid, dlist_container, dlist_head_node(), and MAXALIGN.

◆ BumpDelete()

void BumpDelete ( MemoryContext  context)

Definition at line 294 of file bump.c.

295{
296 /* Reset to release all releasable BumpBlocks */
297 BumpReset(context);
298
299 /* Destroy the vpool -- see notes in aset.c */
301
302 /* And free the context header and keeper block */
303 free(context);
304}
void BumpReset(MemoryContext context)
Definition: bump.c:251

References BumpReset(), free, and VALGRIND_DESTROY_MEMPOOL.

◆ BumpFree()

void BumpFree ( void *  pointer)

Definition at line 646 of file bump.c.

647{
648 elog(ERROR, "%s is not supported by the bump memory allocator", "pfree");
649}

References elog, and ERROR.

◆ BumpGetChunkContext()

MemoryContext BumpGetChunkContext ( void *  pointer)

Definition at line 667 of file bump.c.

668{
669 elog(ERROR, "%s is not supported by the bump memory allocator", "GetMemoryChunkContext");
670 return NULL; /* keep compiler quiet */
671}

References elog, and ERROR.

◆ BumpGetChunkSpace()

Size BumpGetChunkSpace ( void *  pointer)

Definition at line 678 of file bump.c.

679{
680 elog(ERROR, "%s is not supported by the bump memory allocator", "GetMemoryChunkSpace");
681 return 0; /* keep compiler quiet */
682}

References elog, and ERROR.

◆ BumpIsEmpty()

bool BumpIsEmpty ( MemoryContext  context)

Definition at line 689 of file bump.c.

690{
691 BumpContext *set = (BumpContext *) context;
692 dlist_iter iter;
693
694 Assert(BumpIsValid(set));
695
696 dlist_foreach(iter, &set->blocks)
697 {
698 BumpBlock *block = dlist_container(BumpBlock, node, iter.cur);
699
700 if (!BumpBlockIsEmpty(block))
701 return false;
702 }
703
704 return true;
705}
static bool BumpBlockIsEmpty(BumpBlock *block)
Definition: bump.c:578
#define dlist_foreach(iter, lhead)
Definition: ilist.h:623
dlist_node * cur
Definition: ilist.h:179

References Assert(), BumpContext::blocks, BumpBlockIsEmpty(), BumpIsValid, dlist_iter::cur, dlist_container, and dlist_foreach.

◆ BumpRealloc()

void * BumpRealloc ( void *  pointer,
Size  size,
int  flags 
)

Definition at line 656 of file bump.c.

657{
658 elog(ERROR, "%s is not supported by the bump memory allocator", "realloc");
659 return NULL; /* keep compiler quiet */
660}

References elog, and ERROR.

◆ BumpReset()

void BumpReset ( MemoryContext  context)

Definition at line 251 of file bump.c.

252{
253 BumpContext *set = (BumpContext *) context;
254 dlist_mutable_iter miter;
255
256 Assert(BumpIsValid(set));
257
258#ifdef MEMORY_CONTEXT_CHECKING
259 /* Check for corruption and leaks before freeing */
260 BumpCheck(context);
261#endif
262
263 dlist_foreach_modify(miter, &set->blocks)
264 {
265 BumpBlock *block = dlist_container(BumpBlock, node, miter.cur);
266
267 if (IsKeeperBlock(set, block))
268 BumpBlockMarkEmpty(block);
269 else
270 BumpBlockFree(set, block);
271 }
272
273 /*
274 * Instruct Valgrind to throw away all the vchunks associated with this
275 * context, except for the one covering the BumpContext and keeper-block
276 * header. This gets rid of the vchunks for whatever user data is getting
277 * discarded by the context reset.
278 */
280
281 /* Reset block size allocation sequence, too */
282 set->nextBlockSize = set->initBlockSize;
283
284 /* Ensure there is only 1 item in the dlist */
287}
static void BumpBlockFree(BumpContext *set, BumpBlock *block)
Definition: bump.c:621
static void BumpBlockMarkEmpty(BumpBlock *block)
Definition: bump.c:589
#define IsKeeperBlock(set, blk)
Definition: bump.c:64
#define FIRST_BLOCKHDRSZ
Definition: bump.c:49
static bool dlist_has_next(const dlist_head *head, const dlist_node *node)
Definition: ilist.h:503
#define dlist_foreach_modify(iter, lhead)
Definition: ilist.h:640
static bool dlist_is_empty(const dlist_head *head)
Definition: ilist.h:336
uint32 initBlockSize
Definition: bump.c:73
uint32 nextBlockSize
Definition: bump.c:75
dlist_node * cur
Definition: ilist.h:200

References Assert(), BumpContext::blocks, BumpBlockFree(), BumpBlockMarkEmpty(), BumpIsValid, dlist_mutable_iter::cur, dlist_container, dlist_foreach_modify, dlist_has_next(), dlist_head_node(), dlist_is_empty(), FIRST_BLOCKHDRSZ, BumpContext::initBlockSize, IsKeeperBlock, BumpContext::nextBlockSize, and VALGRIND_MEMPOOL_TRIM.

Referenced by BumpDelete().

◆ BumpStats()

void BumpStats ( MemoryContext  context,
MemoryStatsPrintFunc  printfunc,
void *  passthru,
MemoryContextCounters totals,
bool  print_to_stderr 
)

Definition at line 717 of file bump.c.

719{
720 BumpContext *set = (BumpContext *) context;
721 Size nblocks = 0;
722 Size totalspace = 0;
723 Size freespace = 0;
724 dlist_iter iter;
725
726 Assert(BumpIsValid(set));
727
728 dlist_foreach(iter, &set->blocks)
729 {
730 BumpBlock *block = dlist_container(BumpBlock, node, iter.cur);
731
732 nblocks++;
733 totalspace += (block->endptr - (char *) block);
734 freespace += (block->endptr - block->freeptr);
735 }
736
737 if (printfunc)
738 {
739 char stats_string[200];
740
741 snprintf(stats_string, sizeof(stats_string),
742 "%zu total in %zu blocks; %zu free; %zu used",
743 totalspace, nblocks, freespace, totalspace - freespace);
744 printfunc(context, passthru, stats_string, print_to_stderr);
745 }
746
747 if (totals)
748 {
749 totals->nblocks += nblocks;
750 totals->totalspace += totalspace;
751 totals->freespace += freespace;
752 }
753}
char * endptr
Definition: bump.c:95
char * freeptr
Definition: bump.c:94

References Assert(), BumpContext::blocks, BumpIsValid, dlist_iter::cur, dlist_container, dlist_foreach, BumpBlock::endptr, BumpBlock::freeptr, MemoryContextCounters::freespace, MemoryContextCounters::nblocks, snprintf, and MemoryContextCounters::totalspace.

◆ GenerationAlloc()

void * GenerationAlloc ( MemoryContext  context,
Size  size,
int  flags 
)

Definition at line 553 of file generation.c.

554{
555 GenerationContext *set = (GenerationContext *) context;
556 GenerationBlock *block;
557 Size chunk_size;
558 Size required_size;
559
561
562#ifdef MEMORY_CONTEXT_CHECKING
563 /* ensure there's always space for the sentinel byte */
564 chunk_size = MAXALIGN(size + 1);
565#else
566 chunk_size = MAXALIGN(size);
567#endif
568
569 /*
570 * If requested size exceeds maximum for chunks we hand the request off to
571 * GenerationAllocLarge().
572 */
573 if (chunk_size > set->allocChunkLimit)
574 return GenerationAllocLarge(context, size, flags);
575
576 required_size = chunk_size + Generation_CHUNKHDRSZ;
577
578 /*
579 * Not an oversized chunk. We try to first make use of the current block,
580 * but if there's not enough space in it, instead of allocating a new
581 * block, we look to see if the empty freeblock has enough space. We
582 * don't try reusing the keeper block. If it's become empty we'll reuse
583 * that again only if the context is reset.
584 *
585 * We only try reusing the freeblock if we've no space for this allocation
 586 * on the current block. When a freeblock exists, we'll switch to it the
 587 * first time we can't fit an allocation in the current block. We
588 * avoid ping-ponging between the two as we need to be careful not to
589 * fragment differently sized consecutive allocations between several
590 * blocks. Going between the two could cause fragmentation for FIFO
591 * workloads, which generation is meant to be good at.
592 */
593 block = set->block;
594
595 if (unlikely(GenerationBlockFreeBytes(block) < required_size))
596 {
597 GenerationBlock *freeblock = set->freeblock;
598
599 /* freeblock, if set, must be empty */
600 Assert(freeblock == NULL || GenerationBlockIsEmpty(freeblock));
601
602 /* check if we have a freeblock and if it's big enough */
603 if (freeblock != NULL &&
604 GenerationBlockFreeBytes(freeblock) >= required_size)
605 {
606 /* make the freeblock the current block */
607 set->freeblock = NULL;
608 set->block = freeblock;
609
610 return GenerationAllocChunkFromBlock(context,
611 freeblock,
612 size,
613 chunk_size);
614 }
615 else
616 {
617 /*
618 * No freeblock, or it's not big enough for this allocation. Make
619 * a new block.
620 */
621 return GenerationAllocFromNewBlock(context, size, flags, chunk_size);
622 }
623 }
624
625 /* The current block has space, so just allocate chunk there. */
626 return GenerationAllocChunkFromBlock(context, block, size, chunk_size);
627}
static pg_noinline void * GenerationAllocLarge(MemoryContext context, Size size, int flags)
Definition: generation.c:363
static Size GenerationBlockFreeBytes(GenerationBlock *block)
Definition: generation.c:680
#define Generation_CHUNKHDRSZ
Definition: generation.c:47
static void * GenerationAllocChunkFromBlock(MemoryContext context, GenerationBlock *block, Size size, Size chunk_size)
Definition: generation.c:436
#define GenerationBlockIsEmpty(b)
Definition: generation.c:118
static pg_noinline void * GenerationAllocFromNewBlock(MemoryContext context, Size size, int flags, Size chunk_size)
Definition: generation.c:484
#define GenerationIsValid(set)
Definition: generation.c:104
GenerationBlock * freeblock
Definition: generation.c:72
GenerationBlock * block
Definition: generation.c:71
uint32 allocChunkLimit
Definition: generation.c:69

References GenerationContext::allocChunkLimit, Assert(), GenerationContext::block, GenerationContext::freeblock, Generation_CHUNKHDRSZ, GenerationAllocChunkFromBlock(), GenerationAllocFromNewBlock(), GenerationAllocLarge(), GenerationBlockFreeBytes(), GenerationBlockIsEmpty, GenerationIsValid, MAXALIGN, and unlikely.

Referenced by GenerationRealloc().

◆ GenerationDelete()

void GenerationDelete ( MemoryContext  context)

Definition at line 344 of file generation.c.

345{
346 /* Reset to release all releasable GenerationBlocks */
347 GenerationReset(context);
348
349 /* Destroy the vpool -- see notes in aset.c */
351
352 /* And free the context header and keeper block */
353 free(context);
354}
void GenerationReset(MemoryContext context)
Definition: generation.c:291

References free, GenerationReset(), and VALGRIND_DESTROY_MEMPOOL.

◆ GenerationFree()

void GenerationFree ( void *  pointer)

Definition at line 718 of file generation.c.

719{
720 MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
721 GenerationBlock *block;
723#if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \
724 || defined(CLOBBER_FREED_MEMORY)
725 Size chunksize;
726#endif
727
728 /* Allow access to the chunk header. */
730
731 if (MemoryChunkIsExternal(chunk))
732 {
733 block = ExternalChunkGetBlock(chunk);
734
735 /*
736 * Try to verify that we have a sane block pointer: the block header
737 * should reference a generation context.
738 */
739 if (!GenerationBlockIsValid(block))
740 elog(ERROR, "could not find block containing chunk %p", chunk);
741
742#if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \
743 || defined(CLOBBER_FREED_MEMORY)
744 chunksize = block->endptr - (char *) pointer;
745#endif
746 }
747 else
748 {
749 block = MemoryChunkGetBlock(chunk);
750
751 /*
752 * In this path, for speed reasons we just Assert that the referenced
753 * block is good. Future field experience may show that this Assert
754 * had better become a regular runtime test-and-elog check.
755 */
757
758#if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \
759 || defined(CLOBBER_FREED_MEMORY)
760 chunksize = MemoryChunkGetValue(chunk);
761#endif
762 }
763
764#ifdef MEMORY_CONTEXT_CHECKING
765 /* Test for someone scribbling on unused space in chunk */
766 Assert(chunk->requested_size < chunksize);
767 if (!sentinel_ok(pointer, chunk->requested_size))
768 elog(WARNING, "detected write past chunk end in %s %p",
769 ((MemoryContext) block->context)->name, chunk);
770#endif
771
772#ifdef CLOBBER_FREED_MEMORY
773 wipe_mem(pointer, chunksize);
774#endif
775
776#ifdef MEMORY_CONTEXT_CHECKING
777 /* Reset requested_size to InvalidAllocSize in freed chunks */
778 chunk->requested_size = InvalidAllocSize;
779#endif
780
781 block->nfree += 1;
782
783 Assert(block->nchunks > 0);
784 Assert(block->nfree <= block->nchunks);
785 Assert(block != block->context->freeblock);
786
787 /* If there are still allocated chunks in the block, we're done. */
788 if (likely(block->nfree < block->nchunks))
789 return;
790
791 set = block->context;
792
793 /*-----------------------
794 * The block this allocation was on has now become completely empty of
795 * chunks. In the general case, we can now return the memory for this
796 * block back to malloc. However, there are cases where we don't want to
797 * do that:
798 *
799 * 1) If it's the keeper block. This block was malloc'd in the same
800 * allocation as the context itself and can't be free'd without
801 * freeing the context.
802 * 2) If it's the current block. We could free this, but doing so would
803 * leave us nothing to set the current block to, so we just mark the
804 * block as empty so new allocations can reuse it again.
805 * 3) If we have no "freeblock" set, then we save a single block for
806 * future allocations to avoid having to malloc a new block again.
807 * This is useful for FIFO workloads as it avoids continual
808 * free/malloc cycles.
809 */
810 if (IsKeeperBlock(set, block) || set->block == block)
811 GenerationBlockMarkEmpty(block); /* case 1 and 2 */
812 else if (set->freeblock == NULL)
813 {
814 /* case 3 */
816 set->freeblock = block;
817 }
818 else
819 GenerationBlockFree(set, block); /* Otherwise, free it */
820}
#define likely(x)
Definition: c.h:401
#define IsKeeperBlock(set, block)
Definition: generation.c:134
static void GenerationBlockFree(GenerationContext *set, GenerationBlock *block)
Definition: generation.c:690
static void GenerationBlockMarkEmpty(GenerationBlock *block)
Definition: generation.c:656
#define GenerationBlockIsValid(block)
Definition: generation.c:111
#define ExternalChunkGetBlock(chunk)
Definition: generation.c:125
GenerationContext * context
Definition: generation.c:92

References Assert(), GenerationContext::block, GenerationBlock::context, elog, GenerationBlock::endptr, ERROR, ExternalChunkGetBlock, GenerationContext::freeblock, Generation_CHUNKHDRSZ, GenerationBlockFree(), GenerationBlockIsValid, GenerationBlockMarkEmpty(), InvalidAllocSize, IsKeeperBlock, likely, MemoryChunkGetBlock(), MemoryChunkGetValue(), MemoryChunkIsExternal(), GenerationBlock::nchunks, GenerationBlock::nfree, PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and WARNING.

Referenced by GenerationRealloc().

◆ GenerationGetChunkContext()

MemoryContext GenerationGetChunkContext ( void *  pointer)

Definition at line 976 of file generation.c.

977{
978 MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
979 GenerationBlock *block;
980
981 /* Allow access to the chunk header. */
983
984 if (MemoryChunkIsExternal(chunk))
985 block = ExternalChunkGetBlock(chunk);
986 else
987 block = (GenerationBlock *) MemoryChunkGetBlock(chunk);
988
989 /* Disallow access to the chunk header. */
991
993 return &block->context->header;
994}
MemoryContextData header
Definition: generation.c:63

References Assert(), GenerationBlock::context, ExternalChunkGetBlock, Generation_CHUNKHDRSZ, GenerationBlockIsValid, GenerationContext::header, MemoryChunkGetBlock(), MemoryChunkIsExternal(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ GenerationGetChunkSpace()

Size GenerationGetChunkSpace ( void *  pointer)

Definition at line 1002 of file generation.c.

1003{
1004 MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1005 Size chunksize;
1006
1007 /* Allow access to the chunk header. */
1009
1010 if (MemoryChunkIsExternal(chunk))
1011 {
1012 GenerationBlock *block = ExternalChunkGetBlock(chunk);
1013
1015 chunksize = block->endptr - (char *) pointer;
1016 }
1017 else
1018 chunksize = MemoryChunkGetValue(chunk);
1019
1020 /* Disallow access to the chunk header. */
1022
1023 return Generation_CHUNKHDRSZ + chunksize;
1024}

References Assert(), GenerationBlock::endptr, ExternalChunkGetBlock, Generation_CHUNKHDRSZ, GenerationBlockIsValid, MemoryChunkGetValue(), MemoryChunkIsExternal(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ GenerationIsEmpty()

bool GenerationIsEmpty ( MemoryContext  context)

Definition at line 1031 of file generation.c.

1032{
1033 GenerationContext *set = (GenerationContext *) context;
1034 dlist_iter iter;
1035
1037
1038 dlist_foreach(iter, &set->blocks)
1039 {
1040 GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
1041
1042 if (block->nchunks > 0)
1043 return false;
1044 }
1045
1046 return true;
1047}
dlist_head blocks
Definition: generation.c:74

References Assert(), GenerationContext::blocks, dlist_iter::cur, dlist_container, dlist_foreach, GenerationIsValid, and GenerationBlock::nchunks.

◆ GenerationRealloc()

void * GenerationRealloc ( void *  pointer,
Size  size,
int  flags 
)

Definition at line 829 of file generation.c.

830{
831 MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
832 GenerationContext *set;
833 GenerationBlock *block;
834 GenerationPointer newPointer;
835 Size oldsize;
836
837 /* Allow access to the chunk header. */
838 VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ);
839
840 if (MemoryChunkIsExternal(chunk))
841 {
842 block = ExternalChunkGetBlock(chunk);
843
844 /*
845 * Try to verify that we have a sane block pointer: the block header
846 * should reference a generation context.
847 */
848 if (!GenerationBlockIsValid(block))
849 elog(ERROR, "could not find block containing chunk %p", chunk);
850
851 oldsize = block->endptr - (char *) pointer;
852 }
853 else
854 {
855 block = MemoryChunkGetBlock(chunk);
856
857 /*
858 * In this path, for speed reasons we just Assert that the referenced
859 * block is good. Future field experience may show that this Assert
860 * had better become a regular runtime test-and-elog check.
861 */
862 Assert(GenerationBlockIsValid(block));
863
864 oldsize = MemoryChunkGetValue(chunk);
865 }
866
867 set = block->context;
868
869#ifdef MEMORY_CONTEXT_CHECKING
870 /* Test for someone scribbling on unused space in chunk */
871 Assert(chunk->requested_size < oldsize);
872 if (!sentinel_ok(pointer, chunk->requested_size))
873 elog(WARNING, "detected write past chunk end in %s %p",
874 ((MemoryContext) set)->name, chunk);
875#endif
876
877 /*
878 * Maybe the allocated area is already big enough. (In particular, we always
879 * fall out here if the requested size is a decrease.)
880 *
881 * This memory context does not use power-of-2 chunk sizing and instead
882 * carves the chunks to be as small as possible, so most repalloc() calls
883 * will end up in the palloc/memcpy/pfree branch.
884 *
885 * XXX Perhaps we should annotate this condition with unlikely()?
886 */
887#ifdef MEMORY_CONTEXT_CHECKING
888 /* With MEMORY_CONTEXT_CHECKING, we need an extra byte for the sentinel */
889 if (oldsize > size)
890#else
891 if (oldsize >= size)
892#endif
893 {
894#ifdef MEMORY_CONTEXT_CHECKING
895 Size oldrequest = chunk->requested_size;
896
897#ifdef RANDOMIZE_ALLOCATED_MEMORY
898 /* We can only fill the extra space if we know the prior request */
899 if (size > oldrequest)
900 randomize_mem((char *) pointer + oldrequest,
901 size - oldrequest);
902#endif
903
904 chunk->requested_size = size;
905
906 /*
907 * If this is an increase, mark any newly-available part UNDEFINED.
908 * Otherwise, mark the obsolete part NOACCESS.
909 */
910 if (size > oldrequest)
911 VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
912 size - oldrequest);
913 else
914 VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
915 oldsize - size);
916
917 /* set mark to catch clobber of "unused" space */
918 set_sentinel(pointer, size);
919#else /* !MEMORY_CONTEXT_CHECKING */
920
921 /*
922 * We don't have the information to determine whether we're growing
923 * the old request or shrinking it, so we conservatively mark the
924 * entire new allocation DEFINED.
925 */
926 VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
927 VALGRIND_MAKE_MEM_DEFINED(pointer, size);
928#endif
929
930 /* Disallow access to the chunk header. */
931 VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
932
933 return pointer;
934 }
935
936 /* allocate new chunk (this also checks size is valid) */
937 newPointer = GenerationAlloc((MemoryContext) set, size, flags);
938
939 /* leave immediately if request was not completed */
940 if (newPointer == NULL)
941 {
942 /* Disallow access to the chunk header. */
943 VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
944 return MemoryContextAllocationFailure((MemoryContext) set, size, flags);
945 }
946
947 /*
948 * GenerationAlloc() may have returned a region that is still NOACCESS.
949 * Change it to UNDEFINED for the moment; memcpy() will then transfer
950 * definedness from the old allocation to the new. If we know the old
951 * allocation, copy just that much. Otherwise, make the entire old chunk
952 * defined to avoid errors as we copy the currently-NOACCESS trailing
953 * bytes.
954 */
955 VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
956#ifdef MEMORY_CONTEXT_CHECKING
957 oldsize = chunk->requested_size;
958#else
959 VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
960#endif
961
962 /* transfer existing data (certain to fit) */
963 memcpy(newPointer, pointer, oldsize);
964
965 /* free old chunk */
966 GenerationFree(pointer);
967
968 return newPointer;
969}
void GenerationFree(void *pointer)
Definition: generation.c:718
void * GenerationPointer
Definition: generation.c:55
void * GenerationAlloc(MemoryContext context, Size size, int flags)
Definition: generation.c:553

References Assert(), GenerationBlock::context, elog, GenerationBlock::endptr, ERROR, ExternalChunkGetBlock, Generation_CHUNKHDRSZ, GenerationAlloc(), GenerationBlockIsValid, GenerationFree(), MemoryChunkGetBlock(), MemoryChunkGetValue(), MemoryChunkIsExternal(), MemoryContextAllocationFailure(), name, PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MAKE_MEM_NOACCESS, VALGRIND_MAKE_MEM_UNDEFINED, and WARNING.

◆ GenerationReset()

void GenerationReset ( MemoryContext  context)

Definition at line 291 of file generation.c.

292{
293 GenerationContext *set = (GenerationContext *) context;
294 dlist_mutable_iter miter;
295
296 Assert(GenerationIsValid(set));
297
298#ifdef MEMORY_CONTEXT_CHECKING
299 /* Check for corruption and leaks before freeing */
300 GenerationCheck(context);
301#endif
302
303 /*
304 * NULLify the free block pointer. We must do this before calling
305 * GenerationBlockFree as that function never expects to free the
306 * freeblock.
307 */
308 set->freeblock = NULL;
309
310 dlist_foreach_modify(miter, &set->blocks)
311 {
312 GenerationBlock *block = dlist_container(GenerationBlock, node, miter.cur);
313
314 if (IsKeeperBlock(set, block))
315 GenerationBlockMarkEmpty(block);
316 else
317 GenerationBlockFree(set, block);
318 }
319
320 /*
321 * Instruct Valgrind to throw away all the vchunks associated with this
322 * context, except for the one covering the GenerationContext and
323 * keeper-block header. This gets rid of the vchunks for whatever user
324 * data is getting discarded by the context reset.
325 */
326 VALGRIND_MEMPOOL_TRIM(set, set, FIRST_BLOCKHDRSZ);
327
328 /* set it so new allocations make use of the keeper block */
329 set->block = KeeperBlock(set);
330
331 /* Reset block size allocation sequence, too */
332 set->nextBlockSize = set->initBlockSize;
333
334 /* Ensure there is only 1 item in the dlist */
335 Assert(!dlist_is_empty(&set->blocks));
336 Assert(!dlist_has_next(&set->blocks, dlist_head_node(&set->blocks)));
337}
#define KeeperBlock(set)
Definition: generation.c:129
#define FIRST_BLOCKHDRSZ
Definition: generation.c:48
uint32 nextBlockSize
Definition: generation.c:68
uint32 initBlockSize
Definition: generation.c:66

References Assert(), GenerationContext::block, GenerationContext::blocks, dlist_mutable_iter::cur, dlist_container, dlist_foreach_modify, dlist_has_next(), dlist_head_node(), dlist_is_empty(), FIRST_BLOCKHDRSZ, GenerationContext::freeblock, GenerationBlockFree(), GenerationBlockMarkEmpty(), GenerationIsValid, GenerationContext::initBlockSize, IsKeeperBlock, KeeperBlock, GenerationContext::nextBlockSize, and VALGRIND_MEMPOOL_TRIM.

Referenced by GenerationDelete().

◆ GenerationStats()

void GenerationStats ( MemoryContext  context,
MemoryStatsPrintFunc  printfunc,
void *  passthru,
MemoryContextCounters totals,
bool  print_to_stderr 
)

Definition at line 1062 of file generation.c.

1065{
1066 GenerationContext *set = (GenerationContext *) context;
1067 Size nblocks = 0;
1068 Size nchunks = 0;
1069 Size nfreechunks = 0;
1070 Size totalspace;
1071 Size freespace = 0;
1072 dlist_iter iter;
1073
1074 Assert(GenerationIsValid(set));
1075
1076 /* Include context header in totalspace */
1077 totalspace = MAXALIGN(sizeof(GenerationContext));
1078
1079 dlist_foreach(iter, &set->blocks)
1080 {
1081 GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
1082
1083 nblocks++;
1084 nchunks += block->nchunks;
1085 nfreechunks += block->nfree;
1086 totalspace += block->blksize;
1087 freespace += (block->endptr - block->freeptr);
1088 }
1089
1090 if (printfunc)
1091 {
1092 char stats_string[200];
1093
1094 snprintf(stats_string, sizeof(stats_string),
1095 "%zu total in %zu blocks (%zu chunks); %zu free (%zu chunks); %zu used",
1096 totalspace, nblocks, nchunks, freespace,
1097 nfreechunks, totalspace - freespace);
1098 printfunc(context, passthru, stats_string, print_to_stderr);
1099 }
1100
1101 if (totals)
1102 {
1103 totals->nblocks += nblocks;
1104 totals->freechunks += nfreechunks;
1105 totals->totalspace += totalspace;
1106 totals->freespace += freespace;
1107 }
1108}
char * freeptr
Definition: generation.c:96

References Assert(), GenerationBlock::blksize, GenerationContext::blocks, dlist_iter::cur, dlist_container, dlist_foreach, GenerationBlock::endptr, MemoryContextCounters::freechunks, GenerationBlock::freeptr, MemoryContextCounters::freespace, GenerationIsValid, MAXALIGN, MemoryContextCounters::nblocks, GenerationBlock::nchunks, GenerationBlock::nfree, snprintf, and MemoryContextCounters::totalspace.

◆ MemoryContextAllocationFailure()

void * MemoryContextAllocationFailure ( MemoryContext  context,
Size  size,
int  flags 
)

Definition at line 1195 of file mcxt.c.

1196{
1197 if ((flags & MCXT_ALLOC_NO_OOM) == 0)
1198 {
1199 if (TopMemoryContext)
1200 MemoryContextStats(TopMemoryContext);
1201 ereport(ERROR,
1202 (errcode(ERRCODE_OUT_OF_MEMORY),
1203 errmsg("out of memory"),
1204 errdetail("Failed on request of size %zu in memory context \"%s\".",
1205 size, context->name)));
1206 }
1207 return NULL;
1208}
int errdetail(const char *fmt,...)
Definition: elog.c:1207
int errcode(int sqlerrcode)
Definition: elog.c:854
int errmsg(const char *fmt,...)
Definition: elog.c:1071
#define ereport(elevel,...)
Definition: elog.h:150
#define MCXT_ALLOC_NO_OOM
Definition: fe_memutils.h:29
MemoryContext TopMemoryContext
Definition: mcxt.c:166
void MemoryContextStats(MemoryContext context)
Definition: mcxt.c:860

References ereport, errcode(), errdetail(), errmsg(), ERROR, MCXT_ALLOC_NO_OOM, MemoryContextStats(), MemoryContextData::name, and TopMemoryContext.

Referenced by AlignedAllocRealloc(), AllocSetAllocFromNewBlock(), AllocSetAllocLarge(), AllocSetRealloc(), BumpAllocFromNewBlock(), BumpAllocLarge(), GenerationAllocFromNewBlock(), GenerationAllocLarge(), GenerationRealloc(), and SlabAllocFromNewBlock().

◆ MemoryContextCheckSize()

static void MemoryContextCheckSize ( MemoryContext  context,
Size  size,
int  flags 
)
inlinestatic

Definition at line 167 of file memutils_internal.h.

168{
169 if (unlikely(!AllocSizeIsValid(size)))
170 {
171 if (!(flags & MCXT_ALLOC_HUGE) || !AllocHugeSizeIsValid(size))
172 MemoryContextSizeFailure(context, size, flags);
173 }
174}
#define MCXT_ALLOC_HUGE
Definition: fe_memutils.h:28
#define AllocHugeSizeIsValid(size)
Definition: memutils.h:49
#define AllocSizeIsValid(size)
Definition: memutils.h:42
pg_noreturn void MemoryContextSizeFailure(MemoryContext context, Size size, int flags)
Definition: mcxt.c:1216

References AllocHugeSizeIsValid, AllocSizeIsValid, MCXT_ALLOC_HUGE, MemoryContextSizeFailure(), and unlikely.

Referenced by AllocSetAllocLarge(), AllocSetRealloc(), BumpAllocLarge(), and GenerationAllocLarge().

◆ MemoryContextCreate()

void MemoryContextCreate ( MemoryContext  node,
NodeTag  tag,
MemoryContextMethodID  method_id,
MemoryContext  parent,
const char *  name 
)

Definition at line 1146 of file mcxt.c.

1151{
1152 /* Creating new memory contexts is not allowed in a critical section */
1153 Assert(CritSectionCount == 0);
1154
1155 /* Validate parent, to help prevent crazy context linkages */
1156 Assert(parent == NULL || MemoryContextIsValid(parent));
1157 Assert(node != parent);
1158
1159 /* Initialize all standard fields of memory context header */
1160 node->type = tag;
1161 node->isReset = true;
1162 node->methods = &mcxt_methods[method_id];
1163 node->parent = parent;
1164 node->firstchild = NULL;
1165 node->mem_allocated = 0;
1166 node->prevchild = NULL;
1167 node->name = name;
1168 node->ident = NULL;
1169 node->reset_cbs = NULL;
1170
1171 /* OK to link node into context tree */
1172 if (parent)
1173 {
1174 node->nextchild = parent->firstchild;
1175 if (parent->firstchild != NULL)
1176 parent->firstchild->prevchild = node;
1177 parent->firstchild = node;
1178 /* inherit allowInCritSection flag from parent */
1179 node->allowInCritSection = parent->allowInCritSection;
1180 }
1181 else
1182 {
1183 node->nextchild = NULL;
1184 node->allowInCritSection = false;
1185 }
1186}
volatile uint32 CritSectionCount
Definition: globals.c:45
static const MemoryContextMethods mcxt_methods[]
Definition: mcxt.c:63
#define MemoryContextIsValid(context)
Definition: memnodes.h:145
MemoryContext prevchild
Definition: memnodes.h:129
MemoryContext firstchild
Definition: memnodes.h:128
bool allowInCritSection
Definition: memnodes.h:124
const char * ident
Definition: memnodes.h:132
MemoryContext parent
Definition: memnodes.h:127
MemoryContextCallback * reset_cbs
Definition: memnodes.h:133
const MemoryContextMethods * methods
Definition: memnodes.h:126

References MemoryContextData::allowInCritSection, Assert(), CritSectionCount, MemoryContextData::firstchild, MemoryContextData::ident, MemoryContextData::isReset, mcxt_methods, MemoryContextData::mem_allocated, MemoryContextIsValid, MemoryContextData::methods, name, MemoryContextData::name, MemoryContextData::nextchild, MemoryContextData::parent, MemoryContextData::prevchild, and MemoryContextData::reset_cbs.

Referenced by AllocSetContextCreateInternal(), BumpContextCreate(), GenerationContextCreate(), and SlabContextCreate().

◆ MemoryContextSizeFailure()

pg_noreturn void MemoryContextSizeFailure ( MemoryContext  context,
Size  size,
int  flags 
)

Definition at line 1216 of file mcxt.c.

1217{
1218 elog(ERROR, "invalid memory alloc request size %zu", size);
1219}

References elog, and ERROR.

Referenced by MemoryContextCheckSize().

◆ SlabAlloc()

void * SlabAlloc ( MemoryContext  context,
Size  size,
int  flags 
)

Definition at line 658 of file slab.c.

659{
660 SlabContext *slab = (SlabContext *) context;
661 SlabBlock *block;
662 MemoryChunk *chunk;
663
664 Assert(SlabIsValid(slab));
665
666 /* sanity check that this is pointing to a valid blocklist */
667 Assert(slab->curBlocklistIndex >= 0);
668 Assert(slab->curBlocklistIndex <= SlabBlocklistIndex(slab, slab->chunksPerBlock));
669
670 /*
671 * Make sure we only allow correct request size. This doubles as the
672 * MemoryContextCheckSize check.
673 */
674 if (unlikely(size != slab->chunkSize))
675 SlabAllocInvalidSize(context, size);
676
677 if (unlikely(slab->curBlocklistIndex == 0))
678 {
679 /*
680 * Handle the case when there are no partially filled blocks
681 * available. This happens either when the last allocation took the
682 * last chunk in the block, or when SlabFree() free'd the final block.
683 */
684 return SlabAllocFromNewBlock(context, size, flags);
685 }
686 else
687 {
688 dlist_head *blocklist = &slab->blocklist[slab->curBlocklistIndex];
689 int new_blocklist_idx;
690
691 Assert(!dlist_is_empty(blocklist));
692
693 /* grab the block from the blocklist */
694 block = dlist_head_element(SlabBlock, node, blocklist);
695
696 /* make sure we actually got a valid block, with matching nfree */
697 Assert(block != NULL);
698 Assert(slab->curBlocklistIndex == SlabBlocklistIndex(slab, block->nfree));
699 Assert(block->nfree > 0);
700
701 /* fetch the next chunk from this block */
702 chunk = SlabGetNextFreeChunk(slab, block);
703
704 /* get the new blocklist index based on the new free chunk count */
705 new_blocklist_idx = SlabBlocklistIndex(slab, block->nfree);
706
707 /*
708 * Handle the case where the blocklist index changes. This also deals
709 * with blocks becoming full as only full blocks go at index 0.
710 */
711 if (unlikely(slab->curBlocklistIndex != new_blocklist_idx))
712 {
713 dlist_delete_from(blocklist, &block->node);
714 dlist_push_head(&slab->blocklist[new_blocklist_idx], &block->node);
715
716 if (dlist_is_empty(blocklist))
717 slab->curBlocklistIndex = SlabFindNextBlockListIndex(slab);
718 }
719 }
720
721 return SlabAllocSetupNewChunk(context, block, chunk, size);
722}
static void dlist_delete_from(dlist_head *head, dlist_node *node)
Definition: ilist.h:429
#define dlist_head_element(type, membername, lhead)
Definition: ilist.h:603
static void dlist_push_head(dlist_head *head, dlist_node *node)
Definition: ilist.h:347
static pg_noinline void * SlabAllocFromNewBlock(MemoryContext context, Size size, int flags)
Definition: slab.c:564
#define SlabIsValid(set)
Definition: slab.c:196
static MemoryChunk * SlabGetNextFreeChunk(SlabContext *slab, SlabBlock *block)
Definition: slab.c:271
static int32 SlabBlocklistIndex(SlabContext *slab, int nfree)
Definition: slab.c:211
static void * SlabAllocSetupNewChunk(MemoryContext context, SlabBlock *block, MemoryChunk *chunk, Size size)
Definition: slab.c:523
pg_noinline static pg_noreturn void SlabAllocInvalidSize(MemoryContext context, Size size)
Definition: slab.c:634
static int32 SlabFindNextBlockListIndex(SlabContext *slab)
Definition: slab.c:251
int32 nfree
Definition: slab.c:149
dlist_node node
Definition: slab.c:153
dlist_head blocklist[SLAB_BLOCKLIST_COUNT]
Definition: slab.c:129
int32 chunksPerBlock
Definition: slab.c:110
int32 curBlocklistIndex
Definition: slab.c:111
uint32 chunkSize
Definition: slab.c:107

References Assert(), SlabContext::blocklist, SlabContext::chunkSize, SlabContext::chunksPerBlock, SlabContext::curBlocklistIndex, dlist_delete_from(), dlist_head_element, dlist_is_empty(), dlist_push_head(), SlabBlock::nfree, SlabBlock::node, SlabAllocFromNewBlock(), SlabAllocInvalidSize(), SlabAllocSetupNewChunk(), SlabBlocklistIndex(), SlabFindNextBlockListIndex(), SlabGetNextFreeChunk(), SlabIsValid, and unlikely.

◆ SlabDelete()

void SlabDelete ( MemoryContext  context)

Definition at line 506 of file slab.c.

507{
508 /* Reset to release all the SlabBlocks */
509 SlabReset(context);
510
511 /* Destroy the vpool -- see notes in aset.c */
512 VALGRIND_DESTROY_MEMPOOL(context);
513
514 /* And free the context header */
515 free(context);
516}
void SlabReset(MemoryContext context)
Definition: slab.c:436

References free, SlabReset(), and VALGRIND_DESTROY_MEMPOOL.

◆ SlabFree()

void SlabFree ( void *  pointer)

Definition at line 729 of file slab.c.

730{
731 MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
732 SlabBlock *block;
733 SlabContext *slab;
734 int curBlocklistIdx;
735 int newBlocklistIdx;
736
737 /* Allow access to the chunk header. */
738 VALGRIND_MAKE_MEM_DEFINED(chunk, Slab_CHUNKHDRSZ);
739
740 block = MemoryChunkGetBlock(chunk);
741
742 /*
743 * For speed reasons we just Assert that the referenced block is good.
744 * Future field experience may show that this Assert had better become a
745 * regular runtime test-and-elog check.
746 */
747 Assert(SlabBlockIsValid(block));
748 slab = block->slab;
749
750#ifdef MEMORY_CONTEXT_CHECKING
751 /* Test for someone scribbling on unused space in chunk */
752 Assert(slab->chunkSize < (slab->fullChunkSize - Slab_CHUNKHDRSZ));
753 if (!sentinel_ok(pointer, slab->chunkSize))
754 elog(WARNING, "detected write past chunk end in %s %p",
755 slab->header.name, chunk);
756#endif
757
758 /* push this chunk onto the head of the block's free list */
759 *(MemoryChunk **) pointer = block->freehead;
760 block->freehead = chunk;
761
762 block->nfree++;
763
764 Assert(block->nfree > 0);
765 Assert(block->nfree <= slab->chunksPerBlock);
766
767#ifdef CLOBBER_FREED_MEMORY
768 /* don't wipe the free list MemoryChunk pointer stored in the chunk */
769 wipe_mem((char *) pointer + sizeof(MemoryChunk *),
770 slab->chunkSize - sizeof(MemoryChunk *));
771#endif
772
773 curBlocklistIdx = SlabBlocklistIndex(slab, block->nfree - 1);
774 newBlocklistIdx = SlabBlocklistIndex(slab, block->nfree);
775
776 /*
777 * Check if the block needs to be moved to another element on the
778 * blocklist based on it now having 1 more free chunk.
779 */
780 if (unlikely(curBlocklistIdx != newBlocklistIdx))
781 {
782 /* do the move */
783 dlist_delete_from(&slab->blocklist[curBlocklistIdx], &block->node);
784 dlist_push_head(&slab->blocklist[newBlocklistIdx], &block->node);
785
786 /*
787 * The blocklist[curBlocklistIdx] may now be empty or we may now be
788 * able to use a lower-element blocklist. We'll need to redetermine
789 * what the slab->curBlocklistIndex is if the current blocklist was
790 * changed or if a lower element one was changed. We must ensure we
791 * use the list with the fullest block(s).
792 */
793 if (slab->curBlocklistIndex >= curBlocklistIdx)
794 {
795 slab->curBlocklistIndex = SlabFindNextBlockListIndex(slab);
796
797 /*
798 * We know there must be a block with at least 1 unused chunk as
799 * we just pfree'd one. Ensure curBlocklistIndex reflects this.
800 */
801 Assert(slab->curBlocklistIndex > 0);
802 }
803 }
804
805 /* Handle when a block becomes completely empty */
806 if (unlikely(block->nfree == slab->chunksPerBlock))
807 {
808 /* remove the block */
809 dlist_delete_from(&slab->blocklist[newBlocklistIdx], &block->node);
810
811 /*
812 * To avoid thrashing malloc/free, we keep a list of empty blocks that
813 * we can reuse again instead of having to malloc a new one.
814 */
815 if (dclist_count(&slab->emptyblocks) < SLAB_MAXIMUM_EMPTY_BLOCKS)
816 dclist_push_head(&slab->emptyblocks, &block->node);
817 else
818 {
819 /*
820 * When we have enough empty blocks stored already, we actually
821 * free the block.
822 */
823#ifdef CLOBBER_FREED_MEMORY
824 wipe_mem(block, slab->blockSize);
825#endif
826
827 /* As in aset.c, free block-header vchunks explicitly */
828 VALGRIND_MEMPOOL_FREE(slab, block);
829
830 free(block);
831 slab->header.mem_allocated -= slab->blockSize;
832 }
833
834 /*
835 * Check if we need to reset the blocklist index. This is required
836 * when the blocklist this block is on has become completely empty.
837 */
838 if (slab->curBlocklistIndex == newBlocklistIdx &&
839 dlist_is_empty(&slab->blocklist[newBlocklistIdx]))
840 slab->curBlocklistIndex = SlabFindNextBlockListIndex(slab);
841 }
842}
static uint32 dclist_count(const dclist_head *head)
Definition: ilist.h:932
static void dclist_push_head(dclist_head *head, dlist_node *node)
Definition: ilist.h:693
#define Slab_CHUNKHDRSZ
Definition: slab.c:157
#define SlabBlockIsValid(block)
Definition: slab.c:202
#define SLAB_MAXIMUM_EMPTY_BLOCKS
Definition: slab.c:98
MemoryChunk * freehead
Definition: slab.c:151
SlabContext * slab
Definition: slab.c:148
uint32 fullChunkSize
Definition: slab.c:108
MemoryContextData header
Definition: slab.c:105
uint32 blockSize
Definition: slab.c:109
dclist_head emptyblocks
Definition: slab.c:120

References Assert(), SlabContext::blocklist, SlabContext::blockSize, SlabContext::chunkSize, SlabContext::chunksPerBlock, SlabContext::curBlocklistIndex, dclist_count(), dclist_push_head(), dlist_delete_from(), dlist_is_empty(), dlist_push_head(), elog, SlabContext::emptyblocks, free, SlabBlock::freehead, SlabContext::fullChunkSize, SlabContext::header, MemoryContextData::mem_allocated, MemoryChunkGetBlock(), MemoryContextData::name, SlabBlock::nfree, SlabBlock::node, PointerGetMemoryChunk, SlabBlock::slab, Slab_CHUNKHDRSZ, SLAB_MAXIMUM_EMPTY_BLOCKS, SlabBlockIsValid, SlabBlocklistIndex(), SlabFindNextBlockListIndex(), unlikely, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MEMPOOL_FREE, and WARNING.

◆ SlabGetChunkContext()

MemoryContext SlabGetChunkContext ( void *  pointer)

Definition at line 895 of file slab.c.

896{
897 MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
898 SlabBlock *block;
899
900 /* Allow access to the chunk header. */
901 VALGRIND_MAKE_MEM_DEFINED(chunk, Slab_CHUNKHDRSZ);
902
903 block = MemoryChunkGetBlock(chunk);
904
905 /* Disallow access to the chunk header. */
906 VALGRIND_MAKE_MEM_NOACCESS(chunk, Slab_CHUNKHDRSZ);
907
908 Assert(SlabBlockIsValid(block));
909
910 return &block->slab->header;
911}

References Assert(), SlabContext::header, MemoryChunkGetBlock(), PointerGetMemoryChunk, SlabBlock::slab, Slab_CHUNKHDRSZ, SlabBlockIsValid, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ SlabGetChunkSpace()

Size SlabGetChunkSpace ( void *  pointer)

Definition at line 919 of file slab.c.

920{
921 MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
922 SlabBlock *block;
923 SlabContext *slab;
924
925 /* Allow access to the chunk header. */
926 VALGRIND_MAKE_MEM_DEFINED(chunk, Slab_CHUNKHDRSZ);
927
928 block = MemoryChunkGetBlock(chunk);
929
930 /* Disallow access to the chunk header. */
931 VALGRIND_MAKE_MEM_NOACCESS(chunk, Slab_CHUNKHDRSZ);
932
933 Assert(SlabBlockIsValid(block));
934 slab = block->slab;
935
936 return slab->fullChunkSize;
937}

References Assert(), SlabContext::fullChunkSize, MemoryChunkGetBlock(), PointerGetMemoryChunk, SlabBlock::slab, Slab_CHUNKHDRSZ, SlabBlockIsValid, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ SlabIsEmpty()

bool SlabIsEmpty ( MemoryContext  context)

Definition at line 944 of file slab.c.

945{
946 Assert(SlabIsValid((SlabContext *) context));
947
948 return (context->mem_allocated == 0);
949}

References Assert(), MemoryContextData::mem_allocated, and SlabIsValid.

◆ SlabRealloc()

void * SlabRealloc ( void *  pointer,
Size  size,
int  flags 
)

Definition at line 858 of file slab.c.

859{
860 MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
861 SlabBlock *block;
862 SlabContext *slab;
863
864 /* Allow access to the chunk header. */
865 VALGRIND_MAKE_MEM_DEFINED(chunk, Slab_CHUNKHDRSZ);
866
867 block = MemoryChunkGetBlock(chunk);
868
869 /* Disallow access to the chunk header. */
870 VALGRIND_MAKE_MEM_NOACCESS(chunk, Slab_CHUNKHDRSZ);
871
872 /*
873 * Try to verify that we have a sane block pointer: the block header
874 * should reference a slab context. (We use a test-and-elog, not just
875 * Assert, because it seems highly likely that we're here in error in the
876 * first place.)
877 */
878 if (!SlabBlockIsValid(block))
879 elog(ERROR, "could not find block containing chunk %p", chunk);
880 slab = block->slab;
881
882 /* can't do actual realloc with slab, but let's try to be gentle */
883 if (size == slab->chunkSize)
884 return pointer;
885
886 elog(ERROR, "slab allocator does not support realloc()");
887 return NULL; /* keep compiler quiet */
888}

References SlabContext::chunkSize, elog, ERROR, MemoryChunkGetBlock(), PointerGetMemoryChunk, SlabBlock::slab, Slab_CHUNKHDRSZ, SlabBlockIsValid, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ SlabReset()

void SlabReset ( MemoryContext  context)

Definition at line 436 of file slab.c.

437{
438 SlabContext *slab = (SlabContext *) context;
439 dlist_mutable_iter miter;
440 int i;
441
442 Assert(SlabIsValid(slab));
443
444#ifdef MEMORY_CONTEXT_CHECKING
445 /* Check for corruption and leaks before freeing */
446 SlabCheck(context);
447#endif
448
449 /* release any retained empty blocks */
450 dclist_foreach_modify(miter, &slab->emptyblocks)
451 {
452 SlabBlock *block = dlist_container(SlabBlock, node, miter.cur);
453
454 dclist_delete_from(&slab->emptyblocks, miter.cur);
455
456#ifdef CLOBBER_FREED_MEMORY
457 wipe_mem(block, slab->blockSize);
458#endif
459
460 /* As in aset.c, free block-header vchunks explicitly */
461 VALGRIND_MEMPOOL_FREE(slab, block);
462
463 free(block);
464 context->mem_allocated -= slab->blockSize;
465 }
466
467 /* walk over blocklist and free the blocks */
468 for (i = 0; i < SLAB_BLOCKLIST_COUNT; i++)
469 {
470 dlist_foreach_modify(miter, &slab->blocklist[i])
471 {
472 SlabBlock *block = dlist_container(SlabBlock, node, miter.cur);
473
474 dlist_delete(miter.cur);
475
476#ifdef CLOBBER_FREED_MEMORY
477 wipe_mem(block, slab->blockSize);
478#endif
479
480 /* As in aset.c, free block-header vchunks explicitly */
481 VALGRIND_MEMPOOL_FREE(slab, block);
482
483 free(block);
484 context->mem_allocated -= slab->blockSize;
485 }
486 }
487
488 /*
489 * Instruct Valgrind to throw away all the vchunks associated with this
490 * context, except for the one covering the SlabContext. This gets rid of
491 * the vchunks for whatever user data is getting discarded by the context
492 * reset.
493 */
494 VALGRIND_MEMPOOL_TRIM(slab, slab, sizeof(SlabContext));
495
496 slab->curBlocklistIndex = 0;
497
498 Assert(context->mem_allocated == 0);
499}
static void dlist_delete(dlist_node *node)
Definition: ilist.h:405
static void dclist_delete_from(dclist_head *head, dlist_node *node)
Definition: ilist.h:763
#define dclist_foreach_modify(iter, lhead)
Definition: ilist.h:973
int i
Definition: isn.c:77
#define SLAB_BLOCKLIST_COUNT
Definition: slab.c:95

References Assert(), SlabContext::blocklist, SlabContext::blockSize, dlist_mutable_iter::cur, SlabContext::curBlocklistIndex, dclist_delete_from(), dclist_foreach_modify, dlist_container, dlist_delete(), dlist_foreach_modify, SlabContext::emptyblocks, free, i, MemoryContextData::mem_allocated, SLAB_BLOCKLIST_COUNT, SlabIsValid, VALGRIND_MEMPOOL_FREE, and VALGRIND_MEMPOOL_TRIM.

Referenced by SlabDelete().

◆ SlabStats()

void SlabStats ( MemoryContext  context,
MemoryStatsPrintFunc  printfunc,
void *  passthru,
MemoryContextCounters totals,
bool  print_to_stderr 
)

Definition at line 961 of file slab.c.

965{
966 SlabContext *slab = (SlabContext *) context;
967 Size nblocks = 0;
968 Size freechunks = 0;
969 Size totalspace;
970 Size freespace = 0;
971 int i;
972
973 Assert(SlabIsValid(slab));
974
975 /* Include context header in totalspace */
976 totalspace = Slab_CONTEXT_HDRSZ(slab->chunksPerBlock);
977
978 /* Add the space consumed by blocks in the emptyblocks list */
979 totalspace += dclist_count(&slab->emptyblocks) * slab->blockSize;
980
981 for (i = 0; i < SLAB_BLOCKLIST_COUNT; i++)
982 {
983 dlist_iter iter;
984
985 dlist_foreach(iter, &slab->blocklist[i])
986 {
987 SlabBlock *block = dlist_container(SlabBlock, node, iter.cur);
988
989 nblocks++;
990 totalspace += slab->blockSize;
991 freespace += slab->fullChunkSize * block->nfree;
992 freechunks += block->nfree;
993 }
994 }
995
996 if (printfunc)
997 {
998 char stats_string[200];
999
1000 /* XXX should we include free chunks on empty blocks? */
1001 snprintf(stats_string, sizeof(stats_string),
1002 "%zu total in %zu blocks; %u empty blocks; %zu free (%zu chunks); %zu used",
1003 totalspace, nblocks, dclist_count(&slab->emptyblocks),
1004 freespace, freechunks, totalspace - freespace);
1005 printfunc(context, passthru, stats_string, print_to_stderr);
1006 }
1007
1008 if (totals)
1009 {
1010 totals->nblocks += nblocks;
1011 totals->freechunks += freechunks;
1012 totals->totalspace += totalspace;
1013 totals->freespace += freespace;
1014 }
1015}
#define Slab_CONTEXT_HDRSZ(chunksPerBlock)
Definition: slab.c:88

References Assert(), SlabContext::blocklist, SlabContext::blockSize, SlabContext::chunksPerBlock, dlist_iter::cur, dclist_count(), dlist_container, dlist_foreach, SlabContext::emptyblocks, MemoryContextCounters::freechunks, MemoryContextCounters::freespace, SlabContext::fullChunkSize, i, MemoryContextCounters::nblocks, SlabBlock::nfree, SLAB_BLOCKLIST_COUNT, Slab_CONTEXT_HDRSZ, SlabIsValid, snprintf, and MemoryContextCounters::totalspace.