Thanks for visiting codestin.com
Credit goes to github.com

Skip to content

Commit 9e0fe09

Browse files
committed
Refactor bitmap heap scan in preparation for parallel support.
The final patch will be less messy if the prefetching support is a bit better isolated, so do that. Dilip Kumar, with some changes by me. The larger patch set of which this is a part has been reviewed and tested by (at least) Andres Freund, Amit Khandekar, Tushar Ahuja, Rafia Sabih, Haribabu Kommi, and Thomas Munro.
1 parent 3c3bb99 commit 9e0fe09

File tree

1 file changed

+88
-60
lines changed

1 file changed

+88
-60
lines changed

src/backend/executor/nodeBitmapHeapscan.c

Lines changed: 88 additions & 60 deletions
Original file line number | Diff line number | Diff line change
@@ -53,6 +53,11 @@
5353

5454
static TupleTableSlot *BitmapHeapNext(BitmapHeapScanState *node);
5555
static void bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres);
56+
static inline void BitmapAdjustPrefetchIterator(BitmapHeapScanState *node,
57+
TBMIterateResult *tbmres);
58+
static inline void BitmapAdjustPrefetchTarget(BitmapHeapScanState *node);
59+
static inline void BitmapPrefetch(BitmapHeapScanState *node,
60+
HeapScanDesc scan);
5661

5762

5863
/* ----------------------------------------------------------------
@@ -69,10 +74,6 @@ BitmapHeapNext(BitmapHeapScanState *node)
6974
TIDBitmap *tbm;
7075
TBMIterator *tbmiterator;
7176
TBMIterateResult *tbmres;
72-
73-
#ifdef USE_PREFETCH
74-
TBMIterator *prefetch_iterator;
75-
#endif
7677
OffsetNumber targoffset;
7778
TupleTableSlot *slot;
7879

@@ -85,9 +86,6 @@ BitmapHeapNext(BitmapHeapScanState *node)
8586
tbm = node->tbm;
8687
tbmiterator = node->tbmiterator;
8788
tbmres = node->tbmres;
88-
#ifdef USE_PREFETCH
89-
prefetch_iterator = node->prefetch_iterator;
90-
#endif
9189

9290
/*
9391
* If we haven't yet performed the underlying index scan, do it, and begin
@@ -115,7 +113,7 @@ BitmapHeapNext(BitmapHeapScanState *node)
115113
#ifdef USE_PREFETCH
116114
if (node->prefetch_maximum > 0)
117115
{
118-
node->prefetch_iterator = prefetch_iterator = tbm_begin_iterate(tbm);
116+
node->prefetch_iterator = tbm_begin_iterate(tbm);
119117
node->prefetch_pages = 0;
120118
node->prefetch_target = -1;
121119
}
@@ -139,21 +137,7 @@ BitmapHeapNext(BitmapHeapScanState *node)
139137
break;
140138
}
141139

142-
#ifdef USE_PREFETCH
143-
if (node->prefetch_pages > 0)
144-
{
145-
/* The main iterator has closed the distance by one page */
146-
node->prefetch_pages--;
147-
}
148-
else if (prefetch_iterator)
149-
{
150-
/* Do not let the prefetch iterator get behind the main one */
151-
TBMIterateResult *tbmpre = tbm_iterate(prefetch_iterator);
152-
153-
if (tbmpre == NULL || tbmpre->blockno != tbmres->blockno)
154-
elog(ERROR, "prefetch and main iterators are out of sync");
155-
}
156-
#endif /* USE_PREFETCH */
140+
BitmapAdjustPrefetchIterator(node, tbmres);
157141

158142
/*
159143
* Ignore any claimed entries past what we think is the end of the
@@ -182,23 +166,8 @@ BitmapHeapNext(BitmapHeapScanState *node)
182166
*/
183167
scan->rs_cindex = 0;
184168

185-
#ifdef USE_PREFETCH
186-
187-
/*
188-
* Increase prefetch target if it's not yet at the max. Note that
189-
* we will increase it to zero after fetching the very first
190-
* page/tuple, then to one after the second tuple is fetched, then
191-
* it doubles as later pages are fetched.
192-
*/
193-
if (node->prefetch_target >= node->prefetch_maximum)
194-
/* don't increase any further */ ;
195-
else if (node->prefetch_target >= node->prefetch_maximum / 2)
196-
node->prefetch_target = node->prefetch_maximum;
197-
else if (node->prefetch_target > 0)
198-
node->prefetch_target *= 2;
199-
else
200-
node->prefetch_target++;
201-
#endif /* USE_PREFETCH */
169+
/* Adjust the prefetch target */
170+
BitmapAdjustPrefetchTarget(node);
202171
}
203172
else
204173
{
@@ -227,33 +196,14 @@ BitmapHeapNext(BitmapHeapScanState *node)
227196
continue;
228197
}
229198

230-
#ifdef USE_PREFETCH
231-
232199
/*
233200
* We issue prefetch requests *after* fetching the current page to try
234201
* to avoid having prefetching interfere with the main I/O. Also, this
235202
* should happen only when we have determined there is still something
236203
* to do on the current page, else we may uselessly prefetch the same
237204
* page we are just about to request for real.
238205
*/
239-
if (prefetch_iterator)
240-
{
241-
while (node->prefetch_pages < node->prefetch_target)
242-
{
243-
TBMIterateResult *tbmpre = tbm_iterate(prefetch_iterator);
244-
245-
if (tbmpre == NULL)
246-
{
247-
/* No more pages to prefetch */
248-
tbm_end_iterate(prefetch_iterator);
249-
node->prefetch_iterator = prefetch_iterator = NULL;
250-
break;
251-
}
252-
node->prefetch_pages++;
253-
PrefetchBuffer(scan->rs_rd, MAIN_FORKNUM, tbmpre->blockno);
254-
}
255-
}
256-
#endif /* USE_PREFETCH */
206+
BitmapPrefetch(node, scan);
257207

258208
/*
259209
* Okay to fetch the tuple
@@ -411,6 +361,84 @@ bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres)
411361
scan->rs_ntuples = ntup;
412362
}
413363

364+
/*
365+
* BitmapAdjustPrefetchIterator - Adjust the prefetch iterator
366+
*/
367+
static inline void
368+
BitmapAdjustPrefetchIterator(BitmapHeapScanState *node,
369+
TBMIterateResult *tbmres)
370+
{
371+
#ifdef USE_PREFETCH
372+
TBMIterator *prefetch_iterator = node->prefetch_iterator;
373+
374+
if (node->prefetch_pages > 0)
375+
{
376+
/* The main iterator has closed the distance by one page */
377+
node->prefetch_pages--;
378+
}
379+
else if (prefetch_iterator)
380+
{
381+
/* Do not let the prefetch iterator get behind the main one */
382+
TBMIterateResult *tbmpre = tbm_iterate(prefetch_iterator);
383+
384+
if (tbmpre == NULL || tbmpre->blockno != tbmres->blockno)
385+
elog(ERROR, "prefetch and main iterators are out of sync");
386+
}
387+
#endif /* USE_PREFETCH */
388+
}
389+
390+
/*
391+
* BitmapAdjustPrefetchTarget - Adjust the prefetch target
392+
*
393+
* Increase prefetch target if it's not yet at the max. Note that
394+
* we will increase it to zero after fetching the very first
395+
* page/tuple, then to one after the second tuple is fetched, then
396+
* it doubles as later pages are fetched.
397+
*/
398+
static inline void
399+
BitmapAdjustPrefetchTarget(BitmapHeapScanState *node)
400+
{
401+
#ifdef USE_PREFETCH
402+
if (node->prefetch_target >= node->prefetch_maximum)
403+
/* don't increase any further */ ;
404+
else if (node->prefetch_target >= node->prefetch_maximum / 2)
405+
node->prefetch_target = node->prefetch_maximum;
406+
else if (node->prefetch_target > 0)
407+
node->prefetch_target *= 2;
408+
else
409+
node->prefetch_target++;
410+
#endif /* USE_PREFETCH */
411+
}
412+
413+
/*
414+
* BitmapPrefetch - Prefetch, if prefetch_pages are behind prefetch_target
415+
*/
416+
static inline void
417+
BitmapPrefetch(BitmapHeapScanState *node, HeapScanDesc scan)
418+
{
419+
#ifdef USE_PREFETCH
420+
TBMIterator *prefetch_iterator = node->prefetch_iterator;
421+
422+
if (prefetch_iterator)
423+
{
424+
while (node->prefetch_pages < node->prefetch_target)
425+
{
426+
TBMIterateResult *tbmpre = tbm_iterate(prefetch_iterator);
427+
428+
if (tbmpre == NULL)
429+
{
430+
/* No more pages to prefetch */
431+
tbm_end_iterate(prefetch_iterator);
432+
node->prefetch_iterator = NULL;
433+
break;
434+
}
435+
node->prefetch_pages++;
436+
PrefetchBuffer(scan->rs_rd, MAIN_FORKNUM, tbmpre->blockno);
437+
}
438+
}
439+
#endif /* USE_PREFETCH */
440+
}
441+
414442
/*
415443
* BitmapHeapRecheck -- access method routine to recheck a tuple in EvalPlanQual
416444
*/

0 commit comments

Comments
 (0)