/*-------------------------------------------------------------------------
 *
 * heapam_xlog.c
 *    WAL replay logic for heap access method.
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/access/heap/heapam_xlog.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/bufmask.h"
#include "access/heapam.h"
#include "access/visibilitymap.h"
#include "access/xlog.h"
#include "access/xlogutils.h"
#include "storage/freespace.h"
#include "storage/standby.h"

/*
 * Replay XLOG_HEAP2_PRUNE_* records.
 */
static void
heap_xlog_prune_freeze(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    char       *maindataptr = XLogRecGetData(record);
    xl_heap_prune xlrec;
    Buffer      buffer;
    RelFileLocator rlocator;
    BlockNumber blkno;
    XLogRedoAction action;

    XLogRecGetBlockTag(record, 0, &rlocator, NULL, &blkno);
    memcpy(&xlrec, maindataptr, SizeOfHeapPrune);
    maindataptr += SizeOfHeapPrune;

    /*
     * We will take an ordinary exclusive lock or a cleanup lock depending on
     * whether the XLHP_CLEANUP_LOCK flag is set. With an ordinary exclusive
     * lock, we better not be doing anything that requires moving existing
     * tuple data.
     */
    Assert((xlrec.flags & XLHP_CLEANUP_LOCK) != 0 ||
           (xlrec.flags & (XLHP_HAS_REDIRECTIONS | XLHP_HAS_DEAD_ITEMS)) == 0);

    /*
     * We are about to remove and/or freeze tuples. In Hot Standby mode,
     * ensure that there are no queries running for which the removed tuples
     * are still visible or which still consider the frozen xids as running.
     * The conflict horizon XID comes after xl_heap_prune.
     */
    if ((xlrec.flags & XLHP_HAS_CONFLICT_HORIZON) != 0)
    {
        TransactionId snapshot_conflict_horizon;

        /* memcpy() because snapshot_conflict_horizon is stored unaligned */
        memcpy(&snapshot_conflict_horizon, maindataptr, sizeof(TransactionId));
        maindataptr += sizeof(TransactionId);

        if (InHotStandby)
            ResolveRecoveryConflictWithSnapshot(snapshot_conflict_horizon,
                                                (xlrec.flags & XLHP_IS_CATALOG_REL) != 0,
                                                rlocator);
    }

    /*
     * If we have a full-page image, restore it and we're done.
     */
    action = XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL,
                                           (xlrec.flags & XLHP_CLEANUP_LOCK) != 0,
                                           &buffer);
    if (action == BLK_NEEDS_REDO)
    {
        Page        page = BufferGetPage(buffer);
        OffsetNumber *redirected;
        OffsetNumber *nowdead;
        OffsetNumber *nowunused;
        int         nredirected;
        int         ndead;
        int         nunused;
        int         nplans;
        Size        datalen;
        xlhp_freeze_plan *plans;
        OffsetNumber *frz_offsets;
        char       *dataptr = XLogRecGetBlockData(record, 0, &datalen);

        heap_xlog_deserialize_prune_and_freeze(dataptr, xlrec.flags,
                                               &nplans, &plans, &frz_offsets,
                                               &nredirected, &redirected,
                                               &ndead, &nowdead,
                                               &nunused, &nowunused);

        /*
         * Update all line pointers per the record, and repair fragmentation
         * if needed.
         */
        if (nredirected > 0 || ndead > 0 || nunused > 0)
            heap_page_prune_execute(buffer,
                                    (xlrec.flags & XLHP_CLEANUP_LOCK) == 0,
                                    redirected, nredirected,
                                    nowdead, ndead,
                                    nowunused, nunused);

        /* Freeze tuples */
        for (int p = 0; p < nplans; p++)
        {
            HeapTupleFreeze frz;

            /*
             * Convert freeze plan representation from WAL record into
             * per-tuple format used by heap_execute_freeze_tuple
             */
            frz.xmax = plans[p].xmax;
            frz.t_infomask2 = plans[p].t_infomask2;
            frz.t_infomask = plans[p].t_infomask;
            frz.frzflags = plans[p].frzflags;
            frz.offset = InvalidOffsetNumber;   /* unused, but be tidy */

            for (int i = 0; i < plans[p].ntuples; i++)
            {
                OffsetNumber offset = *(frz_offsets++);
                ItemId      lp;
                HeapTupleHeader tuple;

                lp = PageGetItemId(page, offset);
                tuple = (HeapTupleHeader) PageGetItem(page, lp);
                heap_execute_freeze_tuple(tuple, &frz);
            }
        }

        /* There should be no more data */
        Assert((char *) frz_offsets == dataptr + datalen);

        /*
         * Note: we don't worry about updating the page's prunability hints.
         * At worst this will cause an extra prune cycle to occur soon.
         */

        PageSetLSN(page, lsn);
        MarkBufferDirty(buffer);
    }

    /*
     * If we released any space or line pointers, update the free space map.
     *
     * Do this regardless of a full-page image being applied, since the FSM
     * data is not in the page anyway.
     */
    if (BufferIsValid(buffer))
    {
        if (xlrec.flags & (XLHP_HAS_REDIRECTIONS |
                           XLHP_HAS_DEAD_ITEMS |
                           XLHP_HAS_NOW_UNUSED_ITEMS))
        {
            Size        freespace = PageGetHeapFreeSpace(BufferGetPage(buffer));

            UnlockReleaseBuffer(buffer);

            XLogRecordPageWithFreeSpace(rlocator, blkno, freespace);
        }
        else
            UnlockReleaseBuffer(buffer);
    }
}
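
/*
 * For orientation, the overall layout of an XLOG_HEAP2_PRUNE_* record as
 * consumed above is roughly the following (heapam_xlog.h has the
 * authoritative description):
 *
 *     main data:    xl_heap_prune (flags)
 *                   [TransactionId conflict horizon, stored unaligned,
 *                    if XLHP_HAS_CONFLICT_HORIZON]
 *     block 0 data: [freeze plans, if XLHP_HAS_FREEZE_PLANS]
 *                   [redirected item offsets, if XLHP_HAS_REDIRECTIONS]
 *                   [dead item offsets, if XLHP_HAS_DEAD_ITEMS]
 *                   [unused item offsets, if XLHP_HAS_NOW_UNUSED_ITEMS]
 *                   [per-freeze-plan tuple offsets]
 *
 * which is why the conflict horizon is read with memcpy() and everything in
 * the block data is left to heap_xlog_deserialize_prune_and_freeze().
 */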

/*
 * Replay XLOG_HEAP2_VISIBLE records.
 *
 * The critical integrity requirement here is that we must never end up with
 * a situation where the visibility map bit is set, and the page-level
 * PD_ALL_VISIBLE bit is clear. If that were to occur, then a subsequent
 * page modification would fail to clear the visibility map bit.
 */
static void
heap_xlog_visible(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_heap_visible *xlrec = (xl_heap_visible *) XLogRecGetData(record);
    Buffer      vmbuffer = InvalidBuffer;
    Buffer      buffer;
    Page        page;
    RelFileLocator rlocator;
    BlockNumber blkno;
    XLogRedoAction action;

    Assert((xlrec->flags & VISIBILITYMAP_XLOG_VALID_BITS) == xlrec->flags);

    XLogRecGetBlockTag(record, 1, &rlocator, NULL, &blkno);

    /*
     * If there are any Hot Standby transactions running that have an xmin
     * horizon old enough that this page isn't all-visible for them, they
     * might incorrectly decide that an index-only scan can skip a heap fetch.
     *
     * NB: It might be better to throw some kind of "soft" conflict here that
     * forces any index-only scan that is in flight to perform heap fetches,
     * rather than killing the transaction outright.
     */
    if (InHotStandby)
        ResolveRecoveryConflictWithSnapshot(xlrec->snapshotConflictHorizon,
                                            xlrec->flags & VISIBILITYMAP_XLOG_CATALOG_REL,
                                            rlocator);

    /*
     * Read the heap page, if it still exists. If the heap file has been
     * dropped or truncated later in recovery, we don't need to update the
     * page, but we'd better still update the visibility map.
     */
    action = XLogReadBufferForRedo(record, 1, &buffer);
    if (action == BLK_NEEDS_REDO)
    {
        /*
         * We don't bump the LSN of the heap page when setting the visibility
         * map bit (unless checksums or wal_log_hints is enabled, in which
         * case we must). This exposes us to torn page hazards, but since
         * we're not inspecting the existing page contents in any way, we
         * don't care.
         */
        page = BufferGetPage(buffer);

        PageSetAllVisible(page);

        if (XLogHintBitIsNeeded())
            PageSetLSN(page, lsn);

        MarkBufferDirty(buffer);
    }
    else if (action == BLK_RESTORED)
    {
        /*
         * If heap block was backed up, we already restored it and there's
         * nothing more to do. (This can only happen with checksums or
         * wal_log_hints enabled.)
         */
    }

    if (BufferIsValid(buffer))
    {
        Size        space = PageGetFreeSpace(BufferGetPage(buffer));

        UnlockReleaseBuffer(buffer);

        /*
         * Since FSM is not WAL-logged and only updated heuristically, it
         * easily becomes stale in standbys. If the standby is later promoted
         * and runs VACUUM, it will skip updating individual free space
         * figures for pages that became all-visible (or all-frozen, depending
         * on the vacuum mode), which is troublesome when FreeSpaceMapVacuum
         * propagates overly optimistic free space values to upper FSM layers;
         * later inserters try to use such pages only to find out that they
         * are unusable. This can cause long stalls when there are many such
         * pages.
         *
         * Forestall those problems by updating FSM's idea about a page that
         * is becoming all-visible or all-frozen.
         *
         * Do this regardless of a full-page image being applied, since the
         * FSM data is not in the page anyway.
         */
        if (xlrec->flags & VISIBILITYMAP_VALID_BITS)
            XLogRecordPageWithFreeSpace(rlocator, blkno, space);
    }

    /*
     * Even if we skipped the heap page update due to the LSN interlock, it's
     * still safe to update the visibility map. Any WAL record that clears
     * the visibility map bit does so before checking the page LSN, so any
     * bits that need to be cleared will still be cleared.
     */
    if (XLogReadBufferForRedoExtended(record, 0, RBM_ZERO_ON_ERROR, false,
                                      &vmbuffer) == BLK_NEEDS_REDO)
    {
        Page        vmpage = BufferGetPage(vmbuffer);
        Relation    reln;
        uint8       vmbits;

        /* initialize the page if it was read as zeros */
        if (PageIsNew(vmpage))
            PageInit(vmpage, BLCKSZ, 0);

        /* remove VISIBILITYMAP_XLOG_* */
        vmbits = xlrec->flags & VISIBILITYMAP_VALID_BITS;

        /*
         * XLogReadBufferForRedoExtended locked the buffer. But
         * visibilitymap_set will handle locking itself.
         */
        LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);

        reln = CreateFakeRelcacheEntry(rlocator);

        visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
                          xlrec->snapshotConflictHorizon, vmbits);

        ReleaseBuffer(vmbuffer);
        FreeFakeRelcacheEntry(reln);
    }
    else if (BufferIsValid(vmbuffer))
        UnlockReleaseBuffer(vmbuffer);
}
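
/*
 * The invariant maintained above can be stated as: a set visibility map bit
 * implies PD_ALL_VISIBLE on the heap page, never the other way around. A
 * minimal sketch of a consistency check over one block (assuming the caller
 * already holds a pin and lock on the heap buffer) could look like:
 *
 *     uint8    vmstatus = visibilitymap_get_status(rel, blkno, &vmbuf);
 *
 *     if ((vmstatus & VISIBILITYMAP_ALL_VISIBLE) != 0)
 *         Assert(PageIsAllVisible(BufferGetPage(heapbuf)));
 *
 * The reverse situation (PD_ALL_VISIBLE set, map bit clear) is harmless:
 * a later VACUUM simply sets the map bit again.
 */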

/*
 * Given an "infobits" field from an XLog record, set the correct bits in the
 * given infomask and infomask2 for the tuple touched by the record.
 *
 * (This is the reverse of compute_infobits).
 */
static void
fix_infomask_from_infobits(uint8 infobits, uint16 *infomask, uint16 *infomask2)
{
    *infomask &= ~(HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY |
                   HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_EXCL_LOCK);
    *infomask2 &= ~HEAP_KEYS_UPDATED;

    if (infobits & XLHL_XMAX_IS_MULTI)
        *infomask |= HEAP_XMAX_IS_MULTI;
    if (infobits & XLHL_XMAX_LOCK_ONLY)
        *infomask |= HEAP_XMAX_LOCK_ONLY;
    if (infobits & XLHL_XMAX_EXCL_LOCK)
        *infomask |= HEAP_XMAX_EXCL_LOCK;
    /* note HEAP_XMAX_SHR_LOCK isn't considered here */
    if (infobits & XLHL_XMAX_KEYSHR_LOCK)
        *infomask |= HEAP_XMAX_KEYSHR_LOCK;

    if (infobits & XLHL_KEYS_UPDATED)
        *infomask2 |= HEAP_KEYS_UPDATED;
}
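
/*
 * For reference, the forward direction -- packing the infomask bits into the
 * single infobits byte written to WAL -- lives in heapam.c as
 * compute_infobits(); a sketch of that encoding:
 *
 *     static uint8
 *     compute_infobits(uint16 infomask, uint16 infomask2)
 *     {
 *         return
 *             ((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
 *             ((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
 *             ((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
 *             ((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
 *             ((infomask2 & HEAP_KEYS_UPDATED) != 0 ? XLHL_KEYS_UPDATED : 0);
 *     }
 *
 * HEAP_XMAX_SHR_LOCK needs no XLHL_* bit of its own because it is defined as
 * (HEAP_XMAX_EXCL_LOCK | HEAP_XMAX_KEYSHR_LOCK): a shared lock round-trips
 * through WAL as those two bits together.
 */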

/*
 * Replay XLOG_HEAP_DELETE records.
 */
static void
heap_xlog_delete(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_heap_delete *xlrec = (xl_heap_delete *) XLogRecGetData(record);
    Buffer      buffer;
    Page        page;
    ItemId      lp = NULL;
    HeapTupleHeader htup;
    BlockNumber blkno;
    RelFileLocator target_locator;
    ItemPointerData target_tid;

    XLogRecGetBlockTag(record, 0, &target_locator, NULL, &blkno);
    ItemPointerSetBlockNumber(&target_tid, blkno);
    ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);

    /*
     * The visibility map may need to be fixed even if the heap page is
     * already up-to-date.
     */
    if (xlrec->flags & XLH_DELETE_ALL_VISIBLE_CLEARED)
    {
        Relation    reln = CreateFakeRelcacheEntry(target_locator);
        Buffer      vmbuffer = InvalidBuffer;

        visibilitymap_pin(reln, blkno, &vmbuffer);
        visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
        ReleaseBuffer(vmbuffer);
        FreeFakeRelcacheEntry(reln);
    }

    if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    {
        page = BufferGetPage(buffer);

        if (PageGetMaxOffsetNumber(page) >= xlrec->offnum)
            lp = PageGetItemId(page, xlrec->offnum);

        if (PageGetMaxOffsetNumber(page) < xlrec->offnum || !ItemIdIsNormal(lp))
            elog(PANIC, "invalid lp");

        htup = (HeapTupleHeader) PageGetItem(page, lp);

        htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
        htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
        HeapTupleHeaderClearHotUpdated(htup);
        fix_infomask_from_infobits(xlrec->infobits_set,
                                   &htup->t_infomask, &htup->t_infomask2);
        if (!(xlrec->flags & XLH_DELETE_IS_SUPER))
            HeapTupleHeaderSetXmax(htup, xlrec->xmax);
        else
            HeapTupleHeaderSetXmin(htup, InvalidTransactionId);
        HeapTupleHeaderSetCmax(htup, FirstCommandId, false);

        /* Mark the page as a candidate for pruning */
        PageSetPrunable(page, XLogRecGetXid(record));

        if (xlrec->flags & XLH_DELETE_ALL_VISIBLE_CLEARED)
            PageClearAllVisible(page);

        /* Make sure t_ctid is set correctly */
        if (xlrec->flags & XLH_DELETE_IS_PARTITION_MOVE)
            HeapTupleHeaderSetMovedPartitions(htup);
        else
            htup->t_ctid = target_tid;
        PageSetLSN(page, lsn);
        MarkBufferDirty(buffer);
    }
    if (BufferIsValid(buffer))
        UnlockReleaseBuffer(buffer);
}
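
/*
 * A note on the XLH_DELETE_IS_SUPER branch above: super-deletion is how a
 * speculative insertion (INSERT ... ON CONFLICT) is backed out when the
 * speculation fails. Instead of setting xmax like a normal delete, replay
 * invalidates the tuple's xmin:
 *
 *     HeapTupleHeaderSetXmin(htup, InvalidTransactionId);
 *
 * so the tuple is dead regardless of what later happens to the inserting
 * transaction. (See heap_abort_speculative() in heapam.c for the
 * originating side.)
 */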

/*
 * Replay XLOG_HEAP_INSERT records.
 */
static void
heap_xlog_insert(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_heap_insert *xlrec = (xl_heap_insert *) XLogRecGetData(record);
    Buffer      buffer;
    Page        page;
    union
    {
        HeapTupleHeaderData hdr;
        char        data[MaxHeapTupleSize];
    }           tbuf;
    HeapTupleHeader htup;
    xl_heap_header xlhdr;
    uint32      newlen;
    Size        freespace = 0;
    RelFileLocator target_locator;
    BlockNumber blkno;
    ItemPointerData target_tid;
    XLogRedoAction action;

    XLogRecGetBlockTag(record, 0, &target_locator, NULL, &blkno);
    ItemPointerSetBlockNumber(&target_tid, blkno);
    ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);

    /* No freezing in the heap_insert() code path */
    Assert(!(xlrec->flags & XLH_INSERT_ALL_FROZEN_SET));

    /*
     * The visibility map may need to be fixed even if the heap page is
     * already up-to-date.
     */
    if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
    {
        Relation    reln = CreateFakeRelcacheEntry(target_locator);
        Buffer      vmbuffer = InvalidBuffer;

        visibilitymap_pin(reln, blkno, &vmbuffer);
        visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
        ReleaseBuffer(vmbuffer);
        FreeFakeRelcacheEntry(reln);
    }

    /*
     * If we inserted the first and only tuple on the page, re-initialize the
     * page from scratch.
     */
    if (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE)
    {
        buffer = XLogInitBufferForRedo(record, 0);
        page = BufferGetPage(buffer);
        PageInit(page, BufferGetPageSize(buffer), 0);
        action = BLK_NEEDS_REDO;
    }
    else
        action = XLogReadBufferForRedo(record, 0, &buffer);
    if (action == BLK_NEEDS_REDO)
    {
        Size        datalen;
        char       *data;

        page = BufferGetPage(buffer);

        if (PageGetMaxOffsetNumber(page) + 1 < xlrec->offnum)
            elog(PANIC, "invalid max offset number");

        data = XLogRecGetBlockData(record, 0, &datalen);

        newlen = datalen - SizeOfHeapHeader;
        Assert(datalen > SizeOfHeapHeader && newlen <= MaxHeapTupleSize);
        memcpy(&xlhdr, data, SizeOfHeapHeader);
        data += SizeOfHeapHeader;

        htup = &tbuf.hdr;
        MemSet((char *) htup, 0, SizeofHeapTupleHeader);
        /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
        memcpy((char *) htup + SizeofHeapTupleHeader,
               data,
               newlen);
        newlen += SizeofHeapTupleHeader;
        htup->t_infomask2 = xlhdr.t_infomask2;
        htup->t_infomask = xlhdr.t_infomask;
        htup->t_hoff = xlhdr.t_hoff;
        HeapTupleHeaderSetXmin(htup, XLogRecGetXid(record));
        HeapTupleHeaderSetCmin(htup, FirstCommandId);
        htup->t_ctid = target_tid;

        if (PageAddItem(page, (Item) htup, newlen, xlrec->offnum,
                        true, true) == InvalidOffsetNumber)
            elog(PANIC, "failed to add tuple");

        freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */

        PageSetLSN(page, lsn);

        if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
            PageClearAllVisible(page);

        MarkBufferDirty(buffer);
    }
    if (BufferIsValid(buffer))
        UnlockReleaseBuffer(buffer);

    /*
     * If the page is running low on free space, update the FSM as well.
     * Arbitrarily, our definition of "low" is less than 20%. We can't do much
     * better than that without knowing the fill-factor for the table.
     *
     * XXX: Don't do this if the page was restored from full page image. We
     * don't bother to update the FSM in that case, it doesn't need to be
     * totally accurate anyway.
     */
    if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
        XLogRecordPageWithFreeSpace(target_locator, blkno, freespace);
}
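
/*
 * Concretely, with the default BLCKSZ of 8192 the "less than 20%" threshold
 * above works out to BLCKSZ / 5 = 1638 bytes: only when a replayed insert
 * leaves less free space than that do we pay for an FSM update.
 */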

/*
 * Replay XLOG_HEAP2_MULTI_INSERT records.
 */
static void
heap_xlog_multi_insert(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_heap_multi_insert *xlrec;
    RelFileLocator rlocator;
    BlockNumber blkno;
    Buffer      buffer;
    Page        page;
    union
    {
        HeapTupleHeaderData hdr;
        char        data[MaxHeapTupleSize];
    }           tbuf;
    HeapTupleHeader htup;
    uint32      newlen;
    Size        freespace = 0;
    int         i;
    bool        isinit = (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE) != 0;
    XLogRedoAction action;

    /*
     * Insertion doesn't overwrite MVCC data, so no conflict processing is
     * required.
     */
    xlrec = (xl_heap_multi_insert *) XLogRecGetData(record);

    XLogRecGetBlockTag(record, 0, &rlocator, NULL, &blkno);

    /* check that the mutually exclusive flags are not both set */
    Assert(!((xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED) &&
             (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)));

    /*
     * The visibility map may need to be fixed even if the heap page is
     * already up-to-date.
     */
    if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
    {
        Relation    reln = CreateFakeRelcacheEntry(rlocator);
        Buffer      vmbuffer = InvalidBuffer;

        visibilitymap_pin(reln, blkno, &vmbuffer);
        visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
        ReleaseBuffer(vmbuffer);
        FreeFakeRelcacheEntry(reln);
    }

    if (isinit)
    {
        buffer = XLogInitBufferForRedo(record, 0);
        page = BufferGetPage(buffer);
        PageInit(page, BufferGetPageSize(buffer), 0);
        action = BLK_NEEDS_REDO;
    }
    else
        action = XLogReadBufferForRedo(record, 0, &buffer);
    if (action == BLK_NEEDS_REDO)
    {
        char       *tupdata;
        char       *endptr;
        Size        len;

        /* Tuples are stored as block data */
        tupdata = XLogRecGetBlockData(record, 0, &len);
        endptr = tupdata + len;

        page = BufferGetPage(buffer);

        for (i = 0; i < xlrec->ntuples; i++)
        {
            OffsetNumber offnum;
            xl_multi_insert_tuple *xlhdr;

            /*
             * If we're reinitializing the page, the tuples are stored in
             * order from FirstOffsetNumber. Otherwise there's an array of
             * offsets in the WAL record, and the tuples come after that.
             */
            if (isinit)
                offnum = FirstOffsetNumber + i;
            else
                offnum = xlrec->offsets[i];
            if (PageGetMaxOffsetNumber(page) + 1 < offnum)
                elog(PANIC, "invalid max offset number");

            xlhdr = (xl_multi_insert_tuple *) SHORTALIGN(tupdata);
            tupdata = ((char *) xlhdr) + SizeOfMultiInsertTuple;

            newlen = xlhdr->datalen;
            Assert(newlen <= MaxHeapTupleSize);
            htup = &tbuf.hdr;
            MemSet((char *) htup, 0, SizeofHeapTupleHeader);
            /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
            memcpy((char *) htup + SizeofHeapTupleHeader,
                   tupdata,
                   newlen);
            tupdata += newlen;

            newlen += SizeofHeapTupleHeader;
            htup->t_infomask2 = xlhdr->t_infomask2;
            htup->t_infomask = xlhdr->t_infomask;
            htup->t_hoff = xlhdr->t_hoff;
            HeapTupleHeaderSetXmin(htup, XLogRecGetXid(record));
            HeapTupleHeaderSetCmin(htup, FirstCommandId);
            ItemPointerSetBlockNumber(&htup->t_ctid, blkno);
            ItemPointerSetOffsetNumber(&htup->t_ctid, offnum);

            offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
            if (offnum == InvalidOffsetNumber)
                elog(PANIC, "failed to add tuple");
        }
        if (tupdata != endptr)
            elog(PANIC, "total tuple length mismatch");

        freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */

        PageSetLSN(page, lsn);

        if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
            PageClearAllVisible(page);

        /* XLH_INSERT_ALL_FROZEN_SET implies that all tuples are visible */
        if (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)
            PageSetAllVisible(page);

        MarkBufferDirty(buffer);
    }
    if (BufferIsValid(buffer))
        UnlockReleaseBuffer(buffer);

    /*
     * If the page is running low on free space, update the FSM as well.
     * Arbitrarily, our definition of "low" is less than 20%. We can't do much
     * better than that without knowing the fill-factor for the table.
     *
     * XXX: Don't do this if the page was restored from full page image. We
     * don't bother to update the FSM in that case, it doesn't need to be
     * totally accurate anyway.
     */
    if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
        XLogRecordPageWithFreeSpace(rlocator, blkno, freespace);
}
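
/*
 * For orientation, the block 0 data decoded by the loop above is a packed
 * sequence of per-tuple entries, each a 2-byte-aligned xl_multi_insert_tuple
 * header followed immediately by the tuple body (null bitmap [+ padding]
 * [+ oid] + data), roughly:
 *
 *     SHORTALIGN -> xl_multi_insert_tuple { datalen, t_infomask2,
 *                                           t_infomask, t_hoff }
 *                   <datalen bytes of tuple body>
 *     SHORTALIGN -> next entry ...
 *
 * which is why tupdata is re-aligned with SHORTALIGN() at the top of each
 * iteration before being advanced by SizeOfMultiInsertTuple and datalen.
 */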

/*
 * Replay XLOG_HEAP_UPDATE and XLOG_HEAP_HOT_UPDATE records.
 */
static void
heap_xlog_update(XLogReaderState *record, bool hot_update)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_heap_update *xlrec = (xl_heap_update *) XLogRecGetData(record);
    RelFileLocator rlocator;
    BlockNumber oldblk;
    BlockNumber newblk;
    ItemPointerData newtid;
    Buffer      obuffer,
                nbuffer;
    Page        page;
    OffsetNumber offnum;
    ItemId      lp = NULL;
    HeapTupleData oldtup;
    HeapTupleHeader htup;
    uint16      prefixlen = 0,
                suffixlen = 0;
    char       *newp;
    union
    {
        HeapTupleHeaderData hdr;
        char        data[MaxHeapTupleSize];
    }           tbuf;
    xl_heap_header xlhdr;
    uint32      newlen;
    Size        freespace = 0;
    XLogRedoAction oldaction;
    XLogRedoAction newaction;

    /* initialize to keep the compiler quiet */
    oldtup.t_data = NULL;
    oldtup.t_len = 0;

    XLogRecGetBlockTag(record, 0, &rlocator, NULL, &newblk);
    if (XLogRecGetBlockTagExtended(record, 1, NULL, NULL, &oldblk, NULL))
    {
        /* HOT updates are never done across pages */
        Assert(!hot_update);
    }
    else
        oldblk = newblk;

    ItemPointerSet(&newtid, newblk, xlrec->new_offnum);

    /*
     * The visibility map may need to be fixed even if the heap page is
     * already up-to-date.
     */
    if (xlrec->flags & XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED)
    {
        Relation    reln = CreateFakeRelcacheEntry(rlocator);
        Buffer      vmbuffer = InvalidBuffer;

        visibilitymap_pin(reln, oldblk, &vmbuffer);
        visibilitymap_clear(reln, oldblk, vmbuffer, VISIBILITYMAP_VALID_BITS);
        ReleaseBuffer(vmbuffer);
        FreeFakeRelcacheEntry(reln);
    }

    /*
     * In normal operation, it is important to lock the two pages in
     * page-number order, to avoid possible deadlocks against other update
     * operations going the other way. However, during WAL replay there can
     * be no other update happening, so we don't need to worry about that. But
     * we *do* need to worry that we don't expose an inconsistent state to Hot
     * Standby queries --- so the original page can't be unlocked before we've
     * added the new tuple to the new page.
     */

    /* Deal with old tuple version */
    oldaction = XLogReadBufferForRedo(record, (oldblk == newblk) ? 0 : 1,
                                      &obuffer);
    if (oldaction == BLK_NEEDS_REDO)
    {
        page = BufferGetPage(obuffer);
        offnum = xlrec->old_offnum;
        if (PageGetMaxOffsetNumber(page) >= offnum)
            lp = PageGetItemId(page, offnum);

        if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
            elog(PANIC, "invalid lp");

        htup = (HeapTupleHeader) PageGetItem(page, lp);

        oldtup.t_data = htup;
        oldtup.t_len = ItemIdGetLength(lp);

        htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
        htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
        if (hot_update)
            HeapTupleHeaderSetHotUpdated(htup);
        else
            HeapTupleHeaderClearHotUpdated(htup);
        fix_infomask_from_infobits(xlrec->old_infobits_set, &htup->t_infomask,
                                   &htup->t_infomask2);
        HeapTupleHeaderSetXmax(htup, xlrec->old_xmax);
        HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
        /* Set forward chain link in t_ctid */
        htup->t_ctid = newtid;

        /* Mark the page as a candidate for pruning */
        PageSetPrunable(page, XLogRecGetXid(record));

        if (xlrec->flags & XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED)
            PageClearAllVisible(page);

        PageSetLSN(page, lsn);
        MarkBufferDirty(obuffer);
    }

    /*
     * Read the page the new tuple goes into, if different from old.
     */
    if (oldblk == newblk)
    {
        nbuffer = obuffer;
        newaction = oldaction;
    }
    else if (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE)
    {
        nbuffer = XLogInitBufferForRedo(record, 0);
        page = BufferGetPage(nbuffer);
        PageInit(page, BufferGetPageSize(nbuffer), 0);
        newaction = BLK_NEEDS_REDO;
    }
    else
        newaction = XLogReadBufferForRedo(record, 0, &nbuffer);

    /*
     * The visibility map may need to be fixed even if the heap page is
     * already up-to-date.
     */
    if (xlrec->flags & XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED)
    {
        Relation    reln = CreateFakeRelcacheEntry(rlocator);
        Buffer      vmbuffer = InvalidBuffer;

        visibilitymap_pin(reln, newblk, &vmbuffer);
        visibilitymap_clear(reln, newblk, vmbuffer, VISIBILITYMAP_VALID_BITS);
        ReleaseBuffer(vmbuffer);
        FreeFakeRelcacheEntry(reln);
    }

    /* Deal with new tuple */
    if (newaction == BLK_NEEDS_REDO)
    {
        char       *recdata;
        char       *recdata_end;
        Size        datalen;
        Size        tuplen;

        recdata = XLogRecGetBlockData(record, 0, &datalen);
        recdata_end = recdata + datalen;

        page = BufferGetPage(nbuffer);

        offnum = xlrec->new_offnum;
        if (PageGetMaxOffsetNumber(page) + 1 < offnum)
            elog(PANIC, "invalid max offset number");

        if (xlrec->flags & XLH_UPDATE_PREFIX_FROM_OLD)
        {
            Assert(newblk == oldblk);
            memcpy(&prefixlen, recdata, sizeof(uint16));
            recdata += sizeof(uint16);
        }
        if (xlrec->flags & XLH_UPDATE_SUFFIX_FROM_OLD)
        {
            Assert(newblk == oldblk);
            memcpy(&suffixlen, recdata, sizeof(uint16));
            recdata += sizeof(uint16);
        }

        memcpy(&xlhdr, recdata, SizeOfHeapHeader);
        recdata += SizeOfHeapHeader;

        tuplen = recdata_end - recdata;
        Assert(tuplen <= MaxHeapTupleSize);

        htup = &tbuf.hdr;
        MemSet((char *) htup, 0, SizeofHeapTupleHeader);

        /*
         * Reconstruct the new tuple using the prefix and/or suffix from the
         * old tuple, and the data stored in the WAL record.
         */
        newp = (char *) htup + SizeofHeapTupleHeader;
        if (prefixlen > 0)
        {
            int         len;

            /* copy bitmap [+ padding] [+ oid] from WAL record */
            len = xlhdr.t_hoff - SizeofHeapTupleHeader;
            memcpy(newp, recdata, len);
            recdata += len;
            newp += len;

            /* copy prefix from old tuple */
            memcpy(newp, (char *) oldtup.t_data + oldtup.t_data->t_hoff, prefixlen);
            newp += prefixlen;

            /* copy new tuple data from WAL record */
            len = tuplen - (xlhdr.t_hoff - SizeofHeapTupleHeader);
            memcpy(newp, recdata, len);
            recdata += len;
            newp += len;
        }
        else
        {
            /*
             * copy bitmap [+ padding] [+ oid] + data from record, all in one
             * go
             */
            memcpy(newp, recdata, tuplen);
            recdata += tuplen;
            newp += tuplen;
        }
        Assert(recdata == recdata_end);

        /* copy suffix from old tuple */
        if (suffixlen > 0)
            memcpy(newp, (char *) oldtup.t_data + oldtup.t_len - suffixlen, suffixlen);

        newlen = SizeofHeapTupleHeader + tuplen + prefixlen + suffixlen;
        htup->t_infomask2 = xlhdr.t_infomask2;
        htup->t_infomask = xlhdr.t_infomask;
        htup->t_hoff = xlhdr.t_hoff;

        HeapTupleHeaderSetXmin(htup, XLogRecGetXid(record));
        HeapTupleHeaderSetCmin(htup, FirstCommandId);
        HeapTupleHeaderSetXmax(htup, xlrec->new_xmax);
        /* Make sure there is no forward chain link in t_ctid */
        htup->t_ctid = newtid;

        offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
        if (offnum == InvalidOffsetNumber)
            elog(PANIC, "failed to add tuple");

        if (xlrec->flags & XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED)
            PageClearAllVisible(page);

        freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */

        PageSetLSN(page, lsn);
        MarkBufferDirty(nbuffer);
    }

    if (BufferIsValid(nbuffer) && nbuffer != obuffer)
        UnlockReleaseBuffer(nbuffer);
    if (BufferIsValid(obuffer))
        UnlockReleaseBuffer(obuffer);

    /*
     * If the new page is running low on free space, update the FSM as well.
     * Arbitrarily, our definition of "low" is less than 20%. We can't do much
     * better than that without knowing the fill-factor for the table.
     *
     * However, don't update the FSM on HOT updates, because after crash
     * recovery, either the old or the new tuple will certainly be dead and
     * prunable. After pruning, the page will have roughly as much free space
     * as it did before the update, assuming the new tuple is about the same
     * size as the old one.
     *
     * XXX: Don't do this if the page was restored from full page image. We
     * don't bother to update the FSM in that case, it doesn't need to be
     * totally accurate anyway.
     */
    if (newaction == BLK_NEEDS_REDO && !hot_update && freespace < BLCKSZ / 5)
        XLogRecordPageWithFreeSpace(rlocator, newblk, freespace);
}
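
/*
 * A small worked example of the prefix/suffix reconstruction above: suppose
 * the old tuple's data is "AAAABBBBCCCC" and the update changes only the
 * middle third to "XXXX". The WAL record then carries prefixlen = 4,
 * suffixlen = 4 and just the 4 changed bytes; replay copies "AAAA" from the
 * old tuple, then "XXXX" from the record, then "CCCC" from the old tuple.
 * The final length is computed as
 *
 *     newlen = SizeofHeapTupleHeader + tuplen + prefixlen + suffixlen;
 *
 * where tuplen covers the null bitmap [+ padding] plus the 4 new bytes.
 */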

/*
 * Replay XLOG_HEAP_CONFIRM records.
 */
static void
heap_xlog_confirm(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_heap_confirm *xlrec = (xl_heap_confirm *) XLogRecGetData(record);
    Buffer      buffer;
    Page        page;
    OffsetNumber offnum;
    ItemId      lp = NULL;
    HeapTupleHeader htup;

    if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    {
        page = BufferGetPage(buffer);

        offnum = xlrec->offnum;
        if (PageGetMaxOffsetNumber(page) >= offnum)
            lp = PageGetItemId(page, offnum);

        if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
            elog(PANIC, "invalid lp");

        htup = (HeapTupleHeader) PageGetItem(page, lp);

        /*
         * Confirm tuple as actually inserted
         */
        ItemPointerSet(&htup->t_ctid, BufferGetBlockNumber(buffer), offnum);

        PageSetLSN(page, lsn);
        MarkBufferDirty(buffer);
    }
    if (BufferIsValid(buffer))
        UnlockReleaseBuffer(buffer);
}

/*
 * Replay XLOG_HEAP_LOCK records.
 */
static void
heap_xlog_lock(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_heap_lock *xlrec = (xl_heap_lock *) XLogRecGetData(record);
    Buffer      buffer;
    Page        page;
    OffsetNumber offnum;
    ItemId      lp = NULL;
    HeapTupleHeader htup;

    /*
     * The visibility map may need to be fixed even if the heap page is
     * already up-to-date.
     */
    if (xlrec->flags & XLH_LOCK_ALL_FROZEN_CLEARED)
    {
        RelFileLocator rlocator;
        Buffer      vmbuffer = InvalidBuffer;
        BlockNumber block;
        Relation    reln;

        XLogRecGetBlockTag(record, 0, &rlocator, NULL, &block);
        reln = CreateFakeRelcacheEntry(rlocator);

        visibilitymap_pin(reln, block, &vmbuffer);
        visibilitymap_clear(reln, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN);

        ReleaseBuffer(vmbuffer);
        FreeFakeRelcacheEntry(reln);
    }

    if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    {
        page = BufferGetPage(buffer);

        offnum = xlrec->offnum;
        if (PageGetMaxOffsetNumber(page) >= offnum)
            lp = PageGetItemId(page, offnum);

        if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
            elog(PANIC, "invalid lp");

        htup = (HeapTupleHeader) PageGetItem(page, lp);

        htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
        htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
        fix_infomask_from_infobits(xlrec->infobits_set, &htup->t_infomask,
                                   &htup->t_infomask2);

        /*
         * Clear relevant update flags, but only if the modified infomask says
         * there's no update.
         */
        if (HEAP_XMAX_IS_LOCKED_ONLY(htup->t_infomask))
        {
            HeapTupleHeaderClearHotUpdated(htup);
            /* Make sure there is no forward chain link in t_ctid */
            ItemPointerSet(&htup->t_ctid,
                           BufferGetBlockNumber(buffer),
                           offnum);
        }
        HeapTupleHeaderSetXmax(htup, xlrec->xmax);
        HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
        PageSetLSN(page, lsn);
        MarkBufferDirty(buffer);
    }
    if (BufferIsValid(buffer))
        UnlockReleaseBuffer(buffer);
}
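
/*
 * The HEAP_XMAX_IS_LOCKED_ONLY() test above is what distinguishes a
 * lock-only xmax from a deleting or updating one; roughly (see
 * htup_details.h for the authoritative definition) it amounts to:
 *
 *     (infomask & HEAP_XMAX_LOCK_ONLY) ||
 *     ((infomask & (HEAP_XMAX_IS_MULTI | HEAP_LOCK_MASK)) ==
 *      HEAP_XMAX_EXCL_LOCK)
 *
 * the second arm catching rows locked FOR UPDATE by pre-9.3 releases, which
 * did not set HEAP_XMAX_LOCK_ONLY.
 */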

/*
 * Replay XLOG_HEAP2_LOCK_UPDATED records.
 */
static void
heap_xlog_lock_updated(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_heap_lock_updated *xlrec;
    Buffer      buffer;
    Page        page;
    OffsetNumber offnum;
    ItemId      lp = NULL;
    HeapTupleHeader htup;

    xlrec = (xl_heap_lock_updated *) XLogRecGetData(record);

    /*
     * The visibility map may need to be fixed even if the heap page is
     * already up-to-date.
     */
    if (xlrec->flags & XLH_LOCK_ALL_FROZEN_CLEARED)
    {
        RelFileLocator rlocator;
        Buffer      vmbuffer = InvalidBuffer;
        BlockNumber block;
        Relation    reln;

        XLogRecGetBlockTag(record, 0, &rlocator, NULL, &block);
        reln = CreateFakeRelcacheEntry(rlocator);

        visibilitymap_pin(reln, block, &vmbuffer);
        visibilitymap_clear(reln, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN);

        ReleaseBuffer(vmbuffer);
        FreeFakeRelcacheEntry(reln);
    }

    if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    {
        page = BufferGetPage(buffer);

        offnum = xlrec->offnum;
        if (PageGetMaxOffsetNumber(page) >= offnum)
            lp = PageGetItemId(page, offnum);

        if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
            elog(PANIC, "invalid lp");

        htup = (HeapTupleHeader) PageGetItem(page, lp);

        htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
        htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
        fix_infomask_from_infobits(xlrec->infobits_set, &htup->t_infomask,
                                   &htup->t_infomask2);
        HeapTupleHeaderSetXmax(htup, xlrec->xmax);

        PageSetLSN(page, lsn);
        MarkBufferDirty(buffer);
    }
    if (BufferIsValid(buffer))
        UnlockReleaseBuffer(buffer);
}

/*
 * Replay XLOG_HEAP_INPLACE records.
 */
static void
heap_xlog_inplace(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_heap_inplace *xlrec = (xl_heap_inplace *) XLogRecGetData(record);
    Buffer      buffer;
    Page        page;
    OffsetNumber offnum;
    ItemId      lp = NULL;
    HeapTupleHeader htup;
    uint32      oldlen;
    Size        newlen;

    if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    {
        char       *newtup = XLogRecGetBlockData(record, 0, &newlen);

        page = BufferGetPage(buffer);

        offnum = xlrec->offnum;
        if (PageGetMaxOffsetNumber(page) >= offnum)
            lp = PageGetItemId(page, offnum);

        if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
            elog(PANIC, "invalid lp");

        htup = (HeapTupleHeader) PageGetItem(page, lp);

        oldlen = ItemIdGetLength(lp) - htup->t_hoff;
        if (oldlen != newlen)
            elog(PANIC, "wrong tuple length");

        memcpy((char *) htup + htup->t_hoff, newtup, newlen);

        PageSetLSN(page, lsn);
        MarkBufferDirty(buffer);
    }
    if (BufferIsValid(buffer))
        UnlockReleaseBuffer(buffer);

    ProcessCommittedInvalidationMessages(xlrec->msgs,
                                         xlrec->nmsgs,
                                         xlrec->relcacheInitFileInval,
                                         xlrec->dbId,
                                         xlrec->tsId);
}
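
/*
 * In-place updates bypass MVCC entirely: the new tuple body overwrites the
 * old one at the same offset, which is why replay insists the lengths match
 * exactly ("wrong tuple length" above). They are reserved for system
 * catalog fields that must be visible regardless of transaction state, such
 * as VACUUM updating pg_class.relfrozenxid, and hence the record also
 * carries catalog invalidation messages, replayed here via
 * ProcessCommittedInvalidationMessages().
 */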

void
heap_redo(XLogReaderState *record)
{
    uint8       info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;

    /*
     * These operations don't overwrite MVCC data so no conflict processing is
     * required. The ones in heap2 rmgr do.
     */

    switch (info & XLOG_HEAP_OPMASK)
    {
        case XLOG_HEAP_INSERT:
            heap_xlog_insert(record);
            break;
        case XLOG_HEAP_DELETE:
            heap_xlog_delete(record);
            break;
        case XLOG_HEAP_UPDATE:
            heap_xlog_update(record, false);
            break;
        case XLOG_HEAP_TRUNCATE:

            /*
             * TRUNCATE is a no-op because the actions are already logged as
             * SMGR WAL records. The TRUNCATE WAL record only exists for
             * logical decoding.
             */
            break;
        case XLOG_HEAP_HOT_UPDATE:
            heap_xlog_update(record, true);
            break;
        case XLOG_HEAP_CONFIRM:
            heap_xlog_confirm(record);
            break;
        case XLOG_HEAP_LOCK:
            heap_xlog_lock(record);
            break;
        case XLOG_HEAP_INPLACE:
            heap_xlog_inplace(record);
            break;
        default:
            elog(PANIC, "heap_redo: unknown op code %u", info);
    }
}

void
heap2_redo(XLogReaderState *record)
{
    uint8       info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;

    switch (info & XLOG_HEAP_OPMASK)
    {
        case XLOG_HEAP2_PRUNE_ON_ACCESS:
        case XLOG_HEAP2_PRUNE_VACUUM_SCAN:
        case XLOG_HEAP2_PRUNE_VACUUM_CLEANUP:
            heap_xlog_prune_freeze(record);
            break;
        case XLOG_HEAP2_VISIBLE:
            heap_xlog_visible(record);
            break;
        case XLOG_HEAP2_MULTI_INSERT:
            heap_xlog_multi_insert(record);
            break;
        case XLOG_HEAP2_LOCK_UPDATED:
            heap_xlog_lock_updated(record);
            break;
        case XLOG_HEAP2_NEW_CID:

            /*
             * Nothing to do on a real replay, only used during logical
             * decoding.
             */
            break;
        case XLOG_HEAP2_REWRITE:
            heap_xlog_logical_rewrite(record);
            break;
        default:
            elog(PANIC, "heap2_redo: unknown op code %u", info);
    }
}

/*
 * Mask a heap page before performing consistency checks on it.
 */
void
heap_mask(char *pagedata, BlockNumber blkno)
{
    Page        page = (Page) pagedata;
    OffsetNumber off;

    mask_page_lsn_and_checksum(page);

    mask_page_hint_bits(page);
    mask_unused_space(page);

    for (off = 1; off <= PageGetMaxOffsetNumber(page); off++)
    {
        ItemId      iid = PageGetItemId(page, off);
        char       *page_item;

        page_item = (char *) (page + ItemIdGetOffset(iid));

        if (ItemIdIsNormal(iid))
        {
            HeapTupleHeader page_htup = (HeapTupleHeader) page_item;

            /*
             * If xmin of a tuple is not yet frozen, we should ignore
             * differences in hint bits, since they can be set without
             * emitting WAL.
             */
            if (!HeapTupleHeaderXminFrozen(page_htup))
                page_htup->t_infomask &= ~HEAP_XACT_MASK;
            else
            {
                /* Still we need to mask xmax hint bits. */
                page_htup->t_infomask &= ~HEAP_XMAX_INVALID;
                page_htup->t_infomask &= ~HEAP_XMAX_COMMITTED;
            }

            /*
             * During replay, we set Command Id to FirstCommandId. Hence, mask
             * it. See heap_xlog_insert() for details.
             */
            page_htup->t_choice.t_heap.t_field3.t_cid = MASK_MARKER;

            /*
             * For a speculative tuple, heap_insert() does not set ctid in the
             * caller-passed heap tuple itself, leaving the ctid field to
             * contain a speculative token value - a per-backend monotonically
             * increasing identifier. Besides, it does not WAL-log ctid under
             * any circumstances.
             *
             * During redo, heap_xlog_insert() sets t_ctid to current block
             * number and self offset number. It doesn't care about any
             * speculative insertions on the primary. Hence, we set t_ctid to
             * current block number and self offset number to ignore any
             * inconsistency.
             */
            if (HeapTupleHeaderIsSpeculative(page_htup))
                ItemPointerSet(&page_htup->t_ctid, blkno, off);

            /*
             * NB: Not ignoring ctid changes due to the tuple having moved
             * (i.e. HeapTupleHeaderIndicatesMovedPartitions), because that's
             * important information that needs to be in-sync between primary
             * and standby, and thus is WAL logged.
             */
        }

        /*
         * Ignore any padding bytes after the tuple, when the length of the
         * item is not MAXALIGNed.
         */
        if (ItemIdHasStorage(iid))
        {
            int         len = ItemIdGetLength(iid);
            int         padlen = MAXALIGN(len) - len;

            if (padlen > 0)
                memset(page_item + len, MASK_MARKER, padlen);
        }
    }
}
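
/*
 * Worked example for the padding mask at the end of heap_mask(): with the
 * usual 8-byte MAXALIGN, an item of length len = 61 occupies
 * MAXALIGN(61) = 64 bytes on the page, so padlen = 64 - 61 = 3, and those 3
 * trailing alignment bytes are overwritten with MASK_MARKER before pages
 * from primary and standby are compared.
 */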