
PostgreSQL Source Code git master
vacuumlazy.c File Reference
#include "postgres.h"
#include <math.h>
#include "access/genam.h"
#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/multixact.h"
#include "access/tidstore.h"
#include "access/transam.h"
#include "access/visibilitymap.h"
#include "access/xloginsert.h"
#include "catalog/storage.h"
#include "commands/progress.h"
#include "commands/vacuum.h"
#include "common/int.h"
#include "common/pg_prng.h"
#include "executor/instrument.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "portability/instr_time.h"
#include "postmaster/autovacuum.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "storage/read_stream.h"
#include "utils/lsyscache.h"
#include "utils/pg_rusage.h"
#include "utils/timestamp.h"
Include dependency graph for vacuumlazy.c:

Go to the source code of this file.

Data Structures

struct  LVRelState
 
struct  LVSavedErrInfo
 

Macros

#define REL_TRUNCATE_MINIMUM   1000
 
#define REL_TRUNCATE_FRACTION   16
 
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */
 
#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */
 
#define BYPASS_THRESHOLD_PAGES   0.02 /* i.e. 2% of rel_pages */
 
#define FAILSAFE_EVERY_PAGES    ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))
 
#define VACUUM_FSM_EVERY_PAGES    ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))
 
#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)
 
#define PREFETCH_SIZE   ((BlockNumber) 32)
 
#define ParallelVacuumIsActive(vacrel)   ((vacrel)->pvs != NULL)
 
#define MAX_EAGER_FREEZE_SUCCESS_RATE   0.2
 
#define EAGER_SCAN_REGION_SIZE   4096
 
#define VAC_BLK_WAS_EAGER_SCANNED   (1 << 0)
 
#define VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM   (1 << 1)
 

Typedefs

typedef struct LVRelState LVRelState
 
typedef struct LVSavedErrInfo LVSavedErrInfo
 

Enumerations

enum  VacErrPhase {
  VACUUM_ERRCB_PHASE_UNKNOWN , VACUUM_ERRCB_PHASE_SCAN_HEAP , VACUUM_ERRCB_PHASE_VACUUM_INDEX , VACUUM_ERRCB_PHASE_VACUUM_HEAP ,
  VACUUM_ERRCB_PHASE_INDEX_CLEANUP , VACUUM_ERRCB_PHASE_TRUNCATE
}
 

Functions

static void lazy_scan_heap (LVRelState *vacrel)
 
static void heap_vacuum_eager_scan_setup (LVRelState *vacrel, const VacuumParams params)
 
static BlockNumber heap_vac_scan_next_block (ReadStream *stream, void *callback_private_data, void *per_buffer_data)
 
static void find_next_unskippable_block (LVRelState *vacrel, bool *skipsallvis)
 
static bool lazy_scan_new_or_empty (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool sharelock, Buffer vmbuffer)
 
static int lazy_scan_prune (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, Buffer vmbuffer, bool all_visible_according_to_vm, bool *has_lpdead_items, bool *vm_page_frozen)
 
static bool lazy_scan_noprune (LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool *has_lpdead_items)
 
static void lazy_vacuum (LVRelState *vacrel)
 
static bool lazy_vacuum_all_indexes (LVRelState *vacrel)
 
static void lazy_vacuum_heap_rel (LVRelState *vacrel)
 
static void lazy_vacuum_heap_page (LVRelState *vacrel, BlockNumber blkno, Buffer buffer, OffsetNumber *deadoffsets, int num_offsets, Buffer vmbuffer)
 
static bool lazy_check_wraparound_failsafe (LVRelState *vacrel)
 
static void lazy_cleanup_all_indexes (LVRelState *vacrel)
 
static IndexBulkDeleteResult * lazy_vacuum_one_index (Relation indrel, IndexBulkDeleteResult *istat, double reltuples, LVRelState *vacrel)
 
static IndexBulkDeleteResult * lazy_cleanup_one_index (Relation indrel, IndexBulkDeleteResult *istat, double reltuples, bool estimated_count, LVRelState *vacrel)
 
static bool should_attempt_truncation (LVRelState *vacrel)
 
static void lazy_truncate_heap (LVRelState *vacrel)
 
static BlockNumber count_nondeletable_pages (LVRelState *vacrel, bool *lock_waiter_detected)
 
static void dead_items_alloc (LVRelState *vacrel, int nworkers)
 
static void dead_items_add (LVRelState *vacrel, BlockNumber blkno, OffsetNumber *offsets, int num_offsets)
 
static void dead_items_reset (LVRelState *vacrel)
 
static void dead_items_cleanup (LVRelState *vacrel)
 
static bool heap_page_is_all_visible (LVRelState *vacrel, Buffer buf, TransactionId *visibility_cutoff_xid, bool *all_frozen)
 
static void update_relstats_all_indexes (LVRelState *vacrel)
 
static void vacuum_error_callback (void *arg)
 
static void update_vacuum_error_info (LVRelState *vacrel, LVSavedErrInfo *saved_vacrel, int phase, BlockNumber blkno, OffsetNumber offnum)
 
static void restore_vacuum_error_info (LVRelState *vacrel, const LVSavedErrInfo *saved_vacrel)
 
void heap_vacuum_rel (Relation rel, const VacuumParams params, BufferAccessStrategy bstrategy)
 
static int cmpOffsetNumbers (const void *a, const void *b)
 
static BlockNumber vacuum_reap_lp_read_stream_next (ReadStream *stream, void *callback_private_data, void *per_buffer_data)
 

Macro Definition Documentation

◆ BYPASS_THRESHOLD_PAGES

#define BYPASS_THRESHOLD_PAGES   0.02 /* i.e. 2% of rel_pages */

Definition at line 186 of file vacuumlazy.c.
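
As a hedged illustration (the calling code in lazy_vacuum() is documented elsewhere in this file, not quoted here): the threshold is a fraction of rel_pages that the count of pages carrying LP_DEAD items is compared against when deciding whether index vacuuming can be bypassed. The names below are hypothetical locals, not copied from the source.

    /* Hedged sketch: how a 2% page threshold works out for a sample table */
    BlockNumber rel_pages = 100000;                              /* hypothetical table size */
    double      threshold = rel_pages * BYPASS_THRESHOLD_PAGES;  /* 0.02 * 100000 = 2000 pages */
    bool        bypass_candidate = (lpdead_item_pages < threshold);  /* lpdead_item_pages is illustrative */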

◆ EAGER_SCAN_REGION_SIZE

#define EAGER_SCAN_REGION_SIZE   4096

Definition at line 249 of file vacuumlazy.c.

◆ FAILSAFE_EVERY_PAGES

#define FAILSAFE_EVERY_PAGES    ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))

Definition at line 192 of file vacuumlazy.c.
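
With the default 8192-byte BLCKSZ this macro evaluates to 524,288 blocks, so the failsafe check is re-run after roughly every 4 GB of heap processed:

    /*
     * Assuming the default BLCKSZ of 8192:
     *   FAILSAFE_EVERY_PAGES = ((uint64) 4 * 1024 * 1024 * 1024) / 8192
     *                        = 524288 blocks, i.e. about 4 GB of heap.
     */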

◆ MAX_EAGER_FREEZE_SUCCESS_RATE

#define MAX_EAGER_FREEZE_SUCCESS_RATE   0.2

Definition at line 240 of file vacuumlazy.c.

◆ ParallelVacuumIsActive

#define ParallelVacuumIsActive (   vacrel)    ((vacrel)->pvs != NULL)

Definition at line 220 of file vacuumlazy.c.

◆ PREFETCH_SIZE

#define PREFETCH_SIZE   ((BlockNumber) 32)

Definition at line 214 of file vacuumlazy.c.

◆ REL_TRUNCATE_FRACTION

#define REL_TRUNCATE_FRACTION   16

Definition at line 169 of file vacuumlazy.c.

◆ REL_TRUNCATE_MINIMUM

#define REL_TRUNCATE_MINIMUM   1000

Definition at line 168 of file vacuumlazy.c.

◆ SKIP_PAGES_THRESHOLD

#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)

Definition at line 208 of file vacuumlazy.c.

◆ VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM

#define VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM   (1 << 1)

Definition at line 256 of file vacuumlazy.c.

◆ VAC_BLK_WAS_EAGER_SCANNED

#define VAC_BLK_WAS_EAGER_SCANNED   (1 << 0)

Definition at line 255 of file vacuumlazy.c.

◆ VACUUM_FSM_EVERY_PAGES

#define VACUUM_FSM_EVERY_PAGES    ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))

Definition at line 201 of file vacuumlazy.c.

◆ VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL

#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL   20 /* ms */

Definition at line 178 of file vacuumlazy.c.

◆ VACUUM_TRUNCATE_LOCK_TIMEOUT

#define VACUUM_TRUNCATE_LOCK_TIMEOUT   5000 /* ms */

Definition at line 180 of file vacuumlazy.c.

◆ VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL

#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL   50 /* ms */

Definition at line 179 of file vacuumlazy.c.
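
A hedged reading of how the three truncation-lock constants fit together (the functions named below are documented later in this file; the summary is approximate, not quoted from them):

    /*
     * - lazy_truncate_heap() retries taking AccessExclusiveLock, sleeping
     *   VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL (50 ms) between attempts, and
     *   gives up once VACUUM_TRUNCATE_LOCK_TIMEOUT (5000 ms) has elapsed,
     *   i.e. on the order of 100 attempts.
     * - While the lock is held, count_nondeletable_pages() checks about once
     *   per VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL (20 ms) whether other
     *   backends are waiting on the lock, and suspends truncation if so.
     */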

Typedef Documentation

◆ LVRelState

typedef struct LVRelState LVRelState

◆ LVSavedErrInfo

typedef struct LVSavedErrInfo LVSavedErrInfo

Enumeration Type Documentation

◆ VacErrPhase

Enumerator
VACUUM_ERRCB_PHASE_UNKNOWN 
VACUUM_ERRCB_PHASE_SCAN_HEAP 
VACUUM_ERRCB_PHASE_VACUUM_INDEX 
VACUUM_ERRCB_PHASE_VACUUM_HEAP 
VACUUM_ERRCB_PHASE_INDEX_CLEANUP 
VACUUM_ERRCB_PHASE_TRUNCATE 

Definition at line 223 of file vacuumlazy.c.

224{
225 VACUUM_ERRCB_PHASE_UNKNOWN,
226 VACUUM_ERRCB_PHASE_SCAN_HEAP,
227 VACUUM_ERRCB_PHASE_VACUUM_INDEX,
228 VACUUM_ERRCB_PHASE_VACUUM_HEAP,
229 VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
230 VACUUM_ERRCB_PHASE_TRUNCATE,
231} VacErrPhase;
VacErrPhase
Definition: vacuumlazy.c:224
@ VACUUM_ERRCB_PHASE_SCAN_HEAP
Definition: vacuumlazy.c:226
@ VACUUM_ERRCB_PHASE_VACUUM_INDEX
Definition: vacuumlazy.c:227
@ VACUUM_ERRCB_PHASE_TRUNCATE
Definition: vacuumlazy.c:230
@ VACUUM_ERRCB_PHASE_INDEX_CLEANUP
Definition: vacuumlazy.c:229
@ VACUUM_ERRCB_PHASE_VACUUM_HEAP
Definition: vacuumlazy.c:228
@ VACUUM_ERRCB_PHASE_UNKNOWN
Definition: vacuumlazy.c:225

Function Documentation

◆ cmpOffsetNumbers()

static int cmpOffsetNumbers ( const void *  a,
const void *  b 
)
static

Definition at line 1918 of file vacuumlazy.c.

1919{
1920 return pg_cmp_u16(*(const OffsetNumber *) a, *(const OffsetNumber *) b);
1921}
static int pg_cmp_u16(uint16 a, uint16 b)
Definition: int.h:640
int b
Definition: isn.c:74
int a
Definition: isn.c:73
uint16 OffsetNumber
Definition: off.h:24

References a, b, and pg_cmp_u16().

Referenced by lazy_scan_prune().
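
A hedged usage sketch (the caller below is illustrative, not copied from lazy_scan_prune()): a comparator of this shape is what qsort() expects when sorting an array of OffsetNumbers, such as the dead item offsets collected for one heap page.

    /* Illustrative only: sort a block's dead item offsets in ascending order */
    OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
    int          ndeadoffsets = 0;

    /* ... pruning fills deadoffsets[0 .. ndeadoffsets-1] ... */

    qsort(deadoffsets, ndeadoffsets, sizeof(OffsetNumber), cmpOffsetNumbers);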

◆ count_nondeletable_pages()

static BlockNumber count_nondeletable_pages ( LVRelState vacrel,
bool *  lock_waiter_detected 
)
static

Definition at line 3336 of file vacuumlazy.c.

3337{
3338 BlockNumber blkno;
3339 BlockNumber prefetchedUntil;
3340 instr_time starttime;
3341
3342 /* Initialize the starttime if we check for conflicting lock requests */
3343 INSTR_TIME_SET_CURRENT(starttime);
3344
3345 /*
3346 * Start checking blocks at what we believe relation end to be and move
3347 * backwards. (Strange coding of loop control is needed because blkno is
3348 * unsigned.) To make the scan faster, we prefetch a few blocks at a time
3349 * in forward direction, so that OS-level readahead can kick in.
3350 */
3351 blkno = vacrel->rel_pages;
3353 "prefetch size must be power of 2");
3354 prefetchedUntil = InvalidBlockNumber;
3355 while (blkno > vacrel->nonempty_pages)
3356 {
3357 Buffer buf;
3358 Page page;
3359 OffsetNumber offnum,
3360 maxoff;
3361 bool hastup;
3362
3363 /*
3364 * Check if another process requests a lock on our relation. We are
3365 * holding an AccessExclusiveLock here, so they will be waiting. We
3366 * only do this once per VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and we
3367 * only check if that interval has elapsed once every 32 blocks to
3368 * keep the number of system calls and actual shared lock table
3369 * lookups to a minimum.
3370 */
3371 if ((blkno % 32) == 0)
3372 {
3373 instr_time currenttime;
3374 instr_time elapsed;
3375
3376 INSTR_TIME_SET_CURRENT(currenttime);
3377 elapsed = currenttime;
3378 INSTR_TIME_SUBTRACT(elapsed, starttime);
3379 if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000)
3380 >= VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL)
3381 {
3382 if (LockHasWaitersRelation(vacrel->rel, AccessExclusiveLock))
3383 {
3384 ereport(vacrel->verbose ? INFO : DEBUG2,
3385 (errmsg("table \"%s\": suspending truncate due to conflicting lock request",
3386 vacrel->relname)));
3387
3388 *lock_waiter_detected = true;
3389 return blkno;
3390 }
3391 starttime = currenttime;
3392 }
3393 }
3394
3395 /*
3396 * We don't insert a vacuum delay point here, because we have an
3397 * exclusive lock on the table which we want to hold for as short a
3398 * time as possible. We still need to check for interrupts however.
3399 */
3400 CHECK_FOR_INTERRUPTS();
3401
3402 blkno--;
3403
3404 /* If we haven't prefetched this lot yet, do so now. */
3405 if (prefetchedUntil > blkno)
3406 {
3407 BlockNumber prefetchStart;
3408 BlockNumber pblkno;
3409
3410 prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
3411 for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
3412 {
3413 PrefetchBuffer(vacrel->rel, MAIN_FORKNUM, pblkno);
3415 }
3416 prefetchedUntil = prefetchStart;
3417 }
3418
3419 buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
3420 vacrel->bstrategy);
3421
3422 /* In this phase we only need shared access to the buffer */
3423 LockBuffer(buf, BUFFER_LOCK_SHARE);
3424
3425 page = BufferGetPage(buf);
3426
3427 if (PageIsNew(page) || PageIsEmpty(page))
3428 {
3430 continue;
3431 }
3432
3433 hastup = false;
3434 maxoff = PageGetMaxOffsetNumber(page);
3435 for (offnum = FirstOffsetNumber;
3436 offnum <= maxoff;
3437 offnum = OffsetNumberNext(offnum))
3438 {
3439 ItemId itemid;
3440
3441 itemid = PageGetItemId(page, offnum);
3442
3443 /*
3444 * Note: any non-unused item should be taken as a reason to keep
3445 * this page. Even an LP_DEAD item makes truncation unsafe, since
3446 * we must not have cleaned out its index entries.
3447 */
3448 if (ItemIdIsUsed(itemid))
3449 {
3450 hastup = true;
3451 break; /* can stop scanning */
3452 }
3453 } /* scan along page */
3454
3456
3457 /* Done scanning if we found a tuple here */
3458 if (hastup)
3459 return blkno + 1;
3460 }
3461
3462 /*
3463 * If we fall out of the loop, all the previously-thought-to-be-empty
3464 * pages still are; we need not bother to look at the last known-nonempty
3465 * page.
3466 */
3467 return vacrel->nonempty_pages;
3468}
uint32 BlockNumber
Definition: block.h:31
#define InvalidBlockNumber
Definition: block.h:33
int Buffer
Definition: buf.h:23
PrefetchBufferResult PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
Definition: bufmgr.c:651
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:5355
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:5572
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:805
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:197
static Page BufferGetPage(Buffer buffer)
Definition: bufmgr.h:417
@ RBM_NORMAL
Definition: bufmgr.h:46
static bool PageIsEmpty(const PageData *page)
Definition: bufpage.h:224
static bool PageIsNew(const PageData *page)
Definition: bufpage.h:234
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition: bufpage.h:244
PageData * Page
Definition: bufpage.h:82
static OffsetNumber PageGetMaxOffsetNumber(const PageData *page)
Definition: bufpage.h:372
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:937
int errmsg(const char *fmt,...)
Definition: elog.c:1071
#define DEBUG2
Definition: elog.h:29
#define INFO
Definition: elog.h:34
#define ereport(elevel,...)
Definition: elog.h:150
#define INSTR_TIME_SET_CURRENT(t)
Definition: instr_time.h:122
#define INSTR_TIME_SUBTRACT(x, y)
Definition: instr_time.h:181
#define INSTR_TIME_GET_MICROSEC(t)
Definition: instr_time.h:194
#define ItemIdIsUsed(itemId)
Definition: itemid.h:92
bool LockHasWaitersRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:367
#define AccessExclusiveLock
Definition: lockdefs.h:43
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:122
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
#define FirstOffsetNumber
Definition: off.h:27
static char * buf
Definition: pg_test_fsync.c:72
@ MAIN_FORKNUM
Definition: relpath.h:58
bool verbose
Definition: vacuumlazy.c:297
BlockNumber nonempty_pages
Definition: vacuumlazy.c:340
Relation rel
Definition: vacuumlazy.c:261
BlockNumber rel_pages
Definition: vacuumlazy.c:312
BufferAccessStrategy bstrategy
Definition: vacuumlazy.c:266
char * relname
Definition: vacuumlazy.c:292
#define PREFETCH_SIZE
Definition: vacuumlazy.c:214
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL
Definition: vacuumlazy.c:178

References AccessExclusiveLock, LVRelState::bstrategy, buf, BUFFER_LOCK_SHARE, BufferGetPage(), CHECK_FOR_INTERRUPTS, DEBUG2, ereport, errmsg(), FirstOffsetNumber, INFO, INSTR_TIME_GET_MICROSEC, INSTR_TIME_SET_CURRENT, INSTR_TIME_SUBTRACT, InvalidBlockNumber, ItemIdIsUsed, LockBuffer(), LockHasWaitersRelation(), MAIN_FORKNUM, LVRelState::nonempty_pages, OffsetNumberNext, PageGetItemId(), PageGetMaxOffsetNumber(), PageIsEmpty(), PageIsNew(), PREFETCH_SIZE, PrefetchBuffer(), RBM_NORMAL, ReadBufferExtended(), LVRelState::rel, LVRelState::rel_pages, LVRelState::relname, StaticAssertStmt, UnlockReleaseBuffer(), VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and LVRelState::verbose.

Referenced by lazy_truncate_heap().
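
The prefetch start computed at line 3410, blkno & ~(PREFETCH_SIZE - 1), rounds blkno down to the previous multiple of PREFETCH_SIZE, which is why the StaticAssertStmt above it insists on a power of two. A small worked example (the block number is illustrative):

    /* PREFETCH_SIZE is 32, so ~(PREFETCH_SIZE - 1) masks off the low 5 bits */
    BlockNumber blkno         = 1000;                            /* hypothetical */
    BlockNumber prefetchStart = blkno & ~(PREFETCH_SIZE - 1);    /* 1000 & ~31 = 992 */
    /* blocks 992..1000 are then prefetched in forward order */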

◆ dead_items_add()

static void dead_items_add ( LVRelState vacrel,
BlockNumber  blkno,
OffsetNumber offsets,
int  num_offsets 
)
static

Definition at line 3543 of file vacuumlazy.c.

3545{
3546 const int prog_index[2] = {
3547 PROGRESS_VACUUM_NUM_DEAD_ITEM_IDS,
3548 PROGRESS_VACUUM_DEAD_TUPLE_BYTES
3549 };
3550 int64 prog_val[2];
3551
3552 TidStoreSetBlockOffsets(vacrel->dead_items, blkno, offsets, num_offsets);
3553 vacrel->dead_items_info->num_items += num_offsets;
3554
3555 /* update the progress information */
3556 prog_val[0] = vacrel->dead_items_info->num_items;
3557 prog_val[1] = TidStoreMemoryUsage(vacrel->dead_items);
3558 pgstat_progress_update_multi_param(2, prog_index, prog_val);
3559}
void pgstat_progress_update_multi_param(int nparam, const int *index, const int64 *val)
int64_t int64
Definition: c.h:535
#define PROGRESS_VACUUM_DEAD_TUPLE_BYTES
Definition: progress.h:27
#define PROGRESS_VACUUM_NUM_DEAD_ITEM_IDS
Definition: progress.h:28
VacDeadItemsInfo * dead_items_info
Definition: vacuumlazy.c:310
TidStore * dead_items
Definition: vacuumlazy.c:309
int64 num_items
Definition: vacuum.h:295
void TidStoreSetBlockOffsets(TidStore *ts, BlockNumber blkno, OffsetNumber *offsets, int num_offsets)
Definition: tidstore.c:345
size_t TidStoreMemoryUsage(TidStore *ts)
Definition: tidstore.c:532

References LVRelState::dead_items, LVRelState::dead_items_info, VacDeadItemsInfo::num_items, pgstat_progress_update_multi_param(), PROGRESS_VACUUM_DEAD_TUPLE_BYTES, PROGRESS_VACUUM_NUM_DEAD_ITEM_IDS, TidStoreMemoryUsage(), and TidStoreSetBlockOffsets().

Referenced by lazy_scan_noprune(), and lazy_scan_prune().
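
A hedged sketch of the calling pattern (variable names are hypothetical, not taken from the callers named above): the LP_DEAD offsets found on a single heap block are accumulated locally and handed over in one call, which also pushes the updated dead-item count and memory usage to the progress machinery.

    /* Illustrative caller: record this block's LP_DEAD offsets in one call */
    OffsetNumber deadoffsets[MaxHeapTuplesPerPage];   /* hypothetical buffer */
    int          ndead = 0;

    /* ... pruning fills deadoffsets[0 .. ndead-1] for block blkno ... */

    if (ndead > 0)
        dead_items_add(vacrel, blkno, deadoffsets, ndead);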

◆ dead_items_alloc()

static void dead_items_alloc ( LVRelState vacrel,
int  nworkers 
)
static

Definition at line 3478 of file vacuumlazy.c.

3479{
3480 VacDeadItemsInfo *dead_items_info;
3481 int vac_work_mem = AmAutoVacuumWorkerProcess() &&
3482 autovacuum_work_mem != -1 ?
3483 autovacuum_work_mem : maintenance_work_mem;
3484
3485 /*
3486 * Initialize state for a parallel vacuum. As of now, only one worker can
3487 * be used for an index, so we invoke parallelism only if there are at
3488 * least two indexes on a table.
3489 */
3490 if (nworkers >= 0 && vacrel->nindexes > 1 && vacrel->do_index_vacuuming)
3491 {
3492 /*
3493 * Since parallel workers cannot access data in temporary tables, we
3494 * can't perform parallel vacuum on them.
3495 */
3496 if (RelationUsesLocalBuffers(vacrel->rel))
3497 {
3498 /*
3499 * Give warning only if the user explicitly tries to perform a
3500 * parallel vacuum on the temporary table.
3501 */
3502 if (nworkers > 0)
3503 ereport(WARNING,
3504 (errmsg("disabling parallel option of vacuum on \"%s\" --- cannot vacuum temporary tables in parallel",
3505 vacrel->relname)));
3506 }
3507 else
3508 vacrel->pvs = parallel_vacuum_init(vacrel->rel, vacrel->indrels,
3509 vacrel->nindexes, nworkers,
3510 vac_work_mem,
3511 vacrel->verbose ? INFO : DEBUG2,
3512 vacrel->bstrategy);
3513
3514 /*
3515 * If parallel mode started, dead_items and dead_items_info spaces are
3516 * allocated in DSM.
3517 */
3518 if (ParallelVacuumIsActive(vacrel))
3519 {
3520 vacrel->dead_items = parallel_vacuum_get_dead_items(vacrel->pvs,
3521 &vacrel->dead_items_info);
3522 return;
3523 }
3524 }
3525
3526 /*
3527 * Serial VACUUM case. Allocate both dead_items and dead_items_info
3528 * locally.
3529 */
3530
3531 dead_items_info = (VacDeadItemsInfo *) palloc(sizeof(VacDeadItemsInfo));
3532 dead_items_info->max_bytes = vac_work_mem * (Size) 1024;
3533 dead_items_info->num_items = 0;
3534 vacrel->dead_items_info = dead_items_info;
3535
3536 vacrel->dead_items = TidStoreCreateLocal(dead_items_info->max_bytes, true);
3537}
int autovacuum_work_mem
Definition: autovacuum.c:120
size_t Size
Definition: c.h:610
#define WARNING
Definition: elog.h:36
int maintenance_work_mem
Definition: globals.c:133
void * palloc(Size size)
Definition: mcxt.c:1365
#define AmAutoVacuumWorkerProcess()
Definition: miscadmin.h:382
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:646
ParallelVacuumState * pvs
Definition: vacuumlazy.c:267
int nindexes
Definition: vacuumlazy.c:263
Relation * indrels
Definition: vacuumlazy.c:262
bool do_index_vacuuming
Definition: vacuumlazy.c:277
size_t max_bytes
Definition: vacuum.h:294
TidStore * TidStoreCreateLocal(size_t max_bytes, bool insert_only)
Definition: tidstore.c:162
#define ParallelVacuumIsActive(vacrel)
Definition: vacuumlazy.c:220
TidStore * parallel_vacuum_get_dead_items(ParallelVacuumState *pvs, VacDeadItemsInfo **dead_items_info_p)
ParallelVacuumState * parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes, int nrequested_workers, int vac_work_mem, int elevel, BufferAccessStrategy bstrategy)

References AmAutoVacuumWorkerProcess, autovacuum_work_mem, LVRelState::bstrategy, LVRelState::dead_items, LVRelState::dead_items_info, DEBUG2, LVRelState::do_index_vacuuming, ereport, errmsg(), LVRelState::indrels, INFO, maintenance_work_mem, VacDeadItemsInfo::max_bytes, LVRelState::nindexes, VacDeadItemsInfo::num_items, palloc(), parallel_vacuum_get_dead_items(), parallel_vacuum_init(), ParallelVacuumIsActive, LVRelState::pvs, LVRelState::rel, RelationUsesLocalBuffers, LVRelState::relname, TidStoreCreateLocal(), LVRelState::verbose, and WARNING.

Referenced by heap_vacuum_rel().

◆ dead_items_cleanup()

static void dead_items_cleanup ( LVRelState vacrel)
static

Definition at line 3585 of file vacuumlazy.c.

3586{
3587 if (!ParallelVacuumIsActive(vacrel))
3588 {
3589 /* Don't bother with pfree here */
3590 return;
3591 }
3592
3593 /* End parallel mode */
3594 parallel_vacuum_end(vacrel->pvs, vacrel->indstats);
3595 vacrel->pvs = NULL;
3596}
IndexBulkDeleteResult ** indstats
Definition: vacuumlazy.c:346
void parallel_vacuum_end(ParallelVacuumState *pvs, IndexBulkDeleteResult **istats)

References LVRelState::indstats, parallel_vacuum_end(), ParallelVacuumIsActive, and LVRelState::pvs.

Referenced by heap_vacuum_rel().

◆ dead_items_reset()

static void dead_items_reset ( LVRelState vacrel)
static

Definition at line 3565 of file vacuumlazy.c.

3566{
3567 if (ParallelVacuumIsActive(vacrel))
3568 {
3569 parallel_vacuum_reset_dead_items(vacrel->pvs);
3570 return;
3571 }
3572
3573 /* Recreate the tidstore with the same max_bytes limitation */
3574 TidStoreDestroy(vacrel->dead_items);
3575 vacrel->dead_items = TidStoreCreateLocal(vacrel->dead_items_info->max_bytes, true);
3576
3577 /* Reset the counter */
3578 vacrel->dead_items_info->num_items = 0;
3579}
void TidStoreDestroy(TidStore *ts)
Definition: tidstore.c:317
void parallel_vacuum_reset_dead_items(ParallelVacuumState *pvs)

References LVRelState::dead_items, LVRelState::dead_items_info, VacDeadItemsInfo::max_bytes, VacDeadItemsInfo::num_items, parallel_vacuum_reset_dead_items(), ParallelVacuumIsActive, LVRelState::pvs, TidStoreCreateLocal(), and TidStoreDestroy().

Referenced by lazy_vacuum().

◆ find_next_unskippable_block()

static void find_next_unskippable_block ( LVRelState vacrel,
bool *  skipsallvis 
)
static

Definition at line 1676 of file vacuumlazy.c.

1677{
1678 BlockNumber rel_pages = vacrel->rel_pages;
1679 BlockNumber next_unskippable_block = vacrel->next_unskippable_block + 1;
1680 Buffer next_unskippable_vmbuffer = vacrel->next_unskippable_vmbuffer;
1681 bool next_unskippable_eager_scanned = false;
1682 bool next_unskippable_allvis;
1683
1684 *skipsallvis = false;
1685
1686 for (;; next_unskippable_block++)
1687 {
1688 uint8 mapbits = visibilitymap_get_status(vacrel->rel,
1689 next_unskippable_block,
1690 &next_unskippable_vmbuffer);
1691
1692 next_unskippable_allvis = (mapbits & VISIBILITYMAP_ALL_VISIBLE) != 0;
1693
1694 /*
1695 * At the start of each eager scan region, normal vacuums with eager
1696 * scanning enabled reset the failure counter, allowing vacuum to
1697 * resume eager scanning if it had been suspended in the previous
1698 * region.
1699 */
1700 if (next_unskippable_block >= vacrel->next_eager_scan_region_start)
1701 {
1705 }
1706
1707 /*
1708 * A block is unskippable if it is not all visible according to the
1709 * visibility map.
1710 */
1711 if (!next_unskippable_allvis)
1712 {
1713 Assert((mapbits & VISIBILITYMAP_ALL_FROZEN) == 0);
1714 break;
1715 }
1716
1717 /*
1718 * Caller must scan the last page to determine whether it has tuples
1719 * (caller must have the opportunity to set vacrel->nonempty_pages).
1720 * This rule avoids having lazy_truncate_heap() take access-exclusive
1721 * lock on rel to attempt a truncation that fails anyway, just because
1722 * there are tuples on the last page (it is likely that there will be
1723 * tuples on other nearby pages as well, but those can be skipped).
1724 *
1725 * Implement this by always treating the last block as unsafe to skip.
1726 */
1727 if (next_unskippable_block == rel_pages - 1)
1728 break;
1729
1730 /* DISABLE_PAGE_SKIPPING makes all skipping unsafe */
1731 if (!vacrel->skipwithvm)
1732 break;
1733
1734 /*
1735 * All-frozen pages cannot contain XIDs < OldestXmin (XIDs that aren't
1736 * already frozen by now), so this page can be skipped.
1737 */
1738 if ((mapbits & VISIBILITYMAP_ALL_FROZEN) != 0)
1739 continue;
1740
1741 /*
1742 * Aggressive vacuums cannot skip any all-visible pages that are not
1743 * also all-frozen.
1744 */
1745 if (vacrel->aggressive)
1746 break;
1747
1748 /*
1749 * Normal vacuums with eager scanning enabled only skip all-visible
1750 * but not all-frozen pages if they have hit the failure limit for the
1751 * current eager scan region.
1752 */
1753 if (vacrel->eager_scan_remaining_fails > 0)
1754 {
1755 next_unskippable_eager_scanned = true;
1756 break;
1757 }
1758
1759 /*
1760 * All-visible blocks are safe to skip in a normal vacuum. But
1761 * remember that the final range contains such a block for later.
1762 */
1763 *skipsallvis = true;
1764 }
1765
1766 /* write the local variables back to vacrel */
1767 vacrel->next_unskippable_block = next_unskippable_block;
1768 vacrel->next_unskippable_allvis = next_unskippable_allvis;
1769 vacrel->next_unskippable_eager_scanned = next_unskippable_eager_scanned;
1770 vacrel->next_unskippable_vmbuffer = next_unskippable_vmbuffer;
1771}
uint8_t uint8
Definition: c.h:536
Assert(PointerIsAligned(start, uint64))
BlockNumber next_eager_scan_region_start
Definition: vacuumlazy.c:377
bool next_unskippable_eager_scanned
Definition: vacuumlazy.c:362
Buffer next_unskippable_vmbuffer
Definition: vacuumlazy.c:363
BlockNumber eager_scan_remaining_fails
Definition: vacuumlazy.c:409
bool aggressive
Definition: vacuumlazy.c:270
BlockNumber next_unskippable_block
Definition: vacuumlazy.c:360
bool skipwithvm
Definition: vacuumlazy.c:272
bool next_unskippable_allvis
Definition: vacuumlazy.c:361
BlockNumber eager_scan_max_fails_per_region
Definition: vacuumlazy.c:399
#define EAGER_SCAN_REGION_SIZE
Definition: vacuumlazy.c:249
uint8 visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
#define VISIBILITYMAP_ALL_FROZEN
#define VISIBILITYMAP_ALL_VISIBLE

References LVRelState::aggressive, Assert(), LVRelState::eager_scan_max_fails_per_region, EAGER_SCAN_REGION_SIZE, LVRelState::eager_scan_remaining_fails, LVRelState::next_eager_scan_region_start, LVRelState::next_unskippable_allvis, LVRelState::next_unskippable_block, LVRelState::next_unskippable_eager_scanned, LVRelState::next_unskippable_vmbuffer, LVRelState::rel, LVRelState::rel_pages, LVRelState::skipwithvm, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, and visibilitymap_get_status().

Referenced by heap_vac_scan_next_block().

◆ heap_page_is_all_visible()

static bool heap_page_is_all_visible ( LVRelState vacrel,
Buffer  buf,
TransactionId visibility_cutoff_xid,
bool *  all_frozen 
)
static

Definition at line 3610 of file vacuumlazy.c.

3613{
3614 Page page = BufferGetPage(buf);
3616 OffsetNumber offnum,
3617 maxoff;
3618 bool all_visible = true;
3619
3620 *visibility_cutoff_xid = InvalidTransactionId;
3621 *all_frozen = true;
3622
3623 maxoff = PageGetMaxOffsetNumber(page);
3624 for (offnum = FirstOffsetNumber;
3625 offnum <= maxoff && all_visible;
3626 offnum = OffsetNumberNext(offnum))
3627 {
3628 ItemId itemid;
3629 HeapTupleData tuple;
3630
3631 /*
3632 * Set the offset number so that we can display it along with any
3633 * error that occurred while processing this tuple.
3634 */
3635 vacrel->offnum = offnum;
3636 itemid = PageGetItemId(page, offnum);
3637
3638 /* Unused or redirect line pointers are of no interest */
3639 if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
3640 continue;
3641
3642 ItemPointerSet(&(tuple.t_self), blockno, offnum);
3643
3644 /*
3645 * Dead line pointers can have index pointers pointing to them. So
3646 * they can't be treated as visible
3647 */
3648 if (ItemIdIsDead(itemid))
3649 {
3650 all_visible = false;
3651 *all_frozen = false;
3652 break;
3653 }
3654
3655 Assert(ItemIdIsNormal(itemid));
3656
3657 tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
3658 tuple.t_len = ItemIdGetLength(itemid);
3659 tuple.t_tableOid = RelationGetRelid(vacrel->rel);
3660
3661 switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->cutoffs.OldestXmin,
3662 buf))
3663 {
3664 case HEAPTUPLE_LIVE:
3665 {
3666 TransactionId xmin;
3667
3668 /* Check comments in lazy_scan_prune. */
3670 {
3671 all_visible = false;
3672 *all_frozen = false;
3673 break;
3674 }
3675
3676 /*
3677 * The inserter definitely committed. But is it old enough
3678 * that everyone sees it as committed?
3679 */
3680 xmin = HeapTupleHeaderGetXmin(tuple.t_data);
3681 if (!TransactionIdPrecedes(xmin,
3682 vacrel->cutoffs.OldestXmin))
3683 {
3684 all_visible = false;
3685 *all_frozen = false;
3686 break;
3687 }
3688
3689 /* Track newest xmin on page. */
3690 if (TransactionIdFollows(xmin, *visibility_cutoff_xid) &&
3692 *visibility_cutoff_xid = xmin;
3693
3694 /* Check whether this tuple is already frozen or not */
3695 if (all_visible && *all_frozen &&
3697 *all_frozen = false;
3698 }
3699 break;
3700
3701 case HEAPTUPLE_DEAD:
3705 {
3706 all_visible = false;
3707 *all_frozen = false;
3708 break;
3709 }
3710 default:
3711 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
3712 break;
3713 }
3714 } /* scan along page */
3715
3716 /* Clear the offset information once we have processed the given page. */
3717 vacrel->offnum = InvalidOffsetNumber;
3718
3719 return all_visible;
3720}
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:4198
static Item PageGetItem(const PageData *page, const ItemIdData *itemId)
Definition: bufpage.h:354
uint32 TransactionId
Definition: c.h:657
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:226
bool heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
Definition: heapam.c:7810
@ HEAPTUPLE_RECENTLY_DEAD
Definition: heapam.h:128
@ HEAPTUPLE_INSERT_IN_PROGRESS
Definition: heapam.h:129
@ HEAPTUPLE_LIVE
Definition: heapam.h:127
@ HEAPTUPLE_DELETE_IN_PROGRESS
Definition: heapam.h:130
@ HEAPTUPLE_DEAD
Definition: heapam.h:126
HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin, Buffer buffer)
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
static TransactionId HeapTupleHeaderGetXmin(const HeapTupleHeaderData *tup)
Definition: htup_details.h:324
static bool HeapTupleHeaderXminCommitted(const HeapTupleHeaderData *tup)
Definition: htup_details.h:337
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define ItemIdIsDead(itemId)
Definition: itemid.h:113
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:106
static void ItemPointerSet(ItemPointerData *pointer, BlockNumber blockNumber, OffsetNumber offNum)
Definition: itemptr.h:135
#define InvalidOffsetNumber
Definition: off.h:26
#define RelationGetRelid(relation)
Definition: rel.h:514
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
HeapTupleHeader t_data
Definition: htup.h:68
Oid t_tableOid
Definition: htup.h:66
OffsetNumber offnum
Definition: vacuumlazy.c:295
struct VacuumCutoffs cutoffs
Definition: vacuumlazy.c:282
TransactionId OldestXmin
Definition: vacuum.h:274
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:280
bool TransactionIdFollows(TransactionId id1, TransactionId id2)
Definition: transam.c:314
#define InvalidTransactionId
Definition: transam.h:31
#define TransactionIdIsNormal(xid)
Definition: transam.h:42

References Assert(), buf, BufferGetBlockNumber(), BufferGetPage(), LVRelState::cutoffs, elog, ERROR, FirstOffsetNumber, heap_tuple_needs_eventual_freeze(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetXmin(), HeapTupleHeaderXminCommitted(), HeapTupleSatisfiesVacuum(), InvalidOffsetNumber, InvalidTransactionId, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet(), LVRelState::offnum, OffsetNumberNext, VacuumCutoffs::OldestXmin, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), LVRelState::rel, RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdFollows(), TransactionIdIsNormal, and TransactionIdPrecedes().

Referenced by lazy_scan_prune(), and lazy_vacuum_heap_page().

◆ heap_vac_scan_next_block()

static BlockNumber heap_vac_scan_next_block ( ReadStream stream,
void *  callback_private_data,
void *  per_buffer_data 
)
static

Definition at line 1571 of file vacuumlazy.c.

1574{
1575 BlockNumber next_block;
1576 LVRelState *vacrel = callback_private_data;
1577 uint8 blk_info = 0;
1578
1579 /* relies on InvalidBlockNumber + 1 overflowing to 0 on first call */
1580 next_block = vacrel->current_block + 1;
1581
1582 /* Have we reached the end of the relation? */
1583 if (next_block >= vacrel->rel_pages)
1584 {
1585 if (BufferIsValid(vacrel->next_unskippable_vmbuffer))
1586 {
1587 ReleaseBuffer(vacrel->next_unskippable_vmbuffer);
1588 vacrel->next_unskippable_vmbuffer = InvalidBuffer;
1589 }
1590 return InvalidBlockNumber;
1591 }
1592
1593 /*
1594 * We must be in one of the three following states:
1595 */
1596 if (next_block > vacrel->next_unskippable_block ||
1598 {
1599 /*
1600 * 1. We have just processed an unskippable block (or we're at the
1601 * beginning of the scan). Find the next unskippable block using the
1602 * visibility map.
1603 */
1604 bool skipsallvis;
1605
1606 find_next_unskippable_block(vacrel, &skipsallvis);
1607
1608 /*
1609 * We now know the next block that we must process. It can be the
1610 * next block after the one we just processed, or something further
1611 * ahead. If it's further ahead, we can jump to it, but we choose to
1612 * do so only if we can skip at least SKIP_PAGES_THRESHOLD consecutive
1613 * pages. Since we're reading sequentially, the OS should be doing
1614 * readahead for us, so there's no gain in skipping a page now and
1615 * then. Skipping such a range might even discourage sequential
1616 * detection.
1617 *
1618 * This test also enables more frequent relfrozenxid advancement
1619 * during non-aggressive VACUUMs. If the range has any all-visible
1620 * pages then skipping makes updating relfrozenxid unsafe, which is a
1621 * real downside.
1622 */
1623 if (vacrel->next_unskippable_block - next_block >= SKIP_PAGES_THRESHOLD)
1624 {
1625 next_block = vacrel->next_unskippable_block;
1626 if (skipsallvis)
1627 vacrel->skippedallvis = true;
1628 }
1629 }
1630
1631 /* Now we must be in one of the two remaining states: */
1632 if (next_block < vacrel->next_unskippable_block)
1633 {
1634 /*
1635 * 2. We are processing a range of blocks that we could have skipped
1636 * but chose not to. We know that they are all-visible in the VM,
1637 * otherwise they would've been unskippable.
1638 */
1639 vacrel->current_block = next_block;
1640 blk_info |= VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM;
1641 *((uint8 *) per_buffer_data) = blk_info;
1642 return vacrel->current_block;
1643 }
1644 else
1645 {
1646 /*
1647 * 3. We reached the next unskippable block. Process it. On next
1648 * iteration, we will be back in state 1.
1649 */
1650 Assert(next_block == vacrel->next_unskippable_block);
1651
1652 vacrel->current_block = next_block;
1653 if (vacrel->next_unskippable_allvis)
1654 blk_info |= VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM;
1655 if (vacrel->next_unskippable_eager_scanned)
1656 blk_info |= VAC_BLK_WAS_EAGER_SCANNED;
1657 *((uint8 *) per_buffer_data) = blk_info;
1658 return vacrel->current_block;
1659 }
1660}
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:5338
static bool BufferIsValid(Buffer bufnum)
Definition: bufmgr.h:368
BlockNumber current_block
Definition: vacuumlazy.c:359
bool skippedallvis
Definition: vacuumlazy.c:287
#define VAC_BLK_WAS_EAGER_SCANNED
Definition: vacuumlazy.c:255
static void find_next_unskippable_block(LVRelState *vacrel, bool *skipsallvis)
Definition: vacuumlazy.c:1676
#define VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM
Definition: vacuumlazy.c:256
#define SKIP_PAGES_THRESHOLD
Definition: vacuumlazy.c:208

References Assert(), BufferIsValid(), LVRelState::current_block, find_next_unskippable_block(), InvalidBlockNumber, InvalidBuffer, LVRelState::next_unskippable_allvis, LVRelState::next_unskippable_block, LVRelState::next_unskippable_eager_scanned, LVRelState::next_unskippable_vmbuffer, LVRelState::rel_pages, ReleaseBuffer(), SKIP_PAGES_THRESHOLD, LVRelState::skippedallvis, VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM, and VAC_BLK_WAS_EAGER_SCANNED.

Referenced by lazy_scan_heap().
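
A worked illustration of the SKIP_PAGES_THRESHOLD decision described in the comments above (block numbers are made up): skippable ranges shorter than 32 blocks are read anyway, longer ones are jumped over.

    /*
     * Illustrative only.  Suppose next_block = 100 and
     * find_next_unskippable_block() reports block 120 as the next block that
     * must be scanned:
     *
     *   120 - 100 = 20  <  SKIP_PAGES_THRESHOLD (32)
     *     -> blocks 100..119 are read sequentially even though they are
     *        all-visible, preserving OS readahead.
     *
     * If the next unskippable block were instead 200:
     *
     *   200 - 100 = 100 >= SKIP_PAGES_THRESHOLD
     *     -> the scan jumps straight to block 200, and skippedallvis is set
     *        if any skipped page was all-visible but not all-frozen.
     */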

◆ heap_vacuum_eager_scan_setup()

static void heap_vacuum_eager_scan_setup ( LVRelState vacrel,
const VacuumParams  params 
)
static

Definition at line 487 of file vacuumlazy.c.

488{
489 uint32 randseed;
490 BlockNumber allvisible;
491 BlockNumber allfrozen;
492 float first_region_ratio;
493 bool oldest_unfrozen_before_cutoff = false;
494
495 /*
496 * Initialize eager scan management fields to their disabled values.
497 * Aggressive vacuums, normal vacuums of small tables, and normal vacuums
498 * of tables without sufficiently old tuples disable eager scanning.
499 */
502 vacrel->eager_scan_remaining_fails = 0;
504
505 /* If eager scanning is explicitly disabled, just return. */
506 if (params.max_eager_freeze_failure_rate == 0)
507 return;
508
509 /*
510 * The caller will have determined whether or not an aggressive vacuum is
511 * required by either the vacuum parameters or the relative age of the
512 * oldest unfrozen transaction IDs. An aggressive vacuum must scan every
513 * all-visible page to safely advance the relfrozenxid and/or relminmxid,
514 * so scans of all-visible pages are not considered eager.
515 */
516 if (vacrel->aggressive)
517 return;
518
519 /*
520 * Aggressively vacuuming a small relation shouldn't take long, so it
521 * isn't worth amortizing. We use two times the region size as the size
522 * cutoff because the eager scan start block is a random spot somewhere in
523 * the first region, making the second region the first to be eager
524 * scanned normally.
525 */
526 if (vacrel->rel_pages < 2 * EAGER_SCAN_REGION_SIZE)
527 return;
528
529 /*
530 * We only want to enable eager scanning if we are likely to be able to
531 * freeze some of the pages in the relation.
532 *
533 * Tuples with XIDs older than OldestXmin or MXIDs older than OldestMxact
534 * are technically freezable, but we won't freeze them unless the criteria
535 * for opportunistic freezing is met. Only tuples with XIDs/MXIDs older
536 * than the FreezeLimit/MultiXactCutoff are frozen in the common case.
537 *
538 * So, as a heuristic, we wait until the FreezeLimit has advanced past the
539 * relfrozenxid or the MultiXactCutoff has advanced past the relminmxid to
540 * enable eager scanning.
541 */
544 vacrel->cutoffs.FreezeLimit))
545 oldest_unfrozen_before_cutoff = true;
546
547 if (!oldest_unfrozen_before_cutoff &&
550 vacrel->cutoffs.MultiXactCutoff))
551 oldest_unfrozen_before_cutoff = true;
552
553 if (!oldest_unfrozen_before_cutoff)
554 return;
555
556 /* We have met the criteria to eagerly scan some pages. */
557
558 /*
559 * Our success cap is MAX_EAGER_FREEZE_SUCCESS_RATE of the number of
560 * all-visible but not all-frozen blocks in the relation.
561 */
562 visibilitymap_count(vacrel->rel, &allvisible, &allfrozen);
563
566 (allvisible - allfrozen));
567
568 /* If every all-visible page is frozen, eager scanning is disabled. */
569 if (vacrel->eager_scan_remaining_successes == 0)
570 return;
571
572 /*
573 * Now calculate the bounds of the first eager scan region. Its end block
574 * will be a random spot somewhere in the first EAGER_SCAN_REGION_SIZE
575 * blocks. This affects the bounds of all subsequent regions and avoids
576 * eager scanning and failing to freeze the same blocks each vacuum of the
577 * relation.
578 */
580
582
585
589
590 /*
591 * The first region will be smaller than subsequent regions. As such,
592 * adjust the eager freeze failures tolerated for this region.
593 */
594 first_region_ratio = 1 - (float) vacrel->next_eager_scan_region_start /
596
599 first_region_ratio;
600}
uint32_t uint32
Definition: c.h:538
bool MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2)
Definition: multixact.c:3265
#define MultiXactIdIsValid(multi)
Definition: multixact.h:29
uint32 pg_prng_uint32(pg_prng_state *state)
Definition: pg_prng.c:227
pg_prng_state pg_global_prng_state
Definition: pg_prng.c:34
BlockNumber eager_scan_remaining_successes
Definition: vacuumlazy.c:388
TransactionId FreezeLimit
Definition: vacuum.h:284
TransactionId relfrozenxid
Definition: vacuum.h:258
MultiXactId relminmxid
Definition: vacuum.h:259
MultiXactId MultiXactCutoff
Definition: vacuum.h:285
double max_eager_freeze_failure_rate
Definition: vacuum.h:239
#define MAX_EAGER_FREEZE_SUCCESS_RATE
Definition: vacuumlazy.c:240
void visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen)

References LVRelState::aggressive, Assert(), LVRelState::cutoffs, LVRelState::eager_scan_max_fails_per_region, EAGER_SCAN_REGION_SIZE, LVRelState::eager_scan_remaining_fails, LVRelState::eager_scan_remaining_successes, VacuumCutoffs::FreezeLimit, InvalidBlockNumber, VacuumParams::max_eager_freeze_failure_rate, MAX_EAGER_FREEZE_SUCCESS_RATE, VacuumCutoffs::MultiXactCutoff, MultiXactIdIsValid, MultiXactIdPrecedes(), LVRelState::next_eager_scan_region_start, pg_global_prng_state, pg_prng_uint32(), LVRelState::rel, LVRelState::rel_pages, VacuumCutoffs::relfrozenxid, VacuumCutoffs::relminmxid, TransactionIdIsNormal, TransactionIdPrecedes(), and visibilitymap_count().

Referenced by heap_vacuum_rel().
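
A worked example of the success cap derived from MAX_EAGER_FREEZE_SUCCESS_RATE (the figures are invented for illustration, and the per-region failure handling is described only approximately):

    /*
     * Illustrative numbers only.  Suppose visibilitymap_count() reports
     *   allvisible = 50000 blocks, allfrozen = 20000 blocks.
     *
     * Success cap for this VACUUM:
     *   eager_scan_remaining_successes
     *       = MAX_EAGER_FREEZE_SUCCESS_RATE * (allvisible - allfrozen)
     *       = 0.2 * 30000 = 6000 pages that may be eagerly frozen.
     *
     * The first eager scan region ends at a random offset within the first
     * EAGER_SCAN_REGION_SIZE (4096) blocks, and first_region_ratio scales the
     * eager-freeze failures tolerated in that shorter first region
     * proportionally.
     */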

◆ heap_vacuum_rel()

void heap_vacuum_rel ( Relation  rel,
const VacuumParams  params,
BufferAccessStrategy  bstrategy 
)

Definition at line 614 of file vacuumlazy.c.

616{
617 LVRelState *vacrel;
618 bool verbose,
619 instrument,
620 skipwithvm,
621 frozenxid_updated,
622 minmulti_updated;
623 BlockNumber orig_rel_pages,
624 new_rel_pages,
625 new_rel_allvisible,
626 new_rel_allfrozen;
627 PGRUsage ru0;
628 TimestampTz starttime = 0;
629 PgStat_Counter startreadtime = 0,
630 startwritetime = 0;
631 WalUsage startwalusage = pgWalUsage;
632 BufferUsage startbufferusage = pgBufferUsage;
633 ErrorContextCallback errcallback;
634 char **indnames = NULL;
635
636 verbose = (params.options & VACOPT_VERBOSE) != 0;
637 instrument = (verbose || (AmAutoVacuumWorkerProcess() &&
638 params.log_min_duration >= 0));
639 if (instrument)
640 {
641 pg_rusage_init(&ru0);
642 if (track_io_timing)
643 {
644 startreadtime = pgStatBlockReadTime;
645 startwritetime = pgStatBlockWriteTime;
646 }
647 }
648
649 /* Used for instrumentation and stats report */
650 starttime = GetCurrentTimestamp();
651
653 RelationGetRelid(rel));
654
655 /*
656 * Setup error traceback support for ereport() first. The idea is to set
657 * up an error context callback to display additional information on any
658 * error during a vacuum. During different phases of vacuum, we update
659 * the state so that the error context callback always display current
660 * information.
661 *
662 * Copy the names of heap rel into local memory for error reporting
663 * purposes, too. It isn't always safe to assume that we can get the name
664 * of each rel. It's convenient for code in lazy_scan_heap to always use
665 * these temp copies.
666 */
667 vacrel = (LVRelState *) palloc0(sizeof(LVRelState));
671 vacrel->indname = NULL;
673 vacrel->verbose = verbose;
674 errcallback.callback = vacuum_error_callback;
675 errcallback.arg = vacrel;
676 errcallback.previous = error_context_stack;
677 error_context_stack = &errcallback;
678
679 /* Set up high level stuff about rel and its indexes */
680 vacrel->rel = rel;
682 &vacrel->indrels);
683 vacrel->bstrategy = bstrategy;
684 if (instrument && vacrel->nindexes > 0)
685 {
686 /* Copy index names used by instrumentation (not error reporting) */
687 indnames = palloc(sizeof(char *) * vacrel->nindexes);
688 for (int i = 0; i < vacrel->nindexes; i++)
689 indnames[i] = pstrdup(RelationGetRelationName(vacrel->indrels[i]));
690 }
691
692 /*
693 * The index_cleanup param either disables index vacuuming and cleanup or
694 * forces it to go ahead when we would otherwise apply the index bypass
695 * optimization. The default is 'auto', which leaves the final decision
696 * up to lazy_vacuum().
697 *
698 * The truncate param allows user to avoid attempting relation truncation,
699 * though it can't force truncation to happen.
700 */
703 params.truncate != VACOPTVALUE_AUTO);
704
705 /*
706 * While VacuumFailSafeActive is reset to false before calling this, we
707 * still need to reset it here due to recursive calls.
708 */
709 VacuumFailsafeActive = false;
710 vacrel->consider_bypass_optimization = true;
711 vacrel->do_index_vacuuming = true;
712 vacrel->do_index_cleanup = true;
713 vacrel->do_rel_truncate = (params.truncate != VACOPTVALUE_DISABLED);
715 {
716 /* Force disable index vacuuming up-front */
717 vacrel->do_index_vacuuming = false;
718 vacrel->do_index_cleanup = false;
719 }
720 else if (params.index_cleanup == VACOPTVALUE_ENABLED)
721 {
722 /* Force index vacuuming. Note that failsafe can still bypass. */
723 vacrel->consider_bypass_optimization = false;
724 }
725 else
726 {
727 /* Default/auto, make all decisions dynamically */
729 }
730
731 /* Initialize page counters explicitly (be tidy) */
732 vacrel->scanned_pages = 0;
733 vacrel->eager_scanned_pages = 0;
734 vacrel->removed_pages = 0;
735 vacrel->new_frozen_tuple_pages = 0;
736 vacrel->lpdead_item_pages = 0;
737 vacrel->missed_dead_pages = 0;
738 vacrel->nonempty_pages = 0;
739 /* dead_items_alloc allocates vacrel->dead_items later on */
740
741 /* Allocate/initialize output statistics state */
742 vacrel->new_rel_tuples = 0;
743 vacrel->new_live_tuples = 0;
744 vacrel->indstats = (IndexBulkDeleteResult **)
745 palloc0(vacrel->nindexes * sizeof(IndexBulkDeleteResult *));
746
747 /* Initialize remaining counters (be tidy) */
748 vacrel->num_index_scans = 0;
749 vacrel->tuples_deleted = 0;
750 vacrel->tuples_frozen = 0;
751 vacrel->lpdead_items = 0;
752 vacrel->live_tuples = 0;
753 vacrel->recently_dead_tuples = 0;
754 vacrel->missed_dead_tuples = 0;
755
756 vacrel->vm_new_visible_pages = 0;
757 vacrel->vm_new_visible_frozen_pages = 0;
758 vacrel->vm_new_frozen_pages = 0;
759
760 /*
761 * Get cutoffs that determine which deleted tuples are considered DEAD,
762 * not just RECENTLY_DEAD, and which XIDs/MXIDs to freeze. Then determine
763 * the extent of the blocks that we'll scan in lazy_scan_heap. It has to
764 * happen in this order to ensure that the OldestXmin cutoff field works
765 * as an upper bound on the XIDs stored in the pages we'll actually scan
766 * (NewRelfrozenXid tracking must never be allowed to miss unfrozen XIDs).
767 *
768 * Next acquire vistest, a related cutoff that's used in pruning. We use
769 * vistest in combination with OldestXmin to ensure that
770 * heap_page_prune_and_freeze() always removes any deleted tuple whose
771 * xmax is < OldestXmin. lazy_scan_prune must never become confused about
772 * whether a tuple should be frozen or removed. (In the future we might
773 * want to teach lazy_scan_prune to recompute vistest from time to time,
774 * to increase the number of dead tuples it can prune away.)
775 */
776 vacrel->aggressive = vacuum_get_cutoffs(rel, params, &vacrel->cutoffs);
777 vacrel->rel_pages = orig_rel_pages = RelationGetNumberOfBlocks(rel);
778 vacrel->vistest = GlobalVisTestFor(rel);
779
780 /* Initialize state used to track oldest extant XID/MXID */
781 vacrel->NewRelfrozenXid = vacrel->cutoffs.OldestXmin;
782 vacrel->NewRelminMxid = vacrel->cutoffs.OldestMxact;
783
784 /*
785 * Initialize state related to tracking all-visible page skipping. This is
786 * very important to determine whether or not it is safe to advance the
787 * relfrozenxid/relminmxid.
788 */
789 vacrel->skippedallvis = false;
790 skipwithvm = true;
792 {
793 /*
794 * Force aggressive mode, and disable skipping blocks using the
795 * visibility map (even those set all-frozen)
796 */
797 vacrel->aggressive = true;
798 skipwithvm = false;
799 }
800
801 vacrel->skipwithvm = skipwithvm;
802
803 /*
804 * Set up eager scan tracking state. This must happen after determining
805 * whether or not the vacuum must be aggressive, because only normal
806 * vacuums use the eager scan algorithm.
807 */
808 heap_vacuum_eager_scan_setup(vacrel, params);
809
810 if (verbose)
811 {
812 if (vacrel->aggressive)
814 (errmsg("aggressively vacuuming \"%s.%s.%s\"",
815 vacrel->dbname, vacrel->relnamespace,
816 vacrel->relname)));
817 else
819 (errmsg("vacuuming \"%s.%s.%s\"",
820 vacrel->dbname, vacrel->relnamespace,
821 vacrel->relname)));
822 }
823
824 /*
825 * Allocate dead_items memory using dead_items_alloc. This handles
826 * parallel VACUUM initialization as part of allocating shared memory
827 * space used for dead_items. (But do a failsafe precheck first, to
828 * ensure that parallel VACUUM won't be attempted at all when relfrozenxid
829 * is already dangerously old.)
830 */
832 dead_items_alloc(vacrel, params.nworkers);
833
834 /*
835 * Call lazy_scan_heap to perform all required heap pruning, index
836 * vacuuming, and heap vacuuming (plus related processing)
837 */
838 lazy_scan_heap(vacrel);
839
840 /*
841 * Free resources managed by dead_items_alloc. This ends parallel mode in
842 * passing when necessary.
843 */
844 dead_items_cleanup(vacrel);
846
847 /*
848 * Update pg_class entries for each of rel's indexes where appropriate.
849 *
850 * Unlike the later update to rel's pg_class entry, this is not critical.
851 * Maintains relpages/reltuples statistics used by the planner only.
852 */
853 if (vacrel->do_index_cleanup)
855
856 /* Done with rel's indexes */
857 vac_close_indexes(vacrel->nindexes, vacrel->indrels, NoLock);
858
859 /* Optionally truncate rel */
860 if (should_attempt_truncation(vacrel))
861 lazy_truncate_heap(vacrel);
862
863 /* Pop the error context stack */
864 error_context_stack = errcallback.previous;
865
866 /* Report that we are now doing final cleanup */
869
870 /*
871 * Prepare to update rel's pg_class entry.
872 *
873 * Aggressive VACUUMs must always be able to advance relfrozenxid to a
874 * value >= FreezeLimit, and relminmxid to a value >= MultiXactCutoff.
875 * Non-aggressive VACUUMs may advance them by any amount, or not at all.
876 */
877 Assert(vacrel->NewRelfrozenXid == vacrel->cutoffs.OldestXmin ||
879 vacrel->cutoffs.relfrozenxid,
880 vacrel->NewRelfrozenXid));
881 Assert(vacrel->NewRelminMxid == vacrel->cutoffs.OldestMxact ||
883 vacrel->cutoffs.relminmxid,
884 vacrel->NewRelminMxid));
885 if (vacrel->skippedallvis)
886 {
887 /*
888 * Must keep original relfrozenxid in a non-aggressive VACUUM that
889 * chose to skip an all-visible page range. The state that tracks new
890 * values will have missed unfrozen XIDs from the pages we skipped.
891 */
892 Assert(!vacrel->aggressive);
895 }
896
897 /*
898 * For safety, clamp relallvisible to be not more than what we're setting
899 * pg_class.relpages to
900 */
901 new_rel_pages = vacrel->rel_pages; /* After possible rel truncation */
902 visibilitymap_count(rel, &new_rel_allvisible, &new_rel_allfrozen);
903 if (new_rel_allvisible > new_rel_pages)
904 new_rel_allvisible = new_rel_pages;
905
906 /*
907 * An all-frozen block _must_ be all-visible. As such, clamp the count of
908 * all-frozen blocks to the count of all-visible blocks. This matches the
909 * clamping of relallvisible above.
910 */
911 if (new_rel_allfrozen > new_rel_allvisible)
912 new_rel_allfrozen = new_rel_allvisible;
913
914 /*
915 * Now actually update rel's pg_class entry.
916 *
917 * In principle new_live_tuples could be -1 indicating that we (still)
918 * don't know the tuple count. In practice that can't happen, since we
919 * scan every page that isn't skipped using the visibility map.
920 */
921 vac_update_relstats(rel, new_rel_pages, vacrel->new_live_tuples,
922 new_rel_allvisible, new_rel_allfrozen,
923 vacrel->nindexes > 0,
924 vacrel->NewRelfrozenXid, vacrel->NewRelminMxid,
925 &frozenxid_updated, &minmulti_updated, false);
926
927 /*
928 * Report results to the cumulative stats system, too.
929 *
930 * Deliberately avoid telling the stats system about LP_DEAD items that
931 * remain in the table due to VACUUM bypassing index and heap vacuuming.
932 * ANALYZE will consider the remaining LP_DEAD items to be dead "tuples".
933 * It seems like a good idea to err on the side of not vacuuming again too
934 * soon in cases where the failsafe prevented significant amounts of heap
935 * vacuuming.
936 */
938 rel->rd_rel->relisshared,
939 Max(vacrel->new_live_tuples, 0),
940 vacrel->recently_dead_tuples +
941 vacrel->missed_dead_tuples,
942 starttime);
944
945 if (instrument)
946 {
948
949 if (verbose || params.log_min_duration == 0 ||
950 TimestampDifferenceExceeds(starttime, endtime,
951 params.log_min_duration))
952 {
953 long secs_dur;
954 int usecs_dur;
955 WalUsage walusage;
956 BufferUsage bufferusage;
958 char *msgfmt;
959 int32 diff;
960 double read_rate = 0,
961 write_rate = 0;
962 int64 total_blks_hit;
963 int64 total_blks_read;
964 int64 total_blks_dirtied;
965
966 TimestampDifference(starttime, endtime, &secs_dur, &usecs_dur);
967 memset(&walusage, 0, sizeof(WalUsage));
968 WalUsageAccumDiff(&walusage, &pgWalUsage, &startwalusage);
969 memset(&bufferusage, 0, sizeof(BufferUsage));
970 BufferUsageAccumDiff(&bufferusage, &pgBufferUsage, &startbufferusage);
971
972 total_blks_hit = bufferusage.shared_blks_hit +
973 bufferusage.local_blks_hit;
974 total_blks_read = bufferusage.shared_blks_read +
975 bufferusage.local_blks_read;
976 total_blks_dirtied = bufferusage.shared_blks_dirtied +
977 bufferusage.local_blks_dirtied;
978
980 if (verbose)
981 {
982 /*
983 * Aggressiveness already reported earlier, in dedicated
984 * VACUUM VERBOSE ereport
985 */
986 Assert(!params.is_wraparound);
987 msgfmt = _("finished vacuuming \"%s.%s.%s\": index scans: %d\n");
988 }
989 else if (params.is_wraparound)
990 {
991 /*
992 * While it's possible for a VACUUM to be both is_wraparound
993 * and !aggressive, that's just a corner-case -- is_wraparound
994 * implies aggressive. Produce distinct output for the corner
995 * case all the same, just in case.
996 */
997 if (vacrel->aggressive)
998 msgfmt = _("automatic aggressive vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
999 else
1000 msgfmt = _("automatic vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
1001 }
1002 else
1003 {
1004 if (vacrel->aggressive)
1005 msgfmt = _("automatic aggressive vacuum of table \"%s.%s.%s\": index scans: %d\n");
1006 else
1007 msgfmt = _("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n");
1008 }
1009 appendStringInfo(&buf, msgfmt,
1010 vacrel->dbname,
1011 vacrel->relnamespace,
1012 vacrel->relname,
1013 vacrel->num_index_scans);
1014 appendStringInfo(&buf, _("pages: %u removed, %u remain, %u scanned (%.2f%% of total), %u eagerly scanned\n"),
1015 vacrel->removed_pages,
1016 new_rel_pages,
1017 vacrel->scanned_pages,
1018 orig_rel_pages == 0 ? 100.0 :
1019 100.0 * vacrel->scanned_pages /
1020 orig_rel_pages,
1021 vacrel->eager_scanned_pages);
1023 _("tuples: %" PRId64 " removed, %" PRId64 " remain, %" PRId64 " are dead but not yet removable\n"),
1024 vacrel->tuples_deleted,
1025 (int64) vacrel->new_rel_tuples,
1026 vacrel->recently_dead_tuples);
1027 if (vacrel->missed_dead_tuples > 0)
1029 _("tuples missed: %" PRId64 " dead from %u pages not removed due to cleanup lock contention\n"),
1030 vacrel->missed_dead_tuples,
1031 vacrel->missed_dead_pages);
1032 diff = (int32) (ReadNextTransactionId() -
1033 vacrel->cutoffs.OldestXmin);
1035 _("removable cutoff: %u, which was %d XIDs old when operation ended\n"),
1036 vacrel->cutoffs.OldestXmin, diff);
1037 if (frozenxid_updated)
1038 {
1039 diff = (int32) (vacrel->NewRelfrozenXid -
1040 vacrel->cutoffs.relfrozenxid);
1042 _("new relfrozenxid: %u, which is %d XIDs ahead of previous value\n"),
1043 vacrel->NewRelfrozenXid, diff);
1044 }
1045 if (minmulti_updated)
1046 {
1047 diff = (int32) (vacrel->NewRelminMxid -
1048 vacrel->cutoffs.relminmxid);
1050 _("new relminmxid: %u, which is %d MXIDs ahead of previous value\n"),
1051 vacrel->NewRelminMxid, diff);
1052 }
1053 appendStringInfo(&buf, _("frozen: %u pages from table (%.2f%% of total) had %" PRId64 " tuples frozen\n"),
1054 vacrel->new_frozen_tuple_pages,
1055 orig_rel_pages == 0 ? 100.0 :
1056 100.0 * vacrel->new_frozen_tuple_pages /
1057 orig_rel_pages,
1058 vacrel->tuples_frozen);
1059
1061 _("visibility map: %u pages set all-visible, %u pages set all-frozen (%u were all-visible)\n"),
1062 vacrel->vm_new_visible_pages,
1063 vacrel->vm_new_visible_frozen_pages +
1064 vacrel->vm_new_frozen_pages,
1065 vacrel->vm_new_frozen_pages);
1066 if (vacrel->do_index_vacuuming)
1067 {
1068 if (vacrel->nindexes == 0 || vacrel->num_index_scans == 0)
1069 appendStringInfoString(&buf, _("index scan not needed: "));
1070 else
1071 appendStringInfoString(&buf, _("index scan needed: "));
1072
1073 msgfmt = _("%u pages from table (%.2f%% of total) had %" PRId64 " dead item identifiers removed\n");
1074 }
1075 else
1076 {
1078 appendStringInfoString(&buf, _("index scan bypassed: "));
1079 else
1080 appendStringInfoString(&buf, _("index scan bypassed by failsafe: "));
1081
1082 msgfmt = _("%u pages from table (%.2f%% of total) have %" PRId64 " dead item identifiers\n");
1083 }
1084 appendStringInfo(&buf, msgfmt,
1085 vacrel->lpdead_item_pages,
1086 orig_rel_pages == 0 ? 100.0 :
1087 100.0 * vacrel->lpdead_item_pages / orig_rel_pages,
1088 vacrel->lpdead_items);
1089 for (int i = 0; i < vacrel->nindexes; i++)
1090 {
1091 IndexBulkDeleteResult *istat = vacrel->indstats[i];
1092
1093 if (!istat)
1094 continue;
1095
1097 _("index \"%s\": pages: %u in total, %u newly deleted, %u currently deleted, %u reusable\n"),
1098 indnames[i],
1099 istat->num_pages,
1100 istat->pages_newly_deleted,
1101 istat->pages_deleted,
1102 istat->pages_free);
1103 }
1104 if (track_cost_delay_timing)
1105 {
1106 /*
1107 * We bypass the changecount mechanism because this value is
1108 * only updated by the calling process. We also rely on the
1109 * above call to pgstat_progress_end_command() to not clear
1110 * the st_progress_param array.
1111 */
1112 appendStringInfo(&buf, _("delay time: %.3f ms\n"),
1114 }
1115 if (track_io_timing)
1116 {
1117 double read_ms = (double) (pgStatBlockReadTime - startreadtime) / 1000;
1118 double write_ms = (double) (pgStatBlockWriteTime - startwritetime) / 1000;
1119
1120 appendStringInfo(&buf, _("I/O timings: read: %.3f ms, write: %.3f ms\n"),
1121 read_ms, write_ms);
1122 }
1123 if (secs_dur > 0 || usecs_dur > 0)
1124 {
1125 read_rate = (double) BLCKSZ * total_blks_read /
1126 (1024 * 1024) / (secs_dur + usecs_dur / 1000000.0);
1127 write_rate = (double) BLCKSZ * total_blks_dirtied /
1128 (1024 * 1024) / (secs_dur + usecs_dur / 1000000.0);
1129 }
1130 appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
1131 read_rate, write_rate);
1133 _("buffer usage: %" PRId64 " hits, %" PRId64 " reads, %" PRId64 " dirtied\n"),
1134 total_blks_hit,
1135 total_blks_read,
1136 total_blks_dirtied);
1138 _("WAL usage: %" PRId64 " records, %" PRId64 " full page images, %" PRIu64 " bytes, %" PRId64 " buffers full\n"),
1139 walusage.wal_records,
1140 walusage.wal_fpi,
1141 walusage.wal_bytes,
1142 walusage.wal_buffers_full);
1143 appendStringInfo(&buf, _("system usage: %s"), pg_rusage_show(&ru0));
1144
1145 ereport(verbose ? INFO : LOG,
1146 (errmsg_internal("%s", buf.data)));
1147 pfree(buf.data);
1148 }
1149 }
1150
1151 /* Cleanup index statistics and index names */
1152 for (int i = 0; i < vacrel->nindexes; i++)
1153 {
1154 if (vacrel->indstats[i])
1155 pfree(vacrel->indstats[i]);
1156
1157 if (instrument)
1158 pfree(indnames[i]);
1159 }
1160}
void TimestampDifference(TimestampTz start_time, TimestampTz stop_time, long *secs, int *microsecs)
Definition: timestamp.c:1721
bool TimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, int msec)
Definition: timestamp.c:1781
TimestampTz GetCurrentTimestamp(void)
Definition: timestamp.c:1645
void pgstat_progress_start_command(ProgressCommandType cmdtype, Oid relid)
void pgstat_progress_update_param(int index, int64 val)
void pgstat_progress_end_command(void)
@ PROGRESS_COMMAND_VACUUM
PgBackendStatus * MyBEEntry
bool track_io_timing
Definition: bufmgr.c:147
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:283
#define Max(x, y)
Definition: c.h:997
int32_t int32
Definition: c.h:534
int64 TimestampTz
Definition: timestamp.h:39
int errmsg_internal(const char *fmt,...)
Definition: elog.c:1161
ErrorContextCallback * error_context_stack
Definition: elog.c:95
#define _(x)
Definition: elog.c:91
#define LOG
Definition: elog.h:31
Oid MyDatabaseId
Definition: globals.c:94
int verbose
WalUsage pgWalUsage
Definition: instrument.c:22
void WalUsageAccumDiff(WalUsage *dst, const WalUsage *add, const WalUsage *sub)
Definition: instrument.c:287
BufferUsage pgBufferUsage
Definition: instrument.c:20
void BufferUsageAccumDiff(BufferUsage *dst, const BufferUsage *add, const BufferUsage *sub)
Definition: instrument.c:248
int i
Definition: isn.c:77
#define NoLock
Definition: lockdefs.h:34
#define RowExclusiveLock
Definition: lockdefs.h:38
char * get_database_name(Oid dbid)
Definition: lsyscache.c:1259
char * get_namespace_name(Oid nspid)
Definition: lsyscache.c:3533
char * pstrdup(const char *in)
Definition: mcxt.c:1759
void pfree(void *pointer)
Definition: mcxt.c:1594
void * palloc0(Size size)
Definition: mcxt.c:1395
bool MultiXactIdPrecedesOrEquals(MultiXactId multi1, MultiXactId multi2)
Definition: multixact.c:3279
#define InvalidMultiXactId
Definition: multixact.h:25
const char * pg_rusage_show(const PGRUsage *ru0)
Definition: pg_rusage.c:40
void pg_rusage_init(PGRUsage *ru0)
Definition: pg_rusage.c:27
int64 PgStat_Counter
Definition: pgstat.h:66
PgStat_Counter pgStatBlockReadTime
PgStat_Counter pgStatBlockWriteTime
void pgstat_report_vacuum(Oid tableoid, bool shared, PgStat_Counter livetuples, PgStat_Counter deadtuples, TimestampTz starttime)
GlobalVisState * GlobalVisTestFor(Relation rel)
Definition: procarray.c:4069
#define PROGRESS_VACUUM_PHASE_FINAL_CLEANUP
Definition: progress.h:39
#define PROGRESS_VACUUM_PHASE
Definition: progress.h:21
#define PROGRESS_VACUUM_DELAY_TIME
Definition: progress.h:31
#define RelationGetRelationName(relation)
Definition: rel.h:548
#define RelationGetNamespace(relation)
Definition: rel.h:555
void appendStringInfo(StringInfo str, const char *fmt,...)
Definition: stringinfo.c:145
void appendStringInfoString(StringInfo str, const char *s)
Definition: stringinfo.c:230
void initStringInfo(StringInfo str)
Definition: stringinfo.c:97
int64 shared_blks_dirtied
Definition: instrument.h:28
int64 local_blks_hit
Definition: instrument.h:30
int64 shared_blks_read
Definition: instrument.h:27
int64 local_blks_read
Definition: instrument.h:31
int64 local_blks_dirtied
Definition: instrument.h:32
int64 shared_blks_hit
Definition: instrument.h:26
struct ErrorContextCallback * previous
Definition: elog.h:297
void(* callback)(void *arg)
Definition: elog.h:298
BlockNumber pages_deleted
Definition: genam.h:109
BlockNumber pages_newly_deleted
Definition: genam.h:108
BlockNumber pages_free
Definition: genam.h:110
BlockNumber num_pages
Definition: genam.h:104
BlockNumber vm_new_frozen_pages
Definition: vacuumlazy.c:336
int64 tuples_deleted
Definition: vacuumlazy.c:351
bool do_rel_truncate
Definition: vacuumlazy.c:279
BlockNumber scanned_pages
Definition: vacuumlazy.c:313
BlockNumber new_frozen_tuple_pages
Definition: vacuumlazy.c:322
GlobalVisState * vistest
Definition: vacuumlazy.c:283
BlockNumber removed_pages
Definition: vacuumlazy.c:321
int num_index_scans
Definition: vacuumlazy.c:349
double new_live_tuples
Definition: vacuumlazy.c:344
double new_rel_tuples
Definition: vacuumlazy.c:343
TransactionId NewRelfrozenXid
Definition: vacuumlazy.c:285
bool consider_bypass_optimization
Definition: vacuumlazy.c:274
int64 recently_dead_tuples
Definition: vacuumlazy.c:355
int64 tuples_frozen
Definition: vacuumlazy.c:352
char * dbname
Definition: vacuumlazy.c:290
BlockNumber missed_dead_pages
Definition: vacuumlazy.c:339
char * relnamespace
Definition: vacuumlazy.c:291
int64 live_tuples
Definition: vacuumlazy.c:354
int64 lpdead_items
Definition: vacuumlazy.c:353
BlockNumber lpdead_item_pages
Definition: vacuumlazy.c:338
BlockNumber eager_scanned_pages
Definition: vacuumlazy.c:319
bool do_index_cleanup
Definition: vacuumlazy.c:278
MultiXactId NewRelminMxid
Definition: vacuumlazy.c:286
int64 missed_dead_tuples
Definition: vacuumlazy.c:356
BlockNumber vm_new_visible_pages
Definition: vacuumlazy.c:325
VacErrPhase phase
Definition: vacuumlazy.c:296
char * indname
Definition: vacuumlazy.c:293
BlockNumber vm_new_visible_frozen_pages
Definition: vacuumlazy.c:333
int64 st_progress_param[PGSTAT_NUM_PROGRESS_PARAM]
Form_pg_class rd_rel
Definition: rel.h:111
MultiXactId OldestMxact
Definition: vacuum.h:275
int nworkers
Definition: vacuum.h:246
VacOptValue truncate
Definition: vacuum.h:231
bits32 options
Definition: vacuum.h:219
bool is_wraparound
Definition: vacuum.h:226
int log_min_duration
Definition: vacuum.h:227
VacOptValue index_cleanup
Definition: vacuum.h:230
int64 wal_buffers_full
Definition: instrument.h:56
uint64 wal_bytes
Definition: instrument.h:55
int64 wal_fpi
Definition: instrument.h:54
int64 wal_records
Definition: instrument.h:53
bool TransactionIdPrecedesOrEquals(TransactionId id1, TransactionId id2)
Definition: transam.c:299
static TransactionId ReadNextTransactionId(void)
Definition: transam.h:315
bool track_cost_delay_timing
Definition: vacuum.c:81
void vac_open_indexes(Relation relation, LOCKMODE lockmode, int *nindexes, Relation **Irel)
Definition: vacuum.c:2359
void vac_close_indexes(int nindexes, Relation *Irel, LOCKMODE lockmode)
Definition: vacuum.c:2402
bool VacuumFailsafeActive
Definition: vacuum.c:109
void vac_update_relstats(Relation relation, BlockNumber num_pages, double num_tuples, BlockNumber num_all_visible_pages, BlockNumber num_all_frozen_pages, bool hasindex, TransactionId frozenxid, MultiXactId minmulti, bool *frozenxid_updated, bool *minmulti_updated, bool in_outer_xact)
Definition: vacuum.c:1429
bool vacuum_get_cutoffs(Relation rel, const VacuumParams params, struct VacuumCutoffs *cutoffs)
Definition: vacuum.c:1103
#define VACOPT_VERBOSE
Definition: vacuum.h:182
@ VACOPTVALUE_AUTO
Definition: vacuum.h:203
@ VACOPTVALUE_ENABLED
Definition: vacuum.h:205
@ VACOPTVALUE_UNSPECIFIED
Definition: vacuum.h:202
@ VACOPTVALUE_DISABLED
Definition: vacuum.h:204
#define VACOPT_DISABLE_PAGE_SKIPPING
Definition: vacuum.h:188
static void dead_items_cleanup(LVRelState *vacrel)
Definition: vacuumlazy.c:3585
static void update_relstats_all_indexes(LVRelState *vacrel)
Definition: vacuumlazy.c:3726
static void heap_vacuum_eager_scan_setup(LVRelState *vacrel, const VacuumParams params)
Definition: vacuumlazy.c:487
static void vacuum_error_callback(void *arg)
Definition: vacuumlazy.c:3761
static void lazy_truncate_heap(LVRelState *vacrel)
Definition: vacuumlazy.c:3205
static bool should_attempt_truncation(LVRelState *vacrel)
Definition: vacuumlazy.c:3185
static void lazy_scan_heap(LVRelState *vacrel)
Definition: vacuumlazy.c:1199
static bool lazy_check_wraparound_failsafe(LVRelState *vacrel)
Definition: vacuumlazy.c:2955
static void dead_items_alloc(LVRelState *vacrel, int nworkers)
Definition: vacuumlazy.c:3478
bool IsInParallelMode(void)
Definition: xact.c:1089

References _, LVRelState::aggressive, AmAutoVacuumWorkerProcess, appendStringInfo(), appendStringInfoString(), ErrorContextCallback::arg, Assert(), LVRelState::bstrategy, buf, BufferUsageAccumDiff(), ErrorContextCallback::callback, LVRelState::consider_bypass_optimization, LVRelState::cutoffs, LVRelState::dbname, dead_items_alloc(), dead_items_cleanup(), LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::do_rel_truncate, LVRelState::eager_scanned_pages, ereport, errmsg(), errmsg_internal(), error_context_stack, VacuumCutoffs::FreezeLimit, get_database_name(), get_namespace_name(), GetCurrentTimestamp(), GlobalVisTestFor(), heap_vacuum_eager_scan_setup(), i, VacuumParams::index_cleanup, LVRelState::indname, LVRelState::indrels, LVRelState::indstats, INFO, initStringInfo(), InvalidMultiXactId, InvalidTransactionId, VacuumParams::is_wraparound, IsInParallelMode(), lazy_check_wraparound_failsafe(), lazy_scan_heap(), lazy_truncate_heap(), LVRelState::live_tuples, BufferUsage::local_blks_dirtied, BufferUsage::local_blks_hit, BufferUsage::local_blks_read, LOG, VacuumParams::log_min_duration, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, Max, LVRelState::missed_dead_pages, LVRelState::missed_dead_tuples, VacuumCutoffs::MultiXactCutoff, MultiXactIdPrecedesOrEquals(), MyBEEntry, MyDatabaseId, LVRelState::new_frozen_tuple_pages, LVRelState::new_live_tuples, LVRelState::new_rel_tuples, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, LVRelState::nindexes, NoLock, LVRelState::nonempty_pages, LVRelState::num_index_scans, IndexBulkDeleteResult::num_pages, VacuumParams::nworkers, VacuumCutoffs::OldestMxact, VacuumCutoffs::OldestXmin, VacuumParams::options, IndexBulkDeleteResult::pages_deleted, IndexBulkDeleteResult::pages_free, IndexBulkDeleteResult::pages_newly_deleted, palloc(), palloc0(), pfree(), pg_rusage_init(), pg_rusage_show(), pgBufferUsage, pgstat_progress_end_command(), pgstat_progress_start_command(), pgstat_progress_update_param(), pgstat_report_vacuum(), pgStatBlockReadTime, pgStatBlockWriteTime, pgWalUsage, LVRelState::phase, ErrorContextCallback::previous, PROGRESS_COMMAND_VACUUM, PROGRESS_VACUUM_DELAY_TIME, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_FINAL_CLEANUP, pstrdup(), RelationData::rd_rel, ReadNextTransactionId(), LVRelState::recently_dead_tuples, LVRelState::rel, LVRelState::rel_pages, RelationGetNamespace, RelationGetNumberOfBlocks, RelationGetRelationName, RelationGetRelid, VacuumCutoffs::relfrozenxid, VacuumCutoffs::relminmxid, LVRelState::relname, LVRelState::relnamespace, LVRelState::removed_pages, RowExclusiveLock, LVRelState::scanned_pages, BufferUsage::shared_blks_dirtied, BufferUsage::shared_blks_hit, BufferUsage::shared_blks_read, should_attempt_truncation(), LVRelState::skippedallvis, LVRelState::skipwithvm, PgBackendStatus::st_progress_param, TimestampDifference(), TimestampDifferenceExceeds(), track_cost_delay_timing, track_io_timing, TransactionIdPrecedesOrEquals(), VacuumParams::truncate, LVRelState::tuples_deleted, LVRelState::tuples_frozen, update_relstats_all_indexes(), vac_close_indexes(), vac_open_indexes(), vac_update_relstats(), VACOPT_DISABLE_PAGE_SKIPPING, VACOPT_VERBOSE, VACOPTVALUE_AUTO, VACOPTVALUE_DISABLED, VACOPTVALUE_ENABLED, VACOPTVALUE_UNSPECIFIED, VACUUM_ERRCB_PHASE_UNKNOWN, vacuum_error_callback(), vacuum_get_cutoffs(), VacuumFailsafeActive, LVRelState::verbose, verbose, visibilitymap_count(), LVRelState::vistest, LVRelState::vm_new_frozen_pages, LVRelState::vm_new_visible_frozen_pages, 
LVRelState::vm_new_visible_pages, WalUsage::wal_buffers_full, WalUsage::wal_bytes, WalUsage::wal_fpi, WalUsage::wal_records, and WalUsageAccumDiff().
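The averages in the log output assembled above are plain arithmetic over the counters collected during the run. The following standalone sketch (not part of vacuumlazy.c; the block size, block counts, and elapsed time are made-up example values) reproduces the MB/s and percentage calculations used in the message:

#include <stdio.h>

int
main(void)
{
	const double BLCKSZ = 8192;		/* assumed default block size */
	long		total_blks_read = 12000;
	long		total_blks_dirtied = 3000;
	long		scanned_pages = 5000;
	long		orig_rel_pages = 20000;
	double		secs_dur = 2, usecs_dur = 500000;	/* 2.5 s elapsed */
	double		elapsed = secs_dur + usecs_dur / 1000000.0;

	/* MB/s figures, as in the "avg read rate / avg write rate" line */
	double		read_rate = BLCKSZ * total_blks_read / (1024 * 1024) / elapsed;
	double		write_rate = BLCKSZ * total_blks_dirtied / (1024 * 1024) / elapsed;

	/* percentage of the table scanned, guarding against an empty relation */
	double		scanned_pct = orig_rel_pages == 0 ? 100.0 :
		100.0 * scanned_pages / orig_rel_pages;

	printf("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n",
		   read_rate, write_rate);
	printf("scanned %.2f%% of total\n", scanned_pct);
	return 0;
}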

◆ lazy_check_wraparound_failsafe()

static bool lazy_check_wraparound_failsafe ( LVRelState vacrel)
static

Definition at line 2955 of file vacuumlazy.c.

2956{
2957 /* Don't warn more than once per VACUUM */
2958 if (VacuumFailsafeActive)
2959 return true;
2960
2961 if (unlikely(vacuum_xid_failsafe_check(&vacrel->cutoffs)))
2962 {
2963 const int progress_index[] = {
2964 PROGRESS_VACUUM_INDEXES_TOTAL,
2965 PROGRESS_VACUUM_INDEXES_PROCESSED
2966 };
2967 int64 progress_val[2] = {0, 0};
2968
2969 VacuumFailsafeActive = true;
2970
2971 /*
2972 * Abandon use of a buffer access strategy to allow use of all of
2973 * shared buffers. We assume the caller who allocated the memory for
2974 * the BufferAccessStrategy will free it.
2975 */
2976 vacrel->bstrategy = NULL;
2977
2978 /* Disable index vacuuming, index cleanup, and heap rel truncation */
2979 vacrel->do_index_vacuuming = false;
2980 vacrel->do_index_cleanup = false;
2981 vacrel->do_rel_truncate = false;
2982
2983 /* Reset the progress counters */
2984 pgstat_progress_update_multi_param(2, progress_index, progress_val);
2985
2987 (errmsg("bypassing nonessential maintenance of table \"%s.%s.%s\" as a failsafe after %d index scans",
2988 vacrel->dbname, vacrel->relnamespace, vacrel->relname,
2989 vacrel->num_index_scans),
2990 errdetail("The table's relfrozenxid or relminmxid is too far in the past."),
2991 errhint("Consider increasing configuration parameter \"maintenance_work_mem\" or \"autovacuum_work_mem\".\n"
2992 "You might also need to consider other ways for VACUUM to keep up with the allocation of transaction IDs.")));
2993
2994 /* Stop applying cost limits from this point on */
2995 VacuumCostActive = false;
2996 VacuumCostBalance = 0;
2997
2998 return true;
2999 }
3000
3001 return false;
3002}
#define unlikely(x)
Definition: c.h:402
int errdetail(const char *fmt,...)
Definition: elog.c:1207
int errhint(const char *fmt,...)
Definition: elog.c:1321
bool VacuumCostActive
Definition: globals.c:158
int VacuumCostBalance
Definition: globals.c:157
#define PROGRESS_VACUUM_INDEXES_PROCESSED
Definition: progress.h:30
#define PROGRESS_VACUUM_INDEXES_TOTAL
Definition: progress.h:29
bool vacuum_xid_failsafe_check(const struct VacuumCutoffs *cutoffs)
Definition: vacuum.c:1271

References LVRelState::bstrategy, LVRelState::cutoffs, LVRelState::dbname, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::do_rel_truncate, ereport, errdetail(), errhint(), errmsg(), LVRelState::num_index_scans, pgstat_progress_update_multi_param(), PROGRESS_VACUUM_INDEXES_PROCESSED, PROGRESS_VACUUM_INDEXES_TOTAL, LVRelState::relname, LVRelState::relnamespace, unlikely, vacuum_xid_failsafe_check(), VacuumCostActive, VacuumCostBalance, VacuumFailsafeActive, and WARNING.

Referenced by heap_vacuum_rel(), lazy_scan_heap(), and lazy_vacuum_all_indexes().
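lazy_check_wraparound_failsafe() is cheap to call repeatedly because it returns immediately once VacuumFailsafeActive is set; lazy_scan_heap() re-checks it after every FAILSAFE_EVERY_PAGES scanned pages (about 4GB of heap at the default block size). Below is a minimal standalone model of that cadence, not PostgreSQL code: table_is_dangerously_old() is a hypothetical stand-in for vacuum_xid_failsafe_check(), and the page counts are invented.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BLCKSZ 8192		/* assumed default block size */
#define FAILSAFE_EVERY_PAGES ((uint32_t) (((uint64_t) 4 * 1024 * 1024 * 1024) / BLCKSZ))

static bool failsafe_active = false;

/* Stand-in for vacuum_xid_failsafe_check(): pretend the table gets "old". */
static bool
table_is_dangerously_old(uint32_t scanned_pages)
{
	return scanned_pages >= 2000000;
}

static void
check_failsafe(uint32_t scanned_pages)
{
	if (failsafe_active)			/* act at most once per VACUUM */
		return;
	if (table_is_dangerously_old(scanned_pages))
	{
		failsafe_active = true;
		/* the real code also disables index vacuuming/cleanup and truncation */
		printf("failsafe triggered at page %u\n", scanned_pages);
	}
}

int
main(void)
{
	uint32_t	rel_pages = 3000000;

	for (uint32_t scanned = 1; scanned <= rel_pages; scanned++)
		if (scanned % FAILSAFE_EVERY_PAGES == 0)	/* every ~4GB of heap */
			check_failsafe(scanned);

	printf("re-checked every %u pages\n", FAILSAFE_EVERY_PAGES);
	return 0;
}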

◆ lazy_cleanup_all_indexes()

static void lazy_cleanup_all_indexes ( LVRelState vacrel)
static

Definition at line 3008 of file vacuumlazy.c.

3009{
3010 double reltuples = vacrel->new_rel_tuples;
3011 bool estimated_count = vacrel->scanned_pages < vacrel->rel_pages;
3012 const int progress_start_index[] = {
3013 PROGRESS_VACUUM_PHASE,
3014 PROGRESS_VACUUM_INDEXES_TOTAL
3015 };
3016 const int progress_end_index[] = {
3017 PROGRESS_VACUUM_INDEXES_TOTAL,
3018 PROGRESS_VACUUM_INDEXES_PROCESSED
3019 };
3020 int64 progress_start_val[2];
3021 int64 progress_end_val[2] = {0, 0};
3022
3023 Assert(vacrel->do_index_cleanup);
3024 Assert(vacrel->nindexes > 0);
3025
3026 /*
3027 * Report that we are now cleaning up indexes and the number of indexes to
3028 * cleanup.
3029 */
3030 progress_start_val[0] = PROGRESS_VACUUM_PHASE_INDEX_CLEANUP;
3031 progress_start_val[1] = vacrel->nindexes;
3032 pgstat_progress_update_multi_param(2, progress_start_index, progress_start_val);
3033
3034 if (!ParallelVacuumIsActive(vacrel))
3035 {
3036 for (int idx = 0; idx < vacrel->nindexes; idx++)
3037 {
3038 Relation indrel = vacrel->indrels[idx];
3039 IndexBulkDeleteResult *istat = vacrel->indstats[idx];
3040
3041 vacrel->indstats[idx] =
3042 lazy_cleanup_one_index(indrel, istat, reltuples,
3043 estimated_count, vacrel);
3044
3045 /* Report the number of indexes cleaned up */
3046 pgstat_progress_update_param(PROGRESS_VACUUM_INDEXES_PROCESSED,
3047 idx + 1);
3048 }
3049 }
3050 else
3051 {
3052 /* Outsource everything to parallel variant */
3053 parallel_vacuum_cleanup_all_indexes(vacrel->pvs, reltuples,
3054 vacrel->num_index_scans,
3055 estimated_count);
3056 }
3057
3058 /* Reset the progress counters */
3059 pgstat_progress_update_multi_param(2, progress_end_index, progress_end_val);
3060}
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:262
#define PROGRESS_VACUUM_PHASE_INDEX_CLEANUP
Definition: progress.h:37
static IndexBulkDeleteResult * lazy_cleanup_one_index(Relation indrel, IndexBulkDeleteResult *istat, double reltuples, bool estimated_count, LVRelState *vacrel)
Definition: vacuumlazy.c:3125
void parallel_vacuum_cleanup_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, int num_index_scans, bool estimated_count)

References Assert(), LVRelState::do_index_cleanup, idx(), LVRelState::indrels, LVRelState::indstats, lazy_cleanup_one_index(), LVRelState::new_rel_tuples, LVRelState::nindexes, LVRelState::num_index_scans, parallel_vacuum_cleanup_all_indexes(), ParallelVacuumIsActive, pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_INDEXES_PROCESSED, PROGRESS_VACUUM_INDEXES_TOTAL, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_INDEX_CLEANUP, LVRelState::pvs, LVRelState::rel_pages, and LVRelState::scanned_pages.

Referenced by lazy_scan_heap().
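When no parallel vacuum workers are in use, the indexes are cleaned up one at a time and PROGRESS_VACUUM_INDEXES_PROCESSED is advanced after each. The following standalone sketch models that driver loop; the index names and the cleanup_one_index() helper are hypothetical stand-ins for the real index AM callbacks, not the vacuumlazy.c API.

#include <stdbool.h>
#include <stdio.h>

struct progress { int indexes_total; int indexes_processed; };

static void
cleanup_one_index(const char *name, double reltuples, bool estimated_count)
{
	/* real code reaches the index AM's amvacuumcleanup via vac_cleanup_one_index() */
	printf("cleanup %s: num_heap_tuples=%.0f (%s)\n",
		   name, reltuples, estimated_count ? "estimated" : "exact");
}

int
main(void)
{
	const char *indexes[] = {"idx_a", "idx_b", "idx_c"};
	int			nindexes = 3;
	long		scanned_pages = 900, rel_pages = 1000;
	double		reltuples = 123456;

	/* reltuples is only an estimate unless every heap page was scanned */
	bool		estimated_count = scanned_pages < rel_pages;
	struct progress p = {nindexes, 0};

	for (int idx = 0; idx < nindexes; idx++)
	{
		cleanup_one_index(indexes[idx], reltuples, estimated_count);
		p.indexes_processed = idx + 1;	/* reported after each index */
	}
	printf("processed %d of %d indexes\n", p.indexes_processed, p.indexes_total);
	return 0;
}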

◆ lazy_cleanup_one_index()

static IndexBulkDeleteResult * lazy_cleanup_one_index ( Relation  indrel,
IndexBulkDeleteResult istat,
double  reltuples,
bool  estimated_count,
LVRelState vacrel 
)
static

Definition at line 3125 of file vacuumlazy.c.

3128{
3129 IndexVacuumInfo ivinfo;
3130 LVSavedErrInfo saved_err_info;
3131
3132 ivinfo.index = indrel;
3133 ivinfo.heaprel = vacrel->rel;
3134 ivinfo.analyze_only = false;
3135 ivinfo.report_progress = false;
3136 ivinfo.estimated_count = estimated_count;
3137 ivinfo.message_level = DEBUG2;
3138
3139 ivinfo.num_heap_tuples = reltuples;
3140 ivinfo.strategy = vacrel->bstrategy;
3141
3142 /*
3143 * Update error traceback information.
3144 *
3145 * The index name is saved during this phase and restored immediately
3146 * after this phase. See vacuum_error_callback.
3147 */
3148 Assert(vacrel->indname == NULL);
3149 vacrel->indname = pstrdup(RelationGetRelationName(indrel));
3150 update_vacuum_error_info(vacrel, &saved_err_info,
3151 VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
3152 InvalidBlockNumber, InvalidOffsetNumber);
3153
3154 istat = vac_cleanup_one_index(&ivinfo, istat);
3155
3156 /* Revert to the previous phase information for error traceback */
3157 restore_vacuum_error_info(vacrel, &saved_err_info);
3158 pfree(vacrel->indname);
3159 vacrel->indname = NULL;
3160
3161 return istat;
3162}
Relation index
Definition: genam.h:73
double num_heap_tuples
Definition: genam.h:79
bool analyze_only
Definition: genam.h:75
BufferAccessStrategy strategy
Definition: genam.h:80
Relation heaprel
Definition: genam.h:74
bool report_progress
Definition: genam.h:76
int message_level
Definition: genam.h:78
bool estimated_count
Definition: genam.h:77
IndexBulkDeleteResult * vac_cleanup_one_index(IndexVacuumInfo *ivinfo, IndexBulkDeleteResult *istat)
Definition: vacuum.c:2651
static void restore_vacuum_error_info(LVRelState *vacrel, const LVSavedErrInfo *saved_vacrel)
Definition: vacuumlazy.c:3844
static void update_vacuum_error_info(LVRelState *vacrel, LVSavedErrInfo *saved_vacrel, int phase, BlockNumber blkno, OffsetNumber offnum)
Definition: vacuumlazy.c:3825

References IndexVacuumInfo::analyze_only, Assert(), LVRelState::bstrategy, DEBUG2, IndexVacuumInfo::estimated_count, IndexVacuumInfo::heaprel, IndexVacuumInfo::index, LVRelState::indname, InvalidBlockNumber, InvalidOffsetNumber, IndexVacuumInfo::message_level, IndexVacuumInfo::num_heap_tuples, pfree(), pstrdup(), LVRelState::rel, RelationGetRelationName, IndexVacuumInfo::report_progress, restore_vacuum_error_info(), IndexVacuumInfo::strategy, update_vacuum_error_info(), vac_cleanup_one_index(), and VACUUM_ERRCB_PHASE_INDEX_CLEANUP.

Referenced by lazy_cleanup_all_indexes().
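The error-traceback handling here follows a save/restore discipline: the current phase and index name are swapped into the shared error-context state before calling into the index AM and restored immediately afterwards, so that any error raised during cleanup names the right object. A standalone model of the pattern follows; the struct and phase names are simplified stand-ins for LVRelState and LVSavedErrInfo, not the real types.

#include <stdio.h>

enum phase { PHASE_SCAN_HEAP, PHASE_INDEX_CLEANUP };

struct errinfo { enum phase phase; char objname[64]; };

static struct errinfo current;	/* what an error callback would report */

static void
do_index_cleanup(const char *indname)
{
	struct errinfo saved = current;	/* save caller's error context */

	current.phase = PHASE_INDEX_CLEANUP;
	snprintf(current.objname, sizeof(current.objname), "%s", indname);

	/* ... call into the index AM here; errors would cite indname ... */
	printf("cleaning index \"%s\" (phase %d)\n", current.objname, current.phase);

	current = saved;				/* restore previous error context */
}

int
main(void)
{
	current.phase = PHASE_SCAN_HEAP;
	snprintf(current.objname, sizeof(current.objname), "%s", "my_table");

	do_index_cleanup("my_table_pkey");
	printf("back to \"%s\" (phase %d)\n", current.objname, current.phase);
	return 0;
}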

◆ lazy_scan_heap()

static void lazy_scan_heap ( LVRelState vacrel)
static

Definition at line 1199 of file vacuumlazy.c.

1200{
1201 ReadStream *stream;
1202 BlockNumber rel_pages = vacrel->rel_pages,
1203 blkno = 0,
1204 next_fsm_block_to_vacuum = 0;
1205 BlockNumber orig_eager_scan_success_limit =
1206 vacrel->eager_scan_remaining_successes; /* for logging */
1207 Buffer vmbuffer = InvalidBuffer;
1208 const int initprog_index[] = {
1209 PROGRESS_VACUUM_PHASE,
1210 PROGRESS_VACUUM_TOTAL_HEAP_BLKS,
1211 PROGRESS_VACUUM_MAX_DEAD_TUPLE_BYTES
1212 };
1213 int64 initprog_val[3];
1214
1215 /* Report that we're scanning the heap, advertising total # of blocks */
1216 initprog_val[0] = PROGRESS_VACUUM_PHASE_SCAN_HEAP;
1217 initprog_val[1] = rel_pages;
1218 initprog_val[2] = vacrel->dead_items_info->max_bytes;
1219 pgstat_progress_update_multi_param(3, initprog_index, initprog_val);
1220
1221 /* Initialize for the first heap_vac_scan_next_block() call */
1222 vacrel->current_block = InvalidBlockNumber;
1223 vacrel->next_unskippable_block = InvalidBlockNumber;
1224 vacrel->next_unskippable_allvis = false;
1225 vacrel->next_unskippable_eager_scanned = false;
1226 vacrel->next_unskippable_vmbuffer = InvalidBuffer;
1227
1228 /*
1229 * Set up the read stream for vacuum's first pass through the heap.
1230 *
1231 * This could be made safe for READ_STREAM_USE_BATCHING, but only with
1232 * explicit work in heap_vac_scan_next_block.
1233 */
1234 stream = read_stream_begin_relation(READ_STREAM_MAINTENANCE,
1235 vacrel->bstrategy,
1236 vacrel->rel,
1237 MAIN_FORKNUM,
1238 heap_vac_scan_next_block,
1239 vacrel,
1240 sizeof(uint8));
1241
1242 while (true)
1243 {
1244 Buffer buf;
1245 Page page;
1246 uint8 blk_info = 0;
1247 int ndeleted = 0;
1248 bool has_lpdead_items;
1249 void *per_buffer_data = NULL;
1250 bool vm_page_frozen = false;
1251 bool got_cleanup_lock = false;
1252
1253 vacuum_delay_point(false);
1254
1255 /*
1256 * Regularly check if wraparound failsafe should trigger.
1257 *
1258 * There is a similar check inside lazy_vacuum_all_indexes(), but
1259 * relfrozenxid might start to look dangerously old before we reach
1260 * that point. This check also provides failsafe coverage for the
1261 * one-pass strategy, and the two-pass strategy with the index_cleanup
1262 * param set to 'off'.
1263 */
1264 if (vacrel->scanned_pages > 0 &&
1265 vacrel->scanned_pages % FAILSAFE_EVERY_PAGES == 0)
1266 lazy_check_wraparound_failsafe(vacrel);
1267
1268 /*
1269 * Consider if we definitely have enough space to process TIDs on page
1270 * already. If we are close to overrunning the available space for
1271 * dead_items TIDs, pause and do a cycle of vacuuming before we tackle
1272 * this page. However, let's force at least one page-worth of tuples
1273 * to be stored as to ensure we do at least some work when the memory
1274 * configured is so low that we run out before storing anything.
1275 */
1276 if (vacrel->dead_items_info->num_items > 0 &&
1277 TidStoreMemoryUsage(vacrel->dead_items) > vacrel->dead_items_info->max_bytes)
1278 {
1279 /*
1280 * Before beginning index vacuuming, we release any pin we may
1281 * hold on the visibility map page. This isn't necessary for
1282 * correctness, but we do it anyway to avoid holding the pin
1283 * across a lengthy, unrelated operation.
1284 */
1285 if (BufferIsValid(vmbuffer))
1286 {
1287 ReleaseBuffer(vmbuffer);
1288 vmbuffer = InvalidBuffer;
1289 }
1290
1291 /* Perform a round of index and heap vacuuming */
1292 vacrel->consider_bypass_optimization = false;
1293 lazy_vacuum(vacrel);
1294
1295 /*
1296 * Vacuum the Free Space Map to make newly-freed space visible on
1297 * upper-level FSM pages. Note that blkno is the previously
1298 * processed block.
1299 */
1300 FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
1301 blkno + 1);
1302 next_fsm_block_to_vacuum = blkno;
1303
1304 /* Report that we are once again scanning the heap */
1305 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1306 PROGRESS_VACUUM_PHASE_SCAN_HEAP);
1307 }
1308
1309 buf = read_stream_next_buffer(stream, &per_buffer_data);
1310
1311 /* The relation is exhausted. */
1312 if (!BufferIsValid(buf))
1313 break;
1314
1315 blk_info = *((uint8 *) per_buffer_data);
1316 CheckBufferIsPinnedOnce(buf);
1317 page = BufferGetPage(buf);
1318 blkno = BufferGetBlockNumber(buf);
1319
1320 vacrel->scanned_pages++;
1321 if (blk_info & VAC_BLK_WAS_EAGER_SCANNED)
1322 vacrel->eager_scanned_pages++;
1323
1324 /* Report as block scanned, update error traceback information */
1325 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
1326 update_vacuum_error_info(vacrel, NULL, VACUUM_ERRCB_PHASE_SCAN_HEAP,
1327 blkno, InvalidOffsetNumber);
1328
1329 /*
1330 * Pin the visibility map page in case we need to mark the page
1331 * all-visible. In most cases this will be very cheap, because we'll
1332 * already have the correct page pinned anyway.
1333 */
1334 visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
1335
1336 /*
1337 * We need a buffer cleanup lock to prune HOT chains and defragment
1338 * the page in lazy_scan_prune. But when it's not possible to acquire
1339 * a cleanup lock right away, we may be able to settle for reduced
1340 * processing using lazy_scan_noprune.
1341 */
1342 got_cleanup_lock = ConditionalLockBufferForCleanup(buf);
1343
1344 if (!got_cleanup_lock)
1345 LockBuffer(buf, BUFFER_LOCK_SHARE);
1346
1347 /* Check for new or empty pages before lazy_scan_[no]prune call */
1348 if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, !got_cleanup_lock,
1349 vmbuffer))
1350 {
1351 /* Processed as new/empty page (lock and pin released) */
1352 continue;
1353 }
1354
1355 /*
1356 * If we didn't get the cleanup lock, we can still collect LP_DEAD
1357 * items in the dead_items area for later vacuuming, count live and
1358 * recently dead tuples for vacuum logging, and determine if this
1359 * block could later be truncated. If we encounter any xid/mxids that
1360 * require advancing the relfrozenxid/relminxid, we'll have to wait
1361 * for a cleanup lock and call lazy_scan_prune().
1362 */
1363 if (!got_cleanup_lock &&
1364 !lazy_scan_noprune(vacrel, buf, blkno, page, &has_lpdead_items))
1365 {
1366 /*
1367 * lazy_scan_noprune could not do all required processing. Wait
1368 * for a cleanup lock, and call lazy_scan_prune in the usual way.
1369 */
1370 Assert(vacrel->aggressive);
1371 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
1372 LockBufferForCleanup(buf);
1373 got_cleanup_lock = true;
1374 }
1375
1376 /*
1377 * If we have a cleanup lock, we must now prune, freeze, and count
1378 * tuples. We may have acquired the cleanup lock originally, or we may
1379 * have gone back and acquired it after lazy_scan_noprune() returned
1380 * false. Either way, the page hasn't been processed yet.
1381 *
1382 * Like lazy_scan_noprune(), lazy_scan_prune() will count
1383 * recently_dead_tuples and live tuples for vacuum logging, determine
1384 * if the block can later be truncated, and accumulate the details of
1385 * remaining LP_DEAD line pointers on the page into dead_items. These
1386 * dead items include those pruned by lazy_scan_prune() as well as
1387 * line pointers previously marked LP_DEAD.
1388 */
1389 if (got_cleanup_lock)
1390 ndeleted = lazy_scan_prune(vacrel, buf, blkno, page,
1391 vmbuffer,
1392 blk_info & VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM,
1393 &has_lpdead_items, &vm_page_frozen);
1394
1395 /*
1396 * Count an eagerly scanned page as a failure or a success.
1397 *
1398 * Only lazy_scan_prune() freezes pages, so if we didn't get the
1399 * cleanup lock, we won't have frozen the page. However, we only count
1400 * pages that were too new to require freezing as eager freeze
1401 * failures.
1402 *
1403 * We could gather more information from lazy_scan_noprune() about
1404 * whether or not there were tuples with XIDs or MXIDs older than the
1405 * FreezeLimit or MultiXactCutoff. However, for simplicity, we simply
1406 * exclude pages skipped due to cleanup lock contention from eager
1407 * freeze algorithm caps.
1408 */
1409 if (got_cleanup_lock &&
1410 (blk_info & VAC_BLK_WAS_EAGER_SCANNED))
1411 {
1412 /* Aggressive vacuums do not eager scan. */
1413 Assert(!vacrel->aggressive);
1414
1415 if (vm_page_frozen)
1416 {
1417 if (vacrel->eager_scan_remaining_successes > 0)
1418 vacrel->eager_scan_remaining_successes--;
1419
1420 if (vacrel->eager_scan_remaining_successes == 0)
1421 {
1422 /*
1423 * Report only once that we disabled eager scanning. We
1424 * may eagerly read ahead blocks in excess of the success
1425 * or failure caps before attempting to freeze them, so we
1426 * could reach here even after disabling additional eager
1427 * scanning.
1428 */
1429 if (vacrel->eager_scan_max_fails_per_region > 0)
1430 ereport(vacrel->verbose ? INFO : DEBUG2,
1431 (errmsg("disabling eager scanning after freezing %u eagerly scanned blocks of relation \"%s.%s.%s\"",
1432 orig_eager_scan_success_limit,
1433 vacrel->dbname, vacrel->relnamespace,
1434 vacrel->relname)));
1435
1436 /*
1437 * If we hit our success cap, permanently disable eager
1438 * scanning by setting the other eager scan management
1439 * fields to their disabled values.
1440 */
1441 vacrel->eager_scan_remaining_fails = 0;
1442 vacrel->next_eager_scan_region_start = InvalidBlockNumber;
1443 vacrel->eager_scan_max_fails_per_region = 0;
1444 }
1445 }
1446 else if (vacrel->eager_scan_remaining_fails > 0)
1447 vacrel->eager_scan_remaining_fails--;
1448 }
1449
1450 /*
1451 * Now drop the buffer lock and, potentially, update the FSM.
1452 *
1453 * Our goal is to update the freespace map the last time we touch the
1454 * page. If we'll process a block in the second pass, we may free up
1455 * additional space on the page, so it is better to update the FSM
1456 * after the second pass. If the relation has no indexes, or if index
1457 * vacuuming is disabled, there will be no second heap pass; if this
1458 * particular page has no dead items, the second heap pass will not
1459 * touch this page. So, in those cases, update the FSM now.
1460 *
1461 * Note: In corner cases, it's possible to miss updating the FSM
1462 * entirely. If index vacuuming is currently enabled, we'll skip the
1463 * FSM update now. But if failsafe mode is later activated, or there
1464 * are so few dead tuples that index vacuuming is bypassed, there will
1465 * also be no opportunity to update the FSM later, because we'll never
1466 * revisit this page. Since updating the FSM is desirable but not
1467 * absolutely required, that's OK.
1468 */
1469 if (vacrel->nindexes == 0
1470 || !vacrel->do_index_vacuuming
1471 || !has_lpdead_items)
1472 {
1473 Size freespace = PageGetHeapFreeSpace(page);
1474
1475 UnlockReleaseBuffer(buf);
1476 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1477
1478 /*
1479 * Periodically perform FSM vacuuming to make newly-freed space
1480 * visible on upper FSM pages. This is done after vacuuming if the
1481 * table has indexes. There will only be newly-freed space if we
1482 * held the cleanup lock and lazy_scan_prune() was called.
1483 */
1484 if (got_cleanup_lock && vacrel->nindexes == 0 && ndeleted > 0 &&
1485 blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
1486 {
1487 FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
1488 blkno);
1489 next_fsm_block_to_vacuum = blkno;
1490 }
1491 }
1492 else
1493 UnlockReleaseBuffer(buf);
1494 }
1495
1496 vacrel->blkno = InvalidBlockNumber;
1497 if (BufferIsValid(vmbuffer))
1498 ReleaseBuffer(vmbuffer);
1499
1500 /*
1501 * Report that everything is now scanned. We never skip scanning the last
1502 * block in the relation, so we can pass rel_pages here.
1503 */
1504 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED,
1505 rel_pages);
1506
1507 /* now we can compute the new value for pg_class.reltuples */
1508 vacrel->new_live_tuples = vac_estimate_reltuples(vacrel->rel, rel_pages,
1509 vacrel->scanned_pages,
1510 vacrel->live_tuples);
1511
1512 /*
1513 * Also compute the total number of surviving heap entries. In the
1514 * (unlikely) scenario that new_live_tuples is -1, take it as zero.
1515 */
1516 vacrel->new_rel_tuples =
1517 Max(vacrel->new_live_tuples, 0) + vacrel->recently_dead_tuples +
1518 vacrel->missed_dead_tuples;
1519
1520 read_stream_end(stream);
1521
1522 /*
1523 * Do index vacuuming (call each index's ambulkdelete routine), then do
1524 * related heap vacuuming
1525 */
1526 if (vacrel->dead_items_info->num_items > 0)
1527 lazy_vacuum(vacrel);
1528
1529 /*
1530 * Vacuum the remainder of the Free Space Map. We must do this whether or
1531 * not there were indexes, and whether or not we bypassed index vacuuming.
1532 * We can pass rel_pages here because we never skip scanning the last
1533 * block of the relation.
1534 */
1535 if (rel_pages > next_fsm_block_to_vacuum)
1536 FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum, rel_pages);
1537
1538 /* report all blocks vacuumed */
1540
1541 /* Do final index cleanup (call each index's amvacuumcleanup routine) */
1542 if (vacrel->nindexes > 0 && vacrel->do_index_cleanup)
1543 lazy_cleanup_all_indexes(vacrel);
1544}
void CheckBufferIsPinnedOnce(Buffer buffer)
Definition: bufmgr.c:5619
void LockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:5652
bool ConditionalLockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:5820
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:196
Size PageGetHeapFreeSpace(const PageData *page)
Definition: bufpage.c:990
void FreeSpaceMapVacuumRange(Relation rel, BlockNumber start, BlockNumber end)
Definition: freespace.c:377
void RecordPageWithFreeSpace(Relation rel, BlockNumber heapBlk, Size spaceAvail)
Definition: freespace.c:194
#define PROGRESS_VACUUM_PHASE_SCAN_HEAP
Definition: progress.h:34
#define PROGRESS_VACUUM_TOTAL_HEAP_BLKS
Definition: progress.h:22
#define PROGRESS_VACUUM_MAX_DEAD_TUPLE_BYTES
Definition: progress.h:26
#define PROGRESS_VACUUM_HEAP_BLKS_SCANNED
Definition: progress.h:23
#define PROGRESS_VACUUM_HEAP_BLKS_VACUUMED
Definition: progress.h:24
Buffer read_stream_next_buffer(ReadStream *stream, void **per_buffer_data)
Definition: read_stream.c:791
ReadStream * read_stream_begin_relation(int flags, BufferAccessStrategy strategy, Relation rel, ForkNumber forknum, ReadStreamBlockNumberCB callback, void *callback_private_data, size_t per_buffer_data_size)
Definition: read_stream.c:737
void read_stream_end(ReadStream *stream)
Definition: read_stream.c:1089
#define READ_STREAM_MAINTENANCE
Definition: read_stream.h:28
BlockNumber blkno
Definition: vacuumlazy.c:294
void vacuum_delay_point(bool is_analyze)
Definition: vacuum.c:2423
double vac_estimate_reltuples(Relation relation, BlockNumber total_pages, BlockNumber scanned_pages, double scanned_tuples)
Definition: vacuum.c:1333
static int lazy_scan_prune(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, Buffer vmbuffer, bool all_visible_according_to_vm, bool *has_lpdead_items, bool *vm_page_frozen)
Definition: vacuumlazy.c:1943
static BlockNumber heap_vac_scan_next_block(ReadStream *stream, void *callback_private_data, void *per_buffer_data)
Definition: vacuumlazy.c:1571
static void lazy_vacuum(LVRelState *vacrel)
Definition: vacuumlazy.c:2455
static void lazy_cleanup_all_indexes(LVRelState *vacrel)
Definition: vacuumlazy.c:3008
static bool lazy_scan_noprune(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool *has_lpdead_items)
Definition: vacuumlazy.c:2244
static bool lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, bool sharelock, Buffer vmbuffer)
Definition: vacuumlazy.c:1808
#define FAILSAFE_EVERY_PAGES
Definition: vacuumlazy.c:192
#define VACUUM_FSM_EVERY_PAGES
Definition: vacuumlazy.c:201
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)

References LVRelState::aggressive, Assert(), LVRelState::blkno, LVRelState::bstrategy, buf, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage(), BufferIsValid(), CheckBufferIsPinnedOnce(), ConditionalLockBufferForCleanup(), LVRelState::consider_bypass_optimization, LVRelState::current_block, LVRelState::dbname, LVRelState::dead_items, LVRelState::dead_items_info, DEBUG2, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, LVRelState::eager_scan_max_fails_per_region, LVRelState::eager_scan_remaining_fails, LVRelState::eager_scan_remaining_successes, LVRelState::eager_scanned_pages, ereport, errmsg(), FAILSAFE_EVERY_PAGES, FreeSpaceMapVacuumRange(), heap_vac_scan_next_block(), INFO, InvalidBlockNumber, InvalidBuffer, InvalidOffsetNumber, lazy_check_wraparound_failsafe(), lazy_cleanup_all_indexes(), lazy_scan_new_or_empty(), lazy_scan_noprune(), lazy_scan_prune(), lazy_vacuum(), LVRelState::live_tuples, LockBuffer(), LockBufferForCleanup(), MAIN_FORKNUM, Max, VacDeadItemsInfo::max_bytes, LVRelState::missed_dead_tuples, LVRelState::new_live_tuples, LVRelState::new_rel_tuples, LVRelState::next_eager_scan_region_start, LVRelState::next_unskippable_allvis, LVRelState::next_unskippable_block, LVRelState::next_unskippable_eager_scanned, LVRelState::next_unskippable_vmbuffer, LVRelState::nindexes, VacDeadItemsInfo::num_items, PageGetHeapFreeSpace(), pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_HEAP_BLKS_SCANNED, PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, PROGRESS_VACUUM_MAX_DEAD_TUPLE_BYTES, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_SCAN_HEAP, PROGRESS_VACUUM_TOTAL_HEAP_BLKS, read_stream_begin_relation(), read_stream_end(), READ_STREAM_MAINTENANCE, read_stream_next_buffer(), LVRelState::recently_dead_tuples, RecordPageWithFreeSpace(), LVRelState::rel, LVRelState::rel_pages, ReleaseBuffer(), LVRelState::relname, LVRelState::relnamespace, LVRelState::scanned_pages, TidStoreMemoryUsage(), UnlockReleaseBuffer(), update_vacuum_error_info(), VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM, VAC_BLK_WAS_EAGER_SCANNED, vac_estimate_reltuples(), vacuum_delay_point(), VACUUM_ERRCB_PHASE_SCAN_HEAP, VACUUM_FSM_EVERY_PAGES, LVRelState::verbose, and visibilitymap_pin().

Referenced by heap_vacuum_rel().
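The mid-scan calls to lazy_vacuum() are driven purely by the memory budget for dead item identifiers: once the TID store approaches max_bytes, a round of index plus heap vacuuming empties it and the heap scan resumes. The standalone sketch below models that batching behaviour only; the budget, per-TID size, and per-page dead-item counts are made-up values, not taken from PostgreSQL.

#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	const size_t max_bytes = 64 * 1024;	/* stand-in for the dead-items budget */
	const size_t item_size = 6;			/* rough size of one TID */
	size_t		used_bytes = 0;
	long		num_items = 0;
	int			index_passes = 0;

	for (long blkno = 0; blkno < 100000; blkno++)
	{
		int			lpdead_items = rand() % 4;	/* dead items found on this page */

		/* flush before this page if the accumulator has outgrown the budget */
		if (num_items > 0 && used_bytes > max_bytes)
		{
			index_passes++;			/* one round of index + heap vacuuming */
			used_bytes = 0;
			num_items = 0;
		}

		used_bytes += (size_t) lpdead_items * item_size;
		num_items += lpdead_items;
	}

	if (num_items > 0)
		index_passes++;				/* final round after the scan */

	printf("performed %d index vacuuming passes\n", index_passes);
	return 0;
}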

◆ lazy_scan_new_or_empty()

static bool lazy_scan_new_or_empty ( LVRelState vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
bool  sharelock,
Buffer  vmbuffer 
)
static

Definition at line 1808 of file vacuumlazy.c.

1810{
1811 Size freespace;
1812
1813 if (PageIsNew(page))
1814 {
1815 /*
1816 * All-zeroes pages can be left over if either a backend extends the
1817 * relation by a single page, but crashes before the newly initialized
1818 * page has been written out, or when bulk-extending the relation
1819 * (which creates a number of empty pages at the tail end of the
1820 * relation), and then enters them into the FSM.
1821 *
1822 * Note we do not enter the page into the visibilitymap. That has the
1823 * downside that we repeatedly visit this page in subsequent vacuums,
1824 * but otherwise we'll never discover the space on a promoted standby.
1825 * The harm of repeated checking ought to normally not be too bad. The
1826 * space usually should be used at some point, otherwise there
1827 * wouldn't be any regular vacuums.
1828 *
1829 * Make sure these pages are in the FSM, to ensure they can be reused.
1830 * Do that by testing if there's any space recorded for the page. If
1831 * not, enter it. We do so after releasing the lock on the heap page,
1832 * the FSM is approximate, after all.
1833 */
1834 UnlockReleaseBuffer(buf);
1835
1836 if (GetRecordedFreeSpace(vacrel->rel, blkno) == 0)
1837 {
1838 freespace = BLCKSZ - SizeOfPageHeaderData;
1839
1840 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1841 }
1842
1843 return true;
1844 }
1845
1846 if (PageIsEmpty(page))
1847 {
1848 /*
1849 * It seems likely that caller will always be able to get a cleanup
1850 * lock on an empty page. But don't take any chances -- escalate to
1851 * an exclusive lock (still don't need a cleanup lock, though).
1852 */
1853 if (sharelock)
1854 {
1855 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
1856 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
1857
1858 if (!PageIsEmpty(page))
1859 {
1860 /* page isn't new or empty -- keep lock and pin for now */
1861 return false;
1862 }
1863 }
1864 else
1865 {
1866 /* Already have a full cleanup lock (which is more than enough) */
1867 }
1868
1869 /*
1870 * Unlike new pages, empty pages are always set all-visible and
1871 * all-frozen.
1872 */
1873 if (!PageIsAllVisible(page))
1874 {
1875 START_CRIT_SECTION();
1876
1877 /* mark buffer dirty before writing a WAL record */
1878 MarkBufferDirty(buf);
1879
1880 /*
1881 * It's possible that another backend has extended the heap,
1882 * initialized the page, and then failed to WAL-log the page due
1883 * to an ERROR. Since heap extension is not WAL-logged, recovery
1884 * might try to replay our record setting the page all-visible and
1885 * find that the page isn't initialized, which will cause a PANIC.
1886 * To prevent that, check whether the page has been previously
1887 * WAL-logged, and if not, do that now.
1888 */
1889 if (RelationNeedsWAL(vacrel->rel) &&
1890 PageGetLSN(page) == InvalidXLogRecPtr)
1891 log_newpage_buffer(buf, true);
1892
1893 PageSetAllVisible(page);
1894 visibilitymap_set(vacrel->rel, blkno, buf,
1895 InvalidXLogRecPtr,
1896 vmbuffer, InvalidTransactionId,
1897 VISIBILITYMAP_ALL_VISIBLE |
1898 VISIBILITYMAP_ALL_FROZEN);
1899 END_CRIT_SECTION();
1900
1901 /* Count the newly all-frozen pages for logging */
1902 vacrel->vm_new_visible_pages++;
1903 vacrel->vm_new_visible_frozen_pages++;
1904 }
1905
1906 freespace = PageGetHeapFreeSpace(page);
1907 UnlockReleaseBuffer(buf);
1908 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
1909 return true;
1910 }
1911
1912 /* page isn't new or empty -- keep lock and pin */
1913 return false;
1914}
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:2921
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:198
static bool PageIsAllVisible(const PageData *page)
Definition: bufpage.h:429
#define SizeOfPageHeaderData
Definition: bufpage.h:217
static void PageSetAllVisible(Page page)
Definition: bufpage.h:434
static XLogRecPtr PageGetLSN(const PageData *page)
Definition: bufpage.h:386
Size GetRecordedFreeSpace(Relation rel, BlockNumber heapBlk)
Definition: freespace.c:244
#define START_CRIT_SECTION()
Definition: miscadmin.h:149
#define END_CRIT_SECTION()
Definition: miscadmin.h:151
#define RelationNeedsWAL(relation)
Definition: rel.h:637
uint8 visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid, uint8 flags)
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28
XLogRecPtr log_newpage_buffer(Buffer buffer, bool page_std)
Definition: xloginsert.c:1249

References buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, END_CRIT_SECTION, GetRecordedFreeSpace(), InvalidTransactionId, InvalidXLogRecPtr, LockBuffer(), log_newpage_buffer(), MarkBufferDirty(), PageGetHeapFreeSpace(), PageGetLSN(), PageIsAllVisible(), PageIsEmpty(), PageIsNew(), PageSetAllVisible(), RecordPageWithFreeSpace(), LVRelState::rel, RelationNeedsWAL, SizeOfPageHeaderData, START_CRIT_SECTION, UnlockReleaseBuffer(), VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_set(), LVRelState::vm_new_visible_frozen_pages, and LVRelState::vm_new_visible_pages.

Referenced by lazy_scan_heap().
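For an all-zero page the only action taken is recording its free space in the FSM, computed as the block size minus the page header. A standalone illustration follows; BLCKSZ and the header size are assumed default-build values (8192 and 24 bytes), not read from a real installation.

#include <stdio.h>

#define BLCKSZ 8192					/* assumed default block size */
#define SIZE_OF_PAGE_HEADER_DATA 24	/* assumed sizeof(PageHeaderData) */

int
main(void)
{
	size_t freespace = BLCKSZ - SIZE_OF_PAGE_HEADER_DATA;

	/* a new page has no line pointers or tuples yet, only the header */
	printf("free space recorded for a new page: %zu bytes\n", freespace);
	return 0;
}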

◆ lazy_scan_noprune()

static bool lazy_scan_noprune ( LVRelState vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
bool *  has_lpdead_items 
)
static

Definition at line 2244 of file vacuumlazy.c.

2249{
2250 OffsetNumber offnum,
2251 maxoff;
2252 int lpdead_items,
2253 live_tuples,
2254 recently_dead_tuples,
2255 missed_dead_tuples;
2256 bool hastup;
2257 HeapTupleHeader tupleheader;
2258 TransactionId NoFreezePageRelfrozenXid = vacrel->NewRelfrozenXid;
2259 MultiXactId NoFreezePageRelminMxid = vacrel->NewRelminMxid;
2260 OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
2261
2262 Assert(BufferGetBlockNumber(buf) == blkno);
2263
2264 hastup = false; /* for now */
2265
2266 lpdead_items = 0;
2267 live_tuples = 0;
2268 recently_dead_tuples = 0;
2269 missed_dead_tuples = 0;
2270
2271 maxoff = PageGetMaxOffsetNumber(page);
2272 for (offnum = FirstOffsetNumber;
2273 offnum <= maxoff;
2274 offnum = OffsetNumberNext(offnum))
2275 {
2276 ItemId itemid;
2277 HeapTupleData tuple;
2278
2279 vacrel->offnum = offnum;
2280 itemid = PageGetItemId(page, offnum);
2281
2282 if (!ItemIdIsUsed(itemid))
2283 continue;
2284
2285 if (ItemIdIsRedirected(itemid))
2286 {
2287 hastup = true;
2288 continue;
2289 }
2290
2291 if (ItemIdIsDead(itemid))
2292 {
2293 /*
2294 * Deliberately don't set hastup=true here. See same point in
2295 * lazy_scan_prune for an explanation.
2296 */
2297 deadoffsets[lpdead_items++] = offnum;
2298 continue;
2299 }
2300
2301 hastup = true; /* page prevents rel truncation */
2302 tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
2303 if (heap_tuple_should_freeze(tupleheader, &vacrel->cutoffs,
2304 &NoFreezePageRelfrozenXid,
2305 &NoFreezePageRelminMxid))
2306 {
2307 /* Tuple with XID < FreezeLimit (or MXID < MultiXactCutoff) */
2308 if (vacrel->aggressive)
2309 {
2310 /*
2311 * Aggressive VACUUMs must always be able to advance rel's
2312 * relfrozenxid to a value >= FreezeLimit (and be able to
2313 * advance rel's relminmxid to a value >= MultiXactCutoff).
2314 * The ongoing aggressive VACUUM won't be able to do that
2315 * unless it can freeze an XID (or MXID) from this tuple now.
2316 *
2317 * The only safe option is to have caller perform processing
2318 * of this page using lazy_scan_prune. Caller might have to
2319 * wait a while for a cleanup lock, but it can't be helped.
2320 */
2321 vacrel->offnum = InvalidOffsetNumber;
2322 return false;
2323 }
2324
2325 /*
2326 * Non-aggressive VACUUMs are under no obligation to advance
2327 * relfrozenxid (even by one XID). We can be much laxer here.
2328 *
2329 * Currently we always just accept an older final relfrozenxid
2330 * and/or relminmxid value. We never make caller wait or work a
2331 * little harder, even when it likely makes sense to do so.
2332 */
2333 }
2334
2335 ItemPointerSet(&(tuple.t_self), blkno, offnum);
2336 tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
2337 tuple.t_len = ItemIdGetLength(itemid);
2338 tuple.t_tableOid = RelationGetRelid(vacrel->rel);
2339
2340 switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->cutoffs.OldestXmin,
2341 buf))
2342 {
2343 case HEAPTUPLE_DELETE_IN_PROGRESS:
2344 case HEAPTUPLE_LIVE:
2345
2346 /*
2347 * Count both cases as live, just like lazy_scan_prune
2348 */
2349 live_tuples++;
2350
2351 break;
2352 case HEAPTUPLE_DEAD:
2353
2354 /*
2355 * There is some useful work for pruning to do, that won't be
2356 * done due to failure to get a cleanup lock.
2357 */
2358 missed_dead_tuples++;
2359 break;
2360 case HEAPTUPLE_RECENTLY_DEAD:
2361
2362 /*
2363 * Count in recently_dead_tuples, just like lazy_scan_prune
2364 */
2365 recently_dead_tuples++;
2366 break;
2367 case HEAPTUPLE_INSERT_IN_PROGRESS:
2368
2369 /*
2370 * Do not count these rows as live, just like lazy_scan_prune
2371 */
2372 break;
2373 default:
2374 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
2375 break;
2376 }
2377 }
2378
2379 vacrel->offnum = InvalidOffsetNumber;
2380
2381 /*
2382 * By here we know for sure that caller can put off freezing and pruning
2383 * this particular page until the next VACUUM. Remember its details now.
2384 * (lazy_scan_prune expects a clean slate, so we have to do this last.)
2385 */
2386 vacrel->NewRelfrozenXid = NoFreezePageRelfrozenXid;
2387 vacrel->NewRelminMxid = NoFreezePageRelminMxid;
2388
2389 /* Save any LP_DEAD items found on the page in dead_items */
2390 if (vacrel->nindexes == 0)
2391 {
2392 /* Using one-pass strategy (since table has no indexes) */
2393 if (lpdead_items > 0)
2394 {
2395 /*
2396 * Perfunctory handling for the corner case where a single pass
2397 * strategy VACUUM cannot get a cleanup lock, and it turns out
2398 * that there is one or more LP_DEAD items: just count the LP_DEAD
2399 * items as missed_dead_tuples instead. (This is a bit dishonest,
2400 * but it beats having to maintain specialized heap vacuuming code
2401 * forever, for vanishingly little benefit.)
2402 */
2403 hastup = true;
2404 missed_dead_tuples += lpdead_items;
2405 }
2406 }
2407 else if (lpdead_items > 0)
2408 {
2409 /*
2410 * Page has LP_DEAD items, and so any references/TIDs that remain in
2411 * indexes will be deleted during index vacuuming (and then marked
2412 * LP_UNUSED in the heap)
2413 */
2414 vacrel->lpdead_item_pages++;
2415
2416 dead_items_add(vacrel, blkno, deadoffsets, lpdead_items);
2417
2418 vacrel->lpdead_items += lpdead_items;
2419 }
2420
2421 /*
2422 * Finally, add relevant page-local counts to whole-VACUUM counts
2423 */
2424 vacrel->live_tuples += live_tuples;
2425 vacrel->recently_dead_tuples += recently_dead_tuples;
2426 vacrel->missed_dead_tuples += missed_dead_tuples;
2427 if (missed_dead_tuples > 0)
2428 vacrel->missed_dead_pages++;
2429
2430 /* Can't truncate this page */
2431 if (hastup)
2432 vacrel->nonempty_pages = blkno + 1;
2433
2434 /* Did we find LP_DEAD items? */
2435 *has_lpdead_items = (lpdead_items > 0);
2436
2437 /* Caller won't need to call lazy_scan_prune with same page */
2438 return true;
2439}
TransactionId MultiXactId
Definition: c.h:667
bool heap_tuple_should_freeze(HeapTupleHeader tuple, const struct VacuumCutoffs *cutoffs, TransactionId *NoFreezePageRelfrozenXid, MultiXactId *NoFreezePageRelminMxid)
Definition: heapam.c:7865
#define MaxHeapTuplesPerPage
Definition: htup_details.h:624
static void dead_items_add(LVRelState *vacrel, BlockNumber blkno, OffsetNumber *offsets, int num_offsets)
Definition: vacuumlazy.c:3543

References LVRelState::aggressive, Assert(), buf, BufferGetBlockNumber(), LVRelState::cutoffs, dead_items_add(), elog, ERROR, FirstOffsetNumber, heap_tuple_should_freeze(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleSatisfiesVacuum(), InvalidOffsetNumber, ItemIdGetLength, ItemIdIsDead, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet(), LVRelState::live_tuples, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, MaxHeapTuplesPerPage, LVRelState::missed_dead_pages, LVRelState::missed_dead_tuples, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, LVRelState::nindexes, LVRelState::nonempty_pages, LVRelState::offnum, OffsetNumberNext, VacuumCutoffs::OldestXmin, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), LVRelState::recently_dead_tuples, LVRelState::rel, RelationGetRelid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, and HeapTupleData::t_tableOid.

Referenced by lazy_scan_heap().
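Without a cleanup lock the page cannot be defragmented, so lazy_scan_noprune() only classifies the existing line pointers: redirects and normal tuples make the page non-truncatable, while LP_DEAD slots are remembered for later index vacuuming. The standalone sketch below reproduces that counting-only pass over a toy line pointer array; the enum and the visibility handling are deliberately simplified stand-ins.

#include <stdio.h>

enum lp_state { LP_UNUSED_T, LP_REDIRECT_T, LP_DEAD_T, LP_NORMAL_T };

int
main(void)
{
	enum lp_state items[] = {
		LP_NORMAL_T, LP_DEAD_T, LP_UNUSED_T, LP_REDIRECT_T, LP_NORMAL_T, LP_DEAD_T
	};
	int			nitems = (int) (sizeof(items) / sizeof(items[0]));
	int			lpdead_items = 0, live_tuples = 0;
	int			deadoffsets[16];
	int			hastup = 0;

	for (int offnum = 0; offnum < nitems; offnum++)
	{
		switch (items[offnum])
		{
			case LP_UNUSED_T:
				break;					/* nothing to record */
			case LP_REDIRECT_T:
				hastup = 1;				/* page can't be truncated away */
				break;
			case LP_DEAD_T:
				deadoffsets[lpdead_items++] = offnum;	/* remember for index vacuuming */
				break;
			case LP_NORMAL_T:
				hastup = 1;
				live_tuples++;			/* visibility checks omitted here */
				break;
		}
	}

	printf("live=%d dead item ids=%d hastup=%d (first dead offset=%d)\n",
		   live_tuples, lpdead_items, hastup, lpdead_items > 0 ? deadoffsets[0] : -1);
	return 0;
}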

◆ lazy_scan_prune()

static int lazy_scan_prune ( LVRelState vacrel,
Buffer  buf,
BlockNumber  blkno,
Page  page,
Buffer  vmbuffer,
bool  all_visible_according_to_vm,
bool *  has_lpdead_items,
bool *  vm_page_frozen 
)
static

Definition at line 1943 of file vacuumlazy.c.

1951{
1952 Relation rel = vacrel->rel;
1953 PruneFreezeResult presult;
1954 int prune_options = 0;
1955
1956 Assert(BufferGetBlockNumber(buf) == blkno);
1957
1958 /*
1959 * Prune all HOT-update chains and potentially freeze tuples on this page.
1960 *
1961 * If the relation has no indexes, we can immediately mark would-be dead
1962 * items LP_UNUSED.
1963 *
1964 * The number of tuples removed from the page is returned in
1965 * presult.ndeleted. It should not be confused with presult.lpdead_items;
1966 * presult.lpdead_items's final value can be thought of as the number of
1967 * tuples that were deleted from indexes.
1968 *
1969 * We will update the VM after collecting LP_DEAD items and freezing
1970 * tuples. Pruning will have determined whether or not the page is
1971 * all-visible.
1972 */
1973 prune_options = HEAP_PAGE_PRUNE_FREEZE;
1974 if (vacrel->nindexes == 0)
1975 prune_options |= HEAP_PAGE_PRUNE_MARK_UNUSED_NOW;
1976
1977 heap_page_prune_and_freeze(rel, buf, vacrel->vistest, prune_options,
1978 &vacrel->cutoffs, &presult, PRUNE_VACUUM_SCAN,
1979 &vacrel->offnum,
1980 &vacrel->NewRelfrozenXid, &vacrel->NewRelminMxid);
1981
1984
1985 if (presult.nfrozen > 0)
1986 {
1987 /*
1988 * We don't increment the new_frozen_tuple_pages instrumentation
1989 * counter when nfrozen == 0, since it only counts pages with newly
1990 * frozen tuples (don't confuse that with pages newly set all-frozen
1991 * in VM).
1992 */
1993 vacrel->new_frozen_tuple_pages++;
1994 }
1995
1996 /*
1997 * VACUUM will call heap_page_is_all_visible() during the second pass over
1998 * the heap to determine all_visible and all_frozen for the page -- this
1999 * is a specialized version of the logic from this function. Now that
2000 * we've finished pruning and freezing, make sure that we're in total
2001 * agreement with heap_page_is_all_visible() using an assertion.
2002 */
2003#ifdef USE_ASSERT_CHECKING
2004 /* Note that all_frozen value does not matter when !all_visible */
2005 if (presult.all_visible)
2006 {
2007 TransactionId debug_cutoff;
2008 bool debug_all_frozen;
2009
2010 Assert(presult.lpdead_items == 0);
2011
2012 if (!heap_page_is_all_visible(vacrel, buf,
2013 &debug_cutoff, &debug_all_frozen))
2014 Assert(false);
2015
2016 Assert(presult.all_frozen == debug_all_frozen);
2017
2018 Assert(!TransactionIdIsValid(debug_cutoff) ||
2019 debug_cutoff == presult.vm_conflict_horizon);
2020 }
2021#endif
2022
2023 /*
2024 * Now save details of the LP_DEAD items from the page in vacrel
2025 */
2026 if (presult.lpdead_items > 0)
2027 {
2028 vacrel->lpdead_item_pages++;
2029
2030 /*
2031 * deadoffsets are collected incrementally in
2032 * heap_page_prune_and_freeze() as each dead line pointer is recorded,
2033 * with an indeterminate order, but dead_items_add requires them to be
2034 * sorted.
2035 */
2036 qsort(presult.deadoffsets, presult.lpdead_items, sizeof(OffsetNumber),
2037 cmpOffsetNumbers);
2038
2039 dead_items_add(vacrel, blkno, presult.deadoffsets, presult.lpdead_items);
2040 }
2041
2042 /* Finally, add page-local counts to whole-VACUUM counts */
2043 vacrel->tuples_deleted += presult.ndeleted;
2044 vacrel->tuples_frozen += presult.nfrozen;
2045 vacrel->lpdead_items += presult.lpdead_items;
2046 vacrel->live_tuples += presult.live_tuples;
2047 vacrel->recently_dead_tuples += presult.recently_dead_tuples;
2048
2049 /* Can't truncate this page */
2050 if (presult.hastup)
2051 vacrel->nonempty_pages = blkno + 1;
2052
2053 /* Did we find LP_DEAD items? */
2054 *has_lpdead_items = (presult.lpdead_items > 0);
2055
2056 Assert(!presult.all_visible || !(*has_lpdead_items));
2057
2058 /*
2059 * Handle setting visibility map bit based on information from the VM (as
2060 * of last heap_vac_scan_next_block() call), and from all_visible and
2061 * all_frozen variables
2062 */
2063 if (!all_visible_according_to_vm && presult.all_visible)
2064 {
2065 uint8 old_vmbits;
2066 uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
2067
2068 if (presult.all_frozen)
2069 {
2071 flags |= VISIBILITYMAP_ALL_FROZEN;
2072 }
2073
2074 /*
2075 * It should never be the case that the visibility map page is set
2076 * while the page-level bit is clear, but the reverse is allowed (if
2077 * checksums are not enabled). Regardless, set both bits so that we
2078 * get back in sync.
2079 *
2080 * NB: If the heap page is all-visible but the VM bit is not set, we
2081 * don't need to dirty the heap page. However, if checksums are
2082 * enabled, we do need to make sure that the heap page is dirtied
2083 * before passing it to visibilitymap_set(), because it may be logged.
2084 * Given that this situation should only happen in rare cases after a
2085 * crash, it is not worth optimizing.
2086 */
2087 PageSetAllVisible(page);
2088 MarkBufferDirty(buf);
2089 old_vmbits = visibilitymap_set(vacrel->rel, blkno, buf,
2090 InvalidXLogRecPtr,
2091 vmbuffer, presult.vm_conflict_horizon,
2092 flags);
2093
2094 /*
2095 * If the page wasn't already set all-visible and/or all-frozen in the
2096 * VM, count it as newly set for logging.
2097 */
2098 if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
2099 {
2100 vacrel->vm_new_visible_pages++;
2101 if (presult.all_frozen)
2102 {
2103 vacrel->vm_new_visible_frozen_pages++;
2104 *vm_page_frozen = true;
2105 }
2106 }
2107 else if ((old_vmbits & VISIBILITYMAP_ALL_FROZEN) == 0 &&
2108 presult.all_frozen)
2109 {
2110 vacrel->vm_new_frozen_pages++;
2111 *vm_page_frozen = true;
2112 }
2113 }
2114
2115 /*
2116 * As of PostgreSQL 9.2, the visibility map bit should never be set if the
2117 * page-level bit is clear. However, it's possible that the bit got
2118 * cleared after heap_vac_scan_next_block() was called, so we must recheck
2119 * with buffer lock before concluding that the VM is corrupt.
2120 */
2121 else if (all_visible_according_to_vm && !PageIsAllVisible(page) &&
2122 visibilitymap_get_status(vacrel->rel, blkno, &vmbuffer) != 0)
2123 {
2126 errmsg("page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
2127 vacrel->relname, blkno)));
2128
2129 visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
2130 VISIBILITYMAP_VALID_BITS);
2131 }
2132
2133 /*
2134 * It's possible for the value returned by
2135 * GetOldestNonRemovableTransactionId() to move backwards, so it's not
2136 * wrong for us to see tuples that appear to not be visible to everyone
2137 * yet, while PD_ALL_VISIBLE is already set. The real safe xmin value
2138 * never moves backwards, but GetOldestNonRemovableTransactionId() is
2139 * conservative and sometimes returns a value that's unnecessarily small,
2140 * so if we see that contradiction it just means that the tuples that we
2141 * think are not visible to everyone yet actually are, and the
2142 * PD_ALL_VISIBLE flag is correct.
2143 *
2144 * There should never be LP_DEAD items on a page with PD_ALL_VISIBLE set,
2145 * however.
2146 */
2147 else if (presult.lpdead_items > 0 && PageIsAllVisible(page))
2148 {
2149 ereport(WARNING,
2150 (errcode(ERRCODE_DATA_CORRUPTED),
2151 errmsg("page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u",
2152 vacrel->relname, blkno)));
2153
2154 PageClearAllVisible(page);
2155 MarkBufferDirty(buf);
2156 visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
2157 VISIBILITYMAP_VALID_BITS);
2158 }
2159
2160 /*
2161 * If the all-visible page is all-frozen but not marked as such yet, mark
2162 * it as all-frozen. Note that all_frozen is only valid if all_visible is
2163 * true, so we must check both all_visible and all_frozen.
2164 */
2165 else if (all_visible_according_to_vm && presult.all_visible &&
2166 presult.all_frozen && !VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
2167 {
2168 uint8 old_vmbits;
2169
2170 /*
2171 * Avoid relying on all_visible_according_to_vm as a proxy for the
2172 * page-level PD_ALL_VISIBLE bit being set, since it might have become
2173 * stale -- even when all_visible is set
2174 */
2175 if (!PageIsAllVisible(page))
2176 {
2177 PageSetAllVisible(page);
2178 MarkBufferDirty(buf);
2179 }
2180
2181 /*
2182 * Set the page all-frozen (and all-visible) in the VM.
2183 *
2184 * We can pass InvalidTransactionId as our cutoff_xid, since a
2185 * snapshotConflictHorizon sufficient to make everything safe for REDO
2186 * was logged when the page's tuples were frozen.
2187 */
2189 old_vmbits = visibilitymap_set(vacrel->rel, blkno, buf,
2190 InvalidXLogRecPtr,
2191 vmbuffer, InvalidTransactionId,
2192 VISIBILITYMAP_ALL_VISIBLE |
2193 VISIBILITYMAP_ALL_FROZEN);
2194
2195 /*
2196 * The page was likely already set all-visible in the VM. However,
2197 * there is a small chance that it was modified sometime between
2198 * setting all_visible_according_to_vm and checking the visibility
2199 * during pruning. Check the return value of old_vmbits anyway to
2200 * ensure the visibility map counters used for logging are accurate.
2201 */
2202 if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
2203 {
2204 vacrel->vm_new_visible_pages++;
2205 vacrel->vm_new_visible_frozen_pages++;
2206 *vm_page_frozen = true;
2207 }
2208
2209 /*
2210 * We already checked that the page was not set all-frozen in the VM
2211 * above, so we don't need to test the value of old_vmbits.
2212 */
2213 else
2214 {
2215 vacrel->vm_new_frozen_pages++;
2216 *vm_page_frozen = true;
2217 }
2218 }
2219
2220 return presult.ndeleted;
2221}
static void PageClearAllVisible(Page page)
Definition: bufpage.h:439
int errcode(int sqlerrcode)
Definition: elog.c:854
#define HEAP_PAGE_PRUNE_FREEZE
Definition: heapam.h:44
@ PRUNE_VACUUM_SCAN
Definition: heapam.h:271
#define HEAP_PAGE_PRUNE_MARK_UNUSED_NOW
Definition: heapam.h:43
#define ERRCODE_DATA_CORRUPTED
Definition: pg_basebackup.c:42
#define qsort(a, b, c, d)
Definition: port.h:479
void heap_page_prune_and_freeze(Relation relation, Buffer buffer, GlobalVisState *vistest, int options, struct VacuumCutoffs *cutoffs, PruneFreezeResult *presult, PruneReason reason, OffsetNumber *off_loc, TransactionId *new_relfrozen_xid, MultiXactId *new_relmin_mxid)
Definition: pruneheap.c:350
int recently_dead_tuples
Definition: heapam.h:235
TransactionId vm_conflict_horizon
Definition: heapam.h:250
OffsetNumber deadoffsets[MaxHeapTuplesPerPage]
Definition: heapam.h:264
bool all_visible
Definition: heapam.h:248
#define TransactionIdIsValid(xid)
Definition: transam.h:41
static bool heap_page_is_all_visible(LVRelState *vacrel, Buffer buf, TransactionId *visibility_cutoff_xid, bool *all_frozen)
Definition: vacuumlazy.c:3610
static int cmpOffsetNumbers(const void *a, const void *b)
Definition: vacuumlazy.c:1918
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags)
#define VM_ALL_FROZEN(r, b, v)
Definition: visibilitymap.h:26
#define VISIBILITYMAP_VALID_BITS

References PruneFreezeResult::all_frozen, PruneFreezeResult::all_visible, Assert(), buf, BufferGetBlockNumber(), cmpOffsetNumbers(), LVRelState::cutoffs, dead_items_add(), PruneFreezeResult::deadoffsets, ereport, errcode(), ERRCODE_DATA_CORRUPTED, errmsg(), PruneFreezeResult::hastup, heap_page_is_all_visible(), heap_page_prune_and_freeze(), HEAP_PAGE_PRUNE_FREEZE, HEAP_PAGE_PRUNE_MARK_UNUSED_NOW, InvalidTransactionId, InvalidXLogRecPtr, LVRelState::live_tuples, PruneFreezeResult::live_tuples, LVRelState::lpdead_item_pages, LVRelState::lpdead_items, PruneFreezeResult::lpdead_items, MarkBufferDirty(), MultiXactIdIsValid, PruneFreezeResult::ndeleted, LVRelState::new_frozen_tuple_pages, LVRelState::NewRelfrozenXid, LVRelState::NewRelminMxid, PruneFreezeResult::nfrozen, LVRelState::nindexes, LVRelState::nonempty_pages, LVRelState::offnum, PageClearAllVisible(), PageIsAllVisible(), PageSetAllVisible(), PRUNE_VACUUM_SCAN, qsort, LVRelState::recently_dead_tuples, PruneFreezeResult::recently_dead_tuples, LVRelState::rel, LVRelState::relname, TransactionIdIsValid, LVRelState::tuples_deleted, LVRelState::tuples_frozen, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_clear(), visibilitymap_get_status(), visibilitymap_set(), VISIBILITYMAP_VALID_BITS, LVRelState::vistest, VM_ALL_FROZEN, PruneFreezeResult::vm_conflict_horizon, LVRelState::vm_new_frozen_pages, LVRelState::vm_new_visible_frozen_pages, LVRelState::vm_new_visible_pages, and WARNING.

Referenced by lazy_scan_heap().

◆ lazy_truncate_heap()

static void lazy_truncate_heap ( LVRelState vacrel)
static

Definition at line 3205 of file vacuumlazy.c.

3206{
3207 BlockNumber orig_rel_pages = vacrel->rel_pages;
3208 BlockNumber new_rel_pages;
3209 bool lock_waiter_detected;
3210 int lock_retry;
3211
3212 /* Report that we are now truncating */
3213 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
3214 PROGRESS_VACUUM_PHASE_TRUNCATE);
3215
3216 /* Update error traceback information one last time */
3217 update_vacuum_error_info(vacrel, NULL, VACUUM_ERRCB_PHASE_TRUNCATE,
3218 vacrel->nonempty_pages, InvalidOffsetNumber);
3219
3220 /*
3221 * Loop until no more truncating can be done.
3222 */
3223 do
3224 {
3225 /*
3226 * We need full exclusive lock on the relation in order to do
3227 * truncation. If we can't get it, give up rather than waiting --- we
3228 * don't want to block other backends, and we don't want to deadlock
3229 * (which is quite possible considering we already hold a lower-grade
3230 * lock).
3231 */
3232 lock_waiter_detected = false;
3233 lock_retry = 0;
3234 while (true)
3235 {
3236 if (ConditionalLockRelation(vacrel->rel, AccessExclusiveLock))
3237 break;
3238
3239 /*
3240 * Check for interrupts while trying to (re-)acquire the exclusive
3241 * lock.
3242 */
3243 CHECK_FOR_INTERRUPTS();
3244
3245 if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
3246 VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
3247 {
3248 /*
3249 * We failed to establish the lock in the specified number of
3250 * retries. This means we give up truncating.
3251 */
3252 ereport(vacrel->verbose ? INFO : DEBUG2,
3253 (errmsg("\"%s\": stopping truncate due to conflicting lock request",
3254 vacrel->relname)));
3255 return;
3256 }
3257
3258 (void) WaitLatch(MyLatch,
3259 WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
3260 VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL,
3261 WAIT_EVENT_VACUUM_TRUNCATE);
3262 ResetLatch(MyLatch);
3263 }
3264
3265 /*
3266 * Now that we have exclusive lock, look to see if the rel has grown
3267 * whilst we were vacuuming with non-exclusive lock. If so, give up;
3268 * the newly added pages presumably contain non-deletable tuples.
3269 */
3270 new_rel_pages = RelationGetNumberOfBlocks(vacrel->rel);
3271 if (new_rel_pages != orig_rel_pages)
3272 {
3273 /*
3274 * Note: we intentionally don't update vacrel->rel_pages with the
3275 * new rel size here. If we did, it would amount to assuming that
3276 * the new pages are empty, which is unlikely. Leaving the numbers
3277 * alone amounts to assuming that the new pages have the same
3278 * tuple density as existing ones, which is less unlikely.
3279 */
3280 UnlockRelation(vacrel->rel, AccessExclusiveLock);
3281 return;
3282 }
3283
3284 /*
3285 * Scan backwards from the end to verify that the end pages actually
3286 * contain no tuples. This is *necessary*, not optional, because
3287 * other backends could have added tuples to these pages whilst we
3288 * were vacuuming.
3289 */
3290 new_rel_pages = count_nondeletable_pages(vacrel, &lock_waiter_detected);
3291 vacrel->blkno = new_rel_pages;
3292
3293 if (new_rel_pages >= orig_rel_pages)
3294 {
3295 /* can't do anything after all */
3296 UnlockRelation(vacrel->rel, AccessExclusiveLock);
3297 return;
3298 }
3299
3300 /*
3301 * Okay to truncate.
3302 */
3303 RelationTruncate(vacrel->rel, new_rel_pages);
3304
3305 /*
3306 * We can release the exclusive lock as soon as we have truncated.
3307 * Other backends can't safely access the relation until they have
3308 * processed the smgr invalidation that smgrtruncate sent out ... but
3309 * that should happen as part of standard invalidation processing once
3310 * they acquire lock on the relation.
3311 */
3312 UnlockRelation(vacrel->rel, AccessExclusiveLock);
3313
3314 /*
3315 * Update statistics. Here, it *is* correct to adjust rel_pages
3316 * without also touching reltuples, since the tuple count wasn't
3317 * changed by the truncation.
3318 */
3319 vacrel->removed_pages += orig_rel_pages - new_rel_pages;
3320 vacrel->rel_pages = new_rel_pages;
3321
3322 ereport(vacrel->verbose ? INFO : DEBUG2,
3323 (errmsg("table \"%s\": truncated %u to %u pages",
3324 vacrel->relname,
3325 orig_rel_pages, new_rel_pages)));
3326 orig_rel_pages = new_rel_pages;
3327 } while (new_rel_pages > vacrel->nonempty_pages && lock_waiter_detected);
3328}
struct Latch * MyLatch
Definition: globals.c:63
void ResetLatch(Latch *latch)
Definition: latch.c:374
int WaitLatch(Latch *latch, int wakeEvents, long timeout, uint32 wait_event_info)
Definition: latch.c:172
void UnlockRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:314
bool ConditionalLockRelation(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:278
#define PROGRESS_VACUUM_PHASE_TRUNCATE
Definition: progress.h:38
void RelationTruncate(Relation rel, BlockNumber nblocks)
Definition: storage.c:289
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL
Definition: vacuumlazy.c:179
#define VACUUM_TRUNCATE_LOCK_TIMEOUT
Definition: vacuumlazy.c:180
static BlockNumber count_nondeletable_pages(LVRelState *vacrel, bool *lock_waiter_detected)
Definition: vacuumlazy.c:3336
#define WL_TIMEOUT
Definition: waiteventset.h:37
#define WL_EXIT_ON_PM_DEATH
Definition: waiteventset.h:39
#define WL_LATCH_SET
Definition: waiteventset.h:34

References AccessExclusiveLock, LVRelState::blkno, CHECK_FOR_INTERRUPTS, ConditionalLockRelation(), count_nondeletable_pages(), DEBUG2, ereport, errmsg(), INFO, InvalidOffsetNumber, MyLatch, LVRelState::nonempty_pages, pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_TRUNCATE, LVRelState::rel, LVRelState::rel_pages, RelationGetNumberOfBlocks, RelationTruncate(), LVRelState::relname, LVRelState::removed_pages, ResetLatch(), UnlockRelation(), update_vacuum_error_info(), VACUUM_ERRCB_PHASE_TRUNCATE, VACUUM_TRUNCATE_LOCK_TIMEOUT, VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL, LVRelState::verbose, WaitLatch(), WL_EXIT_ON_PM_DEATH, WL_LATCH_SET, and WL_TIMEOUT.

Referenced by heap_vacuum_rel().
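The retry budget in the lock-acquisition loop follows directly from the two macros: VACUUM_TRUNCATE_LOCK_TIMEOUT (5000 ms) divided by VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL (50 ms) allows at most 100 wait-and-retry cycles before truncation is abandoned. A minimal standalone sketch of that arithmetic (illustrative only; try_lock() is a hypothetical stand-in for ConditionalLockRelation(), and the real loop sleeps on a latch between attempts):

#include <stdbool.h>
#include <stdio.h>

#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL 50	/* ms */
#define VACUUM_TRUNCATE_LOCK_TIMEOUT 5000	/* ms */

/* hypothetical stand-in for ConditionalLockRelation(); always fails here */
static bool
try_lock(void)
{
	return false;
}

int
main(void)
{
	int			lock_retry = 0;

	while (!try_lock())
	{
		if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
							VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
		{
			/* 5000 / 50 = 100 failed attempts before giving up */
			printf("gave up after %d attempts (~%d ms of waiting)\n",
				   lock_retry - 1,
				   (lock_retry - 1) * VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL);
			return 0;
		}
		/* the real loop waits on a latch for 50 ms here, then retries */
	}
	printf("lock acquired\n");
	return 0;
}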

◆ lazy_vacuum()

static void lazy_vacuum ( LVRelState vacrel)
static

Definition at line 2455 of file vacuumlazy.c.

2456{
2457 bool bypass;
2458
2459 /* Should not end up here with no indexes */
2460 Assert(vacrel->nindexes > 0);
2461 Assert(vacrel->lpdead_item_pages > 0);
2462
2463 if (!vacrel->do_index_vacuuming)
2464 {
2465 Assert(!vacrel->do_index_cleanup);
2466 dead_items_reset(vacrel);
2467 return;
2468 }
2469
2470 /*
2471 * Consider bypassing index vacuuming (and heap vacuuming) entirely.
2472 *
2473 * We currently only do this in cases where the number of LP_DEAD items
2474 * for the entire VACUUM operation is close to zero. This avoids sharp
2475 * discontinuities in the duration and overhead of successive VACUUM
2476 * operations that run against the same table with a fixed workload.
2477 * Ideally, successive VACUUM operations will behave as if there are
2478 * exactly zero LP_DEAD items in cases where there are close to zero.
2479 *
2480 * This is likely to be helpful with a table that is continually affected
2481 * by UPDATEs that can mostly apply the HOT optimization, but occasionally
2482 * have small aberrations that lead to just a few heap pages retaining
2483 * only one or two LP_DEAD items. This is pretty common; even when the
2484 * DBA goes out of their way to make UPDATEs use HOT, it is practically
2485 * impossible to predict whether HOT will be applied in 100% of cases.
2486 * It's far easier to ensure that 99%+ of all UPDATEs against a table use
2487 * HOT through careful tuning.
2488 */
2489 bypass = false;
2490 if (vacrel->consider_bypass_optimization && vacrel->rel_pages > 0)
2491 {
2492 BlockNumber threshold;
2493
2494 Assert(vacrel->num_index_scans == 0);
2495 Assert(vacrel->lpdead_items == vacrel->dead_items_info->num_items);
2496 Assert(vacrel->do_index_vacuuming);
2497 Assert(vacrel->do_index_cleanup);
2498
2499 /*
2500 * This crossover point at which we'll start to do index vacuuming is
2501 * expressed as a percentage of the total number of heap pages in the
2502 * table that are known to have at least one LP_DEAD item. This is
2503 * much more important than the total number of LP_DEAD items, since
2504 * it's a proxy for the number of heap pages whose visibility map bits
2505 * cannot be set on account of bypassing index and heap vacuuming.
2506 *
2507 * We apply one further precautionary test: the space currently used
2508 * to store the TIDs (TIDs that now all point to LP_DEAD items) must
2509 * not exceed 32MB. This limits the risk that we will bypass index
2510 * vacuuming again and again until eventually there is a VACUUM whose
2511 * dead_items space is not CPU cache resident.
2512 *
2513 * We don't take any special steps to remember the LP_DEAD items (such
2514 * as counting them in our final update to the stats system) when the
2515 * optimization is applied. Though the accounting used in analyze.c's
2516 * acquire_sample_rows() will recognize the same LP_DEAD items as dead
2517 * rows in its own stats report, that's okay. The discrepancy should
2518 * be negligible. If this optimization is ever expanded to cover more
2519 * cases then this may need to be reconsidered.
2520 */
2521 threshold = (double) vacrel->rel_pages * BYPASS_THRESHOLD_PAGES;
2522 bypass = (vacrel->lpdead_item_pages < threshold &&
2523 TidStoreMemoryUsage(vacrel->dead_items) < 32 * 1024 * 1024);
2524 }
2525
2526 if (bypass)
2527 {
2528 /*
2529 * There are almost zero TIDs. Behave as if there were precisely
2530 * zero: bypass index vacuuming, but do index cleanup.
2531 *
2532 * We expect that the ongoing VACUUM operation will finish very
2533 * quickly, so there is no point in considering speeding up as a
2534 * failsafe against wraparound failure. (Index cleanup is expected to
2535 * finish very quickly in cases where there were no ambulkdelete()
2536 * calls.)
2537 */
2538 vacrel->do_index_vacuuming = false;
2539 }
2540 else if (lazy_vacuum_all_indexes(vacrel))
2541 {
2542 /*
2543 * We successfully completed a round of index vacuuming. Do related
2544 * heap vacuuming now.
2545 */
2546 lazy_vacuum_heap_rel(vacrel);
2547 }
2548 else
2549 {
2550 /*
2551 * Failsafe case.
2552 *
2553 * We attempted index vacuuming, but didn't finish a full round/full
2554 * index scan. This happens when relfrozenxid or relminmxid is too
2555 * far in the past.
2556 *
2557 * From this point on the VACUUM operation will do no further index
2558 * vacuuming or heap vacuuming. This VACUUM operation won't end up
2559 * back here again.
2560 */
2561 Assert(VacuumFailsafeActive);
2562 }
2563
2564 /*
2565 * Forget the LP_DEAD items that we just vacuumed (or just decided to not
2566 * vacuum)
2567 */
2568 dead_items_reset(vacrel);
2569}
static void dead_items_reset(LVRelState *vacrel)
Definition: vacuumlazy.c:3565
#define BYPASS_THRESHOLD_PAGES
Definition: vacuumlazy.c:186
static bool lazy_vacuum_all_indexes(LVRelState *vacrel)
Definition: vacuumlazy.c:2580
static void lazy_vacuum_heap_rel(LVRelState *vacrel)
Definition: vacuumlazy.c:2725

References Assert(), BYPASS_THRESHOLD_PAGES, LVRelState::consider_bypass_optimization, LVRelState::dead_items, LVRelState::dead_items_info, dead_items_reset(), LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, lazy_vacuum_all_indexes(), lazy_vacuum_heap_rel(), LVRelState::lpdead_item_pages, LVRelState::lpdead_items, LVRelState::nindexes, LVRelState::num_index_scans, VacDeadItemsInfo::num_items, LVRelState::rel_pages, TidStoreMemoryUsage(), and VacuumFailsafeActive.

Referenced by lazy_scan_heap().
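Concretely, with BYPASS_THRESHOLD_PAGES at 0.02 the bypass fires only when fewer than 2% of the table's heap pages have LP_DEAD items and the dead-item TID store is under 32MB. A standalone sketch of that decision (illustrative; bypass_index_vacuuming() is a made-up helper mirroring the macro and the memory cap, not a server function):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define BYPASS_THRESHOLD_PAGES 0.02	/* i.e. 2% of rel_pages */

typedef uint32_t BlockNumber;

/* made-up helper restating the crossover test described above */
static bool
bypass_index_vacuuming(BlockNumber rel_pages,
					   BlockNumber lpdead_item_pages,
					   size_t dead_items_mem)
{
	double		threshold = (double) rel_pages * BYPASS_THRESHOLD_PAGES;

	return lpdead_item_pages < threshold &&
		dead_items_mem < (size_t) 32 * 1024 * 1024;
}

int
main(void)
{
	/* 1,000,000-page table: the crossover is at 20,000 pages with dead items */
	printf("%d\n", bypass_index_vacuuming(1000000, 15000, 4u * 1024 * 1024));	/* 1: bypass */
	printf("%d\n", bypass_index_vacuuming(1000000, 25000, 4u * 1024 * 1024));	/* 0: vacuum indexes */
	return 0;
}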

◆ lazy_vacuum_all_indexes()

static bool lazy_vacuum_all_indexes ( LVRelState vacrel)
static

Definition at line 2580 of file vacuumlazy.c.

2581{
2582 bool allindexes = true;
2583 double old_live_tuples = vacrel->rel->rd_rel->reltuples;
2584 const int progress_start_index[] = {
2585 PROGRESS_VACUUM_PHASE,
2586 PROGRESS_VACUUM_INDEXES_TOTAL
2587 };
2588 const int progress_end_index[] = {
2589 PROGRESS_VACUUM_INDEXES_TOTAL,
2590 PROGRESS_VACUUM_INDEXES_PROCESSED,
2591 PROGRESS_VACUUM_NUM_INDEX_VACUUMS
2592 };
2593 int64 progress_start_val[2];
2594 int64 progress_end_val[3];
2595
2596 Assert(vacrel->nindexes > 0);
2597 Assert(vacrel->do_index_vacuuming);
2598 Assert(vacrel->do_index_cleanup);
2599
2600 /* Precheck for XID wraparound emergencies */
2601 if (lazy_check_wraparound_failsafe(vacrel))
2602 {
2603 /* Wraparound emergency -- don't even start an index scan */
2604 return false;
2605 }
2606
2607 /*
2608 * Report that we are now vacuuming indexes and the number of indexes to
2609 * vacuum.
2610 */
2611 progress_start_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_INDEX;
2612 progress_start_val[1] = vacrel->nindexes;
2613 pgstat_progress_update_multi_param(2, progress_start_index, progress_start_val);
2614
2615 if (!ParallelVacuumIsActive(vacrel))
2616 {
2617 for (int idx = 0; idx < vacrel->nindexes; idx++)
2618 {
2619 Relation indrel = vacrel->indrels[idx];
2620 IndexBulkDeleteResult *istat = vacrel->indstats[idx];
2621
2622 vacrel->indstats[idx] = lazy_vacuum_one_index(indrel, istat,
2623 old_live_tuples,
2624 vacrel);
2625
2626 /* Report the number of indexes vacuumed */
2627 pgstat_progress_update_param(PROGRESS_VACUUM_INDEXES_PROCESSED,
2628 idx + 1);
2629
2630 if (lazy_check_wraparound_failsafe(vacrel))
2631 {
2632 /* Wraparound emergency -- end current index scan */
2633 allindexes = false;
2634 break;
2635 }
2636 }
2637 }
2638 else
2639 {
2640 /* Outsource everything to parallel variant */
2641 parallel_vacuum_bulkdel_all_indexes(vacrel->pvs, old_live_tuples,
2642 vacrel->num_index_scans);
2643
2644 /*
2645 * Do a postcheck to consider applying wraparound failsafe now. Note
2646 * that parallel VACUUM only gets the precheck and this postcheck.
2647 */
2648 if (lazy_check_wraparound_failsafe(vacrel))
2649 allindexes = false;
2650 }
2651
2652 /*
2653 * We delete all LP_DEAD items from the first heap pass in all indexes on
2654 * each call here (except calls where we choose to do the failsafe). This
2655 * makes the next call to lazy_vacuum_heap_rel() safe (except in the event
2656 * of the failsafe triggering, which prevents the next call from taking
2657 * place).
2658 */
2659 Assert(vacrel->num_index_scans > 0 ||
2660 vacrel->dead_items_info->num_items == vacrel->lpdead_items);
2661 Assert(allindexes || VacuumFailsafeActive);
2662
2663 /*
2664 * Increase and report the number of index scans. Also, we reset
2665 * PROGRESS_VACUUM_INDEXES_TOTAL and PROGRESS_VACUUM_INDEXES_PROCESSED.
2666 *
2667 * We deliberately include the case where we started a round of bulk
2668 * deletes that we weren't able to finish due to the failsafe triggering.
2669 */
2670 vacrel->num_index_scans++;
2671 progress_end_val[0] = 0;
2672 progress_end_val[1] = 0;
2673 progress_end_val[2] = vacrel->num_index_scans;
2674 pgstat_progress_update_multi_param(3, progress_end_index, progress_end_val);
2675
2676 return allindexes;
2677}
#define PROGRESS_VACUUM_NUM_INDEX_VACUUMS
Definition: progress.h:25
#define PROGRESS_VACUUM_PHASE_VACUUM_INDEX
Definition: progress.h:35
static IndexBulkDeleteResult * lazy_vacuum_one_index(Relation indrel, IndexBulkDeleteResult *istat, double reltuples, LVRelState *vacrel)
Definition: vacuumlazy.c:3076
void parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, int num_index_scans)

References Assert(), LVRelState::dead_items_info, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, idx(), LVRelState::indrels, LVRelState::indstats, lazy_check_wraparound_failsafe(), lazy_vacuum_one_index(), LVRelState::lpdead_items, LVRelState::nindexes, LVRelState::num_index_scans, VacDeadItemsInfo::num_items, parallel_vacuum_bulkdel_all_indexes(), ParallelVacuumIsActive, pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_VACUUM_INDEXES_PROCESSED, PROGRESS_VACUUM_INDEXES_TOTAL, PROGRESS_VACUUM_NUM_INDEX_VACUUMS, PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_INDEX, LVRelState::pvs, RelationData::rd_rel, LVRelState::rel, and VacuumFailsafeActive.

Referenced by lazy_vacuum().

◆ lazy_vacuum_heap_page()

static void lazy_vacuum_heap_page ( LVRelState vacrel,
BlockNumber  blkno,
Buffer  buffer,
OffsetNumber deadoffsets,
int  num_offsets,
Buffer  vmbuffer 
)
static

Definition at line 2843 of file vacuumlazy.c.

2846{
2847 Page page = BufferGetPage(buffer);
2848 OffsetNumber unused[MaxHeapTuplesPerPage];
2849 int nunused = 0;
2850 TransactionId visibility_cutoff_xid;
2851 bool all_frozen;
2852 LVSavedErrInfo saved_err_info;
2853
2854 Assert(vacrel->do_index_vacuuming);
2855
2856 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
2857
2858 /* Update error traceback information */
2859 update_vacuum_error_info(vacrel, &saved_err_info,
2860 VACUUM_ERRCB_PHASE_VACUUM_HEAP, blkno,
2861 InvalidOffsetNumber);
2862
2863 START_CRIT_SECTION();
2864
2865 for (int i = 0; i < num_offsets; i++)
2866 {
2867 ItemId itemid;
2868 OffsetNumber toff = deadoffsets[i];
2869
2870 itemid = PageGetItemId(page, toff);
2871
2872 Assert(ItemIdIsDead(itemid) && !ItemIdHasStorage(itemid));
2873 ItemIdSetUnused(itemid);
2874 unused[nunused++] = toff;
2875 }
2876
2877 Assert(nunused > 0);
2878
2879 /* Attempt to truncate line pointer array now */
2880 PageTruncateLinePointerArray(page);
2881
2882 /*
2883 * Mark buffer dirty before we write WAL.
2884 */
2885 MarkBufferDirty(buffer);
2886
2887 /* XLOG stuff */
2888 if (RelationNeedsWAL(vacrel->rel))
2889 {
2890 log_heap_prune_and_freeze(vacrel->rel, buffer,
2891 InvalidTransactionId,
2892 false, /* no cleanup lock required */
2893 PRUNE_VACUUM_CLEANUP,
2894 NULL, 0, /* frozen */
2895 NULL, 0, /* redirected */
2896 NULL, 0, /* dead */
2897 unused, nunused);
2898 }
2899
2900 /*
2901 * End critical section, so we safely can do visibility tests (which
2902 * possibly need to perform IO and allocate memory!). If we crash now the
2903 * page (including the corresponding vm bit) might not be marked all
2904 * visible, but that's fine. A later vacuum will fix that.
2905 */
2906 END_CRIT_SECTION();
2907
2908 /*
2909 * Now that we have removed the LP_DEAD items from the page, once again
2910 * check if the page has become all-visible. The page is already marked
2911 * dirty, exclusively locked, and, if needed, a full page image has been
2912 * emitted.
2913 */
2914 Assert(!PageIsAllVisible(page));
2915 if (heap_page_is_all_visible(vacrel, buffer, &visibility_cutoff_xid,
2916 &all_frozen))
2917 {
2918 uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
2919
2920 if (all_frozen)
2921 {
2922 Assert(!TransactionIdIsValid(visibility_cutoff_xid));
2923 flags |= VISIBILITYMAP_ALL_FROZEN;
2924 }
2925
2926 PageSetAllVisible(page);
2927 visibilitymap_set(vacrel->rel, blkno, buffer,
2928 InvalidXLogRecPtr,
2929 vmbuffer, visibility_cutoff_xid,
2930 flags);
2931
2932 /* Count the newly set VM page for logging */
2933 vacrel->vm_new_visible_pages++;
2934 if (all_frozen)
2935 vacrel->vm_new_visible_frozen_pages++;
2936 }
2937
2938 /* Revert to the previous phase information for error traceback */
2939 restore_vacuum_error_info(vacrel, &saved_err_info);
2940}
void PageTruncateLinePointerArray(Page page)
Definition: bufpage.c:834
@ PRUNE_VACUUM_CLEANUP
Definition: heapam.h:272
#define ItemIdSetUnused(itemId)
Definition: itemid.h:128
#define ItemIdHasStorage(itemId)
Definition: itemid.h:120
void log_heap_prune_and_freeze(Relation relation, Buffer buffer, TransactionId conflict_xid, bool cleanup_lock, PruneReason reason, HeapTupleFreeze *frozen, int nfrozen, OffsetNumber *redirected, int nredirected, OffsetNumber *dead, int ndead, OffsetNumber *unused, int nunused)
Definition: pruneheap.c:2053

References Assert(), BufferGetPage(), LVRelState::do_index_vacuuming, END_CRIT_SECTION, heap_page_is_all_visible(), i, InvalidOffsetNumber, InvalidTransactionId, InvalidXLogRecPtr, ItemIdHasStorage, ItemIdIsDead, ItemIdSetUnused, log_heap_prune_and_freeze(), MarkBufferDirty(), MaxHeapTuplesPerPage, PageGetItemId(), PageIsAllVisible(), PageSetAllVisible(), PageTruncateLinePointerArray(), pgstat_progress_update_param(), PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, PRUNE_VACUUM_CLEANUP, LVRelState::rel, RelationNeedsWAL, restore_vacuum_error_info(), START_CRIT_SECTION, TransactionIdIsValid, update_vacuum_error_info(), VACUUM_ERRCB_PHASE_VACUUM_HEAP, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_set(), LVRelState::vm_new_visible_frozen_pages, and LVRelState::vm_new_visible_pages.

Referenced by lazy_vacuum_heap_rel().
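The first loop in the function is simply a translation of the caller-supplied dead offsets into line-pointer state: each offset is asserted to be an LP_DEAD item without storage, flipped to LP_UNUSED, and remembered for the WAL record. A toy standalone version of that pass, using a plain array in place of a heap page and the ItemId machinery (illustrative only):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint16_t OffsetNumber;

enum ItemState
{
	LP_UNUSED, LP_NORMAL, LP_DEAD
};

int
main(void)
{
	/* line pointers 1..8 of a toy page; offsets 3, 5 and 7 were reported dead */
	enum ItemState items[9] = {
		LP_UNUSED, LP_NORMAL, LP_NORMAL, LP_DEAD,
		LP_NORMAL, LP_DEAD, LP_NORMAL, LP_DEAD, LP_NORMAL
	};
	OffsetNumber deadoffsets[] = {3, 5, 7};
	OffsetNumber unused[8];
	int			nunused = 0;

	for (int i = 0; i < 3; i++)
	{
		OffsetNumber toff = deadoffsets[i];

		assert(items[toff] == LP_DEAD);	/* mirrors the ItemIdIsDead() assertion */
		items[toff] = LP_UNUSED;		/* what ItemIdSetUnused() does */
		unused[nunused++] = toff;		/* collected for the WAL record */
	}
	printf("marked %d item pointers unused\n", nunused);
	return 0;
}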

◆ lazy_vacuum_heap_rel()

static void lazy_vacuum_heap_rel ( LVRelState vacrel)
static

Definition at line 2725 of file vacuumlazy.c.

2726{
2727 ReadStream *stream;
2728 BlockNumber vacuumed_pages = 0;
2729 Buffer vmbuffer = InvalidBuffer;
2730 LVSavedErrInfo saved_err_info;
2731 TidStoreIter *iter;
2732
2733 Assert(vacrel->do_index_vacuuming);
2734 Assert(vacrel->do_index_cleanup);
2735 Assert(vacrel->num_index_scans > 0);
2736
2737 /* Report that we are now vacuuming the heap */
2738 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
2739 PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
2740
2741 /* Update error traceback information */
2742 update_vacuum_error_info(vacrel, &saved_err_info,
2743 VACUUM_ERRCB_PHASE_VACUUM_HEAP,
2744 InvalidBlockNumber, InvalidOffsetNumber);
2745
2746 iter = TidStoreBeginIterate(vacrel->dead_items);
2747
2748 /*
2749 * Set up the read stream for vacuum's second pass through the heap.
2750 *
2751 * It is safe to use batchmode, as vacuum_reap_lp_read_stream_next() does
2752 * not need to wait for IO and does not perform locking. Once we support
2753 * parallelism it should still be fine, as presumably the holder of locks
2754 * would never be blocked by IO while holding the lock.
2755 */
2756 stream = read_stream_begin_relation(READ_STREAM_MAINTENANCE |
2757 READ_STREAM_USE_BATCHING,
2758 vacrel->bstrategy,
2759 vacrel->rel,
2760 MAIN_FORKNUM,
2761 vacuum_reap_lp_read_stream_next,
2762 iter,
2763 sizeof(TidStoreIterResult));
2764
2765 while (true)
2766 {
2767 BlockNumber blkno;
2768 Buffer buf;
2769 Page page;
2770 TidStoreIterResult *iter_result;
2771 Size freespace;
2772 OffsetNumber offsets[MaxOffsetNumber];
2773 int num_offsets;
2774
2775 vacuum_delay_point(false);
2776
2777 buf = read_stream_next_buffer(stream, (void **) &iter_result);
2778
2779 /* The relation is exhausted */
2780 if (!BufferIsValid(buf))
2781 break;
2782
2783 vacrel->blkno = blkno = BufferGetBlockNumber(buf);
2784
2785 Assert(iter_result);
2786 num_offsets = TidStoreGetBlockOffsets(iter_result, offsets, lengthof(offsets));
2787 Assert(num_offsets <= lengthof(offsets));
2788
2789 /*
2790 * Pin the visibility map page in case we need to mark the page
2791 * all-visible. In most cases this will be very cheap, because we'll
2792 * already have the correct page pinned anyway.
2793 */
2794 visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
2795
2796 /* We need a non-cleanup exclusive lock to mark dead_items unused */
2797 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
2798 lazy_vacuum_heap_page(vacrel, blkno, buf, offsets,
2799 num_offsets, vmbuffer);
2800
2801 /* Now that we've vacuumed the page, record its available space */
2802 page = BufferGetPage(buf);
2803 freespace = PageGetHeapFreeSpace(page);
2804
2805 UnlockReleaseBuffer(buf);
2806 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
2807 vacuumed_pages++;
2808 }
2809
2810 read_stream_end(stream);
2811 TidStoreEndIterate(iter);
2812
2813 vacrel->blkno = InvalidBlockNumber;
2814 if (BufferIsValid(vmbuffer))
2815 ReleaseBuffer(vmbuffer);
2816
2817 /*
2818 * We set all LP_DEAD items from the first heap pass to LP_UNUSED during
2819 * the second heap pass. No more, no less.
2820 */
2821 Assert(vacrel->num_index_scans > 1 ||
2822 (vacrel->dead_items_info->num_items == vacrel->lpdead_items &&
2823 vacuumed_pages == vacrel->lpdead_item_pages));
2824
2825 ereport(DEBUG2,
2826 (errmsg("table \"%s\": removed %" PRId64 " dead item identifiers in %u pages",
2827 vacrel->relname, vacrel->dead_items_info->num_items,
2828 vacuumed_pages)));
2829
2830 /* Revert to the previous phase information for error traceback */
2831 restore_vacuum_error_info(vacrel, &saved_err_info);
2832}
#define lengthof(array)
Definition: c.h:787
#define MaxOffsetNumber
Definition: off.h:28
#define PROGRESS_VACUUM_PHASE_VACUUM_HEAP
Definition: progress.h:36
#define READ_STREAM_USE_BATCHING
Definition: read_stream.h:64
TidStoreIter * TidStoreBeginIterate(TidStore *ts)
Definition: tidstore.c:471
void TidStoreEndIterate(TidStoreIter *iter)
Definition: tidstore.c:518
int TidStoreGetBlockOffsets(TidStoreIterResult *result, OffsetNumber *offsets, int max_offsets)
Definition: tidstore.c:566
static BlockNumber vacuum_reap_lp_read_stream_next(ReadStream *stream, void *callback_private_data, void *per_buffer_data)
Definition: vacuumlazy.c:2687
static void lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer, OffsetNumber *deadoffsets, int num_offsets, Buffer vmbuffer)
Definition: vacuumlazy.c:2843

References Assert(), LVRelState::blkno, LVRelState::bstrategy, buf, BUFFER_LOCK_EXCLUSIVE, BufferGetBlockNumber(), BufferGetPage(), BufferIsValid(), LVRelState::dead_items, LVRelState::dead_items_info, DEBUG2, LVRelState::do_index_cleanup, LVRelState::do_index_vacuuming, ereport, errmsg(), InvalidBlockNumber, InvalidBuffer, InvalidOffsetNumber, lazy_vacuum_heap_page(), lengthof, LockBuffer(), LVRelState::lpdead_item_pages, LVRelState::lpdead_items, MAIN_FORKNUM, MaxOffsetNumber, LVRelState::num_index_scans, VacDeadItemsInfo::num_items, PageGetHeapFreeSpace(), pgstat_progress_update_param(), PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_HEAP, read_stream_begin_relation(), read_stream_end(), READ_STREAM_MAINTENANCE, read_stream_next_buffer(), READ_STREAM_USE_BATCHING, RecordPageWithFreeSpace(), LVRelState::rel, ReleaseBuffer(), LVRelState::relname, restore_vacuum_error_info(), TidStoreBeginIterate(), TidStoreEndIterate(), TidStoreGetBlockOffsets(), UnlockReleaseBuffer(), update_vacuum_error_info(), vacuum_delay_point(), VACUUM_ERRCB_PHASE_VACUUM_HEAP, vacuum_reap_lp_read_stream_next(), and visibilitymap_pin().

Referenced by lazy_vacuum().

◆ lazy_vacuum_one_index()

static IndexBulkDeleteResult * lazy_vacuum_one_index ( Relation  indrel,
IndexBulkDeleteResult istat,
double  reltuples,
LVRelState vacrel 
)
static

Definition at line 3076 of file vacuumlazy.c.

3078{
3079 IndexVacuumInfo ivinfo;
3080 LVSavedErrInfo saved_err_info;
3081
3082 ivinfo.index = indrel;
3083 ivinfo.heaprel = vacrel->rel;
3084 ivinfo.analyze_only = false;
3085 ivinfo.report_progress = false;
3086 ivinfo.estimated_count = true;
3087 ivinfo.message_level = DEBUG2;
3088 ivinfo.num_heap_tuples = reltuples;
3089 ivinfo.strategy = vacrel->bstrategy;
3090
3091 /*
3092 * Update error traceback information.
3093 *
3094 * The index name is saved during this phase and restored immediately
3095 * after this phase. See vacuum_error_callback.
3096 */
3097 Assert(vacrel->indname == NULL);
3098 vacrel->indname = pstrdup(RelationGetRelationName(indrel));
3099 update_vacuum_error_info(vacrel, &saved_err_info,
3100 VACUUM_ERRCB_PHASE_VACUUM_INDEX,
3101 InvalidBlockNumber, InvalidOffsetNumber);
3102
3103 /* Do bulk deletion */
3104 istat = vac_bulkdel_one_index(&ivinfo, istat, vacrel->dead_items,
3105 vacrel->dead_items_info);
3106
3107 /* Revert to the previous phase information for error traceback */
3108 restore_vacuum_error_info(vacrel, &saved_err_info);
3109 pfree(vacrel->indname);
3110 vacrel->indname = NULL;
3111
3112 return istat;
3113}
IndexBulkDeleteResult * vac_bulkdel_one_index(IndexVacuumInfo *ivinfo, IndexBulkDeleteResult *istat, TidStore *dead_items, VacDeadItemsInfo *dead_items_info)
Definition: vacuum.c:2630

References IndexVacuumInfo::analyze_only, Assert(), LVRelState::bstrategy, LVRelState::dead_items, LVRelState::dead_items_info, DEBUG2, IndexVacuumInfo::estimated_count, IndexVacuumInfo::heaprel, IndexVacuumInfo::index, LVRelState::indname, InvalidBlockNumber, InvalidOffsetNumber, IndexVacuumInfo::message_level, IndexVacuumInfo::num_heap_tuples, pfree(), pstrdup(), LVRelState::rel, RelationGetRelationName, IndexVacuumInfo::report_progress, restore_vacuum_error_info(), IndexVacuumInfo::strategy, update_vacuum_error_info(), vac_bulkdel_one_index(), and VACUUM_ERRCB_PHASE_VACUUM_INDEX.

Referenced by lazy_vacuum_all_indexes().

◆ restore_vacuum_error_info()

static void restore_vacuum_error_info ( LVRelState vacrel,
const LVSavedErrInfo saved_vacrel 
)
static

Definition at line 3844 of file vacuumlazy.c.

3846{
3847 vacrel->blkno = saved_vacrel->blkno;
3848 vacrel->offnum = saved_vacrel->offnum;
3849 vacrel->phase = saved_vacrel->phase;
3850}
BlockNumber blkno
Definition: vacuumlazy.c:416
VacErrPhase phase
Definition: vacuumlazy.c:418
OffsetNumber offnum
Definition: vacuumlazy.c:417

References LVRelState::blkno, LVSavedErrInfo::blkno, LVRelState::offnum, LVSavedErrInfo::offnum, LVRelState::phase, and LVSavedErrInfo::phase.

Referenced by lazy_cleanup_one_index(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), and lazy_vacuum_one_index().

◆ should_attempt_truncation()

static bool should_attempt_truncation ( LVRelState vacrel)
static

Definition at line 3185 of file vacuumlazy.c.

3186{
3187 BlockNumber possibly_freeable;
3188
3189 if (!vacrel->do_rel_truncate || VacuumFailsafeActive)
3190 return false;
3191
3192 possibly_freeable = vacrel->rel_pages - vacrel->nonempty_pages;
3193 if (possibly_freeable > 0 &&
3194 (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
3195 possibly_freeable >= vacrel->rel_pages / REL_TRUNCATE_FRACTION))
3196 return true;
3197
3198 return false;
3199}
#define REL_TRUNCATE_MINIMUM
Definition: vacuumlazy.c:168
#define REL_TRUNCATE_FRACTION
Definition: vacuumlazy.c:169

References LVRelState::do_rel_truncate, LVRelState::nonempty_pages, LVRelState::rel_pages, REL_TRUNCATE_FRACTION, REL_TRUNCATE_MINIMUM, and VacuumFailsafeActive.

Referenced by heap_vacuum_rel().
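Put as a rule of thumb: truncation is attempted only when the tail that could be released is non-empty and is at least REL_TRUNCATE_MINIMUM (1000) pages or at least 1/REL_TRUNCATE_FRACTION (one sixteenth) of the table, and never while the failsafe is active. A standalone restatement of the test with a few sample inputs (illustrative; worth_truncating() is a made-up name, and the failsafe check is omitted):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REL_TRUNCATE_MINIMUM 1000
#define REL_TRUNCATE_FRACTION 16

typedef uint32_t BlockNumber;

/* made-up helper restating the threshold test above */
static bool
worth_truncating(BlockNumber rel_pages, BlockNumber nonempty_pages)
{
	BlockNumber possibly_freeable = rel_pages - nonempty_pages;

	return possibly_freeable > 0 &&
		(possibly_freeable >= REL_TRUNCATE_MINIMUM ||
		 possibly_freeable >= rel_pages / REL_TRUNCATE_FRACTION);
}

int
main(void)
{
	printf("%d\n", worth_truncating(10000, 9900));	/* 0: 100 pages, under both limits */
	printf("%d\n", worth_truncating(10000, 9000));	/* 1: 1000 pages hits the minimum */
	printf("%d\n", worth_truncating(800, 700));		/* 1: 100 >= 800/16 = 50 */
	return 0;
}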

◆ update_relstats_all_indexes()

static void update_relstats_all_indexes ( LVRelState vacrel)
static

Definition at line 3726 of file vacuumlazy.c.

3727{
3728 Relation *indrels = vacrel->indrels;
3729 int nindexes = vacrel->nindexes;
3730 IndexBulkDeleteResult **indstats = vacrel->indstats;
3731
3732 Assert(vacrel->do_index_cleanup);
3733
3734 for (int idx = 0; idx < nindexes; idx++)
3735 {
3736 Relation indrel = indrels[idx];
3737 IndexBulkDeleteResult *istat = indstats[idx];
3738
3739 if (istat == NULL || istat->estimated_count)
3740 continue;
3741
3742 /* Update index statistics */
3743 vac_update_relstats(indrel,
3744 istat->num_pages,
3745 istat->num_index_tuples,
3746 0, 0,
3747 false,
3748 InvalidTransactionId,
3749 InvalidMultiXactId,
3750 NULL, NULL, false);
3751 }
3752}
double num_index_tuples
Definition: genam.h:106

References Assert(), LVRelState::do_index_cleanup, IndexBulkDeleteResult::estimated_count, idx(), LVRelState::indrels, LVRelState::indstats, InvalidMultiXactId, InvalidTransactionId, LVRelState::nindexes, IndexBulkDeleteResult::num_index_tuples, IndexBulkDeleteResult::num_pages, and vac_update_relstats().

Referenced by heap_vacuum_rel().

◆ update_vacuum_error_info()

static void update_vacuum_error_info ( LVRelState vacrel,
LVSavedErrInfo saved_vacrel,
int  phase,
BlockNumber  blkno,
OffsetNumber  offnum 
)
static

Definition at line 3825 of file vacuumlazy.c.

3827{
3828 if (saved_vacrel)
3829 {
3830 saved_vacrel->offnum = vacrel->offnum;
3831 saved_vacrel->blkno = vacrel->blkno;
3832 saved_vacrel->phase = vacrel->phase;
3833 }
3834
3835 vacrel->blkno = blkno;
3836 vacrel->offnum = offnum;
3837 vacrel->phase = phase;
3838}

References LVRelState::blkno, LVSavedErrInfo::blkno, LVRelState::offnum, LVSavedErrInfo::offnum, LVRelState::phase, and LVSavedErrInfo::phase.

Referenced by lazy_cleanup_one_index(), lazy_scan_heap(), lazy_truncate_heap(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), and lazy_vacuum_one_index().

◆ vacuum_error_callback()

static void vacuum_error_callback ( void *  arg)
static

Definition at line 3761 of file vacuumlazy.c.

3762{
3763 LVRelState *errinfo = arg;
3764
3765 switch (errinfo->phase)
3766 {
3767 case VACUUM_ERRCB_PHASE_SCAN_HEAP:
3768 if (BlockNumberIsValid(errinfo->blkno))
3769 {
3770 if (OffsetNumberIsValid(errinfo->offnum))
3771 errcontext("while scanning block %u offset %u of relation \"%s.%s\"",
3772 errinfo->blkno, errinfo->offnum, errinfo->relnamespace, errinfo->relname);
3773 else
3774 errcontext("while scanning block %u of relation \"%s.%s\"",
3775 errinfo->blkno, errinfo->relnamespace, errinfo->relname);
3776 }
3777 else
3778 errcontext("while scanning relation \"%s.%s\"",
3779 errinfo->relnamespace, errinfo->relname);
3780 break;
3781
3782 case VACUUM_ERRCB_PHASE_VACUUM_HEAP:
3783 if (BlockNumberIsValid(errinfo->blkno))
3784 {
3785 if (OffsetNumberIsValid(errinfo->offnum))
3786 errcontext("while vacuuming block %u offset %u of relation \"%s.%s\"",
3787 errinfo->blkno, errinfo->offnum, errinfo->relnamespace, errinfo->relname);
3788 else
3789 errcontext("while vacuuming block %u of relation \"%s.%s\"",
3790 errinfo->blkno, errinfo->relnamespace, errinfo->relname);
3791 }
3792 else
3793 errcontext("while vacuuming relation \"%s.%s\"",
3794 errinfo->relnamespace, errinfo->relname);
3795 break;
3796
3797 case VACUUM_ERRCB_PHASE_VACUUM_INDEX:
3798 errcontext("while vacuuming index \"%s\" of relation \"%s.%s\"",
3799 errinfo->indname, errinfo->relnamespace, errinfo->relname);
3800 break;
3801
3802 case VACUUM_ERRCB_PHASE_INDEX_CLEANUP:
3803 errcontext("while cleaning up index \"%s\" of relation \"%s.%s\"",
3804 errinfo->indname, errinfo->relnamespace, errinfo->relname);
3805 break;
3806
3807 case VACUUM_ERRCB_PHASE_TRUNCATE:
3808 if (BlockNumberIsValid(errinfo->blkno))
3809 errcontext("while truncating relation \"%s.%s\" to %u blocks",
3810 errinfo->relnamespace, errinfo->relname, errinfo->blkno);
3811 break;
3812
3813 case VACUUM_ERRCB_PHASE_UNKNOWN:
3814 default:
3815 return; /* do nothing; the errinfo may not be
3816 * initialized */
3817 }
3818}
static bool BlockNumberIsValid(BlockNumber blockNumber)
Definition: block.h:71
#define errcontext
Definition: elog.h:198
#define OffsetNumberIsValid(offsetNumber)
Definition: off.h:39
void * arg

References arg, LVRelState::blkno, BlockNumberIsValid(), errcontext, LVRelState::indname, LVRelState::offnum, OffsetNumberIsValid, LVRelState::phase, LVRelState::relname, LVRelState::relnamespace, VACUUM_ERRCB_PHASE_INDEX_CLEANUP, VACUUM_ERRCB_PHASE_SCAN_HEAP, VACUUM_ERRCB_PHASE_TRUNCATE, VACUUM_ERRCB_PHASE_UNKNOWN, VACUUM_ERRCB_PHASE_VACUUM_HEAP, and VACUUM_ERRCB_PHASE_VACUUM_INDEX.

Referenced by heap_vacuum_rel().

◆ vacuum_reap_lp_read_stream_next()

static BlockNumber vacuum_reap_lp_read_stream_next ( ReadStream stream,
void *  callback_private_data,
void *  per_buffer_data 
)
static

Definition at line 2687 of file vacuumlazy.c.

2690{
2691 TidStoreIter *iter = callback_private_data;
2692 TidStoreIterResult *iter_result;
2693
2694 iter_result = TidStoreIterateNext(iter);
2695 if (iter_result == NULL)
2696 return InvalidBlockNumber;
2697
2698 /*
2699 * Save the TidStoreIterResult for later, so we can extract the offsets.
2700 * It is safe to copy the result, according to TidStoreIterateNext().
2701 */
2702 memcpy(per_buffer_data, iter_result, sizeof(*iter_result));
2703
2704 return iter_result->blkno;
2705}
BlockNumber blkno
Definition: tidstore.h:29
TidStoreIterResult * TidStoreIterateNext(TidStoreIter *iter)
Definition: tidstore.c:493

References TidStoreIterResult::blkno, InvalidBlockNumber, and TidStoreIterateNext().

Referenced by lazy_vacuum_heap_rel().
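This callback illustrates the read-stream contract used by lazy_vacuum_heap_rel(): the stream repeatedly asks for the next block number, InvalidBlockNumber signals exhaustion, and whatever is copied into per_buffer_data travels with the corresponding buffer so the consumer can recover it from read_stream_next_buffer(). A self-contained sketch of that producer/consumer shape, with plain arrays standing in for the TID store and the read stream (illustrative only, not the real read_stream API):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t BlockNumber;
#define InvalidBlockNumber ((BlockNumber) 0xFFFFFFFF)

/* stand-in for the TID-store iterator: blocks that still hold dead items */
static const BlockNumber dead_blocks[] = {4, 9, 17};
static int	pos = 0;

/* callback: return the next block, stashing a payload for that buffer */
static BlockNumber
next_block(BlockNumber *per_buffer_data)
{
	if (pos >= (int) (sizeof(dead_blocks) / sizeof(dead_blocks[0])))
		return InvalidBlockNumber;	/* iterator exhausted */

	*per_buffer_data = dead_blocks[pos];	/* travels with the buffer */
	return dead_blocks[pos++];
}

int
main(void)
{
	BlockNumber payload;
	BlockNumber blkno;

	/* consumer loop, shaped like the while (true) loop in the second heap pass */
	while ((blkno = next_block(&payload)) != InvalidBlockNumber)
		printf("vacuuming block %u (payload %u)\n", blkno, payload);
	return 0;
}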