Commit 21d304d

Final pgindent + perltidy run for v10.
1 parent 5b6289c commit 21d304d

46 files changed: 273 additions (+), 273 deletions (-)

src/backend/access/hash/hashpage.c

Lines changed: 4 additions & 4 deletions
@@ -1320,10 +1320,10 @@ _hash_splitbucket(Relation rel,
 	/*
 	 * If possible, clean up the old bucket. We might not be able to do this
 	 * if someone else has a pin on it, but if not then we can go ahead. This
-	 * isn't absolutely necessary, but it reduces bloat; if we don't do it now,
-	 * VACUUM will do it eventually, but maybe not until new overflow pages
-	 * have been allocated. Note that there's no need to clean up the new
-	 * bucket.
+	 * isn't absolutely necessary, but it reduces bloat; if we don't do it
+	 * now, VACUUM will do it eventually, but maybe not until new overflow
+	 * pages have been allocated. Note that there's no need to clean up the
+	 * new bucket.
 	 */
 	if (IsBufferCleanupOK(bucket_obuf))
 	{

src/backend/access/transam/slru.c

Lines changed: 1 addition & 1 deletion
@@ -233,7 +233,7 @@ SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns,
 		}
 
 		/* Should fit to estimated shmem size */
-		Assert(ptr - (char *) shared <= SimpleLruShmemSize(nslots, nlsns));
+		Assert(ptr - (char *) shared <= SimpleLruShmemSize(nslots, nlsns));
 	}
 	else
 		Assert(found);

src/backend/catalog/namespace.c

Lines changed: 7 additions & 7 deletions
@@ -3802,14 +3802,14 @@ InitTempTableNamespace(void)
 						get_database_name(MyDatabaseId))));
 
 	/*
-	 * Do not allow a Hot Standby session to make temp tables. Aside
-	 * from problems with modifying the system catalogs, there is a naming
+	 * Do not allow a Hot Standby session to make temp tables. Aside from
+	 * problems with modifying the system catalogs, there is a naming
 	 * conflict: pg_temp_N belongs to the session with BackendId N on the
-	 * master, not to a hot standby session with the same BackendId. We should not
-	 * be able to get here anyway due to XactReadOnly checks, but let's just
-	 * make real sure. Note that this also backstops various operations that
-	 * allow XactReadOnly transactions to modify temp tables; they'd need
-	 * RecoveryInProgress checks if not for this.
+	 * master, not to a hot standby session with the same BackendId. We
+	 * should not be able to get here anyway due to XactReadOnly checks, but
+	 * let's just make real sure. Note that this also backstops various
+	 * operations that allow XactReadOnly transactions to modify temp tables;
+	 * they'd need RecoveryInProgress checks if not for this.
 	 */
 	if (RecoveryInProgress())
 		ereport(ERROR,

src/backend/catalog/partition.c

Lines changed: 3 additions & 3 deletions
@@ -728,9 +728,9 @@ check_new_partition_bound(char *relname, Relation parent,
 					 errmsg("empty range bound specified for partition \"%s\"",
 							relname),
 					 errdetail("Specified lower bound %s is greater than or equal to upper bound %s.",
-							   get_range_partbound_string(spec->lowerdatums),
-							   get_range_partbound_string(spec->upperdatums)),
-					 parser_errposition(pstate, spec->location)));
+							   get_range_partbound_string(spec->lowerdatums),
+							   get_range_partbound_string(spec->upperdatums)),
+					 parser_errposition(pstate, spec->location)));
 	}
 
 	if (partdesc->nparts > 0)

src/backend/commands/copy.c

Lines changed: 2 additions & 1 deletion
@@ -1454,7 +1454,7 @@ BeginCopy(ParseState *pstate,
 	 */
 	if (cstate->transition_capture != NULL)
 	{
-		int i;
+		int			i;
 
 		cstate->transition_tupconv_maps = (TupleConversionMap **)
 			palloc0(sizeof(TupleConversionMap *) *
@@ -2651,6 +2651,7 @@ CopyFrom(CopyState cstate)
 					cstate->transition_capture->tcs_map = NULL;
 				}
 			}
+
 			/*
 			 * We might need to convert from the parent rowtype to the
 			 * partition rowtype.

src/backend/commands/subscriptioncmds.c

Lines changed: 2 additions & 1 deletion
@@ -919,9 +919,10 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
 	LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
 	subworkers = logicalrep_workers_find(subid, false);
 	LWLockRelease(LogicalRepWorkerLock);
-	foreach (lc, subworkers)
+	foreach(lc, subworkers)
 	{
 		LogicalRepWorker *w = (LogicalRepWorker *) lfirst(lc);
+
 		if (slotname)
 			logicalrep_worker_stop(w->subid, w->relid);
 		else

src/backend/commands/tablecmds.c

Lines changed: 2 additions & 2 deletions
@@ -13509,8 +13509,8 @@ ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd)
 	 * having to construct this list again, so we request the strongest lock
 	 * on all partitions. We need the strongest lock, because we may decide
 	 * to scan them if we find out that the table being attached (or its leaf
-	 * partitions) may contain rows that violate the partition constraint.
-	 * If the table has a constraint that would prevent such rows, which by
+	 * partitions) may contain rows that violate the partition constraint. If
+	 * the table has a constraint that would prevent such rows, which by
 	 * definition is present in all the partitions, we need not scan the
 	 * table, nor its partitions. But we cannot risk a deadlock by taking a
 	 * weaker lock now and the stronger one only when needed.

src/backend/commands/trigger.c

Lines changed: 7 additions & 7 deletions
@@ -2071,11 +2071,11 @@ FindTriggerIncompatibleWithInheritance(TriggerDesc *trigdesc)
 {
 	if (trigdesc != NULL)
 	{
-		int i;
+		int			i;
 
 		for (i = 0; i < trigdesc->numtriggers; ++i)
 		{
-			Trigger *trigger = &trigdesc->triggers[i];
+			Trigger    *trigger = &trigdesc->triggers[i];
 
 			if (trigger->tgoldtable != NULL || trigger->tgnewtable != NULL)
 				return trigger->tgname;
@@ -5253,12 +5253,12 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
 	 */
 	if (row_trigger && transition_capture != NULL)
 	{
-		HeapTuple original_insert_tuple = transition_capture->tcs_original_insert_tuple;
+		HeapTuple	original_insert_tuple = transition_capture->tcs_original_insert_tuple;
 		TupleConversionMap *map = transition_capture->tcs_map;
-		bool delete_old_table = transition_capture->tcs_delete_old_table;
-		bool update_old_table = transition_capture->tcs_update_old_table;
-		bool update_new_table = transition_capture->tcs_update_new_table;
-		bool insert_new_table = transition_capture->tcs_insert_new_table;;
+		bool		delete_old_table = transition_capture->tcs_delete_old_table;
+		bool		update_old_table = transition_capture->tcs_update_old_table;
+		bool		update_new_table = transition_capture->tcs_update_new_table;
+		bool		insert_new_table = transition_capture->tcs_insert_new_table;;
 
 		if ((event == TRIGGER_EVENT_DELETE && delete_old_table) ||
 			(event == TRIGGER_EVENT_UPDATE && update_old_table))

src/backend/commands/vacuumlazy.c

Lines changed: 5 additions & 5 deletions
@@ -529,11 +529,11 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
 	 * safely set for relfrozenxid or relminmxid.
 	 *
 	 * Before entering the main loop, establish the invariant that
-	 * next_unskippable_block is the next block number >= blkno that we
-	 * can't skip based on the visibility map, either all-visible for a
-	 * regular scan or all-frozen for an aggressive scan. We set it to
-	 * nblocks if there's no such block. We also set up the skipping_blocks
-	 * flag correctly at this stage.
+	 * next_unskippable_block is the next block number >= blkno that we can't
+	 * skip based on the visibility map, either all-visible for a regular scan
+	 * or all-frozen for an aggressive scan. We set it to nblocks if there's
+	 * no such block. We also set up the skipping_blocks flag correctly at
+	 * this stage.
 	 *
 	 * Note: The value returned by visibilitymap_get_status could be slightly
 	 * out-of-date, since we make this test before reading the corresponding

src/backend/executor/execProcnode.c

Lines changed: 3 additions & 3 deletions
@@ -411,9 +411,9 @@ ExecProcNodeFirst(PlanState *node)
 	/*
 	 * Perform stack depth check during the first execution of the node. We
 	 * only do so the first time round because it turns out to not be cheap on
-	 * some common architectures (eg. x86). This relies on the assumption that
-	 * ExecProcNode calls for a given plan node will always be made at roughly
-	 * the same stack depth.
+	 * some common architectures (eg. x86). This relies on the assumption
+	 * that ExecProcNode calls for a given plan node will always be made at
+	 * roughly the same stack depth.
 	 */
 	check_stack_depth();
0 commit comments
