qr/shell command for backup is not configured/,
'fails if basebackup_to_shell.command is not set');
-# Configure basebackup_to_shell.command and reload the configuation file.
+# Configure basebackup_to_shell.command and reload the configuration file.
my $backup_path = PostgreSQL::Test::Utils::tempdir;
my $escaped_backup_path = $backup_path;
$escaped_backup_path =~ s{\\}{\\\\}g
}
/*
- * Intialize WAL reader and identify first valid LSN.
+ * Initialize WAL reader and identify first valid LSN.
*/
static XLogReaderState *
InitXLogReaderState(XLogRecPtr lsn)
/*
* Serialize the transaction snapshot if the transaction
- * isolation-level uses a transaction snapshot.
+ * isolation level uses a transaction snapshot.
*/
if (IsolationUsesXactSnapshot())
{
{
/*
* We have reached the end of base backup, as indicated by pg_control. The
- * data on disk is now consistent (unless minRecovery point is further
+ * data on disk is now consistent (unless minRecoveryPoint is further
* ahead, which can happen if we crashed during previous recovery). Reset
* backupStartPoint and backupEndPoint, and update minRecoveryPoint to
* make sure we don't allow starting up at an earlier point even if
* *lsn, and the I/O will be considered to have completed once that LSN is
* replayed.
*
- * Returns LRQ_NO_IO if we examined the next block reference and found that it
- * was already in the buffer pool, or we decided for various reasons not to
- * prefetch.
+ * Returns LRQ_NEXT_NO_IO if we examined the next block reference and found
+ * that it was already in the buffer pool, or we decided for various reasons
+ * not to prefetch.
*/
static LsnReadQueueNextStatus
XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
* or standby mode, depending on configuration options and the state of
* the control file and possible backup label file. PerformWalRecovery()
* performs the actual WAL replay, calling the rmgr-specific redo routines.
- * EndWalRecovery() performs end-of-recovery checks and cleanup actions,
+ * FinishWalRecovery() performs end-of-recovery checks and cleanup actions,
* and prepares information needed to initialize the WAL for writes. In
* addition to these three main functions, there are a bunch of functions
* for interrogating recovery state and controlling the recovery process.
* disk does after initializing other subsystems, but before calling
* PerformWalRecovery().
*
- * This initializes some global variables like ArchiveModeRequested, and
+ * This initializes some global variables like ArchiveRecoveryRequested,
* StandbyModeRequested and InRecovery.
*/
void
*
* This does not close the 'xlogreader' yet, because in some cases the caller
* still wants to re-read the last checkpoint record by calling
- * ReadCheckPointRecord().
+ * ReadCheckpointRecord().
*
* Returns the position of the last valid or applied record, after which new
* WAL should be appended, information about why recovery was ended, and some
- * other things. See the WalRecoveryResult struct for details.
+ * other things. See the EndOfWalRecoveryInfo struct for details.
*/
EndOfWalRecoveryInfo *
FinishWalRecovery(void)
* basebackup_copy.c
* send basebackup archives using COPY OUT
*
- * We send a result set with information about the tabelspaces to be included
+ * We send a result set with information about the tablespaces to be included
* in the backup before starting COPY OUT. Then, we start a single COPY OUT
- * operation and transmits all the archives and the manifest if present during
+ * operation and transmit all the archives and the manifest if present during
* the course of that single COPY OUT. Each CopyData message begins with a
* and a Bitmapset with them; verify that each attribute is appropriate
* to have in a publication column list (no system or generated attributes,
* no duplicates). Additional checks with replica identity are done later;
- * see check_publication_columns.
+ * see pub_collist_contains_invalid_column.
*
* Note that the attribute numbers are *not* offset by
* FirstLowInvalidHeapAttributeNumber; system columns are forbidden so this
*
* In a parallel vacuum, we perform both index bulk deletion and index cleanup
* with parallel worker processes. Individual indexes are processed by one
- * vacuum process. ParalleVacuumState contains shared information as well as
+ * vacuum process. ParallelVacuumState contains shared information as well as
* the memory space for storing dead items allocated in the DSM segment. We
* launch parallel worker processes at the start of parallel index
* bulk-deletion and index cleanup and once all indexes are processed, the
numTransArgs = pertrans->numTransInputs + 1;
/*
- * Set up infrastructure for calling the transfn. Note that invtrans is
+ * Set up infrastructure for calling the transfn. Note that invtransfn is
* not needed here.
*/
build_aggregate_transfn_expr(inputTypes,
*
* Returns RETURNING result if any, otherwise NULL.
* *inserted_tuple is the tuple that's effectively inserted;
- * *inserted_destrel is the relation where it was inserted.
+ * *insert_destrel is the relation where it was inserted.
* These are only set on success.
*
* This may change the currently active tuple conversion map in
/*
* Pop stack entries, stopping if we find one marked internal_xact (that
- * one belongs to the caller of SPI_commit or SPI_abort).
+ * one belongs to the caller of SPI_commit or SPI_rollback).
*/
while (_SPI_connected >= 0)
{
/*
* check_memoizable
* If the restrictinfo's clause is suitable to be used for a Memoize node,
- * set the lefthasheqoperator and righthasheqoperator to the hash equality
+ * set the left_hasheqoperator and right_hasheqoperator to the hash equality
* operator that will be needed during caching.
*/
static void
* Does the subscription have tables?
*
* If there were not-READY relations found then we know it does. But
- * if table_state_not_ready was empty we still need to check again to
+ * if table_states_not_ready was empty we still need to check again to
* see if there are 0 tables.
*/
has_subrels = (table_states_not_ready != NIL) ||
GlobalVisHorizonKindForRel(Relation rel)
{
/*
- * Other relkkinds currently don't contain xids, nor always the necessary
+ * Other relkinds currently don't contain xids, nor always the necessary
* logical decoding markers.
*/
Assert(!rel ||
/*
* GenerationBlockIsEmpty
- * Returns true iif 'block' contains no chunks
+ * Returns true iff 'block' contains no chunks
*/
static inline bool
GenerationBlockIsEmpty(GenerationBlock *block)
* abbreviations of text or multi-key sorts. There could be! Is it worth it?
*/
-/* Used if first key's comparator is ssup_datum_unsigned_compare */
+/* Used if first key's comparator is ssup_datum_unsigned_cmp */
static pg_attribute_always_inline int
qsort_tuple_unsigned_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state)
{
}
#if SIZEOF_DATUM >= 8
-/* Used if first key's comparator is ssup_datum_signed_compare */
+/* Used if first key's comparator is ssup_datum_signed_cmp */
static pg_attribute_always_inline int
qsort_tuple_signed_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state)
{
}
#endif
-/* Used if first key's comparator is ssup_datum_int32_compare */
+/* Used if first key's comparator is ssup_datum_int32_cmp */
static pg_attribute_always_inline int
qsort_tuple_int32_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state)
{
}
/*
- * HaveRegisteredOrActiveSnapshots
+ * HaveRegisteredOrActiveSnapshot
* Is there any registered or active snapshot?
*
* NB: Unless pushed or active, the cached catalog snapshot will not cause
* calling gzclose.
*
* It makes no difference whether we opened the file or the caller did it,
- * because libz provides no way of avoiding a close on the underling file
+ * because libz provides no way of avoiding a close on the underlying file
* handle. Notice, however, that bbstreamer_gzip_writer_new() uses dup() to
* work around this issue, so that the behavior from the caller's viewpoint
* is the same as for bbstreamer_plain_writer.
{
if (dobj->objType == DO_TABLE)
{
- /* For a column initpriv, set the table's ACL flags */
+ /* For a column initprivs, set the table's ACL flags */
dobj->components |= DUMP_COMPONENT_ACL;
((TableInfo *) dobj)->hascolumnACLs = true;
}
} PgStatShared_HashEntry;
/*
- * Common header struct for PgStatShm_Stat*Entry.
+ * Common header struct for PgStatShared_*.
*/
typedef struct PgStatShared_Common
{
/*
* Datum comparison functions that we have specialized sort routines for.
- * Datatypes that install these as their comparator or abbrevated comparator
+ * Datatypes that install these as their comparator or abbreviated comparator
* are eligible for faster sorting.
*/
extern int ssup_datum_unsigned_cmp(Datum x, Datum y, SortSupport ssup);
/*
* Print the current time, with microseconds, into a caller-supplied
* buffer.
- * Cribbed from setup_formatted_log_time, but much simpler.
+ * Cribbed from get_formatted_log_time, but much simpler.
*/
static void
pqTraceFormatTimestamp(char *timestr, size_t ts_len)
/*
* Setup error traceback support for ereport().
* plpython_inline_error_callback doesn't currently need exec_ctx, but
- * for consistency with plpython_call_handler we do it the same way.
+ * for consistency with plpython3_call_handler we do it the same way.
*/
plerrcontext.callback = plpython_inline_error_callback;
plerrcontext.arg = exec_ctx;
# now switch the order of publications in the list, try again, the result
-# should be the same (no dependence on order of pulications)
+# should be the same (no dependence on order of publications)
$node_subscriber2->safe_psql('postgres',
"ALTER SUBSCRIPTION sub2 SET PUBLICATION pub_all, pub_lower_level");
is($result, qq(1), 'transaction is prepared on subscriber');
# Insert a different record (now we are outside of the 2PC transaction)
- # Note: the 2PC transaction still holds row locks so make sure this insert is for a separare primary key
+ # Note: the 2PC transaction still holds row locks so make sure this insert is for a separate primary key
$node_publisher->safe_psql('postgres',
"INSERT INTO test_tab VALUES (99999, 'foobar')");
# Test skipping the transaction. This function must be called after the caller
# has inserted data that conflicts with the subscriber. The finish LSN of the
# error transaction that is used to specify to ALTER SUBSCRIPTION ... SKIP is
-# fetched from the server logs. After executing ALTER SUBSCRITPION ... SKIP, we
+# fetched from the server logs. After executing ALTER SUBSCRIPTION ... SKIP, we
# check if logical replication can continue working by inserting $nonconflict_data
# on the publisher.
sub test_skip_lsn
# Perl script that tries to add PGDLLIMPORT markings to PostgreSQL
# header files.
#
-# This relies on a few idiosyncracies of the PostgreSQL coding style,
+# This relies on a few idiosyncrasies of the PostgreSQL coding style,
# such as the fact that we always use "extern" in function
# declarations, and that we don't use // comments. It's not very
# smart and may not catch all cases.