*/
#include "postgres.h"
+#include "access/clog.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/hio.h"
* with queries.
*/
void
-HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
- TransactionId *latestRemovedXid)
+HeapTupleHeaderAdvanceLatestRemoved(HeapTupleHeader tuple,
+ XLogRecPtr *latestRemovedCommitLSN)
{
TransactionId xmin = HeapTupleHeaderGetXmin(tuple);
TransactionId xmax = HeapTupleHeaderGetUpdateXid(tuple);
if (tuple->t_infomask & HEAP_MOVED)
{
- if (TransactionIdPrecedes(*latestRemovedXid, xvac))
- *latestRemovedXid = xvac;
+ XLogRecPtr commitlsn = TransactionIdGetCommitLSN(xvac);
+
+ if (COMMITLSN_IS_NORMAL(commitlsn) &&
+ commitlsn > *latestRemovedCommitLSN)
+ *latestRemovedCommitLSN = commitlsn;
}
/*
* This needs to work on both master and standby, where it is used to
* assess btree delete records.
*/
- if (HeapTupleHeaderXminCommitted(tuple) ||
- (!HeapTupleHeaderXminInvalid(tuple) && TransactionIdDidCommit(xmin)))
+ if (!HeapTupleHeaderXminInvalid(tuple) && xmax != xmin)
{
- if (xmax != xmin &&
- TransactionIdFollows(xmax, *latestRemovedXid))
- *latestRemovedXid = xmax;
+ /* xmax is the deleter; its commit is what made the tuple removable */
+ XLogRecPtr commitlsn = TransactionIdGetCommitLSN(xmax);
+
+ if (COMMITLSN_IS_NORMAL(commitlsn) &&
+ commitlsn > *latestRemovedCommitLSN)
+ *latestRemovedCommitLSN = commitlsn;
}
- /* *latestRemovedXid may still be invalid at end */
+ /* *latestRemovedCommitLSN may still be invalid at end */
}
/*
* see comments for vacuum_log_cleanup_info().
*/
XLogRecPtr
-log_heap_cleanup_info(RelFileNode rnode, TransactionId latestRemovedXid)
+log_heap_cleanup_info(RelFileNode rnode, XLogRecPtr latestRemovedCommitLSN)
{
xl_heap_cleanup_info xlrec;
XLogRecPtr recptr;
XLogRecData rdata;
xlrec.node = rnode;
- xlrec.latestRemovedXid = latestRemovedXid;
+ xlrec.latestRemovedCommitLSN = latestRemovedCommitLSN;
rdata.data = (char *) &xlrec;
rdata.len = SizeOfHeapCleanupInfo;
OffsetNumber *redirected, int nredirected,
OffsetNumber *nowdead, int ndead,
OffsetNumber *nowunused, int nunused,
- TransactionId latestRemovedXid)
+ XLogRecPtr latestRemovedCommitLSN)
{
xl_heap_clean xlrec;
uint8 info;
xlrec.node = reln->rd_node;
xlrec.block = BufferGetBlockNumber(buffer);
- xlrec.latestRemovedXid = latestRemovedXid;
+ xlrec.latestRemovedCommitLSN = latestRemovedCommitLSN;
xlrec.nredirected = nredirected;
xlrec.ndead = ndead;
xl_heap_cleanup_info *xlrec = (xl_heap_cleanup_info *) XLogRecGetData(record);
if (InHotStandby)
- ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, xlrec->node);
+ ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedCommitLSN, xlrec->node);
/*
* Actual operation is a no-op. Record type exists to provide a means for
* conflict on the records that cause MVCC failures for user queries. If
- * latestRemovedXid is invalid, skip conflict processing.
+ * latestRemovedCommitLSN is invalid, skip conflict processing.
*/
- if (InHotStandby && TransactionIdIsValid(xlrec->latestRemovedXid))
- ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid,
+ if (InHotStandby && xlrec->latestRemovedCommitLSN != InvalidXLogRecPtr)
+ ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedCommitLSN,
xlrec->node);
/*
typedef struct
{
TransactionId new_prune_xid; /* new prune hint value for page */
- TransactionId latestRemovedXid; /* latest xid to be removed by this
- * prune */
+ XLogRecPtr latestRemovedCommitLSN; /* latest commit LSN of a tuple
+ * removed by this prune */
int nredirected; /* numbers of entries in arrays below */
int ndead;
int nunused;
*/
if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
{
- TransactionId ignore = InvalidTransactionId; /* return value not
+ XLogRecPtr ignore = InvalidXLogRecPtr; /* return value not
* needed */
/* OK to prune */
*/
int
heap_page_prune(Relation relation, Buffer buffer, XLogRecPtr OldestSnapshot,
- bool report_stats, TransactionId *latestRemovedXid)
+ bool report_stats, XLogRecPtr *latestRemovedCommitLSN)
{
int ndeleted = 0;
Page page = BufferGetPage(buffer);
* initialize the rest of our working state.
*/
prstate.new_prune_xid = InvalidTransactionId;
- prstate.latestRemovedXid = *latestRemovedXid;
+ prstate.latestRemovedCommitLSN = *latestRemovedCommitLSN;
prstate.nredirected = prstate.ndead = prstate.nunused = 0;
memset(prstate.marked, 0, sizeof(prstate.marked));
prstate.redirected, prstate.nredirected,
prstate.nowdead, prstate.ndead,
prstate.nowunused, prstate.nunused,
- prstate.latestRemovedXid);
+ prstate.latestRemovedCommitLSN);
PageSetLSN(BufferGetPage(buffer), recptr);
}
if (report_stats && ndeleted > prstate.ndead)
pgstat_update_heap_dead_tuples(relation, ndeleted - prstate.ndead);
- *latestRemovedXid = prstate.latestRemovedXid;
+ *latestRemovedCommitLSN = prstate.latestRemovedCommitLSN;
/*
* XXX Should we update the FSM information of this page ?
== HEAPTUPLE_DEAD && !HeapTupleHeaderIsHotUpdated(htup))
{
heap_prune_record_unused(prstate, rootoffnum);
- HeapTupleHeaderAdvanceLatestRemovedXid(htup,
- &prstate->latestRemovedXid);
+ HeapTupleHeaderAdvanceLatestRemoved(htup,
+ &prstate->latestRemovedCommitLSN);
ndeleted++;
}
if (tupdead)
{
latestdead = offnum;
- HeapTupleHeaderAdvanceLatestRemovedXid(htup,
- &prstate->latestRemovedXid);
+ HeapTupleHeaderAdvanceLatestRemoved(htup,
+ &prstate->latestRemovedCommitLSN);
}
else if (!recent_dead)
break;
}
/*
- * Get the latestRemovedXid from the heap pages pointed at by the index
+ * Get the latestRemovedCommitLSN from the heap pages pointed at by the index
- * tuples being deleted. This puts the work for calculating latestRemovedXid
+ * tuples being deleted. This puts the work for calculating latestRemovedCommitLSN
* into the recovery path rather than the primary path.
*
* XXX optimise later with something like XLogPrefetchBuffer()
*/
-static TransactionId
+static XLogRecPtr
-btree_xlog_delete_get_latestRemovedXid(xl_btree_delete *xlrec)
+btree_xlog_delete_get_latestRemovedCommitLSN(xl_btree_delete *xlrec)
{
OffsetNumber *unused;
Buffer ibuffer,
HeapTupleHeader htuphdr;
BlockNumber hblkno;
OffsetNumber hoffnum;
- TransactionId latestRemovedXid = InvalidTransactionId;
+ XLogRecPtr latestRemovedCommitLSN = InvalidXLogRecPtr;
int i;
/*
* coding will result in throwing a conflict anyway.
*/
if (CountDBBackends(InvalidOid) == 0)
- return latestRemovedXid;
+ return InvalidXLogRecPtr;
/*
* In what follows, we have to examine the previous state of the index
* won't have let in any user sessions before we reach consistency.
*/
if (!reachedConsistency)
- elog(PANIC, "btree_xlog_delete_get_latestRemovedXid: cannot operate with inconsistent data");
+ elog(PANIC, "btree_xlog_delete_get_latestRemovedCommitLSN: cannot operate with inconsistent data");
/*
* Get index page. If the DB is consistent, this should not fail, nor
* should any of the heap page fetches below. If one does, we return
- * InvalidTransactionId to cancel all HS transactions. That's probably
+ * InvalidXLogRecPtr to cancel all HS transactions. That's probably
* overkill, but it's safe, and certainly better than panicking here.
*/
ibuffer = XLogReadBuffer(xlrec->node, xlrec->block, false);
if (!BufferIsValid(ibuffer))
- return InvalidTransactionId;
+ return InvalidXLogRecPtr;
ipage = (Page) BufferGetPage(ibuffer);
/*
if (!BufferIsValid(hbuffer))
{
UnlockReleaseBuffer(ibuffer);
- return InvalidTransactionId;
+ return InvalidXLogRecPtr;
}
hpage = (Page) BufferGetPage(hbuffer);
{
htuphdr = (HeapTupleHeader) PageGetItem(hpage, hitemid);
- HeapTupleHeaderAdvanceLatestRemovedXid(htuphdr, &latestRemovedXid);
+ HeapTupleHeaderAdvanceLatestRemoved(htuphdr, &latestRemovedCommitLSN);
}
else if (ItemIdIsDead(hitemid))
{
/*
* If all heap tuples were LP_DEAD then we will be returning
- * InvalidTransactionId here, which avoids conflicts. This matches
+ * InvalidXLogRecPtr here, which avoids conflicts. This matches
* existing logic which assumes that LP_DEAD tuples must already be older
- * than the latestRemovedXid on the cleanup record that set them as
+ * than the latestRemovedCommitLSN on the cleanup record that set them as
* LP_DEAD, hence must already have generated a conflict.
*/
- return latestRemovedXid;
+ return latestRemovedCommitLSN;
}
static void
*/
if (InHotStandby)
{
- TransactionId latestRemovedXid = btree_xlog_delete_get_latestRemovedXid(xlrec);
+ XLogRecPtr latestRemovedCommitLSN = btree_xlog_delete_get_latestRemovedCommitLSN(xlrec);
- ResolveRecoveryConflictWithSnapshot(latestRemovedXid, xlrec->node);
+ ResolveRecoveryConflictWithSnapshot(latestRemovedCommitLSN, xlrec->node);
}
/* If we have a full-page image, restore it and we're done */
{
xl_heap_clean *xlrec = (xl_heap_clean *) rec;
- appendStringInfo(buf, "clean: rel %u/%u/%u; blk %u remxid %u",
+ appendStringInfo(buf, "clean: rel %u/%u/%u; blk %u remlsn %X/%X",
xlrec->node.spcNode, xlrec->node.dbNode,
xlrec->node.relNode, xlrec->block,
- xlrec->latestRemovedXid);
+ (uint32) (xlrec->latestRemovedCommitLSN >> 32),
+ (uint32) xlrec->latestRemovedCommitLSN);
}
else if (info == XLOG_HEAP2_FREEZE_PAGE)
{
{
xl_heap_cleanup_info *xlrec = (xl_heap_cleanup_info *) rec;
- appendStringInfo(buf, "cleanup info: remxid %u",
- xlrec->latestRemovedXid);
+ appendStringInfo(buf, "cleanup info: remlsn %X/%X",
+ (uint32) (xlrec->latestRemovedCommitLSN >> 32),
+ (uint32) xlrec->latestRemovedCommitLSN);
}
else if (info == XLOG_HEAP2_VISIBLE)
{
static void
standby_desc_running_xacts(StringInfo buf, xl_running_xacts *xlrec)
{
- appendStringInfo(buf, " nextXid %u latestCompletedXid %u oldestRunningXid %u",
+ appendStringInfo(buf, " nextXid %u oldestRunningXid %u",
xlrec->nextXid,
- xlrec->latestCompletedXid,
xlrec->oldestRunningXid);
}
What we actually enforce is strict serialization of commits and rollbacks
with snapshot-taking. We use the LSNs generated by Write-Ahead-Logging as
-a convenient monotonically-increasing counter, to serialize commits with
+a convenient monotonically increasing counter, to serialize commits with
snapshots. Each commit is naturally assigned an LSN; it's the LSN of the
commit WAL record. Snapshots are also represented by an LSN; all commits
with a commit record's LSN <= the snapshot's LSN are considered as visible
-to the snapshot. Therefore acquiring a snapshot is a matter of reading the
-current WAL insert location.
+to the snapshot. Acquiring a snapshot is a matter of reading the current
+WAL insert location.
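+
+As an illustration, the core of the check is conceptually just this (a
+simplified sketch; the real test must also handle hint bits, our own XID,
+and the xmin/xmax bounds described below, and the function and field names
+here are only illustrative):
+
+	static bool
+	XidVisibleInSnapshot(TransactionId xid, Snapshot snapshot)
+	{
+		XLogRecPtr	commitlsn = TransactionIdGetCommitLSN(xid);
+
+		/* visible iff the xact committed at or before the snapshot's LSN */
+		return COMMITLSN_IS_NORMAL(commitlsn) &&
+			commitlsn <= snapshot->snapshotlsn;
+	}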
That means that we need to be able to look up the commit LSN of each
transaction, by XID. For that purpose, we store the commit LSN of each
happen rarely.
So, a snapshot is simply an LSN, such that all transactions that committed
-before that LSN are visible, and everything later is still considered
-as in-progress. However, to avoid consulting the clog every time the
-visibility of a tuple is checked, we also record a lower and upper bound of
-the XIDs considered visible by the snapshot, in SnapshotData. When a snapshot
-is taken, xmin is set to the current nextXid value; any transaction that
-begins after the snapshot is surely still running. The xmin is tracked
-lazily in shared memory, by AdvanceGlobalXmin().
+before that LSN are visible, and everything later is still considered as
+in-progress. However, to avoid consulting the clog every time the visibility
+of a tuple is checked, we also record a lower and upper bound of the XIDs
+considered visible by the snapshot, in SnapshotData. When a snapshot is
+taken, xmax is set to the current nextXid value; any transaction that begins
+after the snapshot is surely still running. The xmin is tracked lazily in
+shared memory, by AdvanceGlobalXmin().
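+
+With those bounds, most visibility checks can short-circuit the clog lookup
+entirely; a sketch of the fast path, reusing the illustrative
+XidVisibleInSnapshot() from above:
+
+	if (TransactionIdPrecedes(xid, snapshot->xmin))
+		return TransactionIdDidCommit(xid);	/* finished before the snapshot */
+	else if (TransactionIdFollowsOrEquals(xid, snapshot->xmax))
+		return false;		/* began after the snapshot; still in progress */
+	else
+		return XidVisibleInSnapshot(xid, snapshot);	/* compare commit LSNs */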
We allow GetNewTransactionId to store the XID into MyPgXact->xid (or the
subxid array) without taking ProcArrayLock. This was once necessary to
int max_dead_tuples; /* # slots allocated in array */
ItemPointer dead_tuples; /* array of ItemPointerData */
int num_index_scans;
- TransactionId latestRemovedXid;
+ XLogRecPtr latestRemovedCommitLSN;
bool lock_waiter_detected;
} LVRelStats;
/*
* No need to write the record at all unless it contains a valid value
*/
- if (TransactionIdIsValid(vacrelstats->latestRemovedXid))
- (void) log_heap_cleanup_info(rel->rd_node, vacrelstats->latestRemovedXid);
+ if (vacrelstats->latestRemovedCommitLSN != InvalidXLogRecPtr)
+ (void) log_heap_cleanup_info(rel->rd_node, vacrelstats->latestRemovedCommitLSN);
}
/*
vacrelstats->rel_pages = nblocks;
vacrelstats->scanned_pages = 0;
vacrelstats->nonempty_pages = 0;
- vacrelstats->latestRemovedXid = InvalidTransactionId;
+ vacrelstats->latestRemovedCommitLSN = InvalidXLogRecPtr;
lazy_space_alloc(vacrelstats, nblocks);
frozen = palloc(sizeof(xl_heap_freeze_tuple) * MaxHeapTuplesPerPage);
* We count tuples removed by the pruning step as removed by VACUUM.
*/
tups_vacuumed += heap_page_prune(onerel, buf, OldestSnapshot, false,
- &vacrelstats->latestRemovedXid);
+ &vacrelstats->latestRemovedCommitLSN);
/*
* Now scan the page to collect vacuumable items and check for tuples
if (tupgone)
{
lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
- HeapTupleHeaderAdvanceLatestRemovedXid(tuple.t_data,
- &vacrelstats->latestRemovedXid);
+ HeapTupleHeaderAdvanceLatestRemoved(tuple.t_data,
+ &vacrelstats->latestRemovedCommitLSN);
tups_vacuumed += 1;
has_dead_tuples = true;
}
recptr = log_heap_clean(onerel, buffer,
NULL, 0, NULL, 0,
unused, uncnt,
- vacrelstats->latestRemovedXid);
+ vacrelstats->latestRemovedCommitLSN);
PageSetLSN(page, recptr);
}
{
PGXACT *pgxact = &allPgXact[proc->pgprocno];
- /*
- * We don't need to lock, because we're only changing our own struct.
- * FIXME: Do we need to enforce some specific order for these stores
- * anyway? Resetting snapshotlsn is not atomic, for example..
- */
+ /* A shared lock is enough to modify our own fields. */
+ LWLockAcquire(ProcArrayLock, LW_SHARED);
pgxact->xid = InvalidTransactionId;
proc->lxid = InvalidLocalTransactionId;
pgxact->snapshotlsn = InvalidXLogRecPtr;
pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
pgxact->delayChkpt = false; /* be sure this is cleared in abort */
proc->recoveryConflictPending = false;
+ LWLockRelease(ProcArrayLock);
}
{
PGXACT *pgxact = &allPgXact[proc->pgprocno];
- /*
- * We can skip locking ProcArrayLock here, because this action does not
- * actually change anyone's view of the set of running XIDs: our entry is
- * duplicate with the gxact that has already been inserted into the
- * ProcArray.
- */
+ /* A shared lock is enough to modify our own fields. */
+ LWLockAcquire(ProcArrayLock, LW_SHARED);
pgxact->xid = InvalidTransactionId;
proc->lxid = InvalidLocalTransactionId;
pgxact->snapshotlsn = InvalidXLogRecPtr;
/* redundant, but just in case */
pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
pgxact->delayChkpt = false;
+ LWLockRelease(ProcArrayLock);
}
/*
proc->databaseId == MyDatabaseId ||
proc->databaseId == 0) /* always include WalSender */
{
- /* FIXME: is this atomic? If not, do we have enough locking? If it
- * is, could we get away with less locking?
- */
XLogRecPtr snapshotlsn = pgxact->snapshotlsn;
if (snapshotlsn < result && snapshotlsn != InvalidXLogRecPtr)
if (!TransactionIdIsNormal(sourcexid))
return false;
- /* Get lock so source xact can't end while we're doing this */
- LWLockAcquire(ProcArrayLock, LW_SHARED);
+ /* Get exclusive lock so source xact can't end while we're doing this */
+ LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
for (index = 0; index < arrayP->numProcs; index++)
{
*
* The arguments allow filtering the set of VXIDs returned. Our own process
* is always skipped. In addition:
- * If limitSnapshotLSN is not InvalidTransactionId, skip processes with
- * snapshotlsn > limitSnapshotLSN, or no snapshot at all .
+ * If limitSnapshotLSN is not InvalidXLogRecPtr, skip processes with
+ * snapshotlsn > limitSnapshotLSN, or no snapshot at all.
* If allDbs is false, skip processes attached to other databases.
* If excludeVacuum isn't zero, skip processes for which
* (vacuumFlags & excludeVacuum) is not zero.
*
- * Note: the purpose of the limitXmin and excludeXmin0 parameters is to
+ * Note: the purpose of the limitSnapshotLSN parameter is to
* allow skipping backends whose oldest live snapshot is no older than
- * some snapshot we have. Since we examine the procarray with only shared
- * lock, there are race conditions: a backend could set its xmin just after
- * we look. Indeed, on multiprocessors with weak memory ordering, the
- * other backend could have set its xmin *before* we look. We know however
- * that such a backend must have held shared ProcArrayLock overlapping our
- * own hold of ProcArrayLock, else we would see its xmin update. Therefore,
- * any snapshot the other backend is taking concurrently with our scan cannot
- * consider any transactions as still running that we think are committed
- * (since backends must hold ProcArrayLock exclusive to commit).
+ * some snapshot we have.
*/
VirtualTransactionId *
GetCurrentVirtualXIDs(XLogRecPtr limitSnapshotLSN,
vxids = (VirtualTransactionId *)
palloc(sizeof(VirtualTransactionId) * arrayP->maxProcs);
- LWLockAcquire(ProcArrayLock, LW_SHARED);
+ /*
+ * Take an exclusive lock, to prevent backends from changing their
+ * snapshotlsn values while we're reading them.
+ */
+ LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
for (index = 0; index < arrayP->numProcs; index++)
{
if (allDbs || proc->databaseId == MyDatabaseId)
{
- /*
- * Fetch LSN just once - might change on us
- *
- * FIXME: really?
- */
XLogRecPtr snapshotlsn = pgxact->snapshotlsn;
if (limitSnapshotLSN == InvalidXLogRecPtr ||
- * limitXmin is supplied as either latestRemovedXid, or InvalidTransactionId
- * in cases where we cannot accurately determine a value for latestRemovedXid.
+ * limitSnapshotLSN is supplied as either latestRemovedCommitLSN, or
+ * InvalidXLogRecPtr in cases where we cannot accurately determine a value.
*
- * If limitSnapshotLSN is InvalidTransactionId then we want to kill everybody,
+ * If limitSnapshotLSN is InvalidXLogRecPtr then we want to kill everybody,
* so we're not worried if they have a snapshot or not. Otherwise the result
- * only includes those backends that hold onto a snapshot that still considers
- * transactions committed before limitSnapshotLSN as in-progress.
- *
- * FIXME: the below comment is obsolete (or belons somewhere else now)
- * All callers that are checking xmins always now supply a valid and useful
- * value for limitXmin. The limitXmin is always lower than the lowest
- * numbered KnownAssignedXid that is not already a FATAL error. This is
- * because we only care about cleanup records that are cleaning up tuple
- * versions from committed transactions. In that case they will only occur
- * at the point where the record is less than the lowest running xid. That
- * allows us to say that if any backend takes a snapshot concurrently with
- * us then the conflict assessment made here would never include the snapshot
- * that is being derived. So we take LW_SHARED on the ProcArray and allow
- * concurrent snapshots when limitXmin is valid. We might think about adding
- * Assert(limitXmin < lowest(KnownAssignedXids))
- * but that would not be true in the case of FATAL errors lagging in array,
- * but we already know those are bogus anyway, so we skip that test.
+ * only includes those backends that hold onto a snapshot that's older than
+ * limitSnapshotLSN.
*
* If dbOid is valid we skip backends attached to other databases.
*
errmsg("out of memory")));
}
- LWLockAcquire(ProcArrayLock, LW_SHARED);
+ /*
+ * Take an exclusive lock, to prevent backends from changing their
+ * snapshotlsn values while we're reading them.
+ */
+ LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
for (index = 0; index < arrayP->numProcs; index++)
{
if (!OidIsValid(dbOid) ||
proc->databaseId == dbOid)
{
- /* Fetch LSN just once - can't change on us, but good coding */
- /* FIXME: this ain't really atomic... */
XLogRecPtr snapshotlsn = pgxact->snapshotlsn;
/*
RunningTransactionsData running;
running.nextXid = xlrec->nextXid;
- running.latestCompletedXid = xlrec->latestCompletedXid;
running.oldestRunningXid = xlrec->oldestRunningXid;
ProcArrayApplyRecoveryInfo(&running);
xlrec.nextXid = CurrRunningXacts->nextXid;
xlrec.oldestRunningXid = CurrRunningXacts->oldestRunningXid;
- xlrec.latestCompletedXid = CurrRunningXacts->latestCompletedXid;
/* Header */
rdata.data = (char *) (&xlrec);
extern void heap_page_prune_opt(Relation relation, Buffer buffer);
extern int heap_page_prune(Relation relation, Buffer buffer,
XLogRecPtr OldestSnapshot,
- bool report_stats, TransactionId *latestRemovedXid);
+ bool report_stats, XLogRecPtr *latestRemovedCommitLSN);
extern void heap_page_prune_execute(Buffer buffer,
OffsetNumber *redirected, int nredirected,
OffsetNumber *nowdead, int ndead,
{
RelFileNode node;
BlockNumber block;
- TransactionId latestRemovedXid;
+ XLogRecPtr latestRemovedCommitLSN;
uint16 nredirected;
uint16 ndead;
/* OFFSET NUMBERS FOLLOW */
/*
* Cleanup_info is required in some cases during a lazy VACUUM.
- * Used for reporting the results of HeapTupleHeaderAdvanceLatestRemovedXid()
+ * Used for reporting the results of HeapTupleHeaderAdvanceLatestRemoved()
* see vacuumlazy.c for full explanation
*/
typedef struct xl_heap_cleanup_info
{
RelFileNode node;
- TransactionId latestRemovedXid;
+ XLogRecPtr latestRemovedCommitLSN;
} xl_heap_cleanup_info;
#define SizeOfHeapCleanupInfo (sizeof(xl_heap_cleanup_info))
XLogRecPtr start_lsn; /* Insert LSN at begin of rewrite */
} xl_heap_rewrite_mapping;
-extern void HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
- TransactionId *latestRemovedXid);
+extern void HeapTupleHeaderAdvanceLatestRemoved(HeapTupleHeader tuple,
+ XLogRecPtr *latestRemovedCommitLSN);
extern void heap_redo(XLogRecPtr lsn, XLogRecord *rptr);
extern void heap_desc(StringInfo buf, uint8 xl_info, char *rec);
extern void heap_xlog_logical_rewrite(XLogRecPtr lsn, XLogRecord *r);
extern XLogRecPtr log_heap_cleanup_info(RelFileNode rnode,
- TransactionId latestRemovedXid);
+ XLogRecPtr latestRemovedCommitLSN);
extern XLogRecPtr log_heap_clean(Relation reln, Buffer buffer,
OffsetNumber *redirected, int nredirected,
OffsetNumber *nowdead, int ndead,
OffsetNumber *nowunused, int nunused,
- TransactionId latestRemovedXid);
+ XLogRecPtr latestRemovedCommitLSN);
extern XLogRecPtr log_heap_freeze(Relation reln, Buffer buffer,
TransactionId cutoff_xid, xl_heap_freeze_tuple *tuples,
int ntuples);
{
TransactionId nextXid; /* copy of ShmemVariableCache->nextXid */
TransactionId oldestRunningXid; /* *not* oldestXmin */
- TransactionId latestCompletedXid; /* so we can set xmax */
} xl_running_xacts;
-#define MinSizeOfXactRunningXacts (offsetof(xl_running_xacts, latestCompletedXid) + sizeof(TransactionId))
+#define MinSizeOfXactRunningXacts (offsetof(xl_running_xacts, oldestRunningXid) + sizeof(TransactionId))
/* Recovery handlers for the Standby Rmgr (RM_STANDBY_ID) */