/* Local functions */
static int heap_prune_chain(Relation relation, Buffer buffer,
OffsetNumber rootoffnum,
- XLogRecPtr OldestSnapshot,
+ TransactionId OldestXmin,
PruneState *prstate);
static void heap_prune_record_prunable(PruneState *prstate, TransactionId xid);
static void heap_prune_record_redirect(PruneState *prstate,
{
Page page = BufferGetPage(buffer);
Size minfree;
+ TransactionId OldestXmin;
/*
* We can't write WAL in recovery mode, so there's no point trying to
* horizon can be used. Note that the toast relation of user defined
* relations are *not* considered catalog relations.
*/
-#ifdef BROKEN
+ AdvanceRecentGlobalXmin();
+#ifdef FIXME
if (IsCatalogRelation(relation) ||
RelationIsAccessibleInLogicalDecoding(relation))
OldestXmin = RecentGlobalXmin;
else
OldestXmin = RecentGlobalDataXmin;
#endif
- if (!TransactionIdIsValid(RecentGlobalXmin))
+ OldestXmin = RecentGlobalXmin;
+
+ if (!TransactionIdIsValid(OldestXmin))
return;
/*
* Forget it if page is not hinted to contain something prunable that's
* older than OldestXmin.
*/
- if (!PageIsPrunable(page, RecentGlobalXmin))
+ if (!PageIsPrunable(page, OldestXmin))
return;
/*
* needed */
/* OK to prune */
- (void) heap_page_prune(relation, buffer,
- GetOldestSnapshotLSN(NULL, false),
- true, &ignore);
+ (void) heap_page_prune(relation, buffer, OldestXmin, true, &ignore);
}
/* And release buffer lock */
* latestRemovedXid.
*/
int
-heap_page_prune(Relation relation, Buffer buffer, XLogRecPtr OldestSnapshot,
+heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
bool report_stats, XLogRecPtr *latestRemovedCommitLSN)
{
int ndeleted = 0;
/* Process this item or chain of items */
ndeleted += heap_prune_chain(relation, buffer, offnum,
- OldestSnapshot,
+ OldestXmin,
&prstate);
}
*/
static int
heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
- XLogRecPtr OldestSnapshot,
+ TransactionId OldestXmin,
PruneState *prstate)
{
int ndeleted = 0;
* either here or while following a chain below. Whichever path
* gets there first will mark the tuple unused.
*/
- if (HeapTupleSatisfiesVacuumX(&tup, OldestSnapshot, buffer)
+ if (HeapTupleSatisfiesVacuum(&tup, OldestXmin, buffer)
== HEAPTUPLE_DEAD && !HeapTupleHeaderIsHotUpdated(htup))
{
heap_prune_record_unused(prstate, rootoffnum);
*/
tupdead = recent_dead = false;
- switch (HeapTupleSatisfiesVacuumX(&tup, OldestSnapshot, buffer))
+ switch (HeapTupleSatisfiesVacuum(&tup, OldestXmin, buffer))
{
case HEAPTUPLE_DEAD:
tupdead = true;
* attempt to reference any pg_subtrans entry older than that (see Asserts
* in subtrans.c). During recovery, though, we mustn't do this because
* StartupSUBTRANS hasn't been called yet.
+ *
+ * FIXME: this is broken, because the xmins in snapshots are now
+ * just conservative estimates, and can lag behind the value calculated by
+ * GetOldestXmin(). IOW, GetOldestXmin() can return a value larger than
+ * the xmin of a snapshot. All the transactions between GetOldestXmin's
+ * return value and the xmin of any snapshot are in fact considered
+ * visible to all snapshots, which is why the return value of
+ * GetOldestXmin() is OK for deciding e.g. whether a tuple can be frozen;
+ * but when checking visibility with such a snapshot, you might still need
+ * to access subtrans and clog to reach that conclusion, so it is not safe
+ * to truncate away that piece of subtrans or clog.
*/
if (!RecoveryInProgress())
- {
- AdvanceRecentGlobalXmin();
- TruncateSUBTRANS(RecentGlobalXmin);
- }
+ TruncateSUBTRANS(GetOldestXmin(NULL, false));
/* Real work is done, but log and update stats before releasing lock. */
LogCheckpointEnd(false);
EState *estate;
ExprContext *econtext;
Snapshot snapshot;
- XLogRecPtr OldestSnapshot;
+ TransactionId OldestXmin;
BlockNumber root_blkno = InvalidBlockNumber;
OffsetNumber root_offsets[MaxHeapTuplesPerPage];
if (IsBootstrapProcessingMode() || indexInfo->ii_Concurrent)
{
snapshot = RegisterSnapshot(GetTransactionSnapshot());
- OldestSnapshot = InvalidXLogRecPtr; /* not used */
+ OldestXmin = InvalidTransactionId; /* not used */
}
else
{
snapshot = SnapshotAny;
/* okay to ignore lazy VACUUMs here */
- OldestSnapshot = GetOldestSnapshotLSN(heapRelation, true);
+ OldestXmin = GetOldestXmin(heapRelation, true);
}
scan = heap_beginscan_strat(heapRelation, /* relation */
*/
LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
- switch (HeapTupleSatisfiesVacuumX(heapTuple, OldestSnapshot,
+ switch (HeapTupleSatisfiesVacuum(heapTuple, OldestXmin,
scan->rs_cbuf))
{
case HEAPTUPLE_DEAD:
double deadrows = 0; /* # dead rows seen */
double rowstoskip = -1; /* -1 means not set yet */
BlockNumber totalblocks;
- XLogRecPtr OldestSnapshot;
+ TransactionId OldestXmin;
BlockSamplerData bs;
double rstate;
totalblocks = RelationGetNumberOfBlocks(onerel);
/* Need a cutoff xmin for HeapTupleSatisfiesVacuum */
- OldestSnapshot = GetOldestSnapshotLSN(onerel, true);
+ OldestXmin = GetOldestXmin(onerel, true);
/* Prepare for sampling block numbers */
BlockSampler_Init(&bs, totalblocks, targrows);
targtuple.t_data = (HeapTupleHeader) PageGetItem(targpage, itemid);
targtuple.t_len = ItemIdGetLength(itemid);
- switch (HeapTupleSatisfiesVacuumX(&targtuple,
- OldestSnapshot,
+ switch (HeapTupleSatisfiesVacuum(&targtuple,
+ OldestXmin,
targbuffer))
{
case HEAPTUPLE_LIVE:
HeapScanDesc heapScan;
bool use_wal;
bool is_system_catalog;
- XLogRecPtr OldestSnapshot;
+ TransactionId OldestXmin;
TransactionId FreezeXid;
MultiXactId MultiXactCutoff;
RewriteState rwstate;
* it from being vacuumed. This is needed because autovacuum processes
* toast tables independently of their main tables, with no lock on the
* latter. If an autovacuum were to start on the toast table after we
- * compute our OldestSnapshot below, it would use a later OldestSnapshot, and then
+ * compute our OldestXmin below, it would use a later OldestXmin, and then
* possibly remove as DEAD toast tuples belonging to main tuples we think
* are only RECENTLY_DEAD. Then we'd fail while trying to copy those
* tuples.
* not to be aggressive about this.
*/
vacuum_set_xid_limits(OldHeap, 0, 0, 0, 0,
- &OldestSnapshot, &FreezeXid, NULL, &MultiXactCutoff,
+ &OldestXmin, &FreezeXid, NULL, &MultiXactCutoff,
NULL);
/*
is_system_catalog = IsSystemRelation(OldHeap);
/* Initialize the rewrite operation */
- rwstate = begin_heap_rewrite(OldHeap, NewHeap, OldestSnapshot, FreezeXid,
+ rwstate = begin_heap_rewrite(OldHeap, NewHeap, OldestXmin, FreezeXid,
MultiXactCutoff, use_wal);
/*
LockBuffer(buf, BUFFER_LOCK_SHARE);
- switch (HeapTupleSatisfiesVacuumX(tuple, OldestSnapshot, buf))
+ switch (HeapTupleSatisfiesVacuum(tuple, OldestXmin, buf))
{
case HEAPTUPLE_DEAD:
/* Definitely dead */
int freeze_table_age,
int multixact_freeze_min_age,
int multixact_freeze_table_age,
- XLogRecPtr *oldestSnapshot,
+ TransactionId *oldestXmin,
TransactionId *freezeLimit,
TransactionId *xidFullScanLimit,
MultiXactId *multiXactCutoff,
TransactionId safeLimit;
MultiXactId mxactLimit;
MultiXactId safeMxactLimit;
- TransactionId oldestXmin;
/*
* We can always ignore processes running lazy vacuum. This is because we
* working on a particular table at any time, and that each vacuum is
* always an independent transaction.
*/
- *oldestSnapshot = GetOldestSnapshotLSN(rel, true);
+ *oldestXmin = GetOldestXmin(rel, true);
- /*
- * Note that we no longer use the oldestXmin value for deciding which
- * tuples can be removed. That's oldestSnapshot's charter now. oldestXmin
- * is only used to calculate the freeze limit.
- */
- oldestXmin = GetOldestXmin(rel, true);
-
- Assert(TransactionIdIsNormal(oldestXmin));
+ Assert(TransactionIdIsNormal(*oldestXmin));
/*
* Determine the minimum freeze age to use: as specified by the caller, or
/*
* Compute the cutoff XID, being careful not to generate a "permanent" XID
*/
- limit = oldestXmin - freezemin;
+ limit = *oldestXmin - freezemin;
if (!TransactionIdIsNormal(limit))
limit = FirstNormalTransactionId;
return;
}
+ /*
+ * FIXME: this is broken, because the xmins in snapshots are now
+ * just conservative estimates, and can lag behind the value calculated by
+ * GetOldestXmin(). IOW, GetOldestXmin() can return a value larger than
+ * the xmin of a snapshot. All the transactions between GetOldestXmin's
+ * return value and the xmin of any snapshot are in fact considered
+ * visible to all snapshots, which is why the return value of
+ * GetOldestXmin() is OK for deciding e.g. whether a tuple can be frozen;
+ * but when checking visibility with such a snapshot, you might still need
+ * to access subtrans and clog to reach that conclusion, so it is not safe
+ * to truncate away that piece of subtrans or clog.
+ *
+ * For truncating the clog, what we'd need is the MIN(xmin) among all
+ * the snapshots still active in the system.
+ */
+
/* Truncate CLOG and Multi to the oldest computed value */
TruncateCLOG(frozenXID);
TruncateMultiXact(minMulti);
/* A few variables that don't seem worth passing around as parameters */
static int elevel = -1;
-static XLogRecPtr OldestSnapshot;
+static TransactionId OldestXmin;
static TransactionId FreezeLimit;
static MultiXactId MultiXactCutoff;
vacstmt->freeze_min_age, vacstmt->freeze_table_age,
vacstmt->multixact_freeze_min_age,
vacstmt->multixact_freeze_table_age,
- &OldestSnapshot, &FreezeLimit, &xidFullScanLimit,
+ &OldestXmin, &FreezeLimit, &xidFullScanLimit,
&MultiXactCutoff, &mxactFullScanLimit);
/*
bool all_visible;
bool has_dead_tuples;
TransactionId visibility_cutoff_xid = InvalidTransactionId;
- XLogRecPtr commitlsn;
if (blkno == next_not_all_visible_block)
{
*
* We count tuples removed by the pruning step as removed by VACUUM.
*/
- tups_vacuumed += heap_page_prune(onerel, buf, OldestSnapshot, false,
+ tups_vacuumed += heap_page_prune(onerel, buf, OldestXmin, false,
&vacrelstats->latestRemovedCommitLSN);
/*
tupgone = false;
- switch (HeapTupleSatisfiesVacuumX(&tuple, OldestSnapshot, buf))
+ switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf))
{
case HEAPTUPLE_DEAD:
* enough that everyone sees it as committed?
*/
xmin = HeapTupleHeaderGetXmin(tuple.t_data);
- commitlsn = TransactionIdGetCommitLSN(xmin);
- Assert(COMMITLSN_IS_COMMITTED(commitlsn));
- if (commitlsn > OldestSnapshot)
+ /* XIDs wrap around; must use the modulo-2^31 comparison macros */
+ if (!TransactionIdPrecedes(xmin, OldestXmin))
{
all_visible = false;
break;
OffsetNumber offnum,
maxoff;
bool all_visible = true;
- XLogRecPtr commitlsn;
*visibility_cutoff_xid = InvalidTransactionId;
tuple.t_len = ItemIdGetLength(itemid);
tuple.t_tableOid = RelationGetRelid(rel);
- switch (HeapTupleSatisfiesVacuumX(&tuple, OldestSnapshot, buf))
+ switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf))
{
case HEAPTUPLE_LIVE:
{
* that everyone sees it as committed?
*/
xmin = HeapTupleHeaderGetXmin(tuple.t_data);
- commitlsn = TransactionIdGetCommitLSN(xmin);
- Assert(COMMITLSN_IS_COMMITTED(commitlsn));
- if (commitlsn > OldestSnapshot)
+ /* XIDs wrap around; must use the modulo-2^31 comparison macros */
+ if (!TransactionIdPrecedes(xmin, OldestXmin))
{
all_visible = false;
break;
/*
* Get the LSN of the oldest snapshot still active.
*
- * With LSN-based snapshots, this is more accurate than GetOldestXmin().
+ * With LSN-based snapshots, this is more accurate than GetOldestXmin()
+ * (i.e. this allows you to remove more dead tuples).
*
- * FIXME: the replication_slot_xmin and replication_slot_catalog_xmin values
- * don't affect this, so when this is used to decide if a dead tuple can be
+ * XXX: the replication_slot_xmin and replication_slot_catalog_xmin values
+ * don't affect this, so if this is used to decide if a dead tuple can be
* vacuumed, it breaks logical decoding.
*/
XLogRecPtr
* tuple is visible to us, while HeapTupleSatisfiesVacuum checks what else
* is going on with it.
*/
- htsvResult = HeapTupleSatisfiesVacuumX(tuple, TransactionSnapshotLSN, buffer);
+ htsvResult = HeapTupleSatisfiesVacuum(tuple, TransactionXmin, buffer);
switch (htsvResult)
{
case HEAPTUPLE_LIVE:
* FIXME: renamed to make sure we don't miss modifying any callers.
*/
HTSV_Result
-HeapTupleSatisfiesVacuumX(HeapTuple htup, XLogRecPtr OldestSnapshot,
+HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin,
Buffer buffer)
{
HeapTupleHeader tuple = htup->t_data;
commitlsn = TransactionIdGetCommitLSN(xmax);
if (COMMITLSN_IS_COMMITTED(commitlsn))
{
- if (commitlsn > OldestSnapshot)
+ if (!TransactionIdPrecedes(xmax, OldestXmin))
return HEAPTUPLE_RECENTLY_DEAD;
else
return HEAPTUPLE_DEAD;
* Deleter committed, but perhaps it was recent enough that some open
* transactions could still see the tuple.
*/
- commitlsn = TransactionIdGetCommitLSN(HeapTupleHeaderGetRawXmax(tuple));
- Assert(COMMITLSN_IS_COMMITTED(commitlsn));
- if (commitlsn > OldestSnapshot)
+ if (!TransactionIdPrecedes(HeapTupleHeaderGetRawXmax(tuple), OldestXmin))
return HEAPTUPLE_RECENTLY_DEAD;
/* Otherwise, it's dead and removable */
/* in heap/pruneheap.c */
extern void heap_page_prune_opt(Relation relation, Buffer buffer);
extern int heap_page_prune(Relation relation, Buffer buffer,
- XLogRecPtr OldestSnapshot,
+ TransactionId OldestXmin,
bool report_stats, XLogRecPtr *latestRemovedCommitLSN);
extern void heap_page_prune_execute(Buffer buffer,
OffsetNumber *redirected, int nredirected,
int freeze_min_age, int freeze_table_age,
int multixact_freeze_min_age,
int multixact_freeze_table_age,
- XLogRecPtr *oldestSnapshot,
+ TransactionId *oldestXmin,
TransactionId *freezeLimit,
TransactionId *xidFullScanLimit,
MultiXactId *multiXactCutoff,
/* Special "satisfies" routines with different APIs */
extern HTSU_Result HeapTupleSatisfiesUpdate(HeapTuple htup,
CommandId curcid, Buffer buffer);
-extern HTSV_Result HeapTupleSatisfiesVacuumX(HeapTuple htup,
- XLogRecPtr OldestSnapshot, Buffer buffer);
+extern HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup,
+ TransactionId OldestXmin, Buffer buffer);
extern bool HeapTupleIsSurelyDead(HeapTuple htup,
TransactionId OldestXmin);