<refsynopsisdiv>
<synopsis>
-VACUUM [ ( { FULL | FREEZE | VERBOSE | ANALYZE } [, ...] ) ] [ <replaceable class="PARAMETER">table_name</replaceable> [ (<replaceable class="PARAMETER">column_name</replaceable> [, ...] ) ] ]
+VACUUM [ ( { FULL | FREEZE | VERBOSE | ANALYZE | DISABLE_PAGE_SKIPPING } [, ...] ) ] [ <replaceable class="PARAMETER">table_name</replaceable> [ (<replaceable class="PARAMETER">column_name</replaceable> [, ...] ) ] ]
VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] [ <replaceable class="PARAMETER">table_name</replaceable> ]
VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] ANALYZE [ <replaceable class="PARAMETER">table_name</replaceable> [ (<replaceable class="PARAMETER">column_name</replaceable> [, ...] ) ] ]
</synopsis>
</listitem>
</varlistentry>
+ <varlistentry>
+ <term><literal>DISABLE_PAGE_SKIPPING</literal></term>
+ <listitem>
+ <para>
+ Normally, <command>VACUUM</> will skip pages based on the <link
+ linkend="vacuum-for-visibility-map">visibility map</>. Pages where
+ all tuples are known to be frozen can always be skipped, and those
+ where all tuples are known to be visible to all transactions may be
+ skipped except when performing an aggressive vacuum. Furthermore,
+ except when performing an aggressive vacuum, some pages may be skipped
+ in order to avoid waiting for other sessions to finish using them.
+ This option disables all page-skipping behavior, and is intended to
+ be used only when the contents of the visibility map are thought to
+ be suspect, which should happen only if there is a hardware or software
+ issue causing database corruption.
+ </para>
+ </listitem>
+ </varlistentry>
+
<varlistentry>
<term><replaceable class="PARAMETER">table_name</replaceable></term>
<listitem>
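
For context: the VACOPT_DISABLE_PAGE_SKIPPING flag tested in the code
below has to be defined somewhere; presumably the patch also adds a bit
to the VacuumOption bitmask in src/include/commands/vacuum.h. A sketch
of how that enum would look (the pre-existing flags are the 9.6-era
ones; the exact value of the new bit is an assumption):

    typedef enum VacuumOption
    {
        VACOPT_VACUUM = 1 << 0,     /* do VACUUM */
        VACOPT_ANALYZE = 1 << 1,    /* do ANALYZE */
        VACOPT_VERBOSE = 1 << 2,    /* print progress info */
        VACOPT_FREEZE = 1 << 3,     /* FREEZE option */
        VACOPT_FULL = 1 << 4,       /* FULL (non-concurrent) vacuum */
        VACOPT_NOWAIT = 1 << 5,     /* don't wait to get lock (autovacuum only) */
        VACOPT_SKIPTOAST = 1 << 6,  /* don't process the TOAST table, if any */
        VACOPT_DISABLE_PAGE_SKIPPING = 1 << 7   /* don't skip any pages */
    } VacuumOption;
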
/* non-export function prototypes */
-static void lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
- Relation *Irel, int nindexes, bool aggressive);
+static void lazy_scan_heap(Relation onerel, int options,
+ LVRelStats *vacrelstats, Relation *Irel, int nindexes,
+ bool aggressive);
static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
static bool lazy_check_needs_freeze(Buffer buf, bool *hastup);
static void lazy_vacuum_index(Relation indrel,
&MultiXactCutoff, &mxactFullScanLimit);
/*
- * We request an aggressive scan if either the table's frozen Xid is now
- * older than or equal to the requested Xid full-table scan limit; or if
- * the table's minimum MultiXactId is older than or equal to the requested
- * mxid full-table scan limit.
+ * We request an aggressive scan if the table's frozen Xid is now older
+ * than or equal to the requested Xid full-table scan limit; or if the
+ * table's minimum MultiXactId is older than or equal to the requested
+ * mxid full-table scan limit; or if DISABLE_PAGE_SKIPPING was specified.
*/
aggressive = TransactionIdPrecedesOrEquals(onerel->rd_rel->relfrozenxid,
xidFullScanLimit);
aggressive |= MultiXactIdPrecedesOrEquals(onerel->rd_rel->relminmxid,
mxactFullScanLimit);
+ if (options & VACOPT_DISABLE_PAGE_SKIPPING)
+ aggressive = true;
vacrelstats = (LVRelStats *) palloc0(sizeof(LVRelStats));
vacrelstats->hasindex = (nindexes > 0);
/* Do the vacuuming */
- lazy_scan_heap(onerel, vacrelstats, Irel, nindexes, aggressive);
+ lazy_scan_heap(onerel, options, vacrelstats, Irel, nindexes, aggressive);
/* Done with indexes */
vac_close_indexes(nindexes, Irel, NoLock);
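
A quick standalone model (not the PostgreSQL source; the two wraparound
tests are reduced to precomputed booleans here) of the decision above,
showing that DISABLE_PAGE_SKIPPING forces the aggressive path no matter
what the wraparound limits say:

    #include <stdbool.h>
    #include <stdio.h>

    #define VACOPT_DISABLE_PAGE_SKIPPING (1 << 7)  /* assumed value, see sketch above */

    static bool
    scan_is_aggressive(int options, bool xid_limit_reached, bool mxid_limit_reached)
    {
        bool aggressive = xid_limit_reached || mxid_limit_reached;

        if (options & VACOPT_DISABLE_PAGE_SKIPPING)
            aggressive = true;
        return aggressive;
    }

    int
    main(void)
    {
        printf("%d\n", scan_is_aggressive(0, false, false));                            /* 0 */
        printf("%d\n", scan_is_aggressive(VACOPT_DISABLE_PAGE_SKIPPING, false, false)); /* 1 */
        return 0;
    }
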
* reference them have been killed.
*/
static void
-lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
+lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
Relation *Irel, int nindexes, bool aggressive)
{
BlockNumber nblocks,
* the last page. This is worth avoiding mainly because such a lock must
* be replayed on any hot standby, where it can be disruptive.
*/
- for (next_unskippable_block = 0;
- next_unskippable_block < nblocks;
- next_unskippable_block++)
+ next_unskippable_block = 0;
+ if ((options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
{
- uint8 vmstatus;
-
- vmstatus = visibilitymap_get_status(onerel, next_unskippable_block,
- &vmbuffer);
- if (aggressive)
+ while (next_unskippable_block < nblocks)
{
- if ((vmstatus & VISIBILITYMAP_ALL_FROZEN) == 0)
- break;
- }
- else
- {
- if ((vmstatus & VISIBILITYMAP_ALL_VISIBLE) == 0)
- break;
+ uint8 vmstatus;
+
+ vmstatus = visibilitymap_get_status(onerel, next_unskippable_block,
+ &vmbuffer);
+ if (aggressive)
+ {
+ if ((vmstatus & VISIBILITYMAP_ALL_FROZEN) == 0)
+ break;
+ }
+ else
+ {
+ if ((vmstatus & VISIBILITYMAP_ALL_VISIBLE) == 0)
+ break;
+ }
+ vacuum_delay_point();
+ next_unskippable_block++;
}
- vacuum_delay_point();
}
if (next_unskippable_block >= SKIP_PAGES_THRESHOLD)
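
Both skip loops (the priming loop above and the advancing loop below)
implement the same per-page predicate. A standalone model of it, for
illustration only (the visibility-map bit values are assumptions, and
the real code additionally applies the SKIP_PAGES_THRESHOLD heuristic
rather than skipping isolated pages):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VACOPT_DISABLE_PAGE_SKIPPING (1 << 7)  /* assumed values for this sketch */
    #define VISIBILITYMAP_ALL_VISIBLE    0x01
    #define VISIBILITYMAP_ALL_FROZEN     0x02

    /*
     * With DISABLE_PAGE_SKIPPING the loops are bypassed entirely, so no
     * page is skippable; otherwise an aggressive scan may skip only
     * all-frozen pages, while a regular scan may skip any all-visible page.
     */
    static bool
    page_is_skippable(int options, bool aggressive, uint8_t vmbits)
    {
        if (options & VACOPT_DISABLE_PAGE_SKIPPING)
            return false;
        if (aggressive)
            return (vmbits & VISIBILITYMAP_ALL_FROZEN) != 0;
        return (vmbits & VISIBILITYMAP_ALL_VISIBLE) != 0;
    }

    int
    main(void)
    {
        uint8_t visible_only = VISIBILITYMAP_ALL_VISIBLE;

        printf("%d\n", page_is_skippable(0, false, visible_only)); /* 1: regular scan skips */
        printf("%d\n", page_is_skippable(0, true, visible_only));  /* 0: aggressive needs all-frozen */
        printf("%d\n", page_is_skippable(VACOPT_DISABLE_PAGE_SKIPPING,
                                         false, visible_only));    /* 0: option disables skipping */
        return 0;
    }
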
if (blkno == next_unskippable_block)
{
/* Time to advance next_unskippable_block */
- for (next_unskippable_block++;
- next_unskippable_block < nblocks;
- next_unskippable_block++)
+ next_unskippable_block++;
+ if ((options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
{
- uint8 vmskipflags;
-
- vmskipflags = visibilitymap_get_status(onerel,
- next_unskippable_block,
- &vmbuffer);
- if (aggressive)
+ while (next_unskippable_block < nblocks)
{
- if ((vmskipflags & VISIBILITYMAP_ALL_FROZEN) == 0)
- break;
- }
- else
- {
- if ((vmskipflags & VISIBILITYMAP_ALL_VISIBLE) == 0)
- break;
+ uint8 vmskipflags;
+
+ vmskipflags = visibilitymap_get_status(onerel,
+ next_unskippable_block,
+ &vmbuffer);
+ if (aggressive)
+ {
+ if ((vmskipflags & VISIBILITYMAP_ALL_FROZEN) == 0)
+ break;
+ }
+ else
+ {
+ if ((vmskipflags & VISIBILITYMAP_ALL_VISIBLE) == 0)
+ break;
+ }
+ vacuum_delay_point();
+ next_unskippable_block++;
}
- vacuum_delay_point();
}
/*
}
else
{
- bool tuple_totally_frozen;
+ bool tuple_totally_frozen;
num_tuples += 1;
hastup = true;
* freezing. Note we already have exclusive buffer lock.
*/
if (heap_prepare_freeze_tuple(tuple.t_data, FreezeLimit,
- MultiXactCutoff, &frozen[nfrozen],
- &tuple_totally_frozen))
+ MultiXactCutoff, &frozen[nfrozen],
+ &tuple_totally_frozen))
frozen[nfrozen++].offset = offnum;
if (!tuple_totally_frozen)