{
bufHdr = GetLocalBufferDescriptor(-buffer - 1);
if (bufHdr->tag.blockNum == blockNum &&
- RelFileLocatorEquals(bufHdr->tag.rlocator, relation->rd_locator) &&
- bufHdr->tag.forkNum == forkNum)
+ BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
+ BufTagGetForkNum(&bufHdr->tag) == forkNum)
return buffer;
ResourceOwnerForgetBuffer(CurrentResourceOwner, buffer);
LocalRefCount[-buffer - 1]--;
bufHdr = GetBufferDescriptor(buffer - 1);
/* we have pin, so it's ok to examine tag without spinlock */
if (bufHdr->tag.blockNum == blockNum &&
- RelFileLocatorEquals(bufHdr->tag.rlocator, relation->rd_locator) &&
- bufHdr->tag.forkNum == forkNum)
+ BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
+ BufTagGetForkNum(&bufHdr->tag) == forkNum)
return buffer;
UnpinBuffer(bufHdr, true);
}
item = &CkptBufferIds[num_to_scan++];
item->buf_id = buf_id;
- item->tsId = bufHdr->tag.rlocator.spcOid;
- item->relNumber = bufHdr->tag.rlocator.relNumber;
- item->forkNum = bufHdr->tag.forkNum;
+ item->tsId = bufHdr->tag.spcOid;
+ item->relNumber = BufTagGetRelNumber(&bufHdr->tag);
+ item->forkNum = BufTagGetForkNum(&bufHdr->tag);
item->blockNum = bufHdr->tag.blockNum;
}
}
/* theoretically we should lock the bufhdr here */
- path = relpathbackend(buf->tag.rlocator, backend, buf->tag.forkNum);
+ path = relpathbackend(BufTagGetRelFileLocator(&buf->tag), backend,
+ BufTagGetForkNum(&buf->tag));
buf_state = pg_atomic_read_u32(&buf->state);
elog(WARNING,
"buffer refcount leak: [%03d] "
bufHdr = GetBufferDescriptor(buffer - 1);
/* pinned, so OK to read tag without spinlock */
- *rlocator = bufHdr->tag.rlocator;
- *forknum = bufHdr->tag.forkNum;
+ *rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
+ *forknum = BufTagGetForkNum(&bufHdr->tag);
*blknum = bufHdr->tag.blockNum;
}
/* Find smgr relation for buffer */
if (reln == NULL)
- reln = smgropen(buf->tag.rlocator, InvalidBackendId);
+ reln = smgropen(BufTagGetRelFileLocator(&buf->tag), InvalidBackendId);
- TRACE_POSTGRESQL_BUFFER_FLUSH_START(buf->tag.forkNum,
+ TRACE_POSTGRESQL_BUFFER_FLUSH_START(BufTagGetForkNum(&buf->tag),
buf->tag.blockNum,
reln->smgr_rlocator.locator.spcOid,
reln->smgr_rlocator.locator.dbOid,
* bufToWrite is either the shared buffer or a copy, as appropriate.
*/
smgrwrite(reln,
- buf->tag.forkNum,
+ BufTagGetForkNum(&buf->tag),
buf->tag.blockNum,
bufToWrite,
false);
*/
TerminateBufferIO(buf, true, 0);
- TRACE_POSTGRESQL_BUFFER_FLUSH_DONE(buf->tag.forkNum,
+ TRACE_POSTGRESQL_BUFFER_FLUSH_DONE(BufTagGetForkNum(&buf->tag),
buf->tag.blockNum,
reln->smgr_rlocator.locator.spcOid,
reln->smgr_rlocator.locator.dbOid,
* We could check forkNum and blockNum as well as the rlocator, but
* the incremental win from doing so seems small.
*/
- if (!RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator.locator))
+ if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator.locator))
continue;
buf_state = LockBufHdr(bufHdr);
for (j = 0; j < nforks; j++)
{
- if (RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator.locator) &&
- bufHdr->tag.forkNum == forkNum[j] &&
+ if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator.locator) &&
+ BufTagGetForkNum(&bufHdr->tag) == forkNum[j] &&
bufHdr->tag.blockNum >= firstDelBlock[j])
{
InvalidateBuffer(bufHdr); /* releases spinlock */
for (j = 0; j < n; j++)
{
- if (RelFileLocatorEquals(bufHdr->tag.rlocator, locators[j]))
+ if (BufTagMatchesRelFileLocator(&bufHdr->tag, &locators[j]))
{
rlocator = &locators[j];
break;
}
else
{
- rlocator = bsearch((const void *) &(bufHdr->tag.rlocator),
+ RelFileLocator locator;
+
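+ /* the tag no longer embeds a RelFileLocator, so build a bsearch key locally */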
+ locator = BufTagGetRelFileLocator(&bufHdr->tag);
+ rlocator = bsearch((const void *) &(locator),
locators, n, sizeof(RelFileLocator),
rlocator_comparator);
}
continue;
buf_state = LockBufHdr(bufHdr);
- if (RelFileLocatorEquals(bufHdr->tag.rlocator, (*rlocator)))
+ if (BufTagMatchesRelFileLocator(&bufHdr->tag, rlocator))
InvalidateBuffer(bufHdr); /* releases spinlock */
else
UnlockBufHdr(bufHdr, buf_state);
*/
buf_state = LockBufHdr(bufHdr);
- if (RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator) &&
- bufHdr->tag.forkNum == forkNum &&
+ if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator) &&
+ BufTagGetForkNum(&bufHdr->tag) == forkNum &&
bufHdr->tag.blockNum >= firstDelBlock)
InvalidateBuffer(bufHdr); /* releases spinlock */
else
* As in DropRelationBuffers, an unlocked precheck should be
* safe and saves some cycles.
*/
- if (bufHdr->tag.rlocator.dbOid != dbid)
+ if (bufHdr->tag.dbOid != dbid)
continue;
buf_state = LockBufHdr(bufHdr);
- if (bufHdr->tag.rlocator.dbOid == dbid)
+ if (bufHdr->tag.dbOid == dbid)
InvalidateBuffer(bufHdr); /* releases spinlock */
else
UnlockBufHdr(bufHdr, buf_state);
"[%02d] (freeNext=%d, rel=%s, "
"blockNum=%u, flags=0x%x, refcount=%u %d)",
i, buf->freeNext,
- relpathbackend(buf->tag.rlocator, InvalidBackendId, buf->tag.forkNum),
+ relpathbackend(BufTagGetRelFileLocator(&buf->tag),
+ InvalidBackendId, BufTagGetForkNum(&buf->tag)),
buf->tag.blockNum, buf->flags,
buf->refcount, GetPrivateRefCount(b));
}
"[%02d] (freeNext=%d, rel=%s, "
"blockNum=%u, flags=0x%x, refcount=%u %d)",
i, buf->freeNext,
- relpathperm(buf->tag.rlocator, buf->tag.forkNum),
+ relpathperm(BufTagGetRelFileLocator(&buf->tag),
+ BufTagGetForkNum(&buf->tag)),
buf->tag.blockNum, buf->flags,
buf->refcount, GetPrivateRefCount(b));
}
uint32 buf_state;
bufHdr = GetLocalBufferDescriptor(i);
- if (RelFileLocatorEquals(bufHdr->tag.rlocator, rel->rd_locator) &&
+ if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
(BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
{
PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
smgrwrite(RelationGetSmgr(rel),
- bufHdr->tag.forkNum,
+ BufTagGetForkNum(&bufHdr->tag),
bufHdr->tag.blockNum,
localpage,
false);
* As in DropRelationBuffers, an unlocked precheck should be
* safe and saves some cycles.
*/
- if (!RelFileLocatorEquals(bufHdr->tag.rlocator, rel->rd_locator))
+ if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator))
continue;
ReservePrivateRefCountEntry();
buf_state = LockBufHdr(bufHdr);
- if (RelFileLocatorEquals(bufHdr->tag.rlocator, rel->rd_locator) &&
+ if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
(buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
{
PinBuffer_Locked(bufHdr);
for (j = 0; j < nrels; j++)
{
- if (RelFileLocatorEquals(bufHdr->tag.rlocator, srels[j].rlocator))
+ if (BufTagMatchesRelFileLocator(&bufHdr->tag, &srels[j].rlocator))
{
srelent = &srels[j];
break;
}
else
{
- srelent = bsearch((const void *) &(bufHdr->tag.rlocator),
+ RelFileLocator rlocator;
+
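+ /* materialize a RelFileLocator from the tag to serve as the bsearch key */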
+ rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
+ srelent = bsearch((const void *) &(rlocator),
srels, nrels, sizeof(SMgrSortArray),
rlocator_comparator);
}
ReservePrivateRefCountEntry();
buf_state = LockBufHdr(bufHdr);
- if (RelFileLocatorEquals(bufHdr->tag.rlocator, srelent->rlocator) &&
+ if (BufTagMatchesRelFileLocator(&bufHdr->tag, &srelent->rlocator) &&
(buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
{
PinBuffer_Locked(bufHdr);
* As in DropRelationBuffers, an unlocked precheck should be
* safe and saves some cycles.
*/
- if (bufHdr->tag.rlocator.dbOid != dbid)
+ if (bufHdr->tag.dbOid != dbid)
continue;
ReservePrivateRefCountEntry();
buf_state = LockBufHdr(bufHdr);
- if (bufHdr->tag.rlocator.dbOid == dbid &&
+ if (bufHdr->tag.dbOid == dbid &&
(buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
{
PinBuffer_Locked(bufHdr);
* See src/backend/storage/page/README for longer discussion.
*/
if (RecoveryInProgress() ||
- RelFileLocatorSkippingWAL(bufHdr->tag.rlocator))
+ RelFileLocatorSkippingWAL(BufTagGetRelFileLocator(&bufHdr->tag)))
return;
/*
/* Buffer is pinned, so we can read tag without spinlock */
char *path;
- path = relpathperm(buf->tag.rlocator, buf->tag.forkNum);
+ path = relpathperm(BufTagGetRelFileLocator(&buf->tag),
+ BufTagGetForkNum(&buf->tag));
ereport(WARNING,
(errcode(ERRCODE_IO_ERROR),
errmsg("could not write block %u of %s",
/* Buffer is pinned, so we can read the tag without locking the spinlock */
if (bufHdr != NULL)
{
- char *path = relpathperm(bufHdr->tag.rlocator, bufHdr->tag.forkNum);
+ char *path = relpathperm(BufTagGetRelFileLocator(&bufHdr->tag),
+ BufTagGetForkNum(&bufHdr->tag));
errcontext("writing block %u of relation %s",
bufHdr->tag.blockNum, path);
if (bufHdr != NULL)
{
- char *path = relpathbackend(bufHdr->tag.rlocator, MyBackendId,
- bufHdr->tag.forkNum);
+ char *path = relpathbackend(BufTagGetRelFileLocator(&bufHdr->tag),
+ MyBackendId,
+ BufTagGetForkNum(&bufHdr->tag));
errcontext("writing block %u of relation %s",
bufHdr->tag.blockNum, path);
buffertag_comparator(const BufferTag *ba, const BufferTag *bb)
{
int ret;
+ RelFileLocator rlocatora;
+ RelFileLocator rlocatorb;
- ret = rlocator_comparator(&ba->rlocator, &bb->rlocator);
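+ /* materialize each tag's locator so rlocator_comparator() can be reused */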
+ rlocatora = BufTagGetRelFileLocator(ba);
+ rlocatorb = BufTagGetRelFileLocator(bb);
+
+ ret = rlocator_comparator(&rlocatora, &rlocatorb);
if (ret != 0)
return ret;
- if (ba->forkNum < bb->forkNum)
+ if (BufTagGetForkNum(ba) < BufTagGetForkNum(bb))
return -1;
- if (ba->forkNum > bb->forkNum)
+ if (BufTagGetForkNum(ba) > BufTagGetForkNum(bb))
return 1;
if (ba->blockNum < bb->blockNum)
SMgrRelation reln;
int ahead;
BufferTag tag;
+ RelFileLocator currlocator;
Size nblocks = 1;
cur = &context->pending_writebacks[i];
tag = cur->tag;
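+ /* compute the locator once; reused by the loop comparisons and smgropen() below */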
+ currlocator = BufTagGetRelFileLocator(&tag);
/*
* Peek ahead, into following writeback requests, to see if they can
* be combined with the current one.
*/
for (ahead = 0; i + ahead + 1 < context->nr_pending; ahead++)
{
next = &context->pending_writebacks[i + ahead + 1];
/* different file, stop */
- if (!RelFileLocatorEquals(cur->tag.rlocator, next->tag.rlocator) ||
- cur->tag.forkNum != next->tag.forkNum)
+ if (!RelFileLocatorEquals(currlocator,
+ BufTagGetRelFileLocator(&next->tag)) ||
+ BufTagGetForkNum(&cur->tag) != BufTagGetForkNum(&next->tag))
break;
/* ok, block queued twice, skip */
i += ahead;
/* and finally tell the kernel to write the data to storage */
- reln = smgropen(tag.rlocator, InvalidBackendId);
- smgrwriteback(reln, tag.forkNum, tag.blockNum, nblocks);
+ reln = smgropen(currlocator, InvalidBackendId);
+ smgrwriteback(reln, BufTagGetForkNum(&tag), tag.blockNum, nblocks);
}
context->nr_pending = 0;
*/
typedef struct buftag
{
- RelFileLocator rlocator; /* physical relation identifier */
- ForkNumber forkNum;
+ Oid spcOid; /* tablespace oid */
+ Oid dbOid; /* database oid */
+ RelFileNumber relNumber; /* relation file number */
+ ForkNumber forkNum; /* fork number */
BlockNumber blockNum; /* blknum relative to begin of reln */
} BufferTag;
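+
+/*
+ * Accessors for the fields of a BufferTag.  Call sites use these instead of
+ * touching the struct members directly, so the tag's physical layout can
+ * change without revisiting every caller.
+ */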
+static inline RelFileNumber
+BufTagGetRelNumber(const BufferTag *tag)
+{
+ return tag->relNumber;
+}
+
+static inline ForkNumber
+BufTagGetForkNum(const BufferTag *tag)
+{
+ return tag->forkNum;
+}
+
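+/*
+ * Set the relation number and fork number of a tag in one step; a single
+ * entry point keeps callers correct should the two fields ever be packed
+ * together.
+ */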
+static inline void
+BufTagSetRelForkDetails(BufferTag *tag, RelFileNumber relnumber,
+ ForkNumber forknum)
+{
+ tag->relNumber = relnumber;
+ tag->forkNum = forknum;
+}
+
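+/*
+ * Reassemble a RelFileLocator from the discrete spcOid/dbOid/relNumber
+ * fields stored in the tag.
+ */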
+static inline RelFileLocator
+BufTagGetRelFileLocator(const BufferTag *tag)
+{
+ RelFileLocator rlocator;
+
+ rlocator.spcOid = tag->spcOid;
+ rlocator.dbOid = tag->dbOid;
+ rlocator.relNumber = BufTagGetRelNumber(tag);
+
+ return rlocator;
+}
+
static inline void
ClearBufferTag(BufferTag *tag)
{
- tag->rlocator.spcOid = InvalidOid;
- tag->rlocator.dbOid = InvalidOid;
- tag->rlocator.relNumber = InvalidRelFileNumber;
- tag->forkNum = InvalidForkNumber;
+ tag->spcOid = InvalidOid;
+ tag->dbOid = InvalidOid;
+ BufTagSetRelForkDetails(tag, InvalidRelFileNumber, InvalidForkNumber);
tag->blockNum = InvalidBlockNumber;
}
InitBufferTag(BufferTag *tag, const RelFileLocator *rlocator,
ForkNumber forkNum, BlockNumber blockNum)
{
- tag->rlocator = *rlocator;
- tag->forkNum = forkNum;
+ tag->spcOid = rlocator->spcOid;
+ tag->dbOid = rlocator->dbOid;
+ BufTagSetRelForkDetails(tag, rlocator->relNumber, forkNum);
tag->blockNum = blockNum;
}
static inline bool
BufferTagsEqual(const BufferTag *tag1, const BufferTag *tag2)
{
- return RelFileLocatorEquals(tag1->rlocator, tag2->rlocator) &&
+ return (tag1->spcOid == tag2->spcOid) &&
+ (tag1->dbOid == tag2->dbOid) &&
+ (tag1->relNumber == tag2->relNumber) &&
(tag1->blockNum == tag2->blockNum) &&
(tag1->forkNum == tag2->forkNum);
}
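+
+/*
+ * Test whether a tag refers to the given relation, comparing the locator
+ * fields directly rather than materializing a RelFileLocator.
+ */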
+static inline bool
+BufTagMatchesRelFileLocator(const BufferTag *tag,
+ const RelFileLocator *rlocator)
+{
+ return (tag->spcOid == rlocator->spcOid) &&
+ (tag->dbOid == rlocator->dbOid) &&
+ (BufTagGetRelNumber(tag) == rlocator->relNumber);
+}
+
+
/*
* The shared buffer mapping table is partitioned to reduce contention.
* To determine which partition lock a given tag requires, compute the tag's
* hash code with BufTableHashCode(), then apply BufMappingPartitionLock().
* NB: NUM_BUFFER_PARTITIONS must be a power of 2!
*/
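
For illustration only (not part of the patch): a minimal, self-contained sketch of the accessor pattern above. The typedefs merely mimic PostgreSQL's Oid/RelFileNumber/ForkNumber/BlockNumber types so the snippet compiles on its own; only the shape of the API matches the real buf_internals.h.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t Oid;           /* stand-in for PostgreSQL's Oid */
typedef Oid RelFileNumber;      /* stand-in typedefs, for illustration */
typedef int ForkNumber;
typedef uint32_t BlockNumber;

typedef struct RelFileLocator
{
    Oid         spcOid;
    Oid         dbOid;
    RelFileNumber relNumber;
} RelFileLocator;

/* Same field layout as the patched BufferTag: no embedded locator struct */
typedef struct BufferTag
{
    Oid         spcOid;
    Oid         dbOid;
    RelFileNumber relNumber;
    ForkNumber  forkNum;
    BlockNumber blockNum;
} BufferTag;

static inline RelFileLocator
BufTagGetRelFileLocator(const BufferTag *tag)
{
    RelFileLocator rlocator;

    rlocator.spcOid = tag->spcOid;
    rlocator.dbOid = tag->dbOid;
    rlocator.relNumber = tag->relNumber;
    return rlocator;
}

static inline bool
BufTagMatchesRelFileLocator(const BufferTag *tag,
                            const RelFileLocator *rlocator)
{
    return tag->spcOid == rlocator->spcOid &&
        tag->dbOid == rlocator->dbOid &&
        tag->relNumber == rlocator->relNumber;
}

int
main(void)
{
    BufferTag   tag = {1663, 5, 16384, 0, 42}; /* made-up example values */
    RelFileLocator rlocator = BufTagGetRelFileLocator(&tag);

    /* round-trip: the rebuilt locator matches the tag it came from */
    printf("match: %s\n",
           BufTagMatchesRelFileLocator(&tag, &rlocator) ? "yes" : "no");
    return 0;
}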