<varlistentry>
<term><option>-b</option></term>
- <term><option>--blobs</option></term>
+ <term><option>--large-objects</option></term>
+ <term><option>--blobs</option> (deprecated)</term>
<listitem>
<para>
        Include large objects in the dump. This is the default behavior
        except when <option>--schema</option>, <option>--table</option>, or
        <option>--schema-only</option> is specified. The <option>-b</option>
switch is therefore only useful to add large objects to dumps
where a specific schema or table has been requested. Note that
- blobs are considered data and therefore will be included when
+ large objects are considered data and therefore will be included when
<option>--data-only</option> is used, but not
when <option>--schema-only</option> is.
</para>
<varlistentry>
<term><option>-B</option></term>
- <term><option>--no-blobs</option></term>
+ <term><option>--no-large-objects</option></term>
+ <term><option>--no-blobs</option> (deprecated)</term>
<listitem>
<para>
Exclude large objects in the dump.
<para>
Output a directory-format archive suitable for input into
<application>pg_restore</application>. This will create a directory
- with one file for each table and blob being dumped, plus a
+ with one file for each table and large object being dumped, plus a
so-called Table of Contents file describing the dumped objects in a
machine-readable format that <application>pg_restore</application>
can read. A directory format archive can be manipulated with
<note>
<para>
- Non-schema objects such as blobs are not dumped when <option>-n</option> is
- specified. You can add blobs back to the dump with the
- <option>--blobs</option> switch.
+ Non-schema objects such as large objects are not dumped when <option>-n</option> is
+ specified. You can add large objects back to the dump with the
+ <option>--large-objects</option> switch.
</para>
</note>
typedef enum _teSection
{
- SECTION_NONE = 1, /* COMMENTs, ACLs, etc; can be anywhere */
+ SECTION_NONE = 1, /* comments, ACLs, etc; can be anywhere */
SECTION_PRE_DATA, /* stuff to be processed before data */
- SECTION_DATA, /* TABLE DATA, BLOBS, BLOB COMMENTS */
+ SECTION_DATA, /* table data, large objects, LO comments */
SECTION_POST_DATA /* stuff to be processed after data */
} teSection;
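The section labels drive restore ordering: SECTION_DATA entries, which now include large objects, are processed between the pre-data and post-data phases, while SECTION_NONE entries may run anywhere. As a minimal sketch (a hypothetical helper, not part of this patch) of gating a TOC entry by phase:

	/* Hypothetical helper: should this TOC entry run during the data phase?
	 * SECTION_NONE entries are not pinned to any particular phase. */
	static bool
	process_in_data_phase(teSection sec)
	{
		return sec == SECTION_DATA || sec == SECTION_NONE;
	}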
int outputClean;
int outputCreateDB;
- bool outputBlobs;
- bool dontOutputBlobs;
+ bool outputLOs;
+ bool dontOutputLOs;
int outputNoOwner;
char *outputSuperuser;
/* Called to write *data* to the archive */
extern void WriteData(Archive *AHX, const void *data, size_t dLen);
-extern int StartBlob(Archive *AHX, Oid oid);
-extern int EndBlob(Archive *AHX, Oid oid);
+extern int StartLO(Archive *AHX, Oid oid);
+extern int EndLO(Archive *AHX, Oid oid);
extern void CloseArchive(Archive *AHX);
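Taken together, these declarations spell out the dumper-side contract: each large object's payload is bracketed by StartLO()/EndLO(), with the bytes streamed through WriteData() in between. A minimal sketch of the call sequence for one object, assuming 'fout', 'oid', 'buf', and 'len' come from the surrounding dumper:

	/* Sketch only: bracket one object's data with StartLO/EndLO. */
	StartLO(fout, oid);
	WriteData(fout, buf, len);	/* typically called repeatedly, in chunks */
	EndLO(fout, oid);

This is the shape dumpLOs() follows further down, reading each object from the server with lo_read() and forwarding each chunk to WriteData().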
*/
if (strncmp(te->desc, "BLOB", 4) == 0)
{
- DropBlobIfExists(AH, te->catalogId.oid);
+ DropLOIfExists(AH, te->catalogId.oid);
}
else
{
}
/***********
- * BLOB Archival
+ * Large Object Archival
***********/
-/* Called by a dumper to signal start of a BLOB */
+/* Called by a dumper to signal start of a LO */
int
-StartBlob(Archive *AHX, Oid oid)
+StartLO(Archive *AHX, Oid oid)
{
ArchiveHandle *AH = (ArchiveHandle *) AHX;
- if (!AH->StartBlobPtr)
+ if (!AH->StartLOPtr)
pg_fatal("large-object output not supported in chosen format");
- AH->StartBlobPtr(AH, AH->currToc, oid);
+ AH->StartLOPtr(AH, AH->currToc, oid);
return 1;
}
-/* Called by a dumper to signal end of a BLOB */
+/* Called by a dumper to signal end of a LO */
int
-EndBlob(Archive *AHX, Oid oid)
+EndLO(Archive *AHX, Oid oid)
{
ArchiveHandle *AH = (ArchiveHandle *) AHX;
- if (AH->EndBlobPtr)
- AH->EndBlobPtr(AH, AH->currToc, oid);
+ if (AH->EndLOPtr)
+ AH->EndLOPtr(AH, AH->currToc, oid);
return 1;
}
/**********
- * BLOB Restoration
+ * Large Object Restoration
**********/
/*
- * Called by a format handler before any blobs are restored
+ * Called by a format handler before any LOs are restored
*/
void
-StartRestoreBlobs(ArchiveHandle *AH)
+StartRestoreLOs(ArchiveHandle *AH)
{
RestoreOptions *ropt = AH->public.ropt;
ahprintf(AH, "BEGIN;\n\n");
}
- AH->blobCount = 0;
+ AH->loCount = 0;
}
/*
- * Called by a format handler after all blobs are restored
+ * Called by a format handler after all LOs are restored
*/
void
-EndRestoreBlobs(ArchiveHandle *AH)
+EndRestoreLOs(ArchiveHandle *AH)
{
RestoreOptions *ropt = AH->public.ropt;
pg_log_info(ngettext("restored %d large object",
"restored %d large objects",
- AH->blobCount),
- AH->blobCount);
+ AH->loCount),
+ AH->loCount);
}
/*
- * Called by a format handler to initiate restoration of a blob
+ * Called by a format handler to initiate restoration of a LO
*/
void
-StartRestoreBlob(ArchiveHandle *AH, Oid oid, bool drop)
+StartRestoreLO(ArchiveHandle *AH, Oid oid, bool drop)
{
- bool old_blob_style = (AH->version < K_VERS_1_12);
+ bool old_lo_style = (AH->version < K_VERS_1_12);
Oid loOid;
- AH->blobCount++;
+ AH->loCount++;
/* Initialize the LO Buffer */
AH->lo_buf_used = 0;
pg_log_info("restoring large object with OID %u", oid);
/* With an old archive we must do drop and create logic here */
- if (old_blob_style && drop)
- DropBlobIfExists(AH, oid);
+ if (old_lo_style && drop)
+ DropLOIfExists(AH, oid);
if (AH->connection)
{
- if (old_blob_style)
+ if (old_lo_style)
{
loOid = lo_create(AH->connection, oid);
if (loOid == 0 || loOid != oid)
}
else
{
- if (old_blob_style)
+ if (old_lo_style)
ahprintf(AH, "SELECT pg_catalog.lo_open(pg_catalog.lo_create('%u'), %d);\n",
oid, INV_WRITE);
else
oid, INV_WRITE);
}
- AH->writingBlob = 1;
+ AH->writingLO = true;
}
void
-EndRestoreBlob(ArchiveHandle *AH, Oid oid)
+EndRestoreLO(ArchiveHandle *AH, Oid oid)
{
if (AH->lo_buf_used > 0)
{
dump_lo_buf(AH);
}
- AH->writingBlob = 0;
+ AH->writingLO = false;
if (AH->connection)
{
}
/*
- * Dump the current contents of the LO data buffer while writing a BLOB
+ * Dump the current contents of the LO data buffer while writing a LO
*/
static void
dump_lo_buf(ArchiveHandle *AH)
AH->lo_buf_used,
AH);
- /* Hack: turn off writingBlob so ahwrite doesn't recurse to here */
- AH->writingBlob = 0;
+ /* Hack: turn off writingLO so ahwrite doesn't recurse to here */
+ AH->writingLO = false;
ahprintf(AH, "SELECT pg_catalog.lowrite(0, %s);\n", buf->data);
- AH->writingBlob = 1;
+ AH->writingLO = true;
destroyPQExpBuffer(buf);
}
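In a plain-text dump this machinery emits an SQL restore script per large object: StartRestoreLO() opens the object, dump_lo_buf() flushes buffered data as lowrite() calls, and EndRestoreLO() closes it. Hedging on the exact OID and payload, the emitted script looks roughly like:

	SELECT pg_catalog.lo_open('16395', 131072);
	SELECT pg_catalog.lowrite(0, '\x310a320a330a');
	SELECT pg_catalog.lo_close(0);

(131072 is INV_WRITE; the OID and bytea literal here are hypothetical. The test regexps near the end of this patch match exactly this lo_open/lowrite pattern.)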
{
int bytes_written = 0;
- if (AH->writingBlob)
+ if (AH->writingLO)
{
size_t remaining = size * nmemb;
}
/*
- * Write out all data (tables & blobs)
+ * Write out all data (tables & LOs)
*/
void
WriteDataChunks(ArchiveHandle *AH, ParallelState *pstate)
if (strcmp(te->desc, "BLOBS") == 0)
{
- startPtr = AH->StartBlobsPtr;
- endPtr = AH->EndBlobsPtr;
+ startPtr = AH->StartLOsPtr;
+ endPtr = AH->EndLOsPtr;
}
else
{
if (!te->hadDumper)
{
/*
- * Special Case: If 'SEQUENCE SET' or anything to do with BLOBs, then
+ * Special Case: If 'SEQUENCE SET' or anything to do with LOs, then
* it is considered a data entry. We don't need to check for the
* BLOBS entry or old-style BLOB COMMENTS, because they will have
* hadDumper = true ... but we do need to check new-style BLOB ACLs,
appendPQExpBuffer(buf, "%s.", fmtId(te->namespace));
appendPQExpBufferStr(buf, fmtId(te->tag));
}
- /* BLOBs just have a name, but it's numeric so must not use fmtId */
+ /* LOs just have a name, but it's numeric so must not use fmtId */
else if (strcmp(type, "BLOB") == 0)
{
appendPQExpBuffer(buf, "LARGE OBJECT %s", te->tag);
/* Historical version numbers (checked in code) */
#define K_VERS_1_0 MAKE_ARCHIVE_VERSION(1, 0, 0)
#define K_VERS_1_2 MAKE_ARCHIVE_VERSION(1, 2, 0) /* Allow No ZLIB */
-#define K_VERS_1_3 MAKE_ARCHIVE_VERSION(1, 3, 0) /* BLOBs */
+#define K_VERS_1_3 MAKE_ARCHIVE_VERSION(1, 3, 0) /* BLOBS */
#define K_VERS_1_4 MAKE_ARCHIVE_VERSION(1, 4, 0) /* Date & name in header */
#define K_VERS_1_5 MAKE_ARCHIVE_VERSION(1, 5, 0) /* Handle dependencies */
#define K_VERS_1_6 MAKE_ARCHIVE_VERSION(1, 6, 0) /* Schema field in TOCs */
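These constants pack major/minor/revision into one comparable integer, a byte per component; MAKE_ARCHIVE_VERSION (defined nearby in this header) is essentially:

	#define MAKE_ARCHIVE_VERSION(major, minor, rev) \
		(((major) * 256 + (minor)) * 256 + (rev))

which is why checks such as AH->version < K_VERS_1_12 in the old/new LO-style tests above work as plain integer comparisons.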
typedef void (*WriteDataPtrType) (ArchiveHandle *AH, const void *data, size_t dLen);
typedef void (*EndDataPtrType) (ArchiveHandle *AH, TocEntry *te);
-typedef void (*StartBlobsPtrType) (ArchiveHandle *AH, TocEntry *te);
-typedef void (*StartBlobPtrType) (ArchiveHandle *AH, TocEntry *te, Oid oid);
-typedef void (*EndBlobPtrType) (ArchiveHandle *AH, TocEntry *te, Oid oid);
-typedef void (*EndBlobsPtrType) (ArchiveHandle *AH, TocEntry *te);
+typedef void (*StartLOsPtrType) (ArchiveHandle *AH, TocEntry *te);
+typedef void (*StartLOPtrType) (ArchiveHandle *AH, TocEntry *te, Oid oid);
+typedef void (*EndLOPtrType) (ArchiveHandle *AH, TocEntry *te, Oid oid);
+typedef void (*EndLOsPtrType) (ArchiveHandle *AH, TocEntry *te);
typedef int (*WriteBytePtrType) (ArchiveHandle *AH, const int i);
typedef int (*ReadBytePtrType) (ArchiveHandle *AH);
PrintExtraTocPtrType PrintExtraTocPtr; /* Extra TOC info for format */
PrintTocDataPtrType PrintTocDataPtr;
- StartBlobsPtrType StartBlobsPtr;
- EndBlobsPtrType EndBlobsPtr;
- StartBlobPtrType StartBlobPtr;
- EndBlobPtrType EndBlobPtr;
+ StartLOsPtrType StartLOsPtr;
+ EndLOsPtrType EndLOsPtr;
+ StartLOPtrType StartLOPtr;
+ EndLOPtrType EndLOPtr;
SetupWorkerPtrType SetupWorkerPtr;
WorkerJobDumpPtrType WorkerJobDumpPtr;
ArchiverOutput outputKind; /* Flag for what we're currently writing */
bool pgCopyIn; /* Currently in libpq 'COPY IN' mode. */
- int loFd; /* BLOB fd */
- int writingBlob; /* Flag */
- int blobCount; /* # of blobs restored */
+ int loFd;
+ bool writingLO;
+ int loCount; /* # of LOs restored */
char *fSpec; /* Archive File Spec */
FILE *FH; /* General purpose file handle */
int ReadOffset(ArchiveHandle *, pgoff_t *);
size_t WriteOffset(ArchiveHandle *, pgoff_t, int);
-extern void StartRestoreBlobs(ArchiveHandle *AH);
-extern void StartRestoreBlob(ArchiveHandle *AH, Oid oid, bool drop);
-extern void EndRestoreBlob(ArchiveHandle *AH, Oid oid);
-extern void EndRestoreBlobs(ArchiveHandle *AH);
+extern void StartRestoreLOs(ArchiveHandle *AH);
+extern void StartRestoreLO(ArchiveHandle *AH, Oid oid, bool drop);
+extern void EndRestoreLO(ArchiveHandle *AH, Oid oid);
+extern void EndRestoreLOs(ArchiveHandle *AH);
extern void InitArchiveFmt_Custom(ArchiveHandle *AH);
extern void InitArchiveFmt_Null(ArchiveHandle *AH);
extern bool isValidTarHeader(char *header);
extern void ReconnectToServer(ArchiveHandle *AH, const char *dbname);
-extern void DropBlobIfExists(ArchiveHandle *AH, Oid oid);
+extern void DropLOIfExists(ArchiveHandle *AH, Oid oid);
void ahwrite(const void *ptr, size_t size, size_t nmemb, ArchiveHandle *AH);
int ahprintf(ArchiveHandle *AH, const char *fmt,...) pg_attribute_printf(2, 3);
static void _PrintData(ArchiveHandle *AH);
static void _skipData(ArchiveHandle *AH);
-static void _skipBlobs(ArchiveHandle *AH);
+static void _skipLOs(ArchiveHandle *AH);
-static void _StartBlobs(ArchiveHandle *AH, TocEntry *te);
-static void _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
-static void _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
-static void _EndBlobs(ArchiveHandle *AH, TocEntry *te);
-static void _LoadBlobs(ArchiveHandle *AH, bool drop);
+static void _StartLOs(ArchiveHandle *AH, TocEntry *te);
+static void _StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid);
+static void _EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid);
+static void _EndLOs(ArchiveHandle *AH, TocEntry *te);
+static void _LoadLOs(ArchiveHandle *AH, bool drop);
static void _PrepParallelRestore(ArchiveHandle *AH);
static void _Clone(ArchiveHandle *AH);
AH->WriteExtraTocPtr = _WriteExtraToc;
AH->PrintExtraTocPtr = _PrintExtraToc;
- AH->StartBlobsPtr = _StartBlobs;
- AH->StartBlobPtr = _StartBlob;
- AH->EndBlobPtr = _EndBlob;
- AH->EndBlobsPtr = _EndBlobs;
+ AH->StartLOsPtr = _StartLOs;
+ AH->StartLOPtr = _StartLO;
+ AH->EndLOPtr = _EndLO;
+ AH->EndLOsPtr = _EndLOs;
AH->PrepParallelRestorePtr = _PrepParallelRestore;
AH->ClonePtr = _Clone;
/*
* Called by archiver when dumper calls WriteData. This routine is
- * called for both BLOB and TABLE data; it is the responsibility of
- * the format to manage each kind of data using StartBlob/StartData.
+ * called for both LO and table data; it is the responsibility of
+ * the format to manage each kind of data using StartLO/StartData.
*
* It should only be called from within a DataDumper routine.
*
/*
* Called by the archiver when starting to save all BLOB DATA (not schema).
* This routine should save whatever format-specific information is needed
- * to read the BLOBs back into memory.
+ * to read the LOs back into memory.
*
* It is called just prior to the dumper's DataDumper routine.
*
* Optional, but strongly recommended.
*/
static void
-_StartBlobs(ArchiveHandle *AH, TocEntry *te)
+_StartLOs(ArchiveHandle *AH, TocEntry *te)
{
lclContext *ctx = (lclContext *) AH->formatData;
lclTocEntry *tctx = (lclTocEntry *) te->formatData;
}
/*
- * Called by the archiver when the dumper calls StartBlob.
+ * Called by the archiver when the dumper calls StartLO.
*
* Mandatory.
*
* Must save the passed OID for retrieval at restore-time.
*/
static void
-_StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
+_StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid)
{
lclContext *ctx = (lclContext *) AH->formatData;
}
/*
- * Called by the archiver when the dumper calls EndBlob.
+ * Called by the archiver when the dumper calls EndLO.
*
* Optional.
*/
static void
-_EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
+_EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid)
{
lclContext *ctx = (lclContext *) AH->formatData;
* Optional.
*/
static void
-_EndBlobs(ArchiveHandle *AH, TocEntry *te)
+_EndLOs(ArchiveHandle *AH, TocEntry *te)
{
- /* Write out a fake zero OID to mark end-of-blobs. */
+ /* Write out a fake zero OID to mark end-of-LOs. */
WriteInt(AH, 0);
}
break;
case BLK_BLOBS:
- _skipBlobs(AH);
+ _skipLOs(AH);
break;
default: /* Always have a default */
break;
case BLK_BLOBS:
- _LoadBlobs(AH, AH->public.ropt->dropSchema);
+ _LoadLOs(AH, AH->public.ropt->dropSchema);
break;
default: /* Always have a default */
}
static void
-_LoadBlobs(ArchiveHandle *AH, bool drop)
+_LoadLOs(ArchiveHandle *AH, bool drop)
{
Oid oid;
- StartRestoreBlobs(AH);
+ StartRestoreLOs(AH);
oid = ReadInt(AH);
while (oid != 0)
{
- StartRestoreBlob(AH, oid, drop);
+ StartRestoreLO(AH, oid, drop);
_PrintData(AH);
- EndRestoreBlob(AH, oid);
+ EndRestoreLO(AH, oid);
oid = ReadInt(AH);
}
- EndRestoreBlobs(AH);
+ EndRestoreLOs(AH);
}
/*
- * Skip the BLOBs from the current file position.
- * BLOBS are written sequentially as data blocks (see below).
- * Each BLOB is preceded by its original OID.
- * A zero OID indicates the end of the BLOBS.
+ * Skip the LOs from the current file position.
+ * LOs are written sequentially as data blocks (see below).
+ * Each LO is preceded by its original OID.
+ * A zero OID indicates the end of the LOs.
*/
static void
-_skipBlobs(ArchiveHandle *AH)
+_skipLOs(ArchiveHandle *AH)
{
Oid oid;
* If an archive is to be written, this routine must call:
* WriteHead to save the archive header
* WriteToc to save the TOC entries
- * WriteDataChunks to save all DATA & BLOBs.
+ * WriteDataChunks to save all data & LOs.
*
*/
static void
}
void
-DropBlobIfExists(ArchiveHandle *AH, Oid oid)
+DropLOIfExists(ArchiveHandle *AH, Oid oid)
{
/*
* If we are not restoring to a direct database connection, we have to
- * guess about how to detect whether the blob exists. Assume new-style.
+ * guess about how to detect whether the LO exists. Assume new-style.
*/
if (AH->connection == NULL ||
PQserverVersion(AH->connection) >= 90000)
*
* A directory format dump is a directory, which contains a "toc.dat" file
* for the TOC, and a separate file for each data entry, named "<oid>.dat".
- * Large objects (BLOBs) are stored in separate files named "blob_<oid>.dat",
+ * Large objects are stored in separate files named "blob_<oid>.dat",
* and there's a plain-text TOC file for them called "blobs.toc". If
* compression is used, each data file is individually compressed and the
* ".gz" suffix is added to the filenames. The TOC files are never
cfp *dataFH; /* currently open data file */
- cfp *blobsTocFH; /* file handle for blobs.toc */
+ cfp *LOsTocFH; /* file handle for blobs.toc */
ParallelState *pstate; /* for parallel backup / restore */
} lclContext;
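For concreteness, a directory-format dump with two data entries and one large object might look like this (hypothetical OIDs, no compression):

	toc.dat
	3001.dat
	3002.dat
	blob_16395.dat
	blobs.toc

where blobs.toc pairs each large object's OID with its data file, one per line ("16395 blob_16395.dat"), the format written by _EndLO() and parsed back by _LoadLOs() below.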
static void _ReadExtraToc(ArchiveHandle *AH, TocEntry *te);
static void _PrintExtraToc(ArchiveHandle *AH, TocEntry *te);
-static void _StartBlobs(ArchiveHandle *AH, TocEntry *te);
-static void _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
-static void _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
-static void _EndBlobs(ArchiveHandle *AH, TocEntry *te);
-static void _LoadBlobs(ArchiveHandle *AH);
+static void _StartLOs(ArchiveHandle *AH, TocEntry *te);
+static void _StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid);
+static void _EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid);
+static void _EndLOs(ArchiveHandle *AH, TocEntry *te);
+static void _LoadLOs(ArchiveHandle *AH);
static void _PrepParallelRestore(ArchiveHandle *AH);
static void _Clone(ArchiveHandle *AH);
AH->WriteExtraTocPtr = _WriteExtraToc;
AH->PrintExtraTocPtr = _PrintExtraToc;
- AH->StartBlobsPtr = _StartBlobs;
- AH->StartBlobPtr = _StartBlob;
- AH->EndBlobPtr = _EndBlob;
- AH->EndBlobsPtr = _EndBlobs;
+ AH->StartLOsPtr = _StartLOs;
+ AH->StartLOPtr = _StartLO;
+ AH->EndLOPtr = _EndLO;
+ AH->EndLOsPtr = _EndLOs;
AH->PrepParallelRestorePtr = _PrepParallelRestore;
AH->ClonePtr = _Clone;
AH->formatData = (void *) ctx;
ctx->dataFH = NULL;
- ctx->blobsTocFH = NULL;
+ ctx->LOsTocFH = NULL;
/* Initialize LO buffering */
AH->lo_buf_size = LOBBUFSIZE;
/*
* Called by archiver when dumper calls WriteData. This routine is
- * called for both BLOB and TABLE data; it is the responsibility of
- * the format to manage each kind of data using StartBlob/StartData.
+ * called for both LO and table data; it is the responsibility of
+ * the format to manage each kind of data using StartLO/StartData.
*
* It should only be called from within a DataDumper routine.
*
}
/*
- * Print data for a given file (can be a BLOB as well)
+ * Print data for a given file (can be a LO as well)
*/
static void
_PrintFileData(ArchiveHandle *AH, char *filename)
return;
if (strcmp(te->desc, "BLOBS") == 0)
- _LoadBlobs(AH);
+ _LoadLOs(AH);
else
{
char fname[MAXPGPATH];
}
static void
-_LoadBlobs(ArchiveHandle *AH)
+_LoadLOs(ArchiveHandle *AH)
{
Oid oid;
lclContext *ctx = (lclContext *) AH->formatData;
char tocfname[MAXPGPATH];
char line[MAXPGPATH];
- StartRestoreBlobs(AH);
+ StartRestoreLOs(AH);
setFilePath(AH, tocfname, "blobs.toc");
- ctx->blobsTocFH = cfopen_read(tocfname, PG_BINARY_R);
+ ctx->LOsTocFH = cfopen_read(tocfname, PG_BINARY_R);
- if (ctx->blobsTocFH == NULL)
+ if (ctx->LOsTocFH == NULL)
pg_fatal("could not open large object TOC file \"%s\" for input: %m",
tocfname);
- /* Read the blobs TOC file line-by-line, and process each blob */
- while ((cfgets(ctx->blobsTocFH, line, MAXPGPATH)) != NULL)
+ /* Read the LOs TOC file line-by-line, and process each LO */
+ while ((cfgets(ctx->LOsTocFH, line, MAXPGPATH)) != NULL)
{
- char blobfname[MAXPGPATH + 1];
+ char lofname[MAXPGPATH + 1];
char path[MAXPGPATH];
- /* Can't overflow because line and blobfname are the same length */
- if (sscanf(line, "%u %" CppAsString2(MAXPGPATH) "s\n", &oid, blobfname) != 2)
+ /* Can't overflow because line and lofname are the same length */
+ if (sscanf(line, "%u %" CppAsString2(MAXPGPATH) "s\n", &oid, lofname) != 2)
pg_fatal("invalid line in large object TOC file \"%s\": \"%s\"",
tocfname, line);
- StartRestoreBlob(AH, oid, AH->public.ropt->dropSchema);
- snprintf(path, MAXPGPATH, "%s/%s", ctx->directory, blobfname);
+ StartRestoreLO(AH, oid, AH->public.ropt->dropSchema);
+ snprintf(path, MAXPGPATH, "%s/%s", ctx->directory, lofname);
_PrintFileData(AH, path);
- EndRestoreBlob(AH, oid);
+ EndRestoreLO(AH, oid);
}
- if (!cfeof(ctx->blobsTocFH))
+ if (!cfeof(ctx->LOsTocFH))
pg_fatal("error reading large object TOC file \"%s\"",
tocfname);
- if (cfclose(ctx->blobsTocFH) != 0)
+ if (cfclose(ctx->LOsTocFH) != 0)
pg_fatal("could not close large object TOC file \"%s\": %m",
tocfname);
- ctx->blobsTocFH = NULL;
+ ctx->LOsTocFH = NULL;
- EndRestoreBlobs(AH);
+ EndRestoreLOs(AH);
}
* If an archive is to be written, this routine must call:
* WriteHead to save the archive header
* WriteToc to save the TOC entries
- * WriteDataChunks to save all DATA & BLOBs.
+ * WriteDataChunks to save all data & LOs.
*/
static void
_CloseArchive(ArchiveHandle *AH)
}
/*
- * BLOB support
+ * LO support
*/
/*
* It is called just prior to the dumper's DataDumper routine.
*
* We open the large object TOC file here, so that we can append a line to
- * it for each blob.
+ * it for each LO.
*/
static void
-_StartBlobs(ArchiveHandle *AH, TocEntry *te)
+_StartLOs(ArchiveHandle *AH, TocEntry *te)
{
lclContext *ctx = (lclContext *) AH->formatData;
pg_compress_specification compression_spec = {0};
setFilePath(AH, fname, "blobs.toc");
- /* The blob TOC file is never compressed */
+ /* The LO TOC file is never compressed */
compression_spec.algorithm = PG_COMPRESSION_NONE;
- ctx->blobsTocFH = cfopen_write(fname, "ab", compression_spec);
- if (ctx->blobsTocFH == NULL)
+ ctx->LOsTocFH = cfopen_write(fname, "ab", compression_spec);
+ if (ctx->LOsTocFH == NULL)
pg_fatal("could not open output file \"%s\": %m", fname);
}
/*
- * Called by the archiver when we're about to start dumping a blob.
+ * Called by the archiver when we're about to start dumping a LO.
*
- * We create a file to write the blob to.
+ * We create a file to write the LO to.
*/
static void
-_StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
+_StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid)
{
lclContext *ctx = (lclContext *) AH->formatData;
char fname[MAXPGPATH];
}
/*
- * Called by the archiver when the dumper is finished writing a blob.
+ * Called by the archiver when the dumper is finished writing a LO.
*
- * We close the blob file and write an entry to the blob TOC file for it.
+ * We close the LO file and write an entry to the LO TOC file for it.
*/
static void
-_EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
+_EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid)
{
lclContext *ctx = (lclContext *) AH->formatData;
char buf[50];
int len;
- /* Close the BLOB data file itself */
+ /* Close the LO data file itself */
if (cfclose(ctx->dataFH) != 0)
- pg_fatal("could not close blob data file: %m");
+ pg_fatal("could not close LO data file: %m");
ctx->dataFH = NULL;
- /* register the blob in blobs.toc */
+ /* register the LO in blobs.toc */
len = snprintf(buf, sizeof(buf), "%u blob_%u.dat\n", oid, oid);
- if (cfwrite(buf, len, ctx->blobsTocFH) != len)
- pg_fatal("could not write to blobs TOC file");
+ if (cfwrite(buf, len, ctx->LOsTocFH) != len)
+ pg_fatal("could not write to LOs TOC file");
}
/*
* Called by the archiver when finishing saving all BLOB DATA.
*
- * We close the blobs TOC file.
+ * We close the LOs TOC file.
*/
static void
-_EndBlobs(ArchiveHandle *AH, TocEntry *te)
+_EndLOs(ArchiveHandle *AH, TocEntry *te)
{
lclContext *ctx = (lclContext *) AH->formatData;
- if (cfclose(ctx->blobsTocFH) != 0)
- pg_fatal("could not close blobs TOC file: %m");
- ctx->blobsTocFH = NULL;
+ if (cfclose(ctx->LOsTocFH) != 0)
+ pg_fatal("could not close LOs TOC file: %m");
+ ctx->LOsTocFH = NULL;
}
/*
* If this is the BLOBS entry, what we stat'd was blobs.toc, which
* most likely is a lot smaller than the actual blob data. We don't
* have a cheap way to estimate how much smaller, but fortunately it
- * doesn't matter too much as long as we get the blobs processed
+ * doesn't matter too much as long as we get the LOs processed
* reasonably early. Arbitrarily scale up by a factor of 1K.
*/
if (strcmp(te->desc, "BLOBS") == 0)
#include "pg_backup_utils.h"
static void _WriteData(ArchiveHandle *AH, const void *data, size_t dLen);
-static void _WriteBlobData(ArchiveHandle *AH, const void *data, size_t dLen);
+static void _WriteLOData(ArchiveHandle *AH, const void *data, size_t dLen);
static void _EndData(ArchiveHandle *AH, TocEntry *te);
static int _WriteByte(ArchiveHandle *AH, const int i);
static void _WriteBuf(ArchiveHandle *AH, const void *buf, size_t len);
static void _CloseArchive(ArchiveHandle *AH);
static void _PrintTocData(ArchiveHandle *AH, TocEntry *te);
-static void _StartBlobs(ArchiveHandle *AH, TocEntry *te);
-static void _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
-static void _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
-static void _EndBlobs(ArchiveHandle *AH, TocEntry *te);
+static void _StartLOs(ArchiveHandle *AH, TocEntry *te);
+static void _StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid);
+static void _EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid);
+static void _EndLOs(ArchiveHandle *AH, TocEntry *te);
/*
AH->ReopenPtr = NULL;
AH->PrintTocDataPtr = _PrintTocData;
- AH->StartBlobsPtr = _StartBlobs;
- AH->StartBlobPtr = _StartBlob;
- AH->EndBlobPtr = _EndBlob;
- AH->EndBlobsPtr = _EndBlobs;
+ AH->StartLOsPtr = _StartLOs;
+ AH->StartLOPtr = _StartLO;
+ AH->EndLOPtr = _EndLO;
+ AH->EndLOsPtr = _EndLOs;
AH->ClonePtr = NULL;
AH->DeClonePtr = NULL;
/*
* Called by dumper via archiver from within a data dump routine
- * We substitute this for _WriteData while emitting a BLOB
+ * We substitute this for _WriteData while emitting a LO
*/
static void
-_WriteBlobData(ArchiveHandle *AH, const void *data, size_t dLen)
+_WriteLOData(ArchiveHandle *AH, const void *data, size_t dLen)
{
if (dLen > 0)
{
/*
* Called by the archiver when starting to save all BLOB DATA (not schema).
* This routine should save whatever format-specific information is needed
- * to read the BLOBs back into memory.
+ * to read the LOs back into memory.
*
* It is called just prior to the dumper's DataDumper routine.
*
* Optional, but strongly recommended.
*/
static void
-_StartBlobs(ArchiveHandle *AH, TocEntry *te)
+_StartLOs(ArchiveHandle *AH, TocEntry *te)
{
ahprintf(AH, "BEGIN;\n\n");
}
/*
- * Called by the archiver when the dumper calls StartBlob.
+ * Called by the archiver when the dumper calls StartLO.
*
* Mandatory.
*
* Must save the passed OID for retrieval at restore-time.
*/
static void
-_StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
+_StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid)
{
- bool old_blob_style = (AH->version < K_VERS_1_12);
+ bool old_lo_style = (AH->version < K_VERS_1_12);
if (oid == 0)
pg_fatal("invalid OID for large object");
/* With an old archive we must do drop and create logic here */
- if (old_blob_style && AH->public.ropt->dropSchema)
- DropBlobIfExists(AH, oid);
+ if (old_lo_style && AH->public.ropt->dropSchema)
+ DropLOIfExists(AH, oid);
- if (old_blob_style)
+ if (old_lo_style)
ahprintf(AH, "SELECT pg_catalog.lo_open(pg_catalog.lo_create('%u'), %d);\n",
oid, INV_WRITE);
else
ahprintf(AH, "SELECT pg_catalog.lo_open('%u', %d);\n",
oid, INV_WRITE);
- AH->WriteDataPtr = _WriteBlobData;
+ AH->WriteDataPtr = _WriteLOData;
}
/*
- * Called by the archiver when the dumper calls EndBlob.
+ * Called by the archiver when the dumper calls EndLO.
*
* Optional.
*/
static void
-_EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
+_EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid)
{
AH->WriteDataPtr = _WriteData;
* Optional.
*/
static void
-_EndBlobs(ArchiveHandle *AH, TocEntry *te)
+_EndLOs(ArchiveHandle *AH, TocEntry *te)
{
ahprintf(AH, "COMMIT;\n\n");
}
AH->currToc = te;
if (strcmp(te->desc, "BLOBS") == 0)
- _StartBlobs(AH, te);
+ _StartLOs(AH, te);
te->dataDumper((Archive *) AH, te->dataDumperArg);
if (strcmp(te->desc, "BLOBS") == 0)
- _EndBlobs(AH, te);
+ _EndLOs(AH, te);
AH->currToc = NULL;
}
static void _ReadExtraToc(ArchiveHandle *AH, TocEntry *te);
static void _PrintExtraToc(ArchiveHandle *AH, TocEntry *te);
-static void _StartBlobs(ArchiveHandle *AH, TocEntry *te);
-static void _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
-static void _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
-static void _EndBlobs(ArchiveHandle *AH, TocEntry *te);
+static void _StartLOs(ArchiveHandle *AH, TocEntry *te);
+static void _StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid);
+static void _EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid);
+static void _EndLOs(ArchiveHandle *AH, TocEntry *te);
#define K_STD_BUF_SIZE 1024
{
int hasSeek;
pgoff_t filePos;
- TAR_MEMBER *blobToc;
+ TAR_MEMBER *loToc;
FILE *tarFH;
pgoff_t tarFHpos;
pgoff_t tarNextMember;
char *filename;
} lclTocEntry;
-static void _LoadBlobs(ArchiveHandle *AH);
+static void _LoadLOs(ArchiveHandle *AH);
static TAR_MEMBER *tarOpen(ArchiveHandle *AH, const char *filename, char mode);
static void tarClose(ArchiveHandle *AH, TAR_MEMBER *th);
AH->WriteExtraTocPtr = _WriteExtraToc;
AH->PrintExtraTocPtr = _PrintExtraToc;
- AH->StartBlobsPtr = _StartBlobs;
- AH->StartBlobPtr = _StartBlob;
- AH->EndBlobPtr = _EndBlob;
- AH->EndBlobsPtr = _EndBlobs;
+ AH->StartLOsPtr = _StartLOs;
+ AH->StartLOPtr = _StartLO;
+ AH->EndLOPtr = _EndLO;
+ AH->EndLOsPtr = _EndLOs;
AH->ClonePtr = NULL;
AH->DeClonePtr = NULL;
}
if (strcmp(te->desc, "BLOBS") == 0)
- _LoadBlobs(AH);
+ _LoadLOs(AH);
else
_PrintFileData(AH, tctx->filename);
}
static void
-_LoadBlobs(ArchiveHandle *AH)
+_LoadLOs(ArchiveHandle *AH)
{
Oid oid;
lclContext *ctx = (lclContext *) AH->formatData;
TAR_MEMBER *th;
size_t cnt;
- bool foundBlob = false;
+ bool foundLO = false;
char buf[4096];
- StartRestoreBlobs(AH);
+ StartRestoreLOs(AH);
th = tarOpen(AH, NULL, 'r'); /* Open next file */
while (th != NULL)
{
pg_log_info("restoring large object with OID %u", oid);
- StartRestoreBlob(AH, oid, AH->public.ropt->dropSchema);
+ StartRestoreLO(AH, oid, AH->public.ropt->dropSchema);
while ((cnt = tarRead(buf, 4095, th)) > 0)
{
buf[cnt] = '\0';
ahwrite(buf, 1, cnt, AH);
}
- EndRestoreBlob(AH, oid);
- foundBlob = true;
+ EndRestoreLO(AH, oid);
+ foundLO = true;
}
tarClose(AH, th);
}
tarClose(AH, th);
/*
- * Once we have found the first blob, stop at the first non-blob
+ * Once we have found the first LO, stop at the first non-LO
* entry (which will be 'blobs.toc'). This coding would eat all
- * the rest of the archive if there are no blobs ... but this
+ * the rest of the archive if there are no LOs ... but this
* function shouldn't be called at all in that case.
*/
- if (foundBlob)
+ if (foundLO)
break;
}
th = tarOpen(AH, NULL, 'r');
}
- EndRestoreBlobs(AH);
+ EndRestoreLOs(AH);
}
tarClose(AH, th); /* Not needed any more */
/*
- * Now send the data (tables & blobs)
+ * Now send the data (tables & LOs)
*/
WriteDataChunks(AH, NULL);
}
/*
- * BLOB support
+ * Large Object support
*/
/*
* Called by the archiver when starting to save all BLOB DATA (not schema).
* This routine should save whatever format-specific information is needed
- * to read the BLOBs back into memory.
+ * to read the LOs back into memory.
*
* It is called just prior to the dumper's DataDumper routine.
*
*
*/
static void
-_StartBlobs(ArchiveHandle *AH, TocEntry *te)
+_StartLOs(ArchiveHandle *AH, TocEntry *te)
{
lclContext *ctx = (lclContext *) AH->formatData;
char fname[K_STD_BUF_SIZE];
sprintf(fname, "blobs.toc");
- ctx->blobToc = tarOpen(AH, fname, 'w');
+ ctx->loToc = tarOpen(AH, fname, 'w');
}
/*
- * Called by the archiver when the dumper calls StartBlob.
+ * Called by the archiver when the dumper calls StartLO.
*
* Mandatory.
*
* Must save the passed OID for retrieval at restore-time.
*/
static void
-_StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
+_StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid)
{
lclContext *ctx = (lclContext *) AH->formatData;
lclTocEntry *tctx = (lclTocEntry *) te->formatData;
sprintf(fname, "blob_%u.dat", oid);
- tarPrintf(ctx->blobToc, "%u %s\n", oid, fname);
+ tarPrintf(ctx->loToc, "%u %s\n", oid, fname);
tctx->TH = tarOpen(AH, fname, 'w');
}
/*
- * Called by the archiver when the dumper calls EndBlob.
+ * Called by the archiver when the dumper calls EndLO.
*
* Optional.
*
*/
static void
-_EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
+_EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid)
{
lclTocEntry *tctx = (lclTocEntry *) te->formatData;
*
*/
static void
-_EndBlobs(ArchiveHandle *AH, TocEntry *te)
+_EndLOs(ArchiveHandle *AH, TocEntry *te)
{
lclContext *ctx = (lclContext *) AH->formatData;
- /* Write out a fake zero OID to mark end-of-blobs. */
+ /* Write out a fake zero OID to mark end-of-LOs. */
/* WriteInt(AH, 0); */
- tarClose(AH, ctx->blobToc);
+ tarClose(AH, ctx->loToc);
}
static char *getFormattedOperatorName(const char *oproid);
static char *convertTSFunction(Archive *fout, Oid funcOid);
static const char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
-static void getBlobs(Archive *fout);
-static void dumpBlob(Archive *fout, const BlobInfo *binfo);
-static int dumpBlobs(Archive *fout, const void *arg);
+static void getLOs(Archive *fout);
+static void dumpLO(Archive *fout, const LoInfo *loinfo);
+static int dumpLOs(Archive *fout, const void *arg);
static void dumpPolicy(Archive *fout, const PolicyInfo *polinfo);
static void dumpPublication(Archive *fout, const PublicationInfo *pubinfo);
static void dumpPublicationTable(Archive *fout, const PublicationRelInfo *pubrinfo);
static struct option long_options[] = {
{"data-only", no_argument, NULL, 'a'},
{"blobs", no_argument, NULL, 'b'},
+ {"large-objects", no_argument, NULL, 'b'},
{"no-blobs", no_argument, NULL, 'B'},
+ {"no-large-objects", no_argument, NULL, 'B'},
{"clean", no_argument, NULL, 'c'},
{"create", no_argument, NULL, 'C'},
{"dbname", required_argument, NULL, 'd'},
dopt.dataOnly = true;
break;
- case 'b': /* Dump blobs */
- dopt.outputBlobs = true;
+ case 'b': /* Dump LOs */
+ dopt.outputLOs = true;
break;
- case 'B': /* Don't dump blobs */
- dopt.dontOutputBlobs = true;
+ case 'B': /* Don't dump LOs */
+ dopt.dontOutputLOs = true;
break;
case 'c': /* clean (i.e., drop) schema prior to create */
}
/*
- * Dumping blobs is the default for dumps where an inclusion switch is not
- * used (an "include everything" dump). -B can be used to exclude blobs
- * from those dumps. -b can be used to include blobs even when an
+ * Dumping LOs is the default for dumps where an inclusion switch is not
+ * used (an "include everything" dump). -B can be used to exclude LOs
+ * from those dumps. -b can be used to include LOs even when an
* inclusion switch is used.
*
- * -s means "schema only" and blobs are data, not schema, so we never
- * include blobs when -s is used.
+ * -s means "schema only" and LOs are data, not schema, so we never
+ * include LOs when -s is used.
*/
- if (dopt.include_everything && !dopt.schemaOnly && !dopt.dontOutputBlobs)
- dopt.outputBlobs = true;
+ if (dopt.include_everything && !dopt.schemaOnly && !dopt.dontOutputLOs)
+ dopt.outputLOs = true;
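Read as a predicate, the rule above amounts to the following (a hypothetical restatement, not code from this patch):

	/* Hypothetical restatement of the LO-inclusion default. */
	static bool
	should_dump_los(const DumpOptions *dopt)
	{
		if (dopt->outputLOs)
			return true;		/* -b forces inclusion */
		if (dopt->dontOutputLOs || dopt->schemaOnly)
			return false;		/* -B or -s excludes */
		return dopt->include_everything;	/* default for full dumps */
	}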
/*
* Collect role names so we can map object owner OIDs to names.
getTableData(&dopt, tblinfo, numTables, RELKIND_SEQUENCE);
/*
- * In binary-upgrade mode, we do not have to worry about the actual blob
+ * In binary-upgrade mode, we do not have to worry about the actual LO
* data or the associated metadata that resides in the pg_largeobject and
* pg_largeobject_metadata tables, respectively.
*
- * However, we do need to collect blob information as there may be
- * comments or other information on blobs that we do need to dump out.
+ * However, we do need to collect LO information as there may be
+ * comments or other information on LOs that we do need to dump out.
*/
- if (dopt.outputBlobs || dopt.binary_upgrade)
- getBlobs(fout);
+ if (dopt.outputLOs || dopt.binary_upgrade)
+ getLOs(fout);
/*
* Collect dependency data to assist in ordering the objects.
printf(_("\nOptions controlling the output content:\n"));
printf(_(" -a, --data-only dump only the data, not the schema\n"));
- printf(_(" -b, --blobs include large objects in dump\n"));
- printf(_(" -B, --no-blobs exclude large objects in dump\n"));
+ printf(_(" -b, --large-objects, --blobs\n"
+ " include large objects in dump\n"));
+ printf(_(" -B, --no-large-objects, --no-blobs\n"
+ " exclude large objects in dump\n"));
printf(_(" -c, --clean clean (drop) database objects before recreating\n"));
printf(_(" -C, --create include commands to create database in dump\n"));
printf(_(" -e, --extension=PATTERN dump the specified extension(s) only\n"));
/*
- * getBlobs:
+ * getLOs:
* Collect schema-level data about large objects
*/
static void
-getBlobs(Archive *fout)
+getLOs(Archive *fout)
{
DumpOptions *dopt = fout->dopt;
- PQExpBuffer blobQry = createPQExpBuffer();
- BlobInfo *binfo;
- DumpableObject *bdata;
+ PQExpBuffer loQry = createPQExpBuffer();
+ LoInfo *loinfo;
+ DumpableObject *lodata;
PGresult *res;
int ntups;
int i;
pg_log_info("reading large objects");
- /* Fetch BLOB OIDs, and owner/ACL data */
- appendPQExpBufferStr(blobQry,
+ /* Fetch LO OIDs, and owner/ACL data */
+ appendPQExpBufferStr(loQry,
"SELECT oid, lomowner, lomacl, "
"acldefault('L', lomowner) AS acldefault "
"FROM pg_largeobject_metadata");
- res = ExecuteSqlQuery(fout, blobQry->data, PGRES_TUPLES_OK);
+ res = ExecuteSqlQuery(fout, loQry->data, PGRES_TUPLES_OK);
i_oid = PQfnumber(res, "oid");
i_lomowner = PQfnumber(res, "lomowner");
ntups = PQntuples(res);
/*
- * Each large object has its own BLOB archive entry.
+ * Each large object has its own "BLOB" archive entry.
*/
- binfo = (BlobInfo *) pg_malloc(ntups * sizeof(BlobInfo));
+ loinfo = (LoInfo *) pg_malloc(ntups * sizeof(LoInfo));
for (i = 0; i < ntups; i++)
{
- binfo[i].dobj.objType = DO_BLOB;
- binfo[i].dobj.catId.tableoid = LargeObjectRelationId;
- binfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
- AssignDumpId(&binfo[i].dobj);
+ loinfo[i].dobj.objType = DO_LARGE_OBJECT;
+ loinfo[i].dobj.catId.tableoid = LargeObjectRelationId;
+ loinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
+ AssignDumpId(&loinfo[i].dobj);
- binfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oid));
- binfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_lomacl));
- binfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
- binfo[i].dacl.privtype = 0;
- binfo[i].dacl.initprivs = NULL;
- binfo[i].rolname = getRoleName(PQgetvalue(res, i, i_lomowner));
+ loinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oid));
+ loinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_lomacl));
+ loinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
+ loinfo[i].dacl.privtype = 0;
+ loinfo[i].dacl.initprivs = NULL;
+ loinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_lomowner));
- /* Blobs have data */
- binfo[i].dobj.components |= DUMP_COMPONENT_DATA;
+ /* LOs have data */
+ loinfo[i].dobj.components |= DUMP_COMPONENT_DATA;
- /* Mark whether blob has an ACL */
+ /* Mark whether LO has an ACL */
if (!PQgetisnull(res, i, i_lomacl))
- binfo[i].dobj.components |= DUMP_COMPONENT_ACL;
+ loinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
/*
- * In binary-upgrade mode for blobs, we do *not* dump out the blob
+ * In binary-upgrade mode for LOs, we do *not* dump out the LO
* data, as it will be copied by pg_upgrade, which simply copies the
* pg_largeobject table. We *do* however dump out anything but the
* data, as pg_upgrade copies just pg_largeobject, but not
* pg_largeobject_metadata, after the dump is restored.
*/
if (dopt->binary_upgrade)
- binfo[i].dobj.dump &= ~DUMP_COMPONENT_DATA;
+ loinfo[i].dobj.dump &= ~DUMP_COMPONENT_DATA;
}
/*
*/
if (ntups > 0)
{
- bdata = (DumpableObject *) pg_malloc(sizeof(DumpableObject));
- bdata->objType = DO_BLOB_DATA;
- bdata->catId = nilCatalogId;
- AssignDumpId(bdata);
- bdata->name = pg_strdup("BLOBS");
- bdata->components |= DUMP_COMPONENT_DATA;
+ lodata = (DumpableObject *) pg_malloc(sizeof(DumpableObject));
+ lodata->objType = DO_LARGE_OBJECT_DATA;
+ lodata->catId = nilCatalogId;
+ AssignDumpId(lodata);
+ lodata->name = pg_strdup("BLOBS");
+ lodata->components |= DUMP_COMPONENT_DATA;
}
PQclear(res);
- destroyPQExpBuffer(blobQry);
+ destroyPQExpBuffer(loQry);
}
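The metadata query in getLOs() yields one row per large object; a hypothetical result, for orientation:

	  oid  | lomowner | lomacl |      acldefault
	-------+----------+--------+------------------------
	 16395 |       10 |        | {postgres=rw/postgres}

(values invented for illustration; lomacl is NULL when only default privileges exist, which is what the DUMP_COMPONENT_ACL test above keys on).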
/*
- * dumpBlob
+ * dumpLO
*
* dump the definition (metadata) of the given large object
*/
static void
-dumpBlob(Archive *fout, const BlobInfo *binfo)
+dumpLO(Archive *fout, const LoInfo *loinfo)
{
PQExpBuffer cquery = createPQExpBuffer();
PQExpBuffer dquery = createPQExpBuffer();
appendPQExpBuffer(cquery,
"SELECT pg_catalog.lo_create('%s');\n",
- binfo->dobj.name);
+ loinfo->dobj.name);
appendPQExpBuffer(dquery,
"SELECT pg_catalog.lo_unlink('%s');\n",
- binfo->dobj.name);
+ loinfo->dobj.name);
- if (binfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
- ArchiveEntry(fout, binfo->dobj.catId, binfo->dobj.dumpId,
- ARCHIVE_OPTS(.tag = binfo->dobj.name,
- .owner = binfo->rolname,
+ if (loinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
+ ArchiveEntry(fout, loinfo->dobj.catId, loinfo->dobj.dumpId,
+ ARCHIVE_OPTS(.tag = loinfo->dobj.name,
+ .owner = loinfo->rolname,
.description = "BLOB",
.section = SECTION_PRE_DATA,
.createStmt = cquery->data,
.dropStmt = dquery->data));
/* Dump comment if any */
- if (binfo->dobj.dump & DUMP_COMPONENT_COMMENT)
- dumpComment(fout, "LARGE OBJECT", binfo->dobj.name,
- NULL, binfo->rolname,
- binfo->dobj.catId, 0, binfo->dobj.dumpId);
+ if (loinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
+ dumpComment(fout, "LARGE OBJECT", loinfo->dobj.name,
+ NULL, loinfo->rolname,
+ loinfo->dobj.catId, 0, loinfo->dobj.dumpId);
/* Dump security label if any */
- if (binfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
- dumpSecLabel(fout, "LARGE OBJECT", binfo->dobj.name,
- NULL, binfo->rolname,
- binfo->dobj.catId, 0, binfo->dobj.dumpId);
+ if (loinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
+ dumpSecLabel(fout, "LARGE OBJECT", loinfo->dobj.name,
+ NULL, loinfo->rolname,
+ loinfo->dobj.catId, 0, loinfo->dobj.dumpId);
/* Dump ACL if any */
- if (binfo->dobj.dump & DUMP_COMPONENT_ACL)
- dumpACL(fout, binfo->dobj.dumpId, InvalidDumpId, "LARGE OBJECT",
- binfo->dobj.name, NULL,
- NULL, binfo->rolname, &binfo->dacl);
+ if (loinfo->dobj.dump & DUMP_COMPONENT_ACL)
+ dumpACL(fout, loinfo->dobj.dumpId, InvalidDumpId, "LARGE OBJECT",
+ loinfo->dobj.name, NULL,
+ NULL, loinfo->rolname, &loinfo->dacl);
destroyPQExpBuffer(cquery);
destroyPQExpBuffer(dquery);
}
/*
- * dumpBlobs:
+ * dumpLOs:
* dump the data contents of all large objects
*/
static int
-dumpBlobs(Archive *fout, const void *arg)
+dumpLOs(Archive *fout, const void *arg)
{
- const char *blobQry;
- const char *blobFetchQry;
+ const char *loQry;
+ const char *loFetchQry;
PGconn *conn = GetConnection(fout);
PGresult *res;
char buf[LOBBUFSIZE];
pg_log_info("saving large objects");
/*
- * Currently, we re-fetch all BLOB OIDs using a cursor. Consider scanning
+ * Currently, we re-fetch all LO OIDs using a cursor. Consider scanning
* the already-in-memory dumpable objects instead...
*/
- blobQry =
- "DECLARE bloboid CURSOR FOR "
+ loQry =
+ "DECLARE looid CURSOR FOR "
"SELECT oid FROM pg_largeobject_metadata ORDER BY 1";
- ExecuteSqlStatement(fout, blobQry);
+ ExecuteSqlStatement(fout, loQry);
/* Command to fetch from cursor */
- blobFetchQry = "FETCH 1000 IN bloboid";
+ loFetchQry = "FETCH 1000 IN looid";
do
{
/* Do a fetch */
- res = ExecuteSqlQuery(fout, blobFetchQry, PGRES_TUPLES_OK);
+ res = ExecuteSqlQuery(fout, loFetchQry, PGRES_TUPLES_OK);
/* Process the tuples, if any */
ntups = PQntuples(res);
for (i = 0; i < ntups; i++)
{
- Oid blobOid;
+ Oid loOid;
int loFd;
- blobOid = atooid(PQgetvalue(res, i, 0));
- /* Open the BLOB */
- loFd = lo_open(conn, blobOid, INV_READ);
+ loOid = atooid(PQgetvalue(res, i, 0));
+ /* Open the LO */
+ loFd = lo_open(conn, loOid, INV_READ);
if (loFd == -1)
pg_fatal("could not open large object %u: %s",
- blobOid, PQerrorMessage(conn));
+ loOid, PQerrorMessage(conn));
- StartBlob(fout, blobOid);
+ StartLO(fout, loOid);
/* Now read it in chunks, sending data to archive */
do
cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
if (cnt < 0)
pg_fatal("error reading large object %u: %s",
- blobOid, PQerrorMessage(conn));
+ loOid, PQerrorMessage(conn));
WriteData(fout, buf, cnt);
} while (cnt > 0);
lo_close(conn, loFd);
- EndBlob(fout, blobOid);
+ EndLO(fout, loOid);
}
PQclear(res);
if (dopt->no_comments)
return;
- /* Comments are schema not data ... except blob comments are data */
+ /* Comments are schema not data ... except LO comments are data */
if (strcmp(type, "LARGE OBJECT") != 0)
{
if (dopt->dataOnly)
}
else
{
- /* We do dump blob comments in binary-upgrade mode */
+ /* We do dump LO comments in binary-upgrade mode */
if (dopt->schemaOnly && !dopt->binary_upgrade)
return;
}
case DO_DEFAULT_ACL:
dumpDefaultACL(fout, (const DefaultACLInfo *) dobj);
break;
- case DO_BLOB:
- dumpBlob(fout, (const BlobInfo *) dobj);
+ case DO_LARGE_OBJECT:
+ dumpLO(fout, (const LoInfo *) dobj);
break;
- case DO_BLOB_DATA:
+ case DO_LARGE_OBJECT_DATA:
if (dobj->dump & DUMP_COMPONENT_DATA)
{
TocEntry *te;
ARCHIVE_OPTS(.tag = dobj->name,
.description = "BLOBS",
.section = SECTION_DATA,
- .dumpFn = dumpBlobs));
+ .dumpFn = dumpLOs));
/*
* Set the TocEntry's dataLength in case we are doing a
* parallel dump and want to order dump jobs by table size.
* (We need some size estimate for every TocEntry with a
* DataDumper function.) We don't currently have any cheap
- * way to estimate the size of blobs, but it doesn't matter;
+ * way to estimate the size of LOs, but it doesn't matter;
* let's just set the size to a large value so parallel dumps
- * will launch this job first. If there's lots of blobs, we
+ * will launch this job first. If there's lots of LOs, we
* win, and if there aren't, we don't lose much. (If you want
* to improve on this, really what you should be thinking
- * about is allowing blob dumping to be parallelized, not just
+ * about is allowing LO dumping to be parallelized, not just
* getting a smarter estimate for the single TOC entry.)
*/
te->dataLength = INT_MAX;
if (dopt->aclsSkip)
return InvalidDumpId;
- /* --data-only skips ACLs *except* BLOB ACLs */
+ /* --data-only skips ACLs *except* large object ACLs */
if (dopt->dataOnly && strcmp(type, "LARGE OBJECT") != 0)
return InvalidDumpId;
if (dopt->no_security_labels)
return;
- /* Security labels are schema not data ... except blob labels are data */
+ /* Security labels are schema not data ... except large object labels are data */
if (strcmp(type, "LARGE OBJECT") != 0)
{
if (dopt->dataOnly)
}
else
{
- /* We do dump blob security labels in binary-upgrade mode */
+ /* We do dump large object security labels in binary-upgrade mode */
if (dopt->schemaOnly && !dopt->binary_upgrade)
return;
}
case DO_FDW:
case DO_FOREIGN_SERVER:
case DO_TRANSFORM:
- case DO_BLOB:
+ case DO_LARGE_OBJECT:
/* Pre-data objects: must come before the pre-data boundary */
addObjectDependency(preDataBound, dobj->dumpId);
break;
case DO_TABLE_DATA:
case DO_SEQUENCE_SET:
- case DO_BLOB_DATA:
+ case DO_LARGE_OBJECT_DATA:
/* Data objects: must come between the boundaries */
addObjectDependency(dobj, preDataBound->dumpId);
addObjectDependency(postDataBound, dobj->dumpId);
DO_FOREIGN_SERVER,
DO_DEFAULT_ACL,
DO_TRANSFORM,
- DO_BLOB,
- DO_BLOB_DATA,
+ DO_LARGE_OBJECT,
+ DO_LARGE_OBJECT_DATA,
DO_PRE_DATA_BOUNDARY,
DO_POST_DATA_BOUNDARY,
DO_EVENT_TRIGGER,
char defaclobjtype;
} DefaultACLInfo;
-typedef struct _blobInfo
+typedef struct _loInfo
{
DumpableObject dobj;
DumpableAcl dacl;
const char *rolname;
-} BlobInfo;
+} LoInfo;
/*
* The PolicyInfo struct is used to represent policies on a table and
PRIO_TABLE_ATTACH,
PRIO_DUMMY_TYPE,
PRIO_ATTRDEF,
- PRIO_BLOB,
+ PRIO_LARGE_OBJECT,
PRIO_PRE_DATA_BOUNDARY, /* boundary! */
PRIO_TABLE_DATA,
PRIO_SEQUENCE_SET,
- PRIO_BLOB_DATA,
+ PRIO_LARGE_OBJECT_DATA,
PRIO_POST_DATA_BOUNDARY, /* boundary! */
PRIO_CONSTRAINT,
PRIO_INDEX,
PRIO_FOREIGN_SERVER, /* DO_FOREIGN_SERVER */
PRIO_DEFAULT_ACL, /* DO_DEFAULT_ACL */
PRIO_TRANSFORM, /* DO_TRANSFORM */
- PRIO_BLOB, /* DO_BLOB */
- PRIO_BLOB_DATA, /* DO_BLOB_DATA */
+ PRIO_LARGE_OBJECT, /* DO_LARGE_OBJECT */
+ PRIO_LARGE_OBJECT_DATA, /* DO_LARGE_OBJECT_DATA */
PRIO_PRE_DATA_BOUNDARY, /* DO_PRE_DATA_BOUNDARY */
PRIO_POST_DATA_BOUNDARY, /* DO_POST_DATA_BOUNDARY */
PRIO_EVENT_TRIGGER, /* DO_EVENT_TRIGGER */
"DEFAULT ACL %s (ID %d OID %u)",
obj->name, obj->dumpId, obj->catId.oid);
return;
- case DO_BLOB:
+ case DO_LARGE_OBJECT:
snprintf(buf, bufsize,
- "BLOB (ID %d OID %u)",
+ "LARGE OBJECT (ID %d OID %u)",
obj->dumpId, obj->catId.oid);
return;
- case DO_BLOB_DATA:
+ case DO_LARGE_OBJECT_DATA:
snprintf(buf, bufsize,
- "BLOB DATA (ID %d)",
+ "LARGE OBJECT DATA (ID %d)",
obj->dumpId);
return;
case DO_POLICY:
'--no-toast-compression', 'postgres',
],
},
- no_blobs => {
+ no_large_objects => {
dump_cmd => [
'pg_dump', '--no-sync',
- "--file=$tempdir/no_blobs.sql", '-B',
+ "--file=$tempdir/no_large_objects.sql", '-B',
'postgres',
],
},
'--section=post-data', '--no-sync', 'postgres',
],
},
- test_schema_plus_blobs => {
+ test_schema_plus_large_objects => {
dump_cmd => [
- 'pg_dump', "--file=$tempdir/test_schema_plus_blobs.sql",
+ 'pg_dump', "--file=$tempdir/test_schema_plus_large_objects.sql",
'--schema=dump_test', '-b', '-B', '--no-sync', 'postgres',
],
# Tests which target the 'dump_test' schema, specifically.
my %dump_test_schema_runs = (
only_dump_test_schema => 1,
- test_schema_plus_blobs => 1,);
+ test_schema_plus_large_objects => 1,);
# Tests which are considered 'full' dumps by pg_dump, but there
-# are flags used to exclude specific items (ACLs, blobs, etc).
+# are flags used to exclude specific items (ACLs, LOs, etc).
my %full_runs = (
binary_upgrade => 1,
clean => 1,
exclude_test_table => 1,
exclude_test_table_data => 1,
no_toast_compression => 1,
- no_blobs => 1,
+ no_large_objects => 1,
no_owner => 1,
no_privs => 1,
no_table_access_method => 1,
data_only => 1,
inserts => 1,
section_pre_data => 1,
- test_schema_plus_blobs => 1,
+ test_schema_plus_large_objects => 1,
},
unlike => {
- no_blobs => 1,
+ no_large_objects => 1,
no_owner => 1,
schema_only => 1,
},
},
},
- 'BLOB create (using lo_from_bytea)' => {
+ 'LO create (using lo_from_bytea)' => {
create_order => 50,
create_sql =>
'SELECT pg_catalog.lo_from_bytea(0, \'\\x310a320a330a340a350a360a370a380a390a\');',
data_only => 1,
inserts => 1,
section_pre_data => 1,
- test_schema_plus_blobs => 1,
+ test_schema_plus_large_objects => 1,
},
unlike => {
schema_only => 1,
- no_blobs => 1,
+ no_large_objects => 1,
},
},
- 'BLOB load (using lo_from_bytea)' => {
+ 'LO load (using lo_from_bytea)' => {
regexp => qr/^
\QSELECT pg_catalog.lo_open\E \('\d+',\ \d+\);\n
\QSELECT pg_catalog.lowrite(0, \E
data_only => 1,
inserts => 1,
section_data => 1,
- test_schema_plus_blobs => 1,
+ test_schema_plus_large_objects => 1,
},
unlike => {
binary_upgrade => 1,
- no_blobs => 1,
+ no_large_objects => 1,
schema_only => 1,
},
},
data_only => 1,
inserts => 1,
section_pre_data => 1,
- test_schema_plus_blobs => 1,
+ test_schema_plus_large_objects => 1,
},
unlike => {
- no_blobs => 1,
+ no_large_objects => 1,
schema_only => 1,
},
},
exclude_test_table => 1,
exclude_test_table_data => 1,
no_toast_compression => 1,
- no_blobs => 1,
+ no_large_objects => 1,
no_privs => 1,
no_owner => 1,
no_table_access_method => 1,
pg_dumpall_exclude => 1,
schema_only => 1,
section_post_data => 1,
- test_schema_plus_blobs => 1,
+ test_schema_plus_large_objects => 1,
},
unlike => {
exclude_dump_test_schema => 1,
exclude_test_table => 1,
exclude_test_table_data => 1,
no_toast_compression => 1,
- no_blobs => 1,
+ no_large_objects => 1,
no_privs => 1,
no_owner => 1,
no_table_access_method => 1,
pg_dumpall_globals => 1,
pg_dumpall_globals_clean => 1,
section_pre_data => 1,
- test_schema_plus_blobs => 1,
+ test_schema_plus_large_objects => 1,
},
},
data_only => 1,
inserts => 1,
section_pre_data => 1,
- test_schema_plus_blobs => 1,
+ test_schema_plus_large_objects => 1,
binary_upgrade => 1,
},
unlike => {
- no_blobs => 1,
+ no_large_objects => 1,
no_privs => 1,
schema_only => 1,
},
# as the regexps are used for each run the test applies to.
# Tests which are considered 'full' dumps by pg_dump, but there
-# are flags used to exclude specific items (ACLs, blobs, etc).
+# are flags used to exclude specific items (ACLs, LOs, etc).
my %full_runs = (
binary_upgrade => 1,
clean => 1,