From 81aa4ae59778f1193d6e1a8c81931502c941e997 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Yhuel?=
Date: Mon, 18 Sep 2023 08:18:19 +0200
Subject: [PATCH] pg_dump: fix OOM handling

Exit with better error messages when there's not enough memory to
process large objects.
---
 src/bin/pg_dump/pg_backup_archiver.c | 10 +++++++---
 src/bin/pg_dump/pg_backup_archiver.h |  6 ++++--
 src/bin/pg_dump/pg_dump.c            | 13 +++++++++++--
 3 files changed, 22 insertions(+), 7 deletions(-)

diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index 39ebcfec32..8370e26075 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -1112,13 +1112,17 @@ WriteData(Archive *AHX, const void *data, size_t dLen)
 
 /* Public */
 TocEntry *
-ArchiveEntry(Archive *AHX, CatalogId catalogId, DumpId dumpId,
-			 ArchiveOpts *opts)
+ArchiveEntry2(Archive *AHX, CatalogId catalogId, DumpId dumpId,
+			  ArchiveOpts *opts, const char* caller)
 {
 	ArchiveHandle *AH = (ArchiveHandle *) AHX;
 	TocEntry   *newToc;
 
-	newToc = (TocEntry *) pg_malloc0(sizeof(TocEntry));
+	newToc = (TocEntry *) pg_malloc_extended(sizeof(TocEntry), MCXT_ALLOC_NO_OOM|MCXT_ALLOC_ZERO);
+	if (newToc == NULL)
+	{
+		pg_fatal("%s: could not add a new archive entry: %s", caller, strerror(errno));
+	}
 
 	AH->tocCount++;
 	if (dumpId > AH->maxDumpId)
diff --git a/src/bin/pg_dump/pg_backup_archiver.h b/src/bin/pg_dump/pg_backup_archiver.h
index 18b38c17ab..60872744a6 100644
--- a/src/bin/pg_dump/pg_backup_archiver.h
+++ b/src/bin/pg_dump/pg_backup_archiver.h
@@ -400,8 +400,10 @@ typedef struct _archiveOpts
 } ArchiveOpts;
 #define ARCHIVE_OPTS(...) &(ArchiveOpts){__VA_ARGS__}
 /* Called to add a TOC entry */
-extern TocEntry *ArchiveEntry(Archive *AHX, CatalogId catalogId,
-							  DumpId dumpId, ArchiveOpts *opts);
+extern TocEntry *ArchiveEntry2(Archive *AHX, CatalogId catalogId,
+							   DumpId dumpId, ArchiveOpts *opts, const char* caller);
+
+#define ArchiveEntry(a, b, c, d) ArchiveEntry2(a, b, c, d, __func__)
 
 extern void WriteHead(ArchiveHandle *AH);
 extern void ReadHead(ArchiveHandle *AH);
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 7977d6a9c0..8413d4f115 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -3566,7 +3566,11 @@ getLOs(Archive *fout)
 	/*
 	 * Each large object has its own "BLOB" archive entry.
 	 */
-	loinfo = (LoInfo *) pg_malloc(ntups * sizeof(LoInfo));
+	loinfo = (LoInfo *) pg_malloc_extended(ntups * sizeof(LoInfo), MCXT_ALLOC_NO_OOM);
+	if (loinfo == NULL)
+	{
+		pg_fatal("getLOs: out of memory");
+	}
 
 	for (i = 0; i < ntups; i++)
 	{
@@ -3606,7 +3610,12 @@ getLOs(Archive *fout)
 	 */
 	if (ntups > 0)
 	{
-		lodata = (DumpableObject *) pg_malloc(sizeof(DumpableObject));
+		lodata = (DumpableObject *) pg_malloc_extended(sizeof(DumpableObject), MCXT_ALLOC_NO_OOM);
+		if (lodata == NULL)
+		{
+			pg_fatal("getLOs: out of memory");
+		}
+
 		lodata->objType = DO_LARGE_OBJECT_DATA;
 		lodata->catId = nilCatalogId;
 		AssignDumpId(lodata);
-- 
2.39.2