-rw-r--r--  contrib/intarray/_int_selfuncs.c | 2
-rw-r--r--  contrib/pageinspect/heapfuncs.c | 2
-rw-r--r--  contrib/pg_buffercache/pg_buffercache_pages.c | 14
-rw-r--r--  contrib/pg_visibility/Makefile | 1
-rw-r--r--  contrib/pg_visibility/expected/pg_visibility.out | 44
-rw-r--r--  contrib/pg_visibility/sql/pg_visibility.sql | 20
-rw-r--r--  contrib/uuid-ossp/uuid-ossp.c | 4
-rw-r--r--  doc/src/sgml/func/func-admin.sgml | 79
-rw-r--r--  doc/src/sgml/ref/psql-ref.sgml | 10
-rw-r--r--  src/backend/access/common/toast_compression.c | 8
-rw-r--r--  src/backend/access/heap/vacuumlazy.c | 314
-rw-r--r--  src/backend/access/heap/visibilitymap.c | 9
-rw-r--r--  src/backend/access/nbtree/nbtinsert.c | 4
-rw-r--r--  src/backend/access/spgist/spgquadtreeproc.c | 8
-rw-r--r--  src/backend/access/transam/xloginsert.c | 4
-rw-r--r--  src/backend/backup/backup_manifest.c | 2
-rw-r--r--  src/backend/backup/basebackup.c | 2
-rw-r--r--  src/backend/backup/basebackup_incremental.c | 6
-rw-r--r--  src/backend/executor/nodeIndexscan.c | 4
-rw-r--r--  src/backend/executor/nodeSeqscan.c | 4
-rw-r--r--  src/backend/libpq/auth-scram.c | 4
-rw-r--r--  src/backend/libpq/crypt.c | 4
-rw-r--r--  src/backend/nodes/nodeFuncs.c | 6
-rw-r--r--  src/backend/nodes/outfuncs.c | 12
-rw-r--r--  src/backend/nodes/tidbitmap.c | 4
-rw-r--r--  src/backend/optimizer/util/clauses.c | 11
-rw-r--r--  src/backend/port/sysv_shmem.c | 9
-rw-r--r--  src/backend/statistics/extended_stats.c | 4
-rw-r--r--  src/backend/statistics/extended_stats_funcs.c | 360
-rw-r--r--  src/backend/storage/ipc/shm_mq.c | 2
-rw-r--r--  src/backend/storage/ipc/shmem.c | 32
-rw-r--r--  src/backend/tsearch/spell.c | 4
-rw-r--r--  src/backend/utils/adt/geo_spgist.c | 4
-rw-r--r--  src/backend/utils/adt/json.c | 2
-rw-r--r--  src/backend/utils/adt/pg_locale_builtin.c | 2
-rw-r--r--  src/backend/utils/adt/rangetypes.c | 8
-rw-r--r--  src/backend/utils/adt/rangetypes_gist.c | 4
-rw-r--r--  src/backend/utils/sort/tuplesortvariants.c | 27
-rw-r--r--  src/bin/pg_basebackup/walmethods.c | 2
-rw-r--r--  src/bin/pg_combinebackup/meson.build | 2
-rw-r--r--  src/bin/pg_combinebackup/t/011_ib_truncation.pl (renamed from src/bin/pg_combinebackup/t/011_incremental_backup_truncation_block.pl) | 0
-rw-r--r--  src/bin/pg_rewind/filemap.c | 4
-rw-r--r--  src/bin/pg_verifybackup/astreamer_verify.c | 2
-rw-r--r--  src/bin/pg_walsummary/pg_walsummary.c | 4
-rw-r--r--  src/common/scram-common.c | 4
-rw-r--r--  src/common/unicode/case_test.c | 2
-rw-r--r--  src/common/unicode_case.c | 4
-rw-r--r--  src/include/access/tupmacs.h | 2
-rw-r--r--  src/include/access/visibilitymap.h | 18
-rw-r--r--  src/include/c.h | 20
-rw-r--r--  src/include/catalog/catversion.h | 2
-rw-r--r--  src/include/catalog/pg_proc.dat | 5
-rw-r--r--  src/include/common/hashfn_unstable.h | 2
-rw-r--r--  src/include/portability/mem.h | 2
-rw-r--r--  src/include/utils/memutils.h | 12
-rw-r--r--  src/include/varatt.h | 40
-rw-r--r--  src/interfaces/libpq/fe-auth-scram.c | 2
-rw-r--r--  src/interfaces/libpq/fe-auth.c | 4
-rw-r--r--  src/port/pg_crc32c_armv8.c | 10
-rw-r--r--  src/port/pg_popcount_aarch64.c | 4
-rw-r--r--  src/test/isolation/isolationtester.c | 8
-rw-r--r--  src/test/modules/libpq_pipeline/libpq_pipeline.c | 2
-rw-r--r--  src/test/modules/test_tidstore/test_tidstore.c | 8
-rw-r--r--  src/test/regress/expected/jsonb.out | 3
-rw-r--r--  src/test/regress/expected/stats_import.out | 263
-rw-r--r--  src/test/regress/expected/strings.out | 54
-rw-r--r--  src/test/regress/expected/tsearch.out | 1
-rw-r--r--  src/test/regress/sql/jsonb.sql | 3
-rw-r--r--  src/test/regress/sql/stats_import.sql | 190
-rw-r--r--  src/test/regress/sql/strings.sql | 24
-rw-r--r--  src/test/regress/sql/tsearch.sql | 1
-rw-r--r--  src/tools/pgindent/typedefs.list | 1
72 files changed, 1395 insertions, 350 deletions
diff --git a/contrib/intarray/_int_selfuncs.c b/contrib/intarray/_int_selfuncs.c
index c3e19cdf27f..4a7053028c6 100644
--- a/contrib/intarray/_int_selfuncs.c
+++ b/contrib/intarray/_int_selfuncs.c
@@ -328,7 +328,7 @@ int_query_opr_selec(ITEM *item, Datum *mcelems, float4 *mcefreqs,
static int
compare_val_int4(const void *a, const void *b)
{
- int32 key = *(int32 *) a;
+ int32 key = *(const int32 *) a;
int32 value = DatumGetInt32(*(const Datum *) b);
if (key < value)
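The hunk above is one instance of a pattern applied throughout this commit: comparators passed to qsort()/qsort_arg() receive const void * arguments, so the element casts inside should keep the const qualifier instead of silently discarding it. A minimal standalone sketch of that pattern (hypothetical compare_int32 over plain int32_t values, not code from the tree):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* const-correct comparator: no const qualifier is cast away */
static int
compare_int32(const void *a, const void *b)
{
    int32_t key = *(const int32_t *) a;
    int32_t value = *(const int32_t *) b;

    if (key < value)
        return -1;
    if (key > value)
        return 1;
    return 0;
}

int
main(void)
{
    int32_t v[] = {42, -3, 19, 7};

    qsort(v, sizeof(v) / sizeof(v[0]), sizeof(v[0]), compare_int32);
    for (size_t i = 0; i < sizeof(v) / sizeof(v[0]); i++)
        printf("%d\n", (int) v[i]);
    return 0;
}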
diff --git a/contrib/pageinspect/heapfuncs.c b/contrib/pageinspect/heapfuncs.c
index 1cf0b44e731..8277fa256c3 100644
--- a/contrib/pageinspect/heapfuncs.c
+++ b/contrib/pageinspect/heapfuncs.c
@@ -46,7 +46,7 @@ static inline Oid
HeapTupleHeaderGetOidOld(const HeapTupleHeaderData *tup)
{
if (tup->t_infomask & HEAP_HASOID_OLD)
- return *((Oid *) ((char *) (tup) + (tup)->t_hoff - sizeof(Oid)));
+ return *((const Oid *) ((const char *) (tup) + (tup)->t_hoff - sizeof(Oid)));
else
return InvalidOid;
}
diff --git a/contrib/pg_buffercache/pg_buffercache_pages.c b/contrib/pg_buffercache/pg_buffercache_pages.c
index dcba3fb5473..89b86855243 100644
--- a/contrib/pg_buffercache/pg_buffercache_pages.c
+++ b/contrib/pg_buffercache/pg_buffercache_pages.c
@@ -551,8 +551,18 @@ pg_buffercache_os_pages_internal(FunctionCallInfo fcinfo, bool include_numa)
if (fctx->include_numa)
{
- values[2] = Int32GetDatum(fctx->record[i].numa_node);
- nulls[2] = false;
+ /* status is valid node number */
+ if (fctx->record[i].numa_node >= 0)
+ {
+ values[2] = Int32GetDatum(fctx->record[i].numa_node);
+ nulls[2] = false;
+ }
+ else
+ {
+ /* some kind of error (e.g. pages moved to swap) */
+ values[2] = (Datum) 0;
+ nulls[2] = true;
+ }
}
else
{
diff --git a/contrib/pg_visibility/Makefile b/contrib/pg_visibility/Makefile
index d3cb411cc90..e5a74f32c48 100644
--- a/contrib/pg_visibility/Makefile
+++ b/contrib/pg_visibility/Makefile
@@ -10,6 +10,7 @@ DATA = pg_visibility--1.1.sql pg_visibility--1.1--1.2.sql \
pg_visibility--1.0--1.1.sql
PGFILEDESC = "pg_visibility - page visibility information"
+EXTRA_INSTALL = contrib/pageinspect
REGRESS = pg_visibility
TAP_TESTS = 1
diff --git a/contrib/pg_visibility/expected/pg_visibility.out b/contrib/pg_visibility/expected/pg_visibility.out
index 09fa5933a35..e10f1706015 100644
--- a/contrib/pg_visibility/expected/pg_visibility.out
+++ b/contrib/pg_visibility/expected/pg_visibility.out
@@ -1,4 +1,5 @@
CREATE EXTENSION pg_visibility;
+CREATE EXTENSION pageinspect;
--
-- recently-dropped table
--
@@ -204,6 +205,49 @@ select pg_truncate_visibility_map('test_partition');
(1 row)
+-- test the case where vacuum phase I does not need to modify the heap buffer
+-- and only needs to set the VM
+create table test_vac_unmodified_heap(a int);
+insert into test_vac_unmodified_heap values (1);
+vacuum (freeze) test_vac_unmodified_heap;
+select pg_visibility_map_summary('test_vac_unmodified_heap');
+ pg_visibility_map_summary
+---------------------------
+ (1,1)
+(1 row)
+
+-- the checkpoint cleans the buffer dirtied by freezing the sole tuple
+checkpoint;
+-- truncating the VM ensures that the next vacuum will need to set it
+select pg_truncate_visibility_map('test_vac_unmodified_heap');
+ pg_truncate_visibility_map
+----------------------------
+
+(1 row)
+
+select pg_visibility_map_summary('test_vac_unmodified_heap');
+ pg_visibility_map_summary
+---------------------------
+ (0,0)
+(1 row)
+
+-- though the VM is truncated, the heap page-level visibility hint,
+-- PD_ALL_VISIBLE, should still be set
+SELECT (flags & x'0004'::int) <> 0
+ FROM page_header(get_raw_page('test_vac_unmodified_heap', 0));
+ ?column?
+----------
+ t
+(1 row)
+
+-- vacuum sets the VM
+vacuum test_vac_unmodified_heap;
+select pg_visibility_map_summary('test_vac_unmodified_heap');
+ pg_visibility_map_summary
+---------------------------
+ (1,1)
+(1 row)
+
-- test copy freeze
create table copyfreeze (a int, b char(1500));
-- load all rows via COPY FREEZE and ensure that all pages are set all-visible
diff --git a/contrib/pg_visibility/sql/pg_visibility.sql b/contrib/pg_visibility/sql/pg_visibility.sql
index 5af06ec5b76..57af8a0c5b6 100644
--- a/contrib/pg_visibility/sql/pg_visibility.sql
+++ b/contrib/pg_visibility/sql/pg_visibility.sql
@@ -1,4 +1,5 @@
CREATE EXTENSION pg_visibility;
+CREATE EXTENSION pageinspect;
--
-- recently-dropped table
@@ -94,6 +95,25 @@ select count(*) > 0 from pg_visibility_map_summary('test_partition');
select * from pg_check_frozen('test_partition'); -- hopefully none
select pg_truncate_visibility_map('test_partition');
+-- test the case where vacuum phase I does not need to modify the heap buffer
+-- and only needs to set the VM
+create table test_vac_unmodified_heap(a int);
+insert into test_vac_unmodified_heap values (1);
+vacuum (freeze) test_vac_unmodified_heap;
+select pg_visibility_map_summary('test_vac_unmodified_heap');
+-- the checkpoint cleans the buffer dirtied by freezing the sole tuple
+checkpoint;
+-- truncating the VM ensures that the next vacuum will need to set it
+select pg_truncate_visibility_map('test_vac_unmodified_heap');
+select pg_visibility_map_summary('test_vac_unmodified_heap');
+-- though the VM is truncated, the heap page-level visibility hint,
+-- PD_ALL_VISIBLE, should still be set
+SELECT (flags & x'0004'::int) <> 0
+ FROM page_header(get_raw_page('test_vac_unmodified_heap', 0));
+-- vacuum sets the VM
+vacuum test_vac_unmodified_heap;
+select pg_visibility_map_summary('test_vac_unmodified_heap');
+
-- test copy freeze
create table copyfreeze (a int, b char(1500));
diff --git a/contrib/uuid-ossp/uuid-ossp.c b/contrib/uuid-ossp/uuid-ossp.c
index 70e698f4ab3..aa4d0becace 100644
--- a/contrib/uuid-ossp/uuid-ossp.c
+++ b/contrib/uuid-ossp/uuid-ossp.c
@@ -337,7 +337,7 @@ uuid_generate_internal(int v, unsigned char *ns, const char *ptr, int len)
elog(ERROR, "could not initialize %s context: %s", "MD5",
pg_cryptohash_error(ctx));
if (pg_cryptohash_update(ctx, ns, sizeof(uu)) < 0 ||
- pg_cryptohash_update(ctx, (unsigned char *) ptr, len) < 0)
+ pg_cryptohash_update(ctx, (const unsigned char *) ptr, len) < 0)
elog(ERROR, "could not update %s context: %s", "MD5",
pg_cryptohash_error(ctx));
/* we assume sizeof MD5 result is 16, same as UUID size */
@@ -356,7 +356,7 @@ uuid_generate_internal(int v, unsigned char *ns, const char *ptr, int len)
elog(ERROR, "could not initialize %s context: %s", "SHA1",
pg_cryptohash_error(ctx));
if (pg_cryptohash_update(ctx, ns, sizeof(uu)) < 0 ||
- pg_cryptohash_update(ctx, (unsigned char *) ptr, len) < 0)
+ pg_cryptohash_update(ctx, (const unsigned char *) ptr, len) < 0)
elog(ERROR, "could not update %s context: %s", "SHA1",
pg_cryptohash_error(ctx));
if (pg_cryptohash_final(ctx, sha1result, sizeof(sha1result)) < 0)
diff --git a/doc/src/sgml/func/func-admin.sgml b/doc/src/sgml/func/func-admin.sgml
index e7ea16f73b3..ea42056bbc9 100644
--- a/doc/src/sgml/func/func-admin.sgml
+++ b/doc/src/sgml/func/func-admin.sgml
@@ -2166,6 +2166,85 @@ SELECT pg_restore_attribute_stats(
</entry>
</row>
<row>
+ <entry role="func_table_entry"><para role="func_signature">
+ <indexterm>
+ <primary>pg_restore_extended_stats</primary>
+ </indexterm>
+ <function>pg_restore_extended_stats</function> (
+ <literal>VARIADIC</literal> <parameter>kwargs</parameter> <type>"any"</type> )
+ <returnvalue>boolean</returnvalue>
+ </para>
+ <para>
+ Creates or updates statistics for statistics objects. Ordinarily,
+ these statistics are collected automatically or updated as a part of
+ <xref linkend="sql-vacuum"/> or <xref linkend="sql-analyze"/>, so
+ it's not necessary to call this function. However, it is useful
+ after a restore to enable the optimizer to choose better plans if
+ <command>ANALYZE</command> has not been run yet.
+ </para>
+ <para>
+ The tracked statistics may change from version to version, so
+ arguments are passed as pairs of <replaceable>argname</replaceable>
+ and <replaceable>argvalue</replaceable> in the form:
+<programlisting>
+ SELECT pg_restore_extended_stats(
+ '<replaceable>arg1name</replaceable>', '<replaceable>arg1value</replaceable>'::<replaceable>arg1type</replaceable>,
+ '<replaceable>arg2name</replaceable>', '<replaceable>arg2value</replaceable>'::<replaceable>arg2type</replaceable>,
+ '<replaceable>arg3name</replaceable>', '<replaceable>arg3value</replaceable>'::<replaceable>arg3type</replaceable>);
+</programlisting>
+ </para>
+ <para>
+ For example, to set some values for the statistics object
+ <structname>stats_schema.stats_name</structname> on the table
+ <structname>tab_schema.tab_name</structname>:
+<programlisting>
+ SELECT pg_restore_extended_stats(
+ 'schemaname', 'tab_schema'::name,
+ 'relname', 'tab_name'::name,
+ 'statistics_schemaname', 'stats_schema'::name,
+ 'statistics_name', 'stats_name'::name,
+ 'inherited', false,
+ 'n_distinct', '[{"attributes" : [2,3], "ndistinct" : 4}]'::pg_ndistinct);
+</programlisting>
+ </para>
+ <para>
+ The required arguments are <literal>schemaname</literal> with a value
+ of type <type>name</type>, for the schema of the table to which the
+ statistics are related, <literal>relname</literal> with a value
+ of type <type>name</type>, for the table to which the statistics are
+ related, <literal>statistics_schemaname</literal>
+ with a value of type <type>name</type>, which specifies the statistics
+ object's schema, <literal>statistics_name</literal> with a value of
+ type <type>name</type>, which specifies the name of the statistics
+ object, and <literal>inherited</literal>, which specifies whether
+ the statistics include values from child tables.
+ </para>
+ <para>
+ Other arguments are the names and values of statistics corresponding
+ to columns in <link linkend="view-pg-stats-ext"><structname>pg_stats_ext</structname>
+ </link>.
+ This function currently supports <literal>n_distinct</literal> and
+ <literal>dependencies</literal>.
+ </para>
+ <para>
+ Additionally, this function accepts argument name
+ <literal>version</literal> of type <type>integer</type>, which
+ specifies the server version from which the statistics originated.
+ This is anticipated to be helpful in porting statistics from older
+ versions of <productname>PostgreSQL</productname>.
+ </para>
+ <para>
+ Minor errors are reported as a <literal>WARNING</literal> and
+ ignored, and remaining statistics will still be restored. If all
+ specified statistics are successfully restored, returns
+ <literal>true</literal>, otherwise <literal>false</literal>.
+ </para>
+ <para>
+ The caller must have the <literal>MAINTAIN</literal> privilege on the
+ table or be the owner of the database.
+ </para>
+ </entry>
+ </row>
+ <row>
<entry role="func_table_entry">
<para role="func_signature">
<indexterm>
diff --git a/doc/src/sgml/ref/psql-ref.sgml b/doc/src/sgml/ref/psql-ref.sgml
index f56c70263e0..e464e3b13de 100644
--- a/doc/src/sgml/ref/psql-ref.sgml
+++ b/doc/src/sgml/ref/psql-ref.sgml
@@ -1299,9 +1299,9 @@ SELECT $1 \parse stmt1
<replaceable class="parameter">pattern</replaceable>, show all
columns, their types, the tablespace (if not the default) and any
special attributes such as <literal>NOT NULL</literal> or defaults.
- Associated indexes, constraints, rules, and triggers are
- also shown. For foreign tables, the associated foreign
- server is shown as well.
+ Associated objects, such as indexes, constraints, rules, triggers,
+ and publications, are also shown. For foreign tables,
+ the associated foreign server is shown as well.
(<quote>Matching the pattern</quote> is defined in
<xref linkend="app-psql-patterns"/> below.)
</para>
@@ -1314,8 +1314,8 @@ SELECT $1 \parse stmt1
<para>
The command form <literal>\d+</literal> is identical, except that
- more information is displayed: any comments associated with the
- columns of the table are shown, as is the presence of OIDs in the
+ more information is displayed: for example, any comments associated
+ with the columns of the table, the presence of OIDs in the
table, the view definition if the relation is a view, a non-default
<link linkend="sql-altertable-replica-identity">replica
identity</link> setting and the
diff --git a/src/backend/access/common/toast_compression.c b/src/backend/access/common/toast_compression.c
index 1336328cc0b..d449613b21f 100644
--- a/src/backend/access/common/toast_compression.c
+++ b/src/backend/access/common/toast_compression.c
@@ -88,7 +88,7 @@ pglz_decompress_datum(const struct varlena *value)
result = (struct varlena *) palloc(VARDATA_COMPRESSED_GET_EXTSIZE(value) + VARHDRSZ);
/* decompress the data */
- rawsize = pglz_decompress((char *) value + VARHDRSZ_COMPRESSED,
+ rawsize = pglz_decompress((const char *) value + VARHDRSZ_COMPRESSED,
VARSIZE(value) - VARHDRSZ_COMPRESSED,
VARDATA(result),
VARDATA_COMPRESSED_GET_EXTSIZE(value), true);
@@ -116,7 +116,7 @@ pglz_decompress_datum_slice(const struct varlena *value,
result = (struct varlena *) palloc(slicelength + VARHDRSZ);
/* decompress the data */
- rawsize = pglz_decompress((char *) value + VARHDRSZ_COMPRESSED,
+ rawsize = pglz_decompress((const char *) value + VARHDRSZ_COMPRESSED,
VARSIZE(value) - VARHDRSZ_COMPRESSED,
VARDATA(result),
slicelength, false);
@@ -192,7 +192,7 @@ lz4_decompress_datum(const struct varlena *value)
result = (struct varlena *) palloc(VARDATA_COMPRESSED_GET_EXTSIZE(value) + VARHDRSZ);
/* decompress the data */
- rawsize = LZ4_decompress_safe((char *) value + VARHDRSZ_COMPRESSED,
+ rawsize = LZ4_decompress_safe((const char *) value + VARHDRSZ_COMPRESSED,
VARDATA(result),
VARSIZE(value) - VARHDRSZ_COMPRESSED,
VARDATA_COMPRESSED_GET_EXTSIZE(value));
@@ -229,7 +229,7 @@ lz4_decompress_datum_slice(const struct varlena *value, int32 slicelength)
result = (struct varlena *) palloc(slicelength + VARHDRSZ);
/* decompress the data */
- rawsize = LZ4_decompress_safe_partial((char *) value + VARHDRSZ_COMPRESSED,
+ rawsize = LZ4_decompress_safe_partial((const char *) value + VARHDRSZ_COMPRESSED,
VARDATA(result),
VARSIZE(value) - VARHDRSZ_COMPRESSED,
slicelength,
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 1fcb212ab3d..4be267ff657 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -246,13 +246,6 @@ typedef enum
*/
#define EAGER_SCAN_REGION_SIZE 4096
-/*
- * heap_vac_scan_next_block() sets these flags to communicate information
- * about the block it read to the caller.
- */
-#define VAC_BLK_WAS_EAGER_SCANNED (1 << 0)
-#define VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM (1 << 1)
-
typedef struct LVRelState
{
/* Target heap relation and its indexes */
@@ -358,7 +351,6 @@ typedef struct LVRelState
/* State maintained by heap_vac_scan_next_block() */
BlockNumber current_block; /* last block returned */
BlockNumber next_unskippable_block; /* next unskippable block */
- bool next_unskippable_allvis; /* its visibility status */
bool next_unskippable_eager_scanned; /* if it was eagerly scanned */
Buffer next_unskippable_vmbuffer; /* buffer containing its VM bit */
@@ -430,9 +422,14 @@ static void find_next_unskippable_block(LVRelState *vacrel, bool *skipsallvis);
static bool lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf,
BlockNumber blkno, Page page,
bool sharelock, Buffer vmbuffer);
+static void identify_and_fix_vm_corruption(Relation rel, Buffer heap_buffer,
+ BlockNumber heap_blk, Page heap_page,
+ int nlpdead_items,
+ Buffer vmbuffer,
+ uint8 *vmbits);
static int lazy_scan_prune(LVRelState *vacrel, Buffer buf,
BlockNumber blkno, Page page,
- Buffer vmbuffer, bool all_visible_according_to_vm,
+ Buffer vmbuffer,
bool *has_lpdead_items, bool *vm_page_frozen);
static bool lazy_scan_noprune(LVRelState *vacrel, Buffer buf,
BlockNumber blkno, Page page,
@@ -1275,7 +1272,6 @@ lazy_scan_heap(LVRelState *vacrel)
/* Initialize for the first heap_vac_scan_next_block() call */
vacrel->current_block = InvalidBlockNumber;
vacrel->next_unskippable_block = InvalidBlockNumber;
- vacrel->next_unskippable_allvis = false;
vacrel->next_unskippable_eager_scanned = false;
vacrel->next_unskippable_vmbuffer = InvalidBuffer;
@@ -1291,13 +1287,13 @@ lazy_scan_heap(LVRelState *vacrel)
MAIN_FORKNUM,
heap_vac_scan_next_block,
vacrel,
- sizeof(uint8));
+ sizeof(bool));
while (true)
{
Buffer buf;
Page page;
- uint8 blk_info = 0;
+ bool was_eager_scanned = false;
int ndeleted = 0;
bool has_lpdead_items;
void *per_buffer_data = NULL;
@@ -1366,13 +1362,13 @@ lazy_scan_heap(LVRelState *vacrel)
if (!BufferIsValid(buf))
break;
- blk_info = *((uint8 *) per_buffer_data);
+ was_eager_scanned = *((bool *) per_buffer_data);
CheckBufferIsPinnedOnce(buf);
page = BufferGetPage(buf);
blkno = BufferGetBlockNumber(buf);
vacrel->scanned_pages++;
- if (blk_info & VAC_BLK_WAS_EAGER_SCANNED)
+ if (was_eager_scanned)
vacrel->eager_scanned_pages++;
/* Report as block scanned, update error traceback information */
@@ -1443,7 +1439,6 @@ lazy_scan_heap(LVRelState *vacrel)
if (got_cleanup_lock)
ndeleted = lazy_scan_prune(vacrel, buf, blkno, page,
vmbuffer,
- blk_info & VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM,
&has_lpdead_items, &vm_page_frozen);
/*
@@ -1460,8 +1455,7 @@ lazy_scan_heap(LVRelState *vacrel)
* exclude pages skipped due to cleanup lock contention from eager
* freeze algorithm caps.
*/
- if (got_cleanup_lock &&
- (blk_info & VAC_BLK_WAS_EAGER_SCANNED))
+ if (got_cleanup_lock && was_eager_scanned)
{
/* Aggressive vacuums do not eager scan. */
Assert(!vacrel->aggressive);
@@ -1628,7 +1622,6 @@ heap_vac_scan_next_block(ReadStream *stream,
{
BlockNumber next_block;
LVRelState *vacrel = callback_private_data;
- uint8 blk_info = 0;
/* relies on InvalidBlockNumber + 1 overflowing to 0 on first call */
next_block = vacrel->current_block + 1;
@@ -1691,8 +1684,8 @@ heap_vac_scan_next_block(ReadStream *stream,
* otherwise they would've been unskippable.
*/
vacrel->current_block = next_block;
- blk_info |= VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM;
- *((uint8 *) per_buffer_data) = blk_info;
+ /* Block was not eager scanned */
+ *((bool *) per_buffer_data) = false;
return vacrel->current_block;
}
else
@@ -1704,11 +1697,7 @@ heap_vac_scan_next_block(ReadStream *stream,
Assert(next_block == vacrel->next_unskippable_block);
vacrel->current_block = next_block;
- if (vacrel->next_unskippable_allvis)
- blk_info |= VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM;
- if (vacrel->next_unskippable_eager_scanned)
- blk_info |= VAC_BLK_WAS_EAGER_SCANNED;
- *((uint8 *) per_buffer_data) = blk_info;
+ *((bool *) per_buffer_data) = vacrel->next_unskippable_eager_scanned;
return vacrel->current_block;
}
}
@@ -1733,7 +1722,6 @@ find_next_unskippable_block(LVRelState *vacrel, bool *skipsallvis)
BlockNumber next_unskippable_block = vacrel->next_unskippable_block + 1;
Buffer next_unskippable_vmbuffer = vacrel->next_unskippable_vmbuffer;
bool next_unskippable_eager_scanned = false;
- bool next_unskippable_allvis;
*skipsallvis = false;
@@ -1743,7 +1731,6 @@ find_next_unskippable_block(LVRelState *vacrel, bool *skipsallvis)
next_unskippable_block,
&next_unskippable_vmbuffer);
- next_unskippable_allvis = (mapbits & VISIBILITYMAP_ALL_VISIBLE) != 0;
/*
* At the start of each eager scan region, normal vacuums with eager
@@ -1762,7 +1749,7 @@ find_next_unskippable_block(LVRelState *vacrel, bool *skipsallvis)
* A block is unskippable if it is not all visible according to the
* visibility map.
*/
- if (!next_unskippable_allvis)
+ if ((mapbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
{
Assert((mapbits & VISIBILITYMAP_ALL_FROZEN) == 0);
break;
@@ -1819,7 +1806,6 @@ find_next_unskippable_block(LVRelState *vacrel, bool *skipsallvis)
/* write the local variables back to vacrel */
vacrel->next_unskippable_block = next_unskippable_block;
- vacrel->next_unskippable_allvis = next_unskippable_allvis;
vacrel->next_unskippable_eager_scanned = next_unskippable_eager_scanned;
vacrel->next_unskippable_vmbuffer = next_unskippable_vmbuffer;
}
@@ -1975,14 +1961,87 @@ cmpOffsetNumbers(const void *a, const void *b)
}
/*
+ * Helper to correct any corruption detected on a heap page and its
+ * corresponding visibility map page after pruning but before setting the
+ * visibility map. It examines the heap page, the associated VM page, and the
+ * number of dead items previously identified.
+ *
+ * This function must be called while holding an exclusive lock on the heap
+ * buffer, and the dead items must have been discovered under that same lock.
+ *
+ * The provided vmbits must reflect the current state of the VM block
+ * referenced by vmbuffer. Although we do not hold a lock on the VM buffer, it
+ * is pinned, and the heap buffer is exclusively locked, ensuring that no
+ * other backend can update the VM bits corresponding to this heap page.
+ *
+ * If it clears corruption, it will zero out vmbits.
+ */
+static void
+identify_and_fix_vm_corruption(Relation rel, Buffer heap_buffer,
+ BlockNumber heap_blk, Page heap_page,
+ int nlpdead_items,
+ Buffer vmbuffer,
+ uint8 *vmbits)
+{
+ Assert(visibilitymap_get_status(rel, heap_blk, &vmbuffer) == *vmbits);
+
+ Assert(BufferIsLockedByMeInMode(heap_buffer, BUFFER_LOCK_EXCLUSIVE));
+
+ /*
+ * As of PostgreSQL 9.2, the visibility map bit should never be set if the
+ * page-level bit is clear. However, it's possible that the bit got
+ * cleared after heap_vac_scan_next_block() was called, so we must recheck
+ * with buffer lock before concluding that the VM is corrupt.
+ */
+ if (!PageIsAllVisible(heap_page) &&
+ ((*vmbits & VISIBILITYMAP_VALID_BITS) != 0))
+ {
+ ereport(WARNING,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
+ RelationGetRelationName(rel), heap_blk)));
+
+ visibilitymap_clear(rel, heap_blk, vmbuffer,
+ VISIBILITYMAP_VALID_BITS);
+ *vmbits = 0;
+ }
+
+ /*
+ * It's possible for the value returned by
+ * GetOldestNonRemovableTransactionId() to move backwards, so it's not
+ * wrong for us to see tuples that appear to not be visible to everyone
+ * yet, while PD_ALL_VISIBLE is already set. The real safe xmin value
+ * never moves backwards, but GetOldestNonRemovableTransactionId() is
+ * conservative and sometimes returns a value that's unnecessarily small,
+ * so if we see that contradiction it just means that the tuples that we
+ * think are not visible to everyone yet actually are, and the
+ * PD_ALL_VISIBLE flag is correct.
+ *
+ * There should never be LP_DEAD items on a page with PD_ALL_VISIBLE set,
+ * however.
+ */
+ else if (PageIsAllVisible(heap_page) && nlpdead_items > 0)
+ {
+ ereport(WARNING,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u",
+ RelationGetRelationName(rel), heap_blk)));
+
+ PageClearAllVisible(heap_page);
+ MarkBufferDirty(heap_buffer);
+ visibilitymap_clear(rel, heap_blk, vmbuffer,
+ VISIBILITYMAP_VALID_BITS);
+ *vmbits = 0;
+ }
+}
+
+/*
* lazy_scan_prune() -- lazy_scan_heap() pruning and freezing.
*
* Caller must hold pin and buffer cleanup lock on the buffer.
*
* vmbuffer is the buffer containing the VM block with visibility information
- * for the heap block, blkno. all_visible_according_to_vm is the saved
- * visibility status of the heap block looked up earlier by the caller. We
- * won't rely entirely on this status, as it may be out of date.
+ * for the heap block, blkno.
*
* *has_lpdead_items is set to true or false depending on whether, upon return
* from this function, any LP_DEAD items are still present on the page.
@@ -1999,7 +2058,6 @@ lazy_scan_prune(LVRelState *vacrel,
BlockNumber blkno,
Page page,
Buffer vmbuffer,
- bool all_visible_according_to_vm,
bool *has_lpdead_items,
bool *vm_page_frozen)
{
@@ -2013,6 +2071,8 @@ lazy_scan_prune(LVRelState *vacrel,
.vistest = vacrel->vistest,
.cutoffs = &vacrel->cutoffs,
};
+ uint8 old_vmbits = 0;
+ uint8 new_vmbits = 0;
Assert(BufferGetBlockNumber(buf) == blkno);
@@ -2115,165 +2175,71 @@ lazy_scan_prune(LVRelState *vacrel,
Assert(!presult.all_visible || !(*has_lpdead_items));
Assert(!presult.all_frozen || presult.all_visible);
- /*
- * Handle setting visibility map bit based on information from the VM (as
- * of last heap_vac_scan_next_block() call), and from all_visible and
- * all_frozen variables
- */
- if (!all_visible_according_to_vm && presult.all_visible)
- {
- uint8 old_vmbits;
- uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
+ old_vmbits = visibilitymap_get_status(vacrel->rel, blkno, &vmbuffer);
- if (presult.all_frozen)
- {
- Assert(!TransactionIdIsValid(presult.vm_conflict_horizon));
- flags |= VISIBILITYMAP_ALL_FROZEN;
- }
+ identify_and_fix_vm_corruption(vacrel->rel, buf, blkno, page,
+ presult.lpdead_items, vmbuffer,
+ &old_vmbits);
- /*
- * It should never be the case that the visibility map page is set
- * while the page-level bit is clear, but the reverse is allowed (if
- * checksums are not enabled). Regardless, set both bits so that we
- * get back in sync.
- *
- * NB: If the heap page is all-visible but the VM bit is not set, we
- * don't need to dirty the heap page. However, if checksums are
- * enabled, we do need to make sure that the heap page is dirtied
- * before passing it to visibilitymap_set(), because it may be logged.
- * Given that this situation should only happen in rare cases after a
- * crash, it is not worth optimizing.
- */
- PageSetAllVisible(page);
- MarkBufferDirty(buf);
- old_vmbits = visibilitymap_set(vacrel->rel, blkno, buf,
- InvalidXLogRecPtr,
- vmbuffer, presult.vm_conflict_horizon,
- flags);
+ if (!presult.all_visible)
+ return presult.ndeleted;
- /*
- * If the page wasn't already set all-visible and/or all-frozen in the
- * VM, count it as newly set for logging.
- */
- if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
- {
- vacrel->vm_new_visible_pages++;
- if (presult.all_frozen)
- {
- vacrel->vm_new_visible_frozen_pages++;
- *vm_page_frozen = true;
- }
- }
- else if ((old_vmbits & VISIBILITYMAP_ALL_FROZEN) == 0 &&
- presult.all_frozen)
- {
- vacrel->vm_new_frozen_pages++;
- *vm_page_frozen = true;
- }
- }
+ /* Set the visibility map and page visibility hint */
+ new_vmbits = VISIBILITYMAP_ALL_VISIBLE;
- /*
- * As of PostgreSQL 9.2, the visibility map bit should never be set if the
- * page-level bit is clear. However, it's possible that the bit got
- * cleared after heap_vac_scan_next_block() was called, so we must recheck
- * with buffer lock before concluding that the VM is corrupt.
- */
- else if (all_visible_according_to_vm && !PageIsAllVisible(page) &&
- visibilitymap_get_status(vacrel->rel, blkno, &vmbuffer) != 0)
- {
- ereport(WARNING,
- (errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
- vacrel->relname, blkno)));
+ if (presult.all_frozen)
+ new_vmbits |= VISIBILITYMAP_ALL_FROZEN;
- visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
- VISIBILITYMAP_VALID_BITS);
- }
+ /* Nothing to do */
+ if (old_vmbits == new_vmbits)
+ return presult.ndeleted;
/*
- * It's possible for the value returned by
- * GetOldestNonRemovableTransactionId() to move backwards, so it's not
- * wrong for us to see tuples that appear to not be visible to everyone
- * yet, while PD_ALL_VISIBLE is already set. The real safe xmin value
- * never moves backwards, but GetOldestNonRemovableTransactionId() is
- * conservative and sometimes returns a value that's unnecessarily small,
- * so if we see that contradiction it just means that the tuples that we
- * think are not visible to everyone yet actually are, and the
- * PD_ALL_VISIBLE flag is correct.
+ * It should never be the case that the visibility map page is set while
+ * the page-level bit is clear (and if so, we cleared it above), but the
+ * reverse is allowed (if checksums are not enabled). Regardless, set both
+ * bits so that we get back in sync.
*
- * There should never be LP_DEAD items on a page with PD_ALL_VISIBLE set,
- * however.
+ * The heap buffer must be marked dirty before adding it to the WAL chain
+ * when setting the VM. We don't worry about unnecessarily dirtying the
+ * heap buffer if PD_ALL_VISIBLE is already set, though. It is extremely
+ * rare to have a clean heap buffer with PD_ALL_VISIBLE already set and
+ * the VM bits clear, so there is no point in optimizing it.
*/
- else if (presult.lpdead_items > 0 && PageIsAllVisible(page))
- {
- ereport(WARNING,
- (errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u",
- vacrel->relname, blkno)));
-
- PageClearAllVisible(page);
- MarkBufferDirty(buf);
- visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
- VISIBILITYMAP_VALID_BITS);
- }
+ PageSetAllVisible(page);
+ MarkBufferDirty(buf);
/*
- * If the all-visible page is all-frozen but not marked as such yet, mark
- * it as all-frozen.
+ * If the page is being set all-frozen, we pass InvalidTransactionId as
+ * the cutoff_xid, since a snapshot conflict horizon sufficient to make
+ * everything safe for REDO was logged when the page's tuples were frozen.
*/
- else if (all_visible_according_to_vm && presult.all_frozen &&
- !VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
- {
- uint8 old_vmbits;
-
- /*
- * Avoid relying on all_visible_according_to_vm as a proxy for the
- * page-level PD_ALL_VISIBLE bit being set, since it might have become
- * stale -- even when all_visible is set
- */
- if (!PageIsAllVisible(page))
- {
- PageSetAllVisible(page);
- MarkBufferDirty(buf);
- }
+ Assert(!presult.all_frozen ||
+ !TransactionIdIsValid(presult.vm_conflict_horizon));
- /*
- * Set the page all-frozen (and all-visible) in the VM.
- *
- * We can pass InvalidTransactionId as our cutoff_xid, since a
- * snapshotConflictHorizon sufficient to make everything safe for REDO
- * was logged when the page's tuples were frozen.
- */
- Assert(!TransactionIdIsValid(presult.vm_conflict_horizon));
- old_vmbits = visibilitymap_set(vacrel->rel, blkno, buf,
- InvalidXLogRecPtr,
- vmbuffer, InvalidTransactionId,
- VISIBILITYMAP_ALL_VISIBLE |
- VISIBILITYMAP_ALL_FROZEN);
+ visibilitymap_set(vacrel->rel, blkno, buf,
+ InvalidXLogRecPtr,
+ vmbuffer, presult.vm_conflict_horizon,
+ new_vmbits);
- /*
- * The page was likely already set all-visible in the VM. However,
- * there is a small chance that it was modified sometime between
- * setting all_visible_according_to_vm and checking the visibility
- * during pruning. Check the return value of old_vmbits anyway to
- * ensure the visibility map counters used for logging are accurate.
- */
- if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
+ /*
+ * If the page wasn't already set all-visible and/or all-frozen in the VM,
+ * count it as newly set for logging.
+ */
+ if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
+ {
+ vacrel->vm_new_visible_pages++;
+ if (presult.all_frozen)
{
- vacrel->vm_new_visible_pages++;
vacrel->vm_new_visible_frozen_pages++;
*vm_page_frozen = true;
}
-
- /*
- * We already checked that the page was not set all-frozen in the VM
- * above, so we don't need to test the value of old_vmbits.
- */
- else
- {
- vacrel->vm_new_frozen_pages++;
- *vm_page_frozen = true;
- }
+ }
+ else if ((old_vmbits & VISIBILITYMAP_ALL_FROZEN) == 0 &&
+ presult.all_frozen)
+ {
+ vacrel->vm_new_frozen_pages++;
+ *vm_page_frozen = true;
}
return presult.ndeleted;
diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c
index 2382d18f72b..3047bd46def 100644
--- a/src/backend/access/heap/visibilitymap.c
+++ b/src/backend/access/heap/visibilitymap.c
@@ -240,10 +240,8 @@ visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf)
* You must pass a buffer containing the correct map page to this function.
* Call visibilitymap_pin first to pin the right one. This function doesn't do
* any I/O.
- *
- * Returns the state of the page's VM bits before setting flags.
*/
-uint8
+void
visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid,
uint8 flags)
@@ -320,7 +318,6 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
}
LockBuffer(vmBuf, BUFFER_LOCK_UNLOCK);
- return status;
}
/*
@@ -343,7 +340,7 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
*
* rlocator is used only for debugging messages.
*/
-uint8
+void
visibilitymap_set_vmbits(BlockNumber heapBlk,
Buffer vmBuf, uint8 flags,
const RelFileLocator rlocator)
@@ -386,8 +383,6 @@ visibilitymap_set_vmbits(BlockNumber heapBlk,
map[mapByte] |= (flags << mapOffset);
MarkBufferDirty(vmBuf);
}
-
- return status;
}
/*
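With visibilitymap_set() and visibilitymap_set_vmbits() no longer returning the prior bits, a caller that needs them now reads the VM itself before setting it, as the reworked lazy_scan_prune() does above. A condensed sketch of that caller-side pattern, assuming the backend context of lazy_scan_prune() (rel, blkno, buf, page, vmbuffer, the all-frozen decision, and the conflict horizon from pruning are in scope as in the hunks above):

    uint8       old_vmbits;
    uint8       new_vmbits = VISIBILITYMAP_ALL_VISIBLE;

    /* fetch the current bits while holding a pin on vmbuffer */
    old_vmbits = visibilitymap_get_status(rel, blkno, &vmbuffer);

    if (all_frozen)
        new_vmbits |= VISIBILITYMAP_ALL_FROZEN;

    /* nothing to do if the VM already agrees */
    if (old_vmbits != new_vmbits)
    {
        PageSetAllVisible(page);
        MarkBufferDirty(buf);
        visibilitymap_set(rel, blkno, buf, InvalidXLogRecPtr,
                          vmbuffer, vm_conflict_horizon, new_vmbits);
    }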
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 63eda08f7a2..d17aaa5aa0f 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -3023,8 +3023,8 @@ _bt_deadblocks(Page page, OffsetNumber *deletable, int ndeletable,
static inline int
_bt_blk_cmp(const void *arg1, const void *arg2)
{
- BlockNumber b1 = *((BlockNumber *) arg1);
- BlockNumber b2 = *((BlockNumber *) arg2);
+ BlockNumber b1 = *((const BlockNumber *) arg1);
+ BlockNumber b2 = *((const BlockNumber *) arg2);
return pg_cmp_u32(b1, b2);
}
diff --git a/src/backend/access/spgist/spgquadtreeproc.c b/src/backend/access/spgist/spgquadtreeproc.c
index 39e7749fe16..946dabc4527 100644
--- a/src/backend/access/spgist/spgquadtreeproc.c
+++ b/src/backend/access/spgist/spgquadtreeproc.c
@@ -147,8 +147,8 @@ spg_quad_choose(PG_FUNCTION_ARGS)
static int
x_cmp(const void *a, const void *b, void *arg)
{
- Point *pa = *(Point **) a;
- Point *pb = *(Point **) b;
+ Point *pa = *(Point *const *) a;
+ Point *pb = *(Point *const *) b;
if (pa->x == pb->x)
return 0;
@@ -158,8 +158,8 @@ x_cmp(const void *a, const void *b, void *arg)
static int
y_cmp(const void *a, const void *b, void *arg)
{
- Point *pa = *(Point **) a;
- Point *pb = *(Point **) b;
+ Point *pa = *(Point *const *) a;
+ Point *pb = *(Point *const *) b;
if (pa->y == pb->y)
return 0;
diff --git a/src/backend/access/transam/xloginsert.c b/src/backend/access/transam/xloginsert.c
index 92c48e768c3..f928bc7c0ef 100644
--- a/src/backend/access/transam/xloginsert.c
+++ b/src/backend/access/transam/xloginsert.c
@@ -677,8 +677,8 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
if (regbuf->flags & REGBUF_STANDARD)
{
/* Assume we can omit data between pd_lower and pd_upper */
- uint16 lower = ((PageHeader) page)->pd_lower;
- uint16 upper = ((PageHeader) page)->pd_upper;
+ uint16 lower = ((const PageHeaderData *) page)->pd_lower;
+ uint16 upper = ((const PageHeaderData *) page)->pd_upper;
if (lower >= SizeOfPageHeaderData &&
upper > lower &&
diff --git a/src/backend/backup/backup_manifest.c b/src/backend/backup/backup_manifest.c
index c697caef895..3760b003907 100644
--- a/src/backend/backup/backup_manifest.c
+++ b/src/backend/backup/backup_manifest.c
@@ -388,7 +388,7 @@ AppendStringToManifest(backup_manifest_info *manifest, const char *s)
Assert(manifest != NULL);
if (manifest->still_checksumming)
{
- if (pg_cryptohash_update(manifest->manifest_ctx, (uint8 *) s, len) < 0)
+ if (pg_cryptohash_update(manifest->manifest_ctx, (const uint8 *) s, len) < 0)
elog(ERROR, "failed to update checksum of backup manifest: %s",
pg_cryptohash_error(manifest->manifest_ctx));
}
diff --git a/src/backend/backup/basebackup.c b/src/backend/backup/basebackup.c
index ba06a38c033..463c0756b5e 100644
--- a/src/backend/backup/basebackup.c
+++ b/src/backend/backup/basebackup.c
@@ -1104,7 +1104,7 @@ sendFileWithContent(bbsink *sink, const char *filename, const char *content,
_tarWriteHeader(sink, filename, NULL, &statbuf, false);
- if (pg_checksum_update(&checksum_ctx, (uint8 *) content, len) < 0)
+ if (pg_checksum_update(&checksum_ctx, (const uint8 *) content, len) < 0)
elog(ERROR, "could not update checksum of file \"%s\"",
filename);
diff --git a/src/backend/backup/basebackup_incremental.c b/src/backend/backup/basebackup_incremental.c
index 77dce24ad38..f58ed9b198a 100644
--- a/src/backend/backup/basebackup_incremental.c
+++ b/src/backend/backup/basebackup_incremental.c
@@ -930,7 +930,7 @@ GetIncrementalFileSize(unsigned num_blocks_required)
static uint32
hash_string_pointer(const char *s)
{
- unsigned char *ss = (unsigned char *) s;
+ const unsigned char *ss = (const unsigned char *) s;
return hash_bytes(ss, strlen(s));
}
@@ -1049,8 +1049,8 @@ manifest_report_error(JsonManifestParseContext *context, const char *fmt,...)
static int
compare_block_numbers(const void *a, const void *b)
{
- BlockNumber aa = *(BlockNumber *) a;
- BlockNumber bb = *(BlockNumber *) b;
+ BlockNumber aa = *(const BlockNumber *) a;
+ BlockNumber bb = *(const BlockNumber *) b;
return pg_cmp_u32(aa, bb);
}
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index 84823f0b615..a616abff04c 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -443,8 +443,8 @@ static int
reorderqueue_cmp(const pairingheap_node *a, const pairingheap_node *b,
void *arg)
{
- ReorderTuple *rta = (ReorderTuple *) a;
- ReorderTuple *rtb = (ReorderTuple *) b;
+ const ReorderTuple *rta = (const ReorderTuple *) a;
+ const ReorderTuple *rtb = (const ReorderTuple *) b;
IndexScanState *node = (IndexScanState *) arg;
/* exchange argument order to invert the sort order */
diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c
index b8119face43..af3c788ce8b 100644
--- a/src/backend/executor/nodeSeqscan.c
+++ b/src/backend/executor/nodeSeqscan.c
@@ -47,7 +47,7 @@ static TupleTableSlot *SeqNext(SeqScanState *node);
* This is a workhorse for ExecSeqScan
* ----------------------------------------------------------------
*/
-static TupleTableSlot *
+static pg_attribute_always_inline TupleTableSlot *
SeqNext(SeqScanState *node)
{
TableScanDesc scandesc;
@@ -86,7 +86,7 @@ SeqNext(SeqScanState *node)
/*
* SeqRecheck -- access method routine to recheck a tuple in EvalPlanQual
*/
-static bool
+static pg_attribute_always_inline bool
SeqRecheck(SeqScanState *node, TupleTableSlot *slot)
{
/*
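SeqNext() and SeqRecheck() are now marked pg_attribute_always_inline, a c.h macro that expands to the compiler's forced-inline attribute. A tiny standalone illustration of that kind of macro (my_always_inline is a GCC/Clang-flavored stand-in, not the actual c.h definition):

#include <stdio.h>

/* simplified stand-in for a forced-inline attribute macro (GCC/Clang spelling) */
#define my_always_inline __attribute__((always_inline)) inline

static my_always_inline int
next_value(int i)
{
    return i + 1;
}

int
main(void)
{
    printf("%d\n", next_value(41));
    return 0;
}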
diff --git a/src/backend/libpq/auth-scram.c b/src/backend/libpq/auth-scram.c
index 3c41145d926..0267edb29cd 100644
--- a/src/backend/libpq/auth-scram.c
+++ b/src/backend/libpq/auth-scram.c
@@ -1490,8 +1490,8 @@ scram_mock_salt(const char *username, pg_cryptohash_type hash_type,
ctx = pg_cryptohash_create(hash_type);
if (pg_cryptohash_init(ctx) < 0 ||
- pg_cryptohash_update(ctx, (uint8 *) username, strlen(username)) < 0 ||
- pg_cryptohash_update(ctx, (uint8 *) mock_auth_nonce, MOCK_AUTH_NONCE_LEN) < 0 ||
+ pg_cryptohash_update(ctx, (const uint8 *) username, strlen(username)) < 0 ||
+ pg_cryptohash_update(ctx, (const uint8 *) mock_auth_nonce, MOCK_AUTH_NONCE_LEN) < 0 ||
pg_cryptohash_final(ctx, sha_digest, key_length) < 0)
{
pg_cryptohash_free(ctx);
diff --git a/src/backend/libpq/crypt.c b/src/backend/libpq/crypt.c
index 4c1052b3d42..52722060451 100644
--- a/src/backend/libpq/crypt.c
+++ b/src/backend/libpq/crypt.c
@@ -136,7 +136,7 @@ encrypt_password(PasswordType target_type, const char *role,
case PASSWORD_TYPE_MD5:
encrypted_password = palloc(MD5_PASSWD_LEN + 1);
- if (!pg_md5_encrypt(password, (uint8 *) role, strlen(role),
+ if (!pg_md5_encrypt(password, (const uint8 *) role, strlen(role),
encrypted_password, &errstr))
elog(ERROR, "password encryption failed: %s", errstr);
break;
@@ -284,7 +284,7 @@ plain_crypt_verify(const char *role, const char *shadow_pass,
case PASSWORD_TYPE_MD5:
if (!pg_md5_encrypt(client_pass,
- (uint8 *) role,
+ (const uint8 *) role,
strlen(role),
crypt_client_pass,
&errstr))
diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c
index d29664ca5d4..199ed27995f 100644
--- a/src/backend/nodes/nodeFuncs.c
+++ b/src/backend/nodes/nodeFuncs.c
@@ -1009,14 +1009,14 @@ exprCollation(const Node *expr)
break;
case T_JsonExpr:
{
- const JsonExpr *jsexpr = (JsonExpr *) expr;
+ const JsonExpr *jsexpr = (const JsonExpr *) expr;
coll = jsexpr->collation;
}
break;
case T_JsonBehavior:
{
- const JsonBehavior *behavior = (JsonBehavior *) expr;
+ const JsonBehavior *behavior = (const JsonBehavior *) expr;
if (behavior->expr)
coll = exprCollation(behavior->expr);
@@ -1593,7 +1593,7 @@ exprLocation(const Node *expr)
}
break;
case T_JsonBehavior:
- loc = exprLocation(((JsonBehavior *) expr)->expr);
+ loc = exprLocation(((const JsonBehavior *) expr)->expr);
break;
case T_NullTest:
{
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index c8eef2c75d2..40990143927 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -736,17 +736,17 @@ outNode(StringInfo str, const void *obj)
_outList(str, obj);
/* nodeRead does not want to see { } around these! */
else if (IsA(obj, Integer))
- _outInteger(str, (Integer *) obj);
+ _outInteger(str, (const Integer *) obj);
else if (IsA(obj, Float))
- _outFloat(str, (Float *) obj);
+ _outFloat(str, (const Float *) obj);
else if (IsA(obj, Boolean))
- _outBoolean(str, (Boolean *) obj);
+ _outBoolean(str, (const Boolean *) obj);
else if (IsA(obj, String))
- _outString(str, (String *) obj);
+ _outString(str, (const String *) obj);
else if (IsA(obj, BitString))
- _outBitString(str, (BitString *) obj);
+ _outBitString(str, (const BitString *) obj);
else if (IsA(obj, Bitmapset))
- outBitmapset(str, (Bitmapset *) obj);
+ outBitmapset(str, (const Bitmapset *) obj);
else
{
appendStringInfoChar(str, '{');
diff --git a/src/backend/nodes/tidbitmap.c b/src/backend/nodes/tidbitmap.c
index 805ff21c7f1..f1f925cb13b 100644
--- a/src/backend/nodes/tidbitmap.c
+++ b/src/backend/nodes/tidbitmap.c
@@ -1439,8 +1439,8 @@ static int
tbm_shared_comparator(const void *left, const void *right, void *arg)
{
PagetableEntry *base = (PagetableEntry *) arg;
- PagetableEntry *lpage = &base[*(int *) left];
- PagetableEntry *rpage = &base[*(int *) right];
+ PagetableEntry *lpage = &base[*(const int *) left];
+ PagetableEntry *rpage = &base[*(const int *) right];
if (lpage->blockno < rpage->blockno)
return -1;
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index 39d35827c35..32204776c45 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -261,13 +261,10 @@ find_window_functions_walker(Node *node, WindowFuncLists *lists)
if (wfunc->winref > lists->maxWinRef)
elog(ERROR, "WindowFunc contains out-of-range winref %u",
wfunc->winref);
- /* eliminate duplicates, so that we avoid repeated computation */
- if (!list_member(lists->windowFuncs[wfunc->winref], wfunc))
- {
- lists->windowFuncs[wfunc->winref] =
- lappend(lists->windowFuncs[wfunc->winref], wfunc);
- lists->numWindowFuncs++;
- }
+
+ lists->windowFuncs[wfunc->winref] =
+ lappend(lists->windowFuncs[wfunc->winref], wfunc);
+ lists->numWindowFuncs++;
/*
* We assume that the parser checked that there are no window
diff --git a/src/backend/port/sysv_shmem.c b/src/backend/port/sysv_shmem.c
index 5239b6acbbc..3cd3544fa2b 100644
--- a/src/backend/port/sysv_shmem.c
+++ b/src/backend/port/sysv_shmem.c
@@ -602,6 +602,7 @@ CreateAnonymousSegment(Size *size)
Size allocsize = *size;
void *ptr = MAP_FAILED;
int mmap_errno = 0;
+ int mmap_flags = MAP_SHARED | MAP_ANONYMOUS | MAP_HASSEMAPHORE;
#ifndef MAP_HUGETLB
/* PGSharedMemoryCreate should have dealt with this case */
@@ -613,15 +614,15 @@ CreateAnonymousSegment(Size *size)
* Round up the request size to a suitable large value.
*/
Size hugepagesize;
- int mmap_flags;
+ int huge_mmap_flags;
- GetHugePageSize(&hugepagesize, &mmap_flags);
+ GetHugePageSize(&hugepagesize, &huge_mmap_flags);
if (allocsize % hugepagesize != 0)
allocsize = add_size(allocsize, hugepagesize - (allocsize % hugepagesize));
ptr = mmap(NULL, allocsize, PROT_READ | PROT_WRITE,
- PG_MMAP_FLAGS | mmap_flags, -1, 0);
+ mmap_flags | huge_mmap_flags, -1, 0);
mmap_errno = errno;
if (huge_pages == HUGE_PAGES_TRY && ptr == MAP_FAILED)
elog(DEBUG1, "mmap(%zu) with MAP_HUGETLB failed, huge pages disabled: %m",
@@ -645,7 +646,7 @@ CreateAnonymousSegment(Size *size)
*/
allocsize = *size;
ptr = mmap(NULL, allocsize, PROT_READ | PROT_WRITE,
- PG_MMAP_FLAGS, -1, 0);
+ mmap_flags, -1, 0);
mmap_errno = errno;
}
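The shared-memory hunk above hoists the base mmap flags into one variable so that the huge-page attempt and the plain fallback share them. A self-contained sketch of that try-huge-pages-then-fall-back pattern on Linux (the 64 MB request and the assumed 2 MB huge page size are illustrative values, not taken from the patch):

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int
main(void)
{
    size_t  size = 64 * 1024 * 1024;
    int     base_flags = MAP_SHARED | MAP_ANONYMOUS;
    void   *ptr = MAP_FAILED;

#ifdef MAP_HUGETLB
    /* round the request up to an assumed 2 MB huge page size */
    size_t  hugepagesize = 2 * 1024 * 1024;
    size_t  hugesize = size;

    if (hugesize % hugepagesize != 0)
        hugesize += hugepagesize - (hugesize % hugepagesize);

    ptr = mmap(NULL, hugesize, PROT_READ | PROT_WRITE,
               base_flags | MAP_HUGETLB, -1, 0);
    if (ptr == MAP_FAILED)
        printf("huge-page mmap failed (%s), falling back\n", strerror(errno));
    else
        size = hugesize;
#endif

    if (ptr == MAP_FAILED)
        ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, base_flags, -1, 0);

    if (ptr == MAP_FAILED)
    {
        perror("mmap");
        return 1;
    }
    printf("mapped %zu bytes at %p\n", size, ptr);
    munmap(ptr, size);
    return 0;
}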
diff --git a/src/backend/statistics/extended_stats.c b/src/backend/statistics/extended_stats.c
index b9a8f257042..3895ed72ef7 100644
--- a/src/backend/statistics/extended_stats.c
+++ b/src/backend/statistics/extended_stats.c
@@ -915,8 +915,8 @@ multi_sort_compare_dims(int start, int end,
int
compare_scalars_simple(const void *a, const void *b, void *arg)
{
- return compare_datums_simple(*(Datum *) a,
- *(Datum *) b,
+ return compare_datums_simple(*(const Datum *) a,
+ *(const Datum *) b,
(SortSupport) arg);
}
diff --git a/src/backend/statistics/extended_stats_funcs.c b/src/backend/statistics/extended_stats_funcs.c
index b4b1bf26463..18814093f50 100644
--- a/src/backend/statistics/extended_stats_funcs.c
+++ b/src/backend/statistics/extended_stats_funcs.c
@@ -24,8 +24,10 @@
#include "catalog/pg_statistic_ext_data.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
+#include "statistics/extended_stats_internal.h"
#include "statistics/stat_utils.h"
#include "utils/acl.h"
+#include "utils/array.h"
#include "utils/builtins.h"
#include "utils/fmgroids.h"
#include "utils/lsyscache.h"
@@ -42,6 +44,8 @@ enum extended_stats_argnum
STATSCHEMA_ARG,
STATNAME_ARG,
INHERITED_ARG,
+ NDISTINCT_ARG,
+ DEPENDENCIES_ARG,
NUM_EXTENDED_STATS_ARGS,
};
@@ -56,14 +60,35 @@ static struct StatsArgInfo extarginfo[] =
[STATSCHEMA_ARG] = {"statistics_schemaname", TEXTOID},
[STATNAME_ARG] = {"statistics_name", TEXTOID},
[INHERITED_ARG] = {"inherited", BOOLOID},
+ [NDISTINCT_ARG] = {"n_distinct", PG_NDISTINCTOID},
+ [DEPENDENCIES_ARG] = {"dependencies", PG_DEPENDENCIESOID},
[NUM_EXTENDED_STATS_ARGS] = {0},
};
+static bool extended_statistics_update(FunctionCallInfo fcinfo);
+
static HeapTuple get_pg_statistic_ext(Relation pg_stext, Oid nspoid,
const char *stxname);
static bool delete_pg_statistic_ext_data(Oid stxoid, bool inherited);
/*
+ * Track the extended statistics kinds expected for a pg_statistic_ext
+ * tuple.
+ */
+typedef struct
+{
+ bool ndistinct;
+ bool dependencies;
+ bool mcv;
+ bool expressions;
+} StakindFlags;
+
+static void expand_stxkind(HeapTuple tup, StakindFlags *enabled);
+static void upsert_pg_statistic_ext_data(const Datum *values,
+ const bool *nulls,
+ const bool *replaces);
+
+/*
* Fetch a pg_statistic_ext row by name and namespace OID.
*/
static HeapTuple
@@ -110,6 +135,316 @@ get_pg_statistic_ext(Relation pg_stext, Oid nspoid, const char *stxname)
}
/*
+ * Decode the stxkind column so that we know which stats types to expect,
+ * returning a StakindFlags set depending on the stats kinds expected by
+ * a pg_statistic_ext tuple.
+ */
+static void
+expand_stxkind(HeapTuple tup, StakindFlags *enabled)
+{
+ Datum datum;
+ ArrayType *arr;
+ char *kinds;
+
+ datum = SysCacheGetAttrNotNull(STATEXTOID,
+ tup,
+ Anum_pg_statistic_ext_stxkind);
+ arr = DatumGetArrayTypeP(datum);
+ if (ARR_NDIM(arr) != 1 || ARR_HASNULL(arr) || ARR_ELEMTYPE(arr) != CHAROID)
+ elog(ERROR, "stxkind is not a one-dimension char array");
+
+ kinds = (char *) ARR_DATA_PTR(arr);
+
+ for (int i = 0; i < ARR_DIMS(arr)[0]; i++)
+ {
+ switch (kinds[i])
+ {
+ case STATS_EXT_NDISTINCT:
+ enabled->ndistinct = true;
+ break;
+ case STATS_EXT_DEPENDENCIES:
+ enabled->dependencies = true;
+ break;
+ case STATS_EXT_MCV:
+ enabled->mcv = true;
+ break;
+ case STATS_EXT_EXPRESSIONS:
+ enabled->expressions = true;
+ break;
+ default:
+ elog(ERROR, "incorrect stxkind %c found", kinds[i]);
+ break;
+ }
+ }
+}
+
+/*
+ * Perform the actual storage of a pg_statistic_ext_data tuple.
+ */
+static void
+upsert_pg_statistic_ext_data(const Datum *values, const bool *nulls,
+ const bool *replaces)
+{
+ Relation pg_stextdata;
+ HeapTuple stxdtup;
+ HeapTuple newtup;
+
+ pg_stextdata = table_open(StatisticExtDataRelationId, RowExclusiveLock);
+
+ stxdtup = SearchSysCache2(STATEXTDATASTXOID,
+ values[Anum_pg_statistic_ext_data_stxoid - 1],
+ values[Anum_pg_statistic_ext_data_stxdinherit - 1]);
+
+ if (HeapTupleIsValid(stxdtup))
+ {
+ newtup = heap_modify_tuple(stxdtup,
+ RelationGetDescr(pg_stextdata),
+ values,
+ nulls,
+ replaces);
+ CatalogTupleUpdate(pg_stextdata, &newtup->t_self, newtup);
+ ReleaseSysCache(stxdtup);
+ }
+ else
+ {
+ newtup = heap_form_tuple(RelationGetDescr(pg_stextdata), values, nulls);
+ CatalogTupleInsert(pg_stextdata, newtup);
+ }
+
+ heap_freetuple(newtup);
+
+ CommandCounterIncrement();
+
+ table_close(pg_stextdata, RowExclusiveLock);
+}
+
+/*
+ * Insert or update an extended statistics object.
+ *
+ * Major errors, such as the table not existing or permission errors, are
+ * reported as ERRORs. There are a couple of paths that generate a WARNING,
+ * like when the statistics object or its schema do not exist, or when
+ * conversion fails for one statistic kind while other statistic kinds can
+ * still be updated.
+ */
+static bool
+extended_statistics_update(FunctionCallInfo fcinfo)
+{
+ char *relnspname;
+ char *relname;
+ Oid nspoid;
+ char *nspname;
+ char *stxname;
+ bool inherited;
+ Relation pg_stext = NULL;
+ HeapTuple tup = NULL;
+
+ StakindFlags enabled = {false, false, false, false};
+ StakindFlags has = {false, false, false, false};
+
+ Form_pg_statistic_ext stxform;
+
+ Datum values[Natts_pg_statistic_ext_data] = {0};
+ bool nulls[Natts_pg_statistic_ext_data] = {0};
+ bool replaces[Natts_pg_statistic_ext_data] = {0};
+ bool success = true;
+ int numexprs = 0;
+
+ /* arrays of type info, if we need them */
+ Oid relid;
+ Oid locked_table = InvalidOid;
+
+ /*
+ * Fill out the StakindFlags "has" structure based on which parameters
+ * were provided to the function.
+ */
+ has.ndistinct = !PG_ARGISNULL(NDISTINCT_ARG);
+ has.dependencies = !PG_ARGISNULL(DEPENDENCIES_ARG);
+
+ if (RecoveryInProgress())
+ {
+ ereport(WARNING,
+ errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("recovery is in progress"),
+ errhint("Statistics cannot be modified during recovery."));
+ return false;
+ }
+
+ /* relation arguments */
+ stats_check_required_arg(fcinfo, extarginfo, RELSCHEMA_ARG);
+ relnspname = TextDatumGetCString(PG_GETARG_DATUM(RELSCHEMA_ARG));
+ stats_check_required_arg(fcinfo, extarginfo, RELNAME_ARG);
+ relname = TextDatumGetCString(PG_GETARG_DATUM(RELNAME_ARG));
+
+ /* extended statistics arguments */
+ stats_check_required_arg(fcinfo, extarginfo, STATSCHEMA_ARG);
+ nspname = TextDatumGetCString(PG_GETARG_DATUM(STATSCHEMA_ARG));
+ stats_check_required_arg(fcinfo, extarginfo, STATNAME_ARG);
+ stxname = TextDatumGetCString(PG_GETARG_DATUM(STATNAME_ARG));
+ stats_check_required_arg(fcinfo, extarginfo, INHERITED_ARG);
+ inherited = PG_GETARG_BOOL(INHERITED_ARG);
+
+ /*
+ * First open the relation where we expect to find the statistics. This
+ * is similar to relation and attribute statistics, so as ACL checks are
+ * done before any locks are taken, even before any attempts related to
+ * the extended stats object.
+ */
+ relid = RangeVarGetRelidExtended(makeRangeVar(relnspname, relname, -1),
+ ShareUpdateExclusiveLock, 0,
+ RangeVarCallbackForStats, &locked_table);
+
+ nspoid = get_namespace_oid(nspname, true);
+ if (nspoid == InvalidOid)
+ {
+ ereport(WARNING,
+ errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("could not find schema \"%s\"", nspname));
+ success = false;
+ goto cleanup;
+ }
+
+ pg_stext = table_open(StatisticExtRelationId, RowExclusiveLock);
+ tup = get_pg_statistic_ext(pg_stext, nspoid, stxname);
+
+ if (!HeapTupleIsValid(tup))
+ {
+ ereport(WARNING,
+ errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("could not find extended statistics object \"%s\".\"%s\"",
+ quote_identifier(nspname),
+ quote_identifier(stxname)));
+ success = false;
+ goto cleanup;
+ }
+
+ stxform = (Form_pg_statistic_ext) GETSTRUCT(tup);
+
+ /*
+ * The relation tracked by the stats object has to match with the relation
+ * we have already locked.
+ */
+ if (stxform->stxrelid != relid)
+ {
+ ereport(WARNING,
+ errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("could not restore extended statistics object \"%s\".\"%s\": incorrect relation \"%s\".\"%s\" specified",
+ quote_identifier(nspname),
+ quote_identifier(stxname),
+ quote_identifier(relnspname),
+ quote_identifier(relname)));
+
+ success = false;
+ goto cleanup;
+ }
+
+ /* Find out what extended statistics kinds we should expect. */
+ expand_stxkind(tup, &enabled);
+
+ /*
+ * If the object cannot support ndistinct, we should not have data for it.
+ */
+ if (has.ndistinct && !enabled.ndistinct)
+ {
+ ereport(WARNING,
+ errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("cannot specify parameter \"%s\"",
+ extarginfo[NDISTINCT_ARG].argname),
+ errhint("Extended statistics object \"%s\".\"%s\" does not support statistics of this type.",
+ quote_identifier(nspname),
+ quote_identifier(stxname)));
+
+ has.ndistinct = false;
+ success = false;
+ }
+
+ /*
+ * If the object cannot support dependencies, we should not have data for
+ * it.
+ */
+ if (has.dependencies && !enabled.dependencies)
+ {
+ ereport(WARNING,
+ errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("cannot specify parameter \"%s\"",
+ extarginfo[DEPENDENCIES_ARG].argname),
+ errhint("Extended statistics object \"%s\".\"%s\" does not support statistics of this type.",
+ quote_identifier(nspname),
+ quote_identifier(stxname)));
+ has.dependencies = false;
+ success = false;
+ }
+
+ /*
+ * Populate the pg_statistic_ext_data result tuple.
+ */
+
+ /* Primary Key: cannot be NULL or replaced. */
+ values[Anum_pg_statistic_ext_data_stxoid - 1] = ObjectIdGetDatum(stxform->oid);
+ nulls[Anum_pg_statistic_ext_data_stxoid - 1] = false;
+ values[Anum_pg_statistic_ext_data_stxdinherit - 1] = BoolGetDatum(inherited);
+ nulls[Anum_pg_statistic_ext_data_stxdinherit - 1] = false;
+
+ /* All unspecified parameters will be left unmodified */
+ nulls[Anum_pg_statistic_ext_data_stxdndistinct - 1] = true;
+ nulls[Anum_pg_statistic_ext_data_stxddependencies - 1] = true;
+ nulls[Anum_pg_statistic_ext_data_stxdmcv - 1] = true;
+ nulls[Anum_pg_statistic_ext_data_stxdexpr - 1] = true;
+
+ /*
+ * For each stats kind, deserialize the data at hand and perform a round
+ * of validation. The resulting tuple is filled with a set of updated
+ * values.
+ */
+
+ if (has.ndistinct)
+ {
+ Datum ndistinct_datum = PG_GETARG_DATUM(NDISTINCT_ARG);
+ bytea *data = DatumGetByteaPP(ndistinct_datum);
+ MVNDistinct *ndistinct = statext_ndistinct_deserialize(data);
+
+ if (statext_ndistinct_validate(ndistinct, &stxform->stxkeys,
+ numexprs, WARNING))
+ {
+ values[Anum_pg_statistic_ext_data_stxdndistinct - 1] = ndistinct_datum;
+ nulls[Anum_pg_statistic_ext_data_stxdndistinct - 1] = false;
+ replaces[Anum_pg_statistic_ext_data_stxdndistinct - 1] = true;
+ }
+ else
+ success = false;
+
+ statext_ndistinct_free(ndistinct);
+ }
+
+ if (has.dependencies)
+ {
+ Datum dependencies_datum = PG_GETARG_DATUM(DEPENDENCIES_ARG);
+ bytea *data = DatumGetByteaPP(dependencies_datum);
+ MVDependencies *dependencies = statext_dependencies_deserialize(data);
+
+ if (statext_dependencies_validate(dependencies, &stxform->stxkeys, numexprs, WARNING))
+ {
+ values[Anum_pg_statistic_ext_data_stxddependencies - 1] = dependencies_datum;
+ nulls[Anum_pg_statistic_ext_data_stxddependencies - 1] = false;
+ replaces[Anum_pg_statistic_ext_data_stxddependencies - 1] = true;
+ }
+ else
+ success = false;
+
+ statext_dependencies_free(dependencies);
+ }
+
+ upsert_pg_statistic_ext_data(values, nulls, replaces);
+
+cleanup:
+ if (HeapTupleIsValid(tup))
+ heap_freetuple(tup);
+ if (pg_stext != NULL)
+ table_close(pg_stext, RowExclusiveLock);
+ return success;
+}
+
+/*
* Remove an existing pg_statistic_ext_data row for a given pg_statistic_ext
* row and "inherited" pair.
*/
@@ -140,6 +475,31 @@ delete_pg_statistic_ext_data(Oid stxoid, bool inherited)
}
/*
+ * Restore (insert or replace) statistics for the given statistics object.
+ *
+ * This function accepts variadic arguments in key-value pairs, which are
+ * given to stats_fill_fcinfo_from_arg_pairs to be mapped into positional
+ * arguments.
+ */
+Datum
+pg_restore_extended_stats(PG_FUNCTION_ARGS)
+{
+ LOCAL_FCINFO(positional_fcinfo, NUM_EXTENDED_STATS_ARGS);
+ bool result = true;
+
+ InitFunctionCallInfoData(*positional_fcinfo, NULL, NUM_EXTENDED_STATS_ARGS,
+ InvalidOid, NULL, NULL);
+
+ if (!stats_fill_fcinfo_from_arg_pairs(fcinfo, positional_fcinfo, extarginfo))
+ result = false;
+
+ if (!extended_statistics_update(positional_fcinfo))
+ result = false;
+
+ PG_RETURN_BOOL(result);
+}
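+
For reference, a minimal usage sketch of the new function, mirroring the
regression tests added below (the statistics object and the pg_ndistinct
value are the ones created there):

    SELECT pg_catalog.pg_restore_extended_stats(
        'schemaname', 'stats_import',
        'relname', 'test',
        'statistics_schemaname', 'stats_import',
        'statistics_name', 'test_stat_ndistinct',
        'inherited', false,
        'n_distinct', '[{"attributes" : [2,3], "ndistinct" : 4}]'::pg_ndistinct);

Each key-value pair names one positional argument of the internal update
routine; statistics kinds that are not specified are left unmodified.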
+
+/*
* Delete statistics for the given statistics object.
*/
Datum
diff --git a/src/backend/storage/ipc/shm_mq.c b/src/backend/storage/ipc/shm_mq.c
index 4d6cc16c0bc..3ce6068ac54 100644
--- a/src/backend/storage/ipc/shm_mq.c
+++ b/src/backend/storage/ipc/shm_mq.c
@@ -1041,7 +1041,7 @@ shm_mq_send_bytes(shm_mq_handle *mqh, Size nbytes, const void *data,
*/
pg_memory_barrier();
memcpy(&mq->mq_ring[mq->mq_ring_offset + offset],
- (char *) data + sent, sendnow);
+ (const char *) data + sent, sendnow);
sent += sendnow;
/*
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index d2f4710f141..1b536363152 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -599,7 +599,7 @@ pg_get_shmem_allocations_numa(PG_FUNCTION_ARGS)
InitMaterializedSRF(fcinfo, 0);
max_nodes = pg_numa_get_max_node();
- nodes = palloc_array(Size, max_nodes + 1);
+ nodes = palloc_array(Size, max_nodes + 2);
/*
* Shared memory allocations can vary in size and may not align with OS
@@ -635,7 +635,6 @@ pg_get_shmem_allocations_numa(PG_FUNCTION_ARGS)
hash_seq_init(&hstat, ShmemIndex);
/* output all allocated entries */
- memset(nulls, 0, sizeof(nulls));
while ((ent = (ShmemIndexEnt *) hash_seq_search(&hstat)) != NULL)
{
int i;
@@ -684,22 +683,33 @@ pg_get_shmem_allocations_numa(PG_FUNCTION_ARGS)
elog(ERROR, "failed NUMA pages inquiry status: %m");
/* Count number of NUMA nodes used for this shared memory entry */
- memset(nodes, 0, sizeof(Size) * (max_nodes + 1));
+ memset(nodes, 0, sizeof(Size) * (max_nodes + 2));
for (i = 0; i < shm_ent_page_count; i++)
{
int s = pages_status[i];
/* Ensure we are adding only valid index to the array */
- if (s < 0 || s > max_nodes)
+ if (s >= 0 && s <= max_nodes)
+ {
+ /* valid NUMA node */
+ nodes[s]++;
+ continue;
+ }
+ else if (s == -2)
{
- elog(ERROR, "invalid NUMA node id outside of allowed range "
- "[0, " UINT64_FORMAT "]: %d", max_nodes, s);
+ /* -2 means ENOENT (e.g. page was moved to swap) */
+ nodes[max_nodes + 1]++;
+ continue;
}
- nodes[s]++;
+ elog(ERROR, "invalid NUMA node id outside of allowed range "
+ "[0, " UINT64_FORMAT "]: %d", max_nodes, s);
}
+ /* no NULLs for regular nodes */
+ memset(nulls, 0, sizeof(nulls));
+
/*
* Add one entry for each NUMA node, including those without allocated
* memory for this segment.
@@ -713,6 +723,14 @@ pg_get_shmem_allocations_numa(PG_FUNCTION_ARGS)
tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc,
values, nulls);
}
+
+ /* The last entry is used for pages without a NUMA node. */
+ nulls[1] = true;
+ values[0] = CStringGetTextDatum(ent->key);
+ values[2] = Int64GetDatum(nodes[max_nodes + 1] * os_page_size);
+
+ tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc,
+ values, nulls);
}
LWLockRelease(ShmemIndexLock);
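
A hedged usage sketch of the behavior change above, assuming the
pg_shmem_allocations_numa view that exposes this function: pages whose
status is reported as -2 (ENOENT, e.g. moved to swap) no longer raise an
error but are summed into an extra per-entry row with a NULL node id:

    SELECT name, numa_node, size
    FROM pg_shmem_allocations_numa
    WHERE numa_node IS NULL;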
diff --git a/src/backend/tsearch/spell.c b/src/backend/tsearch/spell.c
index e3436dbddd2..ad0ceec37b0 100644
--- a/src/backend/tsearch/spell.c
+++ b/src/backend/tsearch/spell.c
@@ -2463,9 +2463,9 @@ SplitToVariants(IspellDict *Conf, SPNode *snode, SplitVar *orig, const char *wor
while (StopLow < StopHigh)
{
StopMiddle = StopLow + ((StopHigh - StopLow) >> 1);
- if (StopMiddle->val == ((uint8 *) (word))[level])
+ if (StopMiddle->val == ((const uint8 *) (word))[level])
break;
- else if (StopMiddle->val < ((uint8 *) (word))[level])
+ else if (StopMiddle->val < ((const uint8 *) (word))[level])
StopLow = StopMiddle + 1;
else
StopHigh = StopMiddle;
diff --git a/src/backend/utils/adt/geo_spgist.c b/src/backend/utils/adt/geo_spgist.c
index 19bcae3848b..7a19ca3892b 100644
--- a/src/backend/utils/adt/geo_spgist.c
+++ b/src/backend/utils/adt/geo_spgist.c
@@ -92,8 +92,8 @@
static int
compareDoubles(const void *a, const void *b)
{
- float8 x = *(float8 *) a;
- float8 y = *(float8 *) b;
+ float8 x = *(const float8 *) a;
+ float8 y = *(const float8 *) b;
if (x == y)
return 0;
diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c
index 78e84727fdc..0b161398465 100644
--- a/src/backend/utils/adt/json.c
+++ b/src/backend/utils/adt/json.c
@@ -901,7 +901,7 @@ json_agg_finalfn(PG_FUNCTION_ARGS)
static uint32
json_unique_hash(const void *key, Size keysize)
{
- const JsonUniqueHashEntry *entry = (JsonUniqueHashEntry *) key;
+ const JsonUniqueHashEntry *entry = (const JsonUniqueHashEntry *) key;
uint32 hash = hash_bytes_uint32(entry->object_id);
hash ^= hash_bytes((const unsigned char *) entry->key, entry->key_len);
diff --git a/src/backend/utils/adt/pg_locale_builtin.c b/src/backend/utils/adt/pg_locale_builtin.c
index 1f5fc1c97f3..b5aeb7a337a 100644
--- a/src/backend/utils/adt/pg_locale_builtin.c
+++ b/src/backend/utils/adt/pg_locale_builtin.c
@@ -63,7 +63,7 @@ initcap_wbnext(void *state)
while (wbstate->offset < wbstate->len &&
wbstate->str[wbstate->offset] != '\0')
{
- char32_t u = utf8_to_unicode((unsigned char *) wbstate->str +
+ char32_t u = utf8_to_unicode((const unsigned char *) wbstate->str +
wbstate->offset);
bool curr_alnum = pg_u_isalnum(u, wbstate->posix);
diff --git a/src/backend/utils/adt/rangetypes.c b/src/backend/utils/adt/rangetypes.c
index 701aacbbfb0..06cc3af4f4a 100644
--- a/src/backend/utils/adt/rangetypes.c
+++ b/src/backend/utils/adt/rangetypes.c
@@ -2108,7 +2108,7 @@ range_deserialize(TypeCacheEntry *typcache, const RangeType *range,
typalign = typcache->rngelemtype->typalign;
/* initialize data pointer just after the range OID */
- ptr = (char *) (range + 1);
+ ptr = (const char *) (range + 1);
/* fetch lower bound, if any */
if (RANGE_HAS_LBOUND(flags))
@@ -2155,7 +2155,7 @@ char
range_get_flags(const RangeType *range)
{
/* fetch the flag byte from datum's last byte */
- return *((char *) range + VARSIZE(range) - 1);
+ return *((const char *) range + VARSIZE(range) - 1);
}
/*
@@ -2360,8 +2360,8 @@ range_cmp_bound_values(TypeCacheEntry *typcache, const RangeBound *b1,
int
range_compare(const void *key1, const void *key2, void *arg)
{
- RangeType *r1 = *(RangeType **) key1;
- RangeType *r2 = *(RangeType **) key2;
+ RangeType *r1 = *(RangeType *const *) key1;
+ RangeType *r2 = *(RangeType *const *) key2;
TypeCacheEntry *typcache = (TypeCacheEntry *) arg;
RangeBound lower1;
RangeBound upper1;
diff --git a/src/backend/utils/adt/rangetypes_gist.c b/src/backend/utils/adt/rangetypes_gist.c
index cb9f769cdb2..1a01a8f4c3c 100644
--- a/src/backend/utils/adt/rangetypes_gist.c
+++ b/src/backend/utils/adt/rangetypes_gist.c
@@ -1768,8 +1768,8 @@ interval_cmp_upper(const void *a, const void *b, void *arg)
static int
common_entry_cmp(const void *i1, const void *i2)
{
- double delta1 = ((CommonEntry *) i1)->delta;
- double delta2 = ((CommonEntry *) i2)->delta;
+ double delta1 = ((const CommonEntry *) i1)->delta;
+ double delta2 = ((const CommonEntry *) i2)->delta;
if (delta1 < delta2)
return -1;
diff --git a/src/backend/utils/sort/tuplesortvariants.c b/src/backend/utils/sort/tuplesortvariants.c
index e3e1142126e..2509ac3e3a4 100644
--- a/src/backend/utils/sort/tuplesortvariants.c
+++ b/src/backend/utils/sort/tuplesortvariants.c
@@ -20,6 +20,7 @@
#include "postgres.h"
#include "access/brin_tuple.h"
+#include "access/gin.h"
#include "access/gin_tuple.h"
#include "access/hash.h"
#include "access/htup_details.h"
@@ -28,6 +29,7 @@
#include "catalog/pg_collation.h"
#include "executor/executor.h"
#include "pg_trace.h"
+#include "utils/builtins.h"
#include "utils/datum.h"
#include "utils/guc.h"
#include "utils/lsyscache.h"
@@ -615,7 +617,7 @@ tuplesort_begin_index_gin(Relation heapRel,
{
SortSupport sortKey = base->sortKeys + i;
Form_pg_attribute att = TupleDescAttr(desc, i);
- TypeCacheEntry *typentry;
+ Oid cmpFunc;
sortKey->ssup_cxt = CurrentMemoryContext;
sortKey->ssup_collation = indexRel->rd_indcollation[i];
@@ -629,11 +631,26 @@ tuplesort_begin_index_gin(Relation heapRel,
sortKey->ssup_collation = DEFAULT_COLLATION_OID;
/*
- * Look for a ordering for the index key data type, and then the sort
- * support function.
+ * If the compare proc isn't specified in the opclass definition, look
+ * up the index key type's default btree comparator.
*/
- typentry = lookup_type_cache(att->atttypid, TYPECACHE_LT_OPR);
- PrepareSortSupportFromOrderingOp(typentry->lt_opr, sortKey);
+ cmpFunc = index_getprocid(indexRel, i + 1, GIN_COMPARE_PROC);
+ if (cmpFunc == InvalidOid)
+ {
+ TypeCacheEntry *typentry;
+
+ typentry = lookup_type_cache(att->atttypid,
+ TYPECACHE_CMP_PROC_FINFO);
+ if (!OidIsValid(typentry->cmp_proc_finfo.fn_oid))
+ ereport(ERROR,
+ (errcode(ERRCODE_UNDEFINED_FUNCTION),
+ errmsg("could not identify a comparison function for type %s",
+ format_type_be(att->atttypid))));
+
+ cmpFunc = typentry->cmp_proc_finfo.fn_oid;
+ }
+
+ PrepareSortSupportComparisonShim(cmpFunc, sortKey);
}
base->removeabbrev = removeabbrev_index_gin;
diff --git a/src/bin/pg_basebackup/walmethods.c b/src/bin/pg_basebackup/walmethods.c
index f6e2371f477..17d22c79f68 100644
--- a/src/bin/pg_basebackup/walmethods.c
+++ b/src/bin/pg_basebackup/walmethods.c
@@ -359,7 +359,7 @@ dir_write(Walfile *f, const void *buf, size_t count)
return -1;
}
- inbuf = ((char *) inbuf) + chunk;
+ inbuf = ((const char *) inbuf) + chunk;
}
/* Our caller keeps track of the uncompressed size. */
diff --git a/src/bin/pg_combinebackup/meson.build b/src/bin/pg_combinebackup/meson.build
index 613f73a756b..a35b86f3f59 100644
--- a/src/bin/pg_combinebackup/meson.build
+++ b/src/bin/pg_combinebackup/meson.build
@@ -38,7 +38,7 @@ tests += {
't/008_promote.pl',
't/009_no_full_file.pl',
't/010_hardlink.pl',
- 't/011_incremental_backup_truncation_block.pl',
+ 't/011_ib_truncation.pl',
],
}
}
diff --git a/src/bin/pg_combinebackup/t/011_incremental_backup_truncation_block.pl b/src/bin/pg_combinebackup/t/011_ib_truncation.pl
index 47d84434452..47d84434452 100644
--- a/src/bin/pg_combinebackup/t/011_incremental_backup_truncation_block.pl
+++ b/src/bin/pg_combinebackup/t/011_ib_truncation.pl
diff --git a/src/bin/pg_rewind/filemap.c b/src/bin/pg_rewind/filemap.c
index 2610bae7e09..b79c47f9252 100644
--- a/src/bin/pg_rewind/filemap.c
+++ b/src/bin/pg_rewind/filemap.c
@@ -694,8 +694,8 @@ datasegpath(RelFileLocator rlocator, ForkNumber forknum, BlockNumber segno)
static int
final_filemap_cmp(const void *a, const void *b)
{
- file_entry_t *fa = *((file_entry_t **) a);
- file_entry_t *fb = *((file_entry_t **) b);
+ file_entry_t *fa = *((file_entry_t *const *) a);
+ file_entry_t *fb = *((file_entry_t *const *) b);
if (fa->action > fb->action)
return 1;
diff --git a/src/bin/pg_verifybackup/astreamer_verify.c b/src/bin/pg_verifybackup/astreamer_verify.c
index 440c96269d7..0edc8123b43 100644
--- a/src/bin/pg_verifybackup/astreamer_verify.c
+++ b/src/bin/pg_verifybackup/astreamer_verify.c
@@ -268,7 +268,7 @@ member_compute_checksum(astreamer *streamer, astreamer_member *member,
mystreamer->checksum_bytes += len;
/* Feed these bytes to the checksum calculation. */
- if (pg_checksum_update(checksum_ctx, (uint8 *) data, len) < 0)
+ if (pg_checksum_update(checksum_ctx, (const uint8 *) data, len) < 0)
{
report_backup_error(mystreamer->context,
"could not update checksum of file \"%s\"",
diff --git a/src/bin/pg_walsummary/pg_walsummary.c b/src/bin/pg_walsummary/pg_walsummary.c
index 74851f84f92..aa214b8616d 100644
--- a/src/bin/pg_walsummary/pg_walsummary.c
+++ b/src/bin/pg_walsummary/pg_walsummary.c
@@ -217,8 +217,8 @@ dump_one_relation(ws_options *opt, RelFileLocator *rlocator,
static int
compare_block_numbers(const void *a, const void *b)
{
- BlockNumber aa = *(BlockNumber *) a;
- BlockNumber bb = *(BlockNumber *) b;
+ BlockNumber aa = *(const BlockNumber *) a;
+ BlockNumber bb = *(const BlockNumber *) b;
return pg_cmp_u32(aa, bb);
}
diff --git a/src/common/scram-common.c b/src/common/scram-common.c
index dbf188ae872..259fa5554b6 100644
--- a/src/common/scram-common.c
+++ b/src/common/scram-common.c
@@ -61,7 +61,7 @@ scram_SaltedPassword(const char *password,
*/
/* First iteration */
- if (pg_hmac_init(hmac_ctx, (uint8 *) password, password_len) < 0 ||
+ if (pg_hmac_init(hmac_ctx, (const uint8 *) password, password_len) < 0 ||
pg_hmac_update(hmac_ctx, salt, saltlen) < 0 ||
pg_hmac_update(hmac_ctx, (uint8 *) &one, sizeof(uint32)) < 0 ||
pg_hmac_final(hmac_ctx, Ui_prev, key_length) < 0)
@@ -84,7 +84,7 @@ scram_SaltedPassword(const char *password,
CHECK_FOR_INTERRUPTS();
#endif
- if (pg_hmac_init(hmac_ctx, (uint8 *) password, password_len) < 0 ||
+ if (pg_hmac_init(hmac_ctx, (const uint8 *) password, password_len) < 0 ||
pg_hmac_update(hmac_ctx, (uint8 *) Ui_prev, key_length) < 0 ||
pg_hmac_final(hmac_ctx, Ui, key_length) < 0)
{
diff --git a/src/common/unicode/case_test.c b/src/common/unicode/case_test.c
index 2144219e178..fb159c1c27c 100644
--- a/src/common/unicode/case_test.c
+++ b/src/common/unicode/case_test.c
@@ -55,7 +55,7 @@ initcap_wbnext(void *state)
while (wbstate->offset < wbstate->len &&
wbstate->str[wbstate->offset] != '\0')
{
- char32_t u = utf8_to_unicode((unsigned char *) wbstate->str +
+ char32_t u = utf8_to_unicode((const unsigned char *) wbstate->str +
wbstate->offset);
bool curr_alnum = pg_u_isalnum(u, wbstate->posix);
diff --git a/src/common/unicode_case.c b/src/common/unicode_case.c
index 71acd38d6fe..0b8d3ffc0b4 100644
--- a/src/common/unicode_case.c
+++ b/src/common/unicode_case.c
@@ -231,7 +231,7 @@ convert_case(char *dst, size_t dstsize, const char *src, ssize_t srclen,
while ((srclen < 0 || srcoff < srclen) && src[srcoff] != '\0')
{
- char32_t u1 = utf8_to_unicode((unsigned char *) src + srcoff);
+ char32_t u1 = utf8_to_unicode((const unsigned char *) src + srcoff);
int u1len = unicode_utf8len(u1);
char32_t simple = 0;
const char32_t *special = NULL;
@@ -373,7 +373,7 @@ check_special_conditions(int conditions, const char *str, size_t len,
if (conditions == 0)
return true;
else if (conditions == PG_U_FINAL_SIGMA)
- return check_final_sigma((unsigned char *) str, len, offset);
+ return check_final_sigma((const unsigned char *) str, len, offset);
/* no other conditions supported */
Assert(false);
diff --git a/src/include/access/tupmacs.h b/src/include/access/tupmacs.h
index e6df8264750..3e5530658c9 100644
--- a/src/include/access/tupmacs.h
+++ b/src/include/access/tupmacs.h
@@ -190,7 +190,7 @@ fetch_att(const void *T, bool attbyval, int attlen)
: \
( \
AssertMacro((attlen) == -2), \
- (cur_offset) + (strlen((char *) (attptr)) + 1) \
+ (cur_offset) + (strlen((const char *) (attptr)) + 1) \
)) \
)
diff --git a/src/include/access/visibilitymap.h b/src/include/access/visibilitymap.h
index 47ad489a9a7..a0166c5b410 100644
--- a/src/include/access/visibilitymap.h
+++ b/src/include/access/visibilitymap.h
@@ -32,15 +32,15 @@ extern bool visibilitymap_clear(Relation rel, BlockNumber heapBlk,
extern void visibilitymap_pin(Relation rel, BlockNumber heapBlk,
Buffer *vmbuf);
extern bool visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf);
-extern uint8 visibilitymap_set(Relation rel,
- BlockNumber heapBlk, Buffer heapBuf,
- XLogRecPtr recptr,
- Buffer vmBuf,
- TransactionId cutoff_xid,
- uint8 flags);
-extern uint8 visibilitymap_set_vmbits(BlockNumber heapBlk,
- Buffer vmBuf, uint8 flags,
- const RelFileLocator rlocator);
+extern void visibilitymap_set(Relation rel,
+ BlockNumber heapBlk, Buffer heapBuf,
+ XLogRecPtr recptr,
+ Buffer vmBuf,
+ TransactionId cutoff_xid,
+ uint8 flags);
+extern void visibilitymap_set_vmbits(BlockNumber heapBlk,
+ Buffer vmBuf, uint8 flags,
+ const RelFileLocator rlocator);
extern uint8 visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf);
extern void visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen);
extern BlockNumber visibilitymap_prepare_truncate(Relation rel,
diff --git a/src/include/c.h b/src/include/c.h
index 17afaef9a6a..48e4087c09c 100644
--- a/src/include/c.h
+++ b/src/include/c.h
@@ -263,16 +263,6 @@
#endif
/*
- * alignas is buggy in g++ < 9, but the more or less equivalent attribute
- * works.
- *
- * <https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89357>
- */
-#if defined(__cplusplus) && defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 9
-#define alignas(a) __attribute__((aligned(a)))
-#endif
-
-/*
* Use "pg_attribute_always_inline" in place of "inline" for functions that
* we wish to force inlining of, even when the compiler's heuristics would
* choose not to. But, if possible, don't force inlining in unoptimized
@@ -1124,6 +1114,14 @@ typedef struct PGAlignedBlock
} PGAlignedBlock;
/*
+ * alignas with extended alignments is buggy in g++ < 9. As a simple
+ * workaround, we disable these definitions in that case.
+ *
+ * <https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89357>
+ */
+#if !(defined(__cplusplus) && defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 9)
+
+/*
* Use this to declare a field or local variable holding a page buffer, if that
* page might be accessed as a page or passed to an SMgr I/O function. If
* allocating using the MemoryContext API, the aligned allocation functions
@@ -1142,6 +1140,8 @@ typedef struct PGAlignedXLogBlock
alignas(PG_IO_ALIGN_SIZE) char data[XLOG_BLCKSZ];
} PGAlignedXLogBlock;
+#endif /* !(g++ < 9) */
+
/* msb for char */
#define HIGHBIT (0x80)
#define IS_HIGHBIT_SET(ch) ((unsigned char)(ch) & HIGHBIT)
diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h
index 79db8731621..fb577026666 100644
--- a/src/include/catalog/catversion.h
+++ b/src/include/catalog/catversion.h
@@ -57,6 +57,6 @@
*/
/* yyyymmddN */
-#define CATALOG_VERSION_NO 202601221
+#define CATALOG_VERSION_NO 202601261
#endif
diff --git a/src/include/catalog/pg_proc.dat b/src/include/catalog/pg_proc.dat
index 894b6a1b6d6..5e5e33f64fc 100644
--- a/src/include/catalog/pg_proc.dat
+++ b/src/include/catalog/pg_proc.dat
@@ -12619,6 +12619,11 @@
proargtypes => 'int4', prosrc => 'gist_translate_cmptype_common' },
# Extended Statistics functions
+{ oid => '9947', descr => 'restore statistics on extended statistics object',
+ proname => 'pg_restore_extended_stats', provariadic => 'any',
+ proisstrict => 'f', provolatile => 'v', proparallel => 'u',
+ prorettype => 'bool', proargtypes => 'any', proargmodes => '{v}',
+ proargnames => '{kwargs}', prosrc => 'pg_restore_extended_stats' },
{ oid => '9948', descr => 'clear statistics on extended statistics object',
proname => 'pg_clear_extended_stats', proisstrict => 'f', provolatile => 'v',
proparallel => 'u', prorettype => 'void', proargtypes => 'text text text text bool',
diff --git a/src/include/common/hashfn_unstable.h b/src/include/common/hashfn_unstable.h
index 5214b861550..6966daa2b09 100644
--- a/src/include/common/hashfn_unstable.h
+++ b/src/include/common/hashfn_unstable.h
@@ -271,7 +271,7 @@ fasthash_accum_cstring_aligned(fasthash_state *hs, const char *str)
*/
for (;;)
{
- uint64 chunk = *(uint64 *) str;
+ uint64 chunk = *(const uint64 *) str;
zero_byte_low = haszero64(chunk);
if (zero_byte_low)
diff --git a/src/include/portability/mem.h b/src/include/portability/mem.h
index 091328f680d..c048e8836c5 100644
--- a/src/include/portability/mem.h
+++ b/src/include/portability/mem.h
@@ -38,8 +38,6 @@
#define MAP_NOSYNC 0
#endif
-#define PG_MMAP_FLAGS (MAP_SHARED|MAP_ANONYMOUS|MAP_HASSEMAPHORE)
-
/* Some really old systems don't define MAP_FAILED. */
#ifndef MAP_FAILED
#define MAP_FAILED ((void *) -1)
diff --git a/src/include/utils/memutils.h b/src/include/utils/memutils.h
index 2bc13c3a054..11ab1717a16 100644
--- a/src/include/utils/memutils.h
+++ b/src/include/utils/memutils.h
@@ -253,7 +253,7 @@ pg_memory_is_all_zeros(const void *ptr, size_t len)
*/
for (; p < aligned_end; p += sizeof(size_t))
{
- if (*(size_t *) p != 0)
+ if (*(const size_t *) p != 0)
return false;
}
@@ -290,10 +290,10 @@ pg_memory_is_all_zeros(const void *ptr, size_t len)
*/
for (; p < aligned_end - (sizeof(size_t) * 7); p += sizeof(size_t) * 8)
{
- if ((((size_t *) p)[0] != 0) | (((size_t *) p)[1] != 0) |
- (((size_t *) p)[2] != 0) | (((size_t *) p)[3] != 0) |
- (((size_t *) p)[4] != 0) | (((size_t *) p)[5] != 0) |
- (((size_t *) p)[6] != 0) | (((size_t *) p)[7] != 0))
+ if ((((const size_t *) p)[0] != 0) | (((const size_t *) p)[1] != 0) |
+ (((const size_t *) p)[2] != 0) | (((const size_t *) p)[3] != 0) |
+ (((const size_t *) p)[4] != 0) | (((const size_t *) p)[5] != 0) |
+ (((const size_t *) p)[6] != 0) | (((const size_t *) p)[7] != 0))
return false;
}
@@ -305,7 +305,7 @@ pg_memory_is_all_zeros(const void *ptr, size_t len)
*/
for (; p < aligned_end; p += sizeof(size_t))
{
- if (*(size_t *) p != 0)
+ if (*(const size_t *) p != 0)
return false;
}
diff --git a/src/include/varatt.h b/src/include/varatt.h
index eccd3ca04d6..fd7d5912f7d 100644
--- a/src/include/varatt.h
+++ b/src/include/varatt.h
@@ -193,25 +193,25 @@ typedef struct
#ifdef WORDS_BIGENDIAN
#define VARATT_IS_4B(PTR) \
- ((((varattrib_1b *) (PTR))->va_header & 0x80) == 0x00)
+ ((((const varattrib_1b *) (PTR))->va_header & 0x80) == 0x00)
#define VARATT_IS_4B_U(PTR) \
- ((((varattrib_1b *) (PTR))->va_header & 0xC0) == 0x00)
+ ((((const varattrib_1b *) (PTR))->va_header & 0xC0) == 0x00)
#define VARATT_IS_4B_C(PTR) \
- ((((varattrib_1b *) (PTR))->va_header & 0xC0) == 0x40)
+ ((((const varattrib_1b *) (PTR))->va_header & 0xC0) == 0x40)
#define VARATT_IS_1B(PTR) \
- ((((varattrib_1b *) (PTR))->va_header & 0x80) == 0x80)
+ ((((const varattrib_1b *) (PTR))->va_header & 0x80) == 0x80)
#define VARATT_IS_1B_E(PTR) \
- ((((varattrib_1b *) (PTR))->va_header) == 0x80)
+ ((((const varattrib_1b *) (PTR))->va_header) == 0x80)
#define VARATT_NOT_PAD_BYTE(PTR) \
- (*((uint8 *) (PTR)) != 0)
+ (*((const uint8 *) (PTR)) != 0)
/* VARSIZE_4B() should only be used on known-aligned data */
#define VARSIZE_4B(PTR) \
- (((varattrib_4b *) (PTR))->va_4byte.va_header & 0x3FFFFFFF)
+ (((const varattrib_4b *) (PTR))->va_4byte.va_header & 0x3FFFFFFF)
#define VARSIZE_1B(PTR) \
- (((varattrib_1b *) (PTR))->va_header & 0x7F)
+ (((const varattrib_1b *) (PTR))->va_header & 0x7F)
#define VARTAG_1B_E(PTR) \
- ((vartag_external) ((varattrib_1b_e *) (PTR))->va_tag)
+ ((vartag_external) ((const varattrib_1b_e *) (PTR))->va_tag)
#define SET_VARSIZE_4B(PTR,len) \
(((varattrib_4b *) (PTR))->va_4byte.va_header = (len) & 0x3FFFFFFF)
@@ -226,25 +226,25 @@ typedef struct
#else /* !WORDS_BIGENDIAN */
#define VARATT_IS_4B(PTR) \
- ((((varattrib_1b *) (PTR))->va_header & 0x01) == 0x00)
+ ((((const varattrib_1b *) (PTR))->va_header & 0x01) == 0x00)
#define VARATT_IS_4B_U(PTR) \
- ((((varattrib_1b *) (PTR))->va_header & 0x03) == 0x00)
+ ((((const varattrib_1b *) (PTR))->va_header & 0x03) == 0x00)
#define VARATT_IS_4B_C(PTR) \
- ((((varattrib_1b *) (PTR))->va_header & 0x03) == 0x02)
+ ((((const varattrib_1b *) (PTR))->va_header & 0x03) == 0x02)
#define VARATT_IS_1B(PTR) \
- ((((varattrib_1b *) (PTR))->va_header & 0x01) == 0x01)
+ ((((const varattrib_1b *) (PTR))->va_header & 0x01) == 0x01)
#define VARATT_IS_1B_E(PTR) \
- ((((varattrib_1b *) (PTR))->va_header) == 0x01)
+ ((((const varattrib_1b *) (PTR))->va_header) == 0x01)
#define VARATT_NOT_PAD_BYTE(PTR) \
- (*((uint8 *) (PTR)) != 0)
+ (*((const uint8 *) (PTR)) != 0)
/* VARSIZE_4B() should only be used on known-aligned data */
#define VARSIZE_4B(PTR) \
- ((((varattrib_4b *) (PTR))->va_4byte.va_header >> 2) & 0x3FFFFFFF)
+ ((((const varattrib_4b *) (PTR))->va_4byte.va_header >> 2) & 0x3FFFFFFF)
#define VARSIZE_1B(PTR) \
- ((((varattrib_1b *) (PTR))->va_header >> 1) & 0x7F)
+ ((((const varattrib_1b *) (PTR))->va_header >> 1) & 0x7F)
#define VARTAG_1B_E(PTR) \
- ((vartag_external) ((varattrib_1b_e *) (PTR))->va_tag)
+ ((vartag_external) ((const varattrib_1b_e *) (PTR))->va_tag)
#define SET_VARSIZE_4B(PTR,len) \
(((varattrib_4b *) (PTR))->va_4byte.va_header = (((uint32) (len)) << 2))
@@ -492,14 +492,14 @@ VARDATA_ANY(const void *PTR)
static inline Size
VARDATA_COMPRESSED_GET_EXTSIZE(const void *PTR)
{
- return ((varattrib_4b *) PTR)->va_compressed.va_tcinfo & VARLENA_EXTSIZE_MASK;
+ return ((const varattrib_4b *) PTR)->va_compressed.va_tcinfo & VARLENA_EXTSIZE_MASK;
}
/* Compression method of a compressed-in-line varlena datum */
static inline uint32
VARDATA_COMPRESSED_GET_COMPRESS_METHOD(const void *PTR)
{
- return ((varattrib_4b *) PTR)->va_compressed.va_tcinfo >> VARLENA_EXTSIZE_BITS;
+ return ((const varattrib_4b *) PTR)->va_compressed.va_tcinfo >> VARLENA_EXTSIZE_BITS;
}
/* Same for external Datums; but note argument is a struct varatt_external */
diff --git a/src/interfaces/libpq/fe-auth-scram.c b/src/interfaces/libpq/fe-auth-scram.c
index 05273c91f98..99103b7e2b6 100644
--- a/src/interfaces/libpq/fe-auth-scram.c
+++ b/src/interfaces/libpq/fe-auth-scram.c
@@ -819,7 +819,7 @@ calculate_client_proof(fe_scram_state *state,
strlen(state->server_first_message)) < 0 ||
pg_hmac_update(ctx, (uint8 *) ",", 1) < 0 ||
pg_hmac_update(ctx,
- (uint8 *) client_final_message_without_proof,
+ (const uint8 *) client_final_message_without_proof,
strlen(client_final_message_without_proof)) < 0 ||
pg_hmac_final(ctx, ClientSignature, state->key_length) < 0)
{
diff --git a/src/interfaces/libpq/fe-auth.c b/src/interfaces/libpq/fe-auth.c
index f08db30dbb7..f05aaea9651 100644
--- a/src/interfaces/libpq/fe-auth.c
+++ b/src/interfaces/libpq/fe-auth.c
@@ -1369,7 +1369,7 @@ PQencryptPassword(const char *passwd, const char *user)
if (!crypt_pwd)
return NULL;
- if (!pg_md5_encrypt(passwd, (uint8 *) user, strlen(user), crypt_pwd, &errstr))
+ if (!pg_md5_encrypt(passwd, (const uint8 *) user, strlen(user), crypt_pwd, &errstr))
{
free(crypt_pwd);
return NULL;
@@ -1482,7 +1482,7 @@ PQencryptPasswordConn(PGconn *conn, const char *passwd, const char *user,
{
const char *errstr = NULL;
- if (!pg_md5_encrypt(passwd, (uint8 *) user, strlen(user), crypt_pwd, &errstr))
+ if (!pg_md5_encrypt(passwd, (const uint8 *) user, strlen(user), crypt_pwd, &errstr))
{
libpq_append_conn_error(conn, "could not encrypt password: %s", errstr);
free(crypt_pwd);
diff --git a/src/port/pg_crc32c_armv8.c b/src/port/pg_crc32c_armv8.c
index 039986c7b33..9ca0f728d39 100644
--- a/src/port/pg_crc32c_armv8.c
+++ b/src/port/pg_crc32c_armv8.c
@@ -42,32 +42,32 @@ pg_comp_crc32c_armv8(pg_crc32c crc, const void *data, size_t len)
if (!PointerIsAligned(p, uint32) &&
p + 2 <= pend)
{
- crc = __crc32ch(crc, *(uint16 *) p);
+ crc = __crc32ch(crc, *(const uint16 *) p);
p += 2;
}
if (!PointerIsAligned(p, uint64) &&
p + 4 <= pend)
{
- crc = __crc32cw(crc, *(uint32 *) p);
+ crc = __crc32cw(crc, *(const uint32 *) p);
p += 4;
}
/* Process eight bytes at a time, as far as we can. */
while (p + 8 <= pend)
{
- crc = __crc32cd(crc, *(uint64 *) p);
+ crc = __crc32cd(crc, *(const uint64 *) p);
p += 8;
}
/* Process remaining 0-7 bytes. */
if (p + 4 <= pend)
{
- crc = __crc32cw(crc, *(uint32 *) p);
+ crc = __crc32cw(crc, *(const uint32 *) p);
p += 4;
}
if (p + 2 <= pend)
{
- crc = __crc32ch(crc, *(uint16 *) p);
+ crc = __crc32ch(crc, *(const uint16 *) p);
p += 2;
}
if (p < pend)
diff --git a/src/port/pg_popcount_aarch64.c b/src/port/pg_popcount_aarch64.c
index 2184854dbf7..ba57f2cd4bd 100644
--- a/src/port/pg_popcount_aarch64.c
+++ b/src/port/pg_popcount_aarch64.c
@@ -383,7 +383,7 @@ pg_popcount_neon(const char *buf, int bytes)
*/
for (; bytes >= sizeof(uint64); bytes -= sizeof(uint64))
{
- popcnt += pg_popcount64(*((uint64 *) buf));
+ popcnt += pg_popcount64(*((const uint64 *) buf));
buf += sizeof(uint64);
}
@@ -465,7 +465,7 @@ pg_popcount_masked_neon(const char *buf, int bytes, bits8 mask)
*/
for (; bytes >= sizeof(uint64); bytes -= sizeof(uint64))
{
- popcnt += pg_popcount64(*((uint64 *) buf) & mask64);
+ popcnt += pg_popcount64(*((const uint64 *) buf) & mask64);
buf += sizeof(uint64);
}
diff --git a/src/test/isolation/isolationtester.c b/src/test/isolation/isolationtester.c
index e01c0c9de93..a0aec04d994 100644
--- a/src/test/isolation/isolationtester.c
+++ b/src/test/isolation/isolationtester.c
@@ -498,8 +498,8 @@ run_named_permutations(TestSpec *testspec)
static int
step_qsort_cmp(const void *a, const void *b)
{
- Step *stepa = *((Step **) a);
- Step *stepb = *((Step **) b);
+ Step *stepa = *((Step *const *) a);
+ Step *stepb = *((Step *const *) b);
return strcmp(stepa->name, stepb->name);
}
@@ -507,8 +507,8 @@ step_qsort_cmp(const void *a, const void *b)
static int
step_bsearch_cmp(const void *a, const void *b)
{
- char *stepname = (char *) a;
- Step *step = *((Step **) b);
+ const char *stepname = (const char *) a;
+ Step *step = *((Step *const *) b);
return strcmp(stepname, step->name);
}
diff --git a/src/test/modules/libpq_pipeline/libpq_pipeline.c b/src/test/modules/libpq_pipeline/libpq_pipeline.c
index 0fb44be32ce..ce1a9995f46 100644
--- a/src/test/modules/libpq_pipeline/libpq_pipeline.c
+++ b/src/test/modules/libpq_pipeline/libpq_pipeline.c
@@ -1594,7 +1594,7 @@ test_singlerowmode(PGconn *conn)
"SELECT generate_series(42, $1)",
1,
NULL,
- (const char **) param,
+ (const char *const *) param,
NULL,
NULL,
0) != 1)
diff --git a/src/test/modules/test_tidstore/test_tidstore.c b/src/test/modules/test_tidstore/test_tidstore.c
index 9a3a209da0b..c9a035fa494 100644
--- a/src/test/modules/test_tidstore/test_tidstore.c
+++ b/src/test/modules/test_tidstore/test_tidstore.c
@@ -56,16 +56,16 @@ itemptr_cmp(const void *left, const void *right)
OffsetNumber loff,
roff;
- lblk = ItemPointerGetBlockNumber((ItemPointer) left);
- rblk = ItemPointerGetBlockNumber((ItemPointer) right);
+ lblk = ItemPointerGetBlockNumber((const ItemPointerData *) left);
+ rblk = ItemPointerGetBlockNumber((const ItemPointerData *) right);
if (lblk < rblk)
return -1;
if (lblk > rblk)
return 1;
- loff = ItemPointerGetOffsetNumber((ItemPointer) left);
- roff = ItemPointerGetOffsetNumber((ItemPointer) right);
+ loff = ItemPointerGetOffsetNumber((const ItemPointerData *) left);
+ roff = ItemPointerGetOffsetNumber((const ItemPointerData *) right);
if (loff < roff)
return -1;
diff --git a/src/test/regress/expected/jsonb.out b/src/test/regress/expected/jsonb.out
index 93535fd7dee..4e2467852db 100644
--- a/src/test/regress/expected/jsonb.out
+++ b/src/test/regress/expected/jsonb.out
@@ -3131,6 +3131,7 @@ SELECT count(*) FROM testjsonb WHERE j @? '$.bar';
0
(1 row)
+ALTER TABLE testjsonb SET (parallel_workers = 2);
CREATE INDEX jidx ON testjsonb USING gin (j);
SET enable_seqscan = off;
SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}';
@@ -3507,7 +3508,7 @@ SELECT count(*) FROM testjsonb WHERE j = '{"pos":98, "line":371, "node":"CBA", "
--gin path opclass
DROP INDEX jidx;
-CREATE INDEX jidx ON testjsonb USING gin (j jsonb_path_ops);
+CREATE INDEX CONCURRENTLY jidx ON testjsonb USING gin (j jsonb_path_ops);
SET enable_seqscan = off;
SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}';
count
diff --git a/src/test/regress/expected/stats_import.out b/src/test/regress/expected/stats_import.out
index d61ab92d17b..b5664e5513c 100644
--- a/src/test/regress/expected/stats_import.out
+++ b/src/test/regress/expected/stats_import.out
@@ -12,6 +12,11 @@ CREATE TABLE stats_import.test(
arange int4range,
tags text[]
) WITH (autovacuum_enabled = false);
+CREATE TABLE stats_import.test_mr(
+ id INTEGER PRIMARY KEY,
+ name text,
+ mrange int4multirange
+) WITH (autovacuum_enabled = false);
SELECT
pg_catalog.pg_restore_relation_stats(
'schemaname', 'stats_import',
@@ -1095,6 +1100,27 @@ AND attname = 'id';
stats_import | test | id | f | 0.36 | 5 | 0.6 | {2,1,3} | {0.3,0.25,0.05} | {1,2,3,4} | | | | | | |
(1 row)
+-- test for multiranges
+INSERT INTO stats_import.test_mr
+VALUES
+ (1, 'red', '{[1,3),[5,9),[20,30)}'::int4multirange),
+ (2, 'red', '{[11,13),[15,19),[20,30)}'::int4multirange),
+ (3, 'red', '{[21,23),[25,29),[120,130)}'::int4multirange);
+-- ensure that we set attribute stats for a multirange
+SELECT pg_catalog.pg_restore_attribute_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test_mr',
+ 'attname', 'mrange',
+ 'inherited', false,
+ 'range_length_histogram', '{19,29,109}'::text,
+ 'range_empty_frac', '0'::real,
+ 'range_bounds_histogram', '{"[1,30)","[11,30)","[21,130)"}'::text
+);
+ pg_restore_attribute_stats
+----------------------------
+ t
+(1 row)
+
--
-- Test the ability to exactly copy data from one table to an identical table,
-- correctly reconstructing the stakind order as well as the staopN and
@@ -1115,6 +1141,12 @@ CREATE INDEX is_odd ON stats_import.test(((comp).a % 2 = 1));
CREATE STATISTICS stats_import.test_stat
ON name, comp, lower(arange), array_length(tags,1)
FROM stats_import.test;
+CREATE STATISTICS stats_import.test_stat_ndistinct (ndistinct)
+ ON name, comp
+ FROM stats_import.test;
+CREATE STATISTICS stats_import.test_stat_dependencies (dependencies)
+ ON name, comp
+ FROM stats_import.test;
-- Generate statistics on table with data
ANALYZE stats_import.test;
CREATE TABLE stats_import.test_clone ( LIKE stats_import.test )
@@ -1564,10 +1596,239 @@ RESET ROLE;
REVOKE MAINTAIN ON stats_import.test FROM regress_test_extstat_clear;
REVOKE ALL ON SCHEMA stats_import FROM regress_test_extstat_clear;
DROP ROLE regress_test_extstat_clear;
+-- Tests for pg_restore_extended_stats().
+-- Invalid argument values.
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', NULL,
+ 'relname', 'test_clone',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'test_stat_clone',
+ 'inherited', false);
+ERROR: argument "schemaname" must not be null
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', NULL,
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'test_stat_clone',
+ 'inherited', false);
+ERROR: argument "relname" must not be null
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test_clone',
+ 'statistics_schemaname', NULL,
+ 'statistics_name', 'test_stat_clone',
+ 'inherited', false);
+ERROR: argument "statistics_schemaname" must not be null
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test_clone',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', NULL,
+ 'inherited', false);
+ERROR: argument "statistics_name" must not be null
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test_clone',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'test_stat_clone',
+ 'inherited', NULL);
+ERROR: argument "inherited" must not be null
+-- Missing objects
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'schema_not_exist',
+ 'relname', 'test_clone',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'test_stat_clone',
+ 'inherited', false);
+ERROR: schema "schema_not_exist" does not exist
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'table_not_exist',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'test_stat_clone',
+ 'inherited', false);
+ERROR: relation "stats_import.table_not_exist" does not exist
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test_clone',
+ 'statistics_schemaname', 'schema_not_exist',
+ 'statistics_name', 'test_stat_clone',
+ 'inherited', false);
+WARNING: could not find schema "schema_not_exist"
+ pg_restore_extended_stats
+---------------------------
+ f
+(1 row)
+
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test_clone',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'ext_stats_not_exist',
+ 'inherited', false);
+WARNING: could not find extended statistics object "stats_import"."ext_stats_not_exist"
+ pg_restore_extended_stats
+---------------------------
+ f
+(1 row)
+
+-- Incorrect relation/extended stats combination
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'test_stat_clone',
+ 'inherited', false);
+WARNING: could not restore extended statistics object "stats_import"."test_stat_clone": incorrect relation "stats_import"."test" specified
+ pg_restore_extended_stats
+---------------------------
+ f
+(1 row)
+
+-- Check that MAINTAIN is required when restoring statistics.
+CREATE ROLE regress_test_extstat_restore;
+GRANT ALL ON SCHEMA stats_import TO regress_test_extstat_restore;
+SET ROLE regress_test_extstat_restore;
+-- No data to restore; this fails with a permission error.
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test_clone',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'test_stat_clone',
+ 'inherited', false);
+ERROR: permission denied for table test_clone
+RESET ROLE;
+GRANT MAINTAIN ON stats_import.test_clone TO regress_test_extstat_restore;
+SET ROLE regress_test_extstat_restore;
+-- This works; check the lock taken on the relation while we are at it.
+BEGIN;
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test_clone',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'test_stat_clone',
+ 'inherited', false,
+ 'n_distinct', '[{"attributes" : [2,3], "ndistinct" : 4}]'::pg_ndistinct);
+ pg_restore_extended_stats
+---------------------------
+ t
+(1 row)
+
+SELECT mode FROM pg_locks WHERE locktype = 'relation' AND
+ relation = 'stats_import.test_clone'::regclass AND
+ pid = pg_backend_pid();
+ mode
+--------------------------
+ ShareUpdateExclusiveLock
+(1 row)
+
+COMMIT;
+RESET ROLE;
+REVOKE MAINTAIN ON stats_import.test_clone FROM regress_test_extstat_restore;
+REVOKE ALL ON SCHEMA stats_import FROM regress_test_extstat_restore;
+DROP ROLE regress_test_extstat_restore;
+-- ndistinct value doesn't match object definition
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'test_stat_ndistinct',
+ 'inherited', false,
+ 'n_distinct', '[{"attributes" : [1,3], "ndistinct" : 4}]'::pg_ndistinct);
+WARNING: could not validate "pg_ndistinct" object: invalid attribute number 1 found
+ pg_restore_extended_stats
+---------------------------
+ f
+(1 row)
+
+-- Incorrect extended stats kind, ndistinct not supported
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'test_stat_dependencies',
+ 'inherited', false,
+ 'n_distinct', '[{"attributes" : [1,3], "ndistinct" : 4}]'::pg_ndistinct);
+WARNING: cannot specify parameter "n_distinct"
+HINT: Extended statistics object "stats_import"."test_stat_dependencies" does not support statistics of this type.
+ pg_restore_extended_stats
+---------------------------
+ f
+(1 row)
+
+-- Incorrect extended stats kind, dependencies not supported
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'test_stat_ndistinct',
+ 'inherited', false,
+ 'dependencies', '[{"attributes": [2], "dependency": 3, "degree": 1.000000},
+ {"attributes": [3], "dependency": 2, "degree": 1.000000}]'::pg_dependencies);
+WARNING: cannot specify parameter "dependencies"
+HINT: Extended statistics object "stats_import"."test_stat_ndistinct" does not support statistics of this type.
+ pg_restore_extended_stats
+---------------------------
+ f
+(1 row)
+
+-- ok: ndistinct
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'test_stat_ndistinct',
+ 'inherited', false,
+ 'n_distinct', '[{"attributes" : [2,3], "ndistinct" : 4}]'::pg_ndistinct);
+ pg_restore_extended_stats
+---------------------------
+ t
+(1 row)
+
+-- dependencies value doesn't match definition
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'test_stat_dependencies',
+ 'inherited', false,
+ 'dependencies', '[{"attributes": [1], "dependency": 3, "degree": 1.000000},
+ {"attributes": [3], "dependency": 1, "degree": 1.000000}]'::pg_dependencies);
+WARNING: could not validate "pg_dependencies" object: invalid attribute number 1 found
+ pg_restore_extended_stats
+---------------------------
+ f
+(1 row)
+
+-- ok: dependencies
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'test_stat_dependencies',
+ 'inherited', false,
+ 'dependencies', '[{"attributes": [2], "dependency": 3, "degree": 1.000000},
+ {"attributes": [3], "dependency": 2, "degree": 1.000000}]'::pg_dependencies);
+ pg_restore_extended_stats
+---------------------------
+ t
+(1 row)
+
+SELECT replace(e.n_distinct, '}, ', E'},\n') AS n_distinct
+FROM pg_stats_ext AS e
+WHERE e.statistics_schemaname = 'stats_import' AND
+ e.statistics_name = 'test_stat_ndistinct' AND
+ e.inherited = false;
+ n_distinct
+------------------------------------------
+ [{"attributes": [2, 3], "ndistinct": 4}]
+(1 row)
+
DROP SCHEMA stats_import CASCADE;
-NOTICE: drop cascades to 6 other objects
+NOTICE: drop cascades to 7 other objects
DETAIL: drop cascades to type stats_import.complex_type
drop cascades to table stats_import.test
+drop cascades to table stats_import.test_mr
drop cascades to table stats_import.part_parent
drop cascades to sequence stats_import.testseq
drop cascades to view stats_import.testview
diff --git a/src/test/regress/expected/strings.out b/src/test/regress/expected/strings.out
index 5f2b5c39173..f38688b5c37 100644
--- a/src/test/regress/expected/strings.out
+++ b/src/test/regress/expected/strings.out
@@ -2159,6 +2159,60 @@ SELECT pg_column_compression(f1) AS f1_comp, pg_column_compression(f2) AS f2_com
|
(1 row)
+TRUNCATE toasttest;
+-- test with inline compressible varlenas.
+SET default_toast_compression = 'pglz';
+ALTER TABLE toasttest ALTER COLUMN f1 SET STORAGE MAIN;
+ALTER TABLE toasttest ALTER COLUMN f2 SET STORAGE MAIN;
+INSERT INTO toasttest values(repeat('1234', 1024), repeat('5678', 1024));
+-- There should be no values in the toast relation.
+SELECT substr(f1, 5, 10) AS f1_data, substr(f2, 5, 10) AS f2_data
+ FROM toasttest;
+ f1_data | f2_data
+------------+------------
+ 1234123412 | 5678567856
+(1 row)
+
+SELECT pg_column_compression(f1) AS f1_comp, pg_column_compression(f2) AS f2_comp
+ FROM toasttest;
+ f1_comp | f2_comp
+---------+---------
+ pglz | pglz
+(1 row)
+
+SELECT count(*) FROM :reltoastname;
+ count
+-------
+ 0
+(1 row)
+
+TRUNCATE toasttest;
+-- test with external compressed data (default).
+ALTER TABLE toasttest ALTER COLUMN f1 SET STORAGE EXTENDED;
+ALTER TABLE toasttest ALTER COLUMN f2 SET STORAGE EXTENDED;
+INSERT INTO toasttest values(repeat('1234', 10240), NULL);
+-- There should be one value in the toast relation.
+SELECT substr(f1, 5, 10) AS f1_data, substr(f2, 5, 10) AS f2_data
+ FROM toasttest;
+ f1_data | f2_data
+------------+---------
+ 1234123412 |
+(1 row)
+
+SELECT pg_column_compression(f1) AS f1_comp, pg_column_compression(f2) AS f2_comp
+ FROM toasttest;
+ f1_comp | f2_comp
+---------+---------
+ pglz |
+(1 row)
+
+SELECT count(*) FROM :reltoastname WHERE chunk_seq = 0;
+ count
+-------
+ 1
+(1 row)
+
+RESET default_toast_compression;
DROP TABLE toasttest;
--
-- test length
diff --git a/src/test/regress/expected/tsearch.out b/src/test/regress/expected/tsearch.out
index 9fad6c8b04b..9287c440709 100644
--- a/src/test/regress/expected/tsearch.out
+++ b/src/test/regress/expected/tsearch.out
@@ -870,6 +870,7 @@ RESET enable_seqscan;
RESET enable_indexscan;
RESET enable_bitmapscan;
DROP INDEX wowidx;
+ALTER TABLE test_tsvector SET (parallel_workers = 2);
CREATE INDEX wowidx ON test_tsvector USING gin (a);
SET enable_seqscan=OFF;
-- GIN only supports bitmapscan, so no need to test plain indexscan
diff --git a/src/test/regress/sql/jsonb.sql b/src/test/regress/sql/jsonb.sql
index 21db0db81d6..d28ed1c1e85 100644
--- a/src/test/regress/sql/jsonb.sql
+++ b/src/test/regress/sql/jsonb.sql
@@ -857,6 +857,7 @@ SELECT count(*) FROM testjsonb WHERE j @? '$';
SELECT count(*) FROM testjsonb WHERE j @? '$.public';
SELECT count(*) FROM testjsonb WHERE j @? '$.bar';
+ALTER TABLE testjsonb SET (parallel_workers = 2);
CREATE INDEX jidx ON testjsonb USING gin (j);
SET enable_seqscan = off;
@@ -945,7 +946,7 @@ SELECT count(*) FROM testjsonb WHERE j = '{"pos":98, "line":371, "node":"CBA", "
--gin path opclass
DROP INDEX jidx;
-CREATE INDEX jidx ON testjsonb USING gin (j jsonb_path_ops);
+CREATE INDEX CONCURRENTLY jidx ON testjsonb USING gin (j jsonb_path_ops);
SET enable_seqscan = off;
SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}';
diff --git a/src/test/regress/sql/stats_import.sql b/src/test/regress/sql/stats_import.sql
index d1934a8a42b..3e70b817513 100644
--- a/src/test/regress/sql/stats_import.sql
+++ b/src/test/regress/sql/stats_import.sql
@@ -15,6 +15,12 @@ CREATE TABLE stats_import.test(
tags text[]
) WITH (autovacuum_enabled = false);
+CREATE TABLE stats_import.test_mr(
+ id INTEGER PRIMARY KEY,
+ name text,
+ mrange int4multirange
+) WITH (autovacuum_enabled = false);
+
SELECT
pg_catalog.pg_restore_relation_stats(
'schemaname', 'stats_import',
@@ -764,6 +770,24 @@ AND tablename = 'test'
AND inherited = false
AND attname = 'id';
+-- test for multiranges
+INSERT INTO stats_import.test_mr
+VALUES
+ (1, 'red', '{[1,3),[5,9),[20,30)}'::int4multirange),
+ (2, 'red', '{[11,13),[15,19),[20,30)}'::int4multirange),
+ (3, 'red', '{[21,23),[25,29),[120,130)}'::int4multirange);
+
+-- ensure that we set attribute stats for a multirange
+SELECT pg_catalog.pg_restore_attribute_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test_mr',
+ 'attname', 'mrange',
+ 'inherited', false,
+ 'range_length_histogram', '{19,29,109}'::text,
+ 'range_empty_frac', '0'::real,
+ 'range_bounds_histogram', '{"[1,30)","[11,30)","[21,130)"}'::text
+);
+
--
-- Test the ability to exactly copy data from one table to an identical table,
-- correctly reconstructing the stakind order as well as the staopN and
@@ -787,6 +811,14 @@ CREATE STATISTICS stats_import.test_stat
ON name, comp, lower(arange), array_length(tags,1)
FROM stats_import.test;
+CREATE STATISTICS stats_import.test_stat_ndistinct (ndistinct)
+ ON name, comp
+ FROM stats_import.test;
+
+CREATE STATISTICS stats_import.test_stat_dependencies (dependencies)
+ ON name, comp
+ FROM stats_import.test;
+
-- Generate statistics on table with data
ANALYZE stats_import.test;
@@ -1113,4 +1145,162 @@ REVOKE MAINTAIN ON stats_import.test FROM regress_test_extstat_clear;
REVOKE ALL ON SCHEMA stats_import FROM regress_test_extstat_clear;
DROP ROLE regress_test_extstat_clear;
+-- Tests for pg_restore_extended_stats().
+-- Invalid argument values.
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', NULL,
+ 'relname', 'test_clone',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'test_stat_clone',
+ 'inherited', false);
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', NULL,
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'test_stat_clone',
+ 'inherited', false);
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test_clone',
+ 'statistics_schemaname', NULL,
+ 'statistics_name', 'test_stat_clone',
+ 'inherited', false);
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test_clone',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', NULL,
+ 'inherited', false);
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test_clone',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'test_stat_clone',
+ 'inherited', NULL);
+-- Missing objects
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'schema_not_exist',
+ 'relname', 'test_clone',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'test_stat_clone',
+ 'inherited', false);
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'table_not_exist',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'test_stat_clone',
+ 'inherited', false);
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test_clone',
+ 'statistics_schemaname', 'schema_not_exist',
+ 'statistics_name', 'test_stat_clone',
+ 'inherited', false);
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test_clone',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'ext_stats_not_exist',
+ 'inherited', false);
+-- Incorrect relation/extended stats combination
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'test_stat_clone',
+ 'inherited', false);
+
+-- Check that MAINTAIN is required when restoring statistics.
+CREATE ROLE regress_test_extstat_restore;
+GRANT ALL ON SCHEMA stats_import TO regress_test_extstat_restore;
+SET ROLE regress_test_extstat_restore;
+-- No data to restore; this fails with a permission error.
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test_clone',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'test_stat_clone',
+ 'inherited', false);
+RESET ROLE;
+GRANT MAINTAIN ON stats_import.test_clone TO regress_test_extstat_restore;
+SET ROLE regress_test_extstat_restore;
+-- This works; check the lock taken on the relation while we are at it.
+BEGIN;
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test_clone',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'test_stat_clone',
+ 'inherited', false,
+ 'n_distinct', '[{"attributes" : [2,3], "ndistinct" : 4}]'::pg_ndistinct);
+SELECT mode FROM pg_locks WHERE locktype = 'relation' AND
+ relation = 'stats_import.test_clone'::regclass AND
+ pid = pg_backend_pid();
+COMMIT;
+RESET ROLE;
+REVOKE MAINTAIN ON stats_import.test_clone FROM regress_test_extstat_restore;
+REVOKE ALL ON SCHEMA stats_import FROM regress_test_extstat_restore;
+DROP ROLE regress_test_extstat_restore;
+
+-- ndistinct value doesn't match object definition
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'test_stat_ndistinct',
+ 'inherited', false,
+ 'n_distinct', '[{"attributes" : [1,3], "ndistinct" : 4}]'::pg_ndistinct);
+-- Incorrect extended stats kind, ndistinct not supported
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'test_stat_dependencies',
+ 'inherited', false,
+ 'n_distinct', '[{"attributes" : [1,3], "ndistinct" : 4}]'::pg_ndistinct);
+-- Incorrect extended stats kind, dependencies not supported
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'test_stat_ndistinct',
+ 'inherited', false,
+ 'dependencies', '[{"attributes": [2], "dependency": 3, "degree": 1.000000},
+ {"attributes": [3], "dependency": 2, "degree": 1.000000}]'::pg_dependencies);
+
+-- ok: ndistinct
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'test_stat_ndistinct',
+ 'inherited', false,
+ 'n_distinct', '[{"attributes" : [2,3], "ndistinct" : 4}]'::pg_ndistinct);
+
+-- dependencies value doesn't match definition
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'test_stat_dependencies',
+ 'inherited', false,
+ 'dependencies', '[{"attributes": [1], "dependency": 3, "degree": 1.000000},
+ {"attributes": [3], "dependency": 1, "degree": 1.000000}]'::pg_dependencies);
+
+-- ok: dependencies
+SELECT pg_catalog.pg_restore_extended_stats(
+ 'schemaname', 'stats_import',
+ 'relname', 'test',
+ 'statistics_schemaname', 'stats_import',
+ 'statistics_name', 'test_stat_dependencies',
+ 'inherited', false,
+ 'dependencies', '[{"attributes": [2], "dependency": 3, "degree": 1.000000},
+ {"attributes": [3], "dependency": 2, "degree": 1.000000}]'::pg_dependencies);
+
+SELECT replace(e.n_distinct, '}, ', E'},\n') AS n_distinct
+FROM pg_stats_ext AS e
+WHERE e.statistics_schemaname = 'stats_import' AND
+ e.statistics_name = 'test_stat_ndistinct' AND
+ e.inherited = false;
+
DROP SCHEMA stats_import CASCADE;
diff --git a/src/test/regress/sql/strings.sql b/src/test/regress/sql/strings.sql
index 37c0893ae83..d8a09737668 100644
--- a/src/test/regress/sql/strings.sql
+++ b/src/test/regress/sql/strings.sql
@@ -678,6 +678,30 @@ SELECT substr(f1, 5, 10) AS f1_data, substr(f2, 5, 10) AS f2_data
FROM toasttest;
SELECT pg_column_compression(f1) AS f1_comp, pg_column_compression(f2) AS f2_comp
FROM toasttest;
+TRUNCATE toasttest;
+-- test with inline compressible varlenas.
+SET default_toast_compression = 'pglz';
+ALTER TABLE toasttest ALTER COLUMN f1 SET STORAGE MAIN;
+ALTER TABLE toasttest ALTER COLUMN f2 SET STORAGE MAIN;
+INSERT INTO toasttest values(repeat('1234', 1024), repeat('5678', 1024));
+-- There should be no values in the toast relation.
+SELECT substr(f1, 5, 10) AS f1_data, substr(f2, 5, 10) AS f2_data
+ FROM toasttest;
+SELECT pg_column_compression(f1) AS f1_comp, pg_column_compression(f2) AS f2_comp
+ FROM toasttest;
+SELECT count(*) FROM :reltoastname;
+TRUNCATE toasttest;
+-- test with external compressed data (default).
+ALTER TABLE toasttest ALTER COLUMN f1 SET STORAGE EXTENDED;
+ALTER TABLE toasttest ALTER COLUMN f2 SET STORAGE EXTENDED;
+INSERT INTO toasttest values(repeat('1234', 10240), NULL);
+-- There should be one value in the toast relation.
+SELECT substr(f1, 5, 10) AS f1_data, substr(f2, 5, 10) AS f2_data
+ FROM toasttest;
+SELECT pg_column_compression(f1) AS f1_comp, pg_column_compression(f2) AS f2_comp
+ FROM toasttest;
+SELECT count(*) FROM :reltoastname WHERE chunk_seq = 0;
+RESET default_toast_compression;
DROP TABLE toasttest;
--
diff --git a/src/test/regress/sql/tsearch.sql b/src/test/regress/sql/tsearch.sql
index fbd26cdba45..dc74aa0c889 100644
--- a/src/test/regress/sql/tsearch.sql
+++ b/src/test/regress/sql/tsearch.sql
@@ -222,6 +222,7 @@ RESET enable_bitmapscan;
DROP INDEX wowidx;
+ALTER TABLE test_tsvector SET (parallel_workers = 2);
CREATE INDEX wowidx ON test_tsvector USING gin (a);
SET enable_seqscan=OFF;
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index 1c8610fd46c..ddbe4c64971 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -2897,6 +2897,7 @@ SplitPoint
SplitTextOutputData
SplitVar
StackElem
+StakindFlags
StartDataPtrType
StartLOPtrType
StartLOsPtrType