author	Andres Freund <andres@anarazel.de>	2016-03-07 14:24:52 -0800
committer	Andres Freund <andres@anarazel.de>	2016-03-07 14:24:52 -0800
commit	250e5bd7129d0b54594dc225f7380545189b70c1 (patch)
tree	30eb398370d1067cfdc5e770d56c66a3aa846030 /src
parent	45b87cc57fe02f571de33ce20b4d7bec881a785a (diff)
Further improvements to c8f621c43.
Coverity and inspection for the issue addressed in fd45d16f found some questionable code.

Specifically, Coverity noticed that the wrong length was added in ReorderBufferSerializeChange() - without immediate negative consequences, as the variable isn't used afterwards. During code review and testing I noticed that a bit of space was wasted when allocating tuple bufs in several places. Thirdly, the debug memset()s in ReorderBufferGetTupleBuf() reduce the error checking Valgrind can do.

Backpatch: 9.4, like c8f621c43.
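The tuple-buffer sizing issue mentioned above boils down to this: the block data returned by XLogRecGetBlockData() (or the bytes following the fixed-size WAL struct) is an xl_heap_header immediately followed by the tuple contents, so the buffer obtained from ReorderBufferGetTupleBuf() only needs datalen - SizeOfHeapHeader bytes, while DecodeXLogTuple() still reads the full datalen. A minimal standalone sketch of that sizing logic; HDR_SIZE, get_tuple_buf() and wal_block_data are hypothetical stand-ins, not PostgreSQL code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define HDR_SIZE 16				/* hypothetical stand-in for SizeOfHeapHeader */

/* hypothetical stand-in for ReorderBufferGetTupleBuf(): size it for the tuple only */
static char *
get_tuple_buf(size_t tuplelen)
{
	return malloc(tuplelen);
}

int
main(void)
{
	char		wal_block_data[128];	/* header bytes followed by tuple bytes */
	size_t		datalen = sizeof(wal_block_data);
	size_t		tuplelen = datalen - HDR_SIZE;	/* the fix: don't count the header */
	char	   *buf;

	memset(wal_block_data, 0, sizeof(wal_block_data));

	/* previously the buffer was sized with datalen, wasting HDR_SIZE bytes per tuple */
	buf = get_tuple_buf(tuplelen);

	/* decoding consumes the full datalen; only the tuple body lands in the buffer */
	memcpy(buf, wal_block_data + HDR_SIZE, tuplelen);

	printf("allocated %zu bytes instead of %zu\n", tuplelen, datalen);
	free(buf);
	return 0;
}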
Diffstat (limited to 'src')
-rw-r--r--	src/backend/replication/logical/decode.c	28
-rw-r--r--	src/backend/replication/logical/reorderbuffer.c	5
2 files changed, 23 insertions, 10 deletions
diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c
index 3f8c5e50fba..9c080471626 100644
--- a/src/backend/replication/logical/decode.c
+++ b/src/backend/replication/logical/decode.c
@@ -607,13 +607,14 @@ DecodeInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
 
 	if (xlrec->flags & XLH_INSERT_CONTAINS_NEW_TUPLE)
 	{
-		Size		tuplelen;
-		char	   *tupledata = XLogRecGetBlockData(r, 0, &tuplelen);
+		Size		datalen;
+		char	   *tupledata = XLogRecGetBlockData(r, 0, &datalen);
+		Size		tuplelen = datalen - SizeOfHeapHeader;
 
 		change->data.tp.newtuple =
 			ReorderBufferGetTupleBuf(ctx->reorder, tuplelen);
 
-		DecodeXLogTuple(tupledata, tuplelen, change->data.tp.newtuple);
+		DecodeXLogTuple(tupledata, datalen, change->data.tp.newtuple);
 	}
 
 	change->data.tp.clear_toast_afterwards = true;
@@ -634,7 +635,6 @@ DecodeUpdate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
 	xl_heap_update *xlrec;
 	ReorderBufferChange *change;
 	char	   *data;
-	Size		datalen;
 	RelFileNode target_node;
 
 	xlrec = (xl_heap_update *) XLogRecGetData(r);
@@ -655,22 +655,31 @@ DecodeUpdate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
 
 	if (xlrec->flags & XLH_UPDATE_CONTAINS_NEW_TUPLE)
 	{
+		Size		datalen;
+		Size		tuplelen;
+
 		data = XLogRecGetBlockData(r, 0, &datalen);
 
+		tuplelen = datalen - SizeOfHeapHeader;
+
 		change->data.tp.newtuple =
-			ReorderBufferGetTupleBuf(ctx->reorder, datalen);
+			ReorderBufferGetTupleBuf(ctx->reorder, tuplelen);
 
 		DecodeXLogTuple(data, datalen, change->data.tp.newtuple);
 	}
 
 	if (xlrec->flags & XLH_UPDATE_CONTAINS_OLD)
 	{
+		Size		datalen;
+		Size		tuplelen;
+
 		/* caution, remaining data in record is not aligned */
 		data = XLogRecGetData(r) + SizeOfHeapUpdate;
 		datalen = XLogRecGetDataLen(r) - SizeOfHeapUpdate;
+		tuplelen = datalen - SizeOfHeapHeader;
 
 		change->data.tp.oldtuple =
-			ReorderBufferGetTupleBuf(ctx->reorder, datalen);
+			ReorderBufferGetTupleBuf(ctx->reorder, tuplelen);
 
 		DecodeXLogTuple(data, datalen, change->data.tp.oldtuple);
 	}
@@ -720,15 +729,16 @@ DecodeDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
 	/* old primary key stored */
 	if (xlrec->flags & XLH_DELETE_CONTAINS_OLD)
 	{
-		Size		len = XLogRecGetDataLen(r) - SizeOfHeapDelete;
+		Size		datalen = XLogRecGetDataLen(r) - SizeOfHeapDelete;
+		Size		tuplelen = datalen - SizeOfHeapHeader;
 
 		Assert(XLogRecGetDataLen(r) > (SizeOfHeapDelete + SizeOfHeapHeader));
 
 		change->data.tp.oldtuple =
-			ReorderBufferGetTupleBuf(ctx->reorder, len);
+			ReorderBufferGetTupleBuf(ctx->reorder, tuplelen);
 
 		DecodeXLogTuple((char *) xlrec + SizeOfHeapDelete,
-						len, change->data.tp.oldtuple);
+						datalen, change->data.tp.oldtuple);
 	}
 
 	change->data.tp.clear_toast_afterwards = true;
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index 40d5c4833c2..d3830095c78 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -471,12 +471,15 @@ ReorderBufferGetTupleBuf(ReorderBuffer *rb, Size tuple_len)
 		rb->nr_cached_tuplebufs--;
 		tuple = slist_container(ReorderBufferTupleBuf, node,
 								slist_pop_head_node(&rb->cached_tuplebufs));
+		Assert(tuple->alloc_tuple_size == MaxHeapTupleSize);
 #ifdef USE_ASSERT_CHECKING
 		memset(&tuple->tuple, 0xa9, sizeof(HeapTupleData));
+		VALGRIND_MAKE_MEM_UNDEFINED(&tuple->tuple, sizeof(HeapTupleData));
 #endif
 		tuple->tuple.t_data = ReorderBufferTupleBufData(tuple);
 #ifdef USE_ASSERT_CHECKING
 		memset(tuple->tuple.t_data, 0xa8, tuple->alloc_tuple_size);
+		VALGRIND_MAKE_MEM_UNDEFINED(tuple->tuple.t_data, tuple->alloc_tuple_size);
 #endif
 	}
 	else
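The VALGRIND_MAKE_MEM_UNDEFINED() calls added in the hunk above address the third point from the commit message: the 0xa9/0xa8 debug memset()s make those bytes look initialized to Valgrind, so later reads of logically uninitialized fields would no longer be reported; re-marking the range as undefined restores that checking. A standalone sketch of the pattern, assuming Valgrind's <valgrind/memcheck.h> is installed (PostgreSQL wraps these calls so they compile to no-ops without Valgrind support):

#include <stdlib.h>
#include <string.h>
#include <valgrind/memcheck.h>

int
main(void)
{
	size_t		len = 64;
	char	   *buf = malloc(len);

	/* debug pattern: after this, Valgrind considers every byte defined */
	memset(buf, 0xa8, len);

	/* undo that, so use of logically uninitialized bytes is still flagged */
	VALGRIND_MAKE_MEM_UNDEFINED(buf, len);

	free(buf);
	return 0;
}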
@@ -2152,7 +2155,7 @@ ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
 					data += sizeof(HeapTupleData);
 
 					memcpy(data, newtup->tuple.t_data, newlen);
-					data += oldlen;
+					data += newlen;
 				}
 				break;
 			}
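The final hunk is the Coverity-reported part: after memcpy()ing newlen bytes of the new tuple into the flat serialization buffer, the cursor was advanced by oldlen, which is harmless here only because data isn't used again afterwards. A standalone sketch of the intended invariant; append_piece() is a hypothetical helper, not the real serialization code:

#include <string.h>

/*
 * Copy a variable-length piece into a flat buffer and advance the cursor by
 * exactly the number of bytes written.  Advancing by any other length (as the
 * pre-fix "data += oldlen" did) would leave the cursor at the wrong offset for
 * anything appended later.
 */
static char *
append_piece(char *cursor, const void *src, size_t len)
{
	memcpy(cursor, src, len);
	return cursor + len;
}

int
main(void)
{
	char		buf[64];
	char	   *cursor = buf;
	const char	oldpart[] = "oldtuple";
	const char	newpart[] = "newtuple";

	cursor = append_piece(cursor, oldpart, sizeof(oldpart));
	cursor = append_piece(cursor, newpart, sizeof(newpart));

	/* cursor now sits exactly sizeof(oldpart) + sizeof(newpart) bytes into buf */
	return (cursor - buf) == sizeof(oldpart) + sizeof(newpart) ? 0 : 1;
}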