author    Nick Terrell <terrelln@meta.com>    2025-03-08 12:09:33 -0800
committer Nick Terrell <terrelln@meta.com>    2025-03-13 13:25:58 -0700
commit    65d1f5507ed2c78c64fce40e44e5574a9419eb09 (patch)
tree      4a1b819db2ea7f2a322e9bcc7582d946d0a4ea29 /lib/zstd/compress/zstd_ldm.h
parent    7eb172143d5508b4da468ed59ee857c6e5e01da6 (diff)
zstd: Import upstream v1.5.7
In addition to keeping the kernel's copy of zstd up to date, this update was requested by Intel to expose upstream's APIs that allow QAT to accelerate the LZ match finding stage of Zstd.

This patch is imported from the upstream tag v1.5.7-kernel [0], which is signed with upstream's signing key EF8FE99528B52FFD [1]. It was imported from upstream using the following commands:

  export ZSTD=/path/to/repo/zstd/
  export LINUX=/path/to/repo/linux/
  cd "$ZSTD/contrib/linux-kernel"
  git checkout v1.5.7-kernel
  make import LINUX="$LINUX"

This patch has been tested on x86-64, and has been boot tested with a zstd-compressed kernel & initramfs on i386 and aarch64. I benchmarked the patch on x86-64 with gcc-14.2.1 on an Intel i9-9900K by measuring the performance of compressed filesystem reads and writes.

  Component, Level, Size delta, C. time delta, D. time delta
  Btrfs    ,     1,     +0.00%,         -6.1%,         +1.4%
  Btrfs    ,     3,     +0.00%,         -9.8%,         +3.0%
  Btrfs    ,     5,     +0.00%,         +1.7%,         +1.4%
  Btrfs    ,     7,     +0.00%,         -1.9%,         +2.7%
  Btrfs    ,     9,     +0.00%,         -3.4%,         +3.7%
  Btrfs    ,    15,     +0.00%,         -0.3%,         +3.6%
  SquashFS ,     1,     +0.00%,           N/A,         +1.9%

The major changes that impact the kernel use cases for each version are:

v1.5.7: https://github.com/facebook/zstd/releases/tag/v1.5.7
* Add zstd_compress_sequences_and_literals() for use by Intel's QAT driver to implement Zstd compression acceleration in the kernel.
* Fix an underflow bug in 32-bit builds that can cause data corruption when processing more than 4GB of data with a single `ZSTD_CCtx` object, when an input crosses the 4GB boundary. I don't believe this impacts any current kernel use cases, because the `ZSTD_CCtx` is typically reconstructed between compressions.
* Levels 1-4 see 5-10% compression speed improvements for inputs smaller than 128KB.

v1.5.6: https://github.com/facebook/zstd/releases/tag/v1.5.6
* Improved compression ratio for the highest compression levels. I don't expect these to see much use, however, due to their slow speeds.

v1.5.5: https://github.com/facebook/zstd/releases/tag/v1.5.5
* Fix a rare corruption bug that can trigger on levels 13 and above.
* Improve compression speed of levels 5-11 on incompressible data.

v1.5.4: https://github.com/facebook/zstd/releases/tag/v1.5.4
* Improve compression speed of levels 5-11 on ARM.
* Improve dictionary compression speed.

Signed-off-by: Nick Terrell <terrelln@fb.com>
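For context, here is a minimal sketch of how a kernel caller typically drives the one-shot compression wrapper in include/linux/zstd.h, the API layer this import keeps current. The helper name and buffer handling are hypothetical, and the new QAT-oriented zstd_compress_sequences_and_literals() entry point is not shown:

/*
 * Illustrative sketch only, not part of this patch: one-shot compression
 * through the kernel's zstd wrapper API (include/linux/zstd.h).
 * Returns the compressed size, or 0 on failure (a convention chosen for
 * this sketch).
 */
#include <linux/zstd.h>
#include <linux/vmalloc.h>

static size_t zstd_compress_oneshot_sketch(void *dst, size_t dst_capacity,
					   const void *src, size_t src_size,
					   int level)
{
	zstd_parameters params = zstd_get_params(level, src_size);
	size_t wksp_size = zstd_cctx_workspace_bound(&params.cParams);
	void *wksp = vmalloc(wksp_size);
	zstd_cctx *cctx;
	size_t ret = 0;

	if (!wksp)
		return 0;

	cctx = zstd_init_cctx(wksp, wksp_size);
	if (cctx) {
		size_t const csize = zstd_compress_cctx(cctx, dst, dst_capacity,
							src, src_size, &params);
		if (!zstd_is_error(csize))
			ret = csize;
	}

	vfree(wksp);
	return ret;
}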
Diffstat (limited to 'lib/zstd/compress/zstd_ldm.h')
-rw-r--r--  lib/zstd/compress/zstd_ldm.h | 17
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/lib/zstd/compress/zstd_ldm.h b/lib/zstd/compress/zstd_ldm.h
index fbc6a5e88fd7..41400a7191b2 100644
--- a/lib/zstd/compress/zstd_ldm.h
+++ b/lib/zstd/compress/zstd_ldm.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -11,7 +12,6 @@
#ifndef ZSTD_LDM_H
#define ZSTD_LDM_H
-
#include "zstd_compress_internal.h" /* ldmParams_t, U32 */
#include <linux/zstd.h> /* ZSTD_CCtx, size_t */
@@ -40,7 +40,7 @@ void ZSTD_ldm_fillHashTable(
* sequences.
*/
size_t ZSTD_ldm_generateSequences(
- ldmState_t* ldms, rawSeqStore_t* sequences,
+ ldmState_t* ldms, RawSeqStore_t* sequences,
ldmParams_t const* params, void const* src, size_t srcSize);
/*
@@ -61,9 +61,9 @@ size_t ZSTD_ldm_generateSequences(
* two. We handle that case correctly, and update `rawSeqStore` appropriately.
* NOTE: This function does not return any errors.
*/
-size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_paramSwitch_e useRowMatchFinder,
+size_t ZSTD_ldm_blockCompress(RawSeqStore_t* rawSeqStore,
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_ParamSwitch_e useRowMatchFinder,
void const* src, size_t srcSize);
/*
@@ -73,7 +73,7 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
* Avoids emitting matches less than `minMatch` bytes.
* Must be called for data that is not passed to ZSTD_ldm_blockCompress().
*/
-void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize,
+void ZSTD_ldm_skipSequences(RawSeqStore_t* rawSeqStore, size_t srcSize,
U32 const minMatch);
/* ZSTD_ldm_skipRawSeqStoreBytes():
@@ -81,7 +81,7 @@ void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize,
* Not to be used in conjunction with ZSTD_ldm_skipSequences().
* Must be called for data with is not passed to ZSTD_ldm_blockCompress().
*/
-void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes);
+void ZSTD_ldm_skipRawSeqStoreBytes(RawSeqStore_t* rawSeqStore, size_t nbBytes);
/* ZSTD_ldm_getTableSize() :
* Estimate the space needed for long distance matching tables or 0 if LDM is
@@ -107,5 +107,4 @@ size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize);
void ZSTD_ldm_adjustParameters(ldmParams_t* params,
ZSTD_compressionParameters const* cParams);
-
#endif /* ZSTD_FAST_H */
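To show how the declarations touched above relate, here is a minimal sketch of the intended call order for one block, using the renamed types from this patch (RawSeqStore_t, ZSTD_MatchState_t, SeqStore_t, ZSTD_ParamSwitch_e). It assumes compilation inside lib/zstd/compress/ with the internal headers available; the wrapper name, the caller-supplied sequence buffer, and the unchanged rawSeq element type are assumptions, and the real call sites live in zstd_compress.c:

/*
 * Minimal sketch, not in-tree code: generate long-distance-match sequences
 * for a block, then let ZSTD_ldm_blockCompress() fill the gaps with the
 * regular match finder and emit everything into the seqStore.
 */
#include "zstd_compress_internal.h"  /* RawSeqStore_t, ZSTD_MatchState_t */
#include "zstd_ldm.h"

static size_t ldm_compress_block_sketch(ldmState_t* ldmState,
                                        ldmParams_t const* ldmParams,
                                        ZSTD_MatchState_t* ms,
                                        SeqStore_t* seqStore,
                                        U32 rep[ZSTD_REP_NUM],
                                        ZSTD_ParamSwitch_e useRowMatchFinder,
                                        rawSeq* seqSpace, size_t seqCapacity,
                                        void const* src, size_t srcSize)
{
    /* Caller-provided buffer to hold the raw LDM sequences (hypothetical). */
    RawSeqStore_t rawSeqStore = { .seq = seqSpace, .capacity = seqCapacity };

    /* 1. Scan the block for long-distance matches. */
    {
        size_t const err = ZSTD_ldm_generateSequences(ldmState, &rawSeqStore,
                                                      ldmParams, src, srcSize);
        if (ZSTD_isError(err))
            return err;
    }

    /* 2. Compress the block, honoring the LDM sequences found above;
     *    returns the size of the trailing literals (never an error). */
    return ZSTD_ldm_blockCompress(&rawSeqStore, ms, seqStore, rep,
                                  useRowMatchFinder, src, srcSize);
}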